Talvos  0.1
SPIR-V interpreter and dynamic analysis framework
Memory.cpp
// Copyright (c) 2018 the Talvos developers. All rights reserved.
//
// This file is distributed under a three-clause BSD license. For full license
// terms please see the LICENSE file distributed with this source code.

/// \file Memory.cpp
/// This file defines the Memory class.

#include <algorithm>
#include <cassert>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <sstream>

#include <spirv/unified1/spirv.h>

#include "talvos/Device.h"
#include "talvos/Memory.h"

// TODO: Allow different number of buffer bits depending on address space

/// Number of bits used for the buffer ID.
#define BUFFER_BITS (16)

/// Number of bits used for the address offset.
#define OFFSET_BITS (64 - BUFFER_BITS)
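
// A 64-bit device address therefore encodes a buffer ID in its top
// BUFFER_BITS bits and a byte offset within that buffer in its low
// OFFSET_BITS bits, e.g. buffer 3 at byte offset 0x20 maps to
// (3 << OFFSET_BITS) | 0x20.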

// Macros for locking/unlocking atomic mutexes if necessary.
#define LOCK_ATOMIC_MUTEX(Address)                                             \
  if (this->Scope == MemoryScope::Device)                                      \
    AtomicMutexes[Address % NUM_ATOMIC_MUTEXES].lock()
#define UNLOCK_ATOMIC_MUTEX(Address)                                           \
  if (this->Scope == MemoryScope::Device)                                      \
    AtomicMutexes[Address % NUM_ATOMIC_MUTEXES].unlock()

namespace talvos
{

Memory::Memory(Device &D, MemoryScope Scope) : Dev(D), Scope(Scope)
{
  // Skip the first buffer identifier (0).
  Buffers.resize(1);
}

Memory::~Memory()
{
  // Release all allocations.
  for (size_t Id = 1; Id < Buffers.size(); Id++)
    delete[] Buffers[Id].Data;
}

uint64_t Memory::allocate(uint64_t NumBytes)
{
  std::lock_guard<std::mutex> Lock(Mutex);

  // Allocate buffer.
  Buffer B;
  B.NumBytes = NumBytes;
  B.Data = new uint8_t[NumBytes];

  // Get the next available buffer identifier.
  uint64_t Id;
  if (FreeBuffers.size())
  {
    // Re-use previously released buffer identifier.
    Id = FreeBuffers.back();
    FreeBuffers.pop_back();
    Buffers[Id] = B;
  }
  else
  {
    // Allocate new buffer identifier.
    Id = Buffers.size();
    Buffers.push_back(B);
  }

  return (Id << OFFSET_BITS);
}

template <typename T>
T Memory::atomic(uint64_t Address, uint32_t Opcode, uint32_t Scope,
                 uint32_t Semantics, T Value)
{
  assert(sizeof(T) == 4);

  Dev.reportAtomicAccess(this, Address, 4, Opcode, Scope, Semantics);

  if (!isAccessValid(Address, 4))
  {
    std::stringstream Err;
    Err << "Invalid atomic access of 4 bytes"
        << " at address 0x" << std::hex << Address << " ("
        << scopeToString(this->Scope) << " scope) ";
    Dev.reportError(Err.str());

    return 0;
  }

  // Get pointer to memory location.
  uint64_t Id = (Address >> OFFSET_BITS);
  uint64_t Offset = (Address & (((uint64_t)-1) >> BUFFER_BITS));
  T *Pointer = (T *)(Buffers[Id].Data + Offset);

  LOCK_ATOMIC_MUTEX(Address);

  // Perform atomic operation and store result to memory.
  T OldValue = *Pointer;
  switch (Opcode)
  {
  case SpvOpAtomicAnd:
    *Pointer = OldValue & Value;
    break;
  case SpvOpAtomicExchange:
    *Pointer = Value;
    break;
  case SpvOpAtomicIAdd:
    *Pointer = OldValue + Value;
    break;
  case SpvOpAtomicIDecrement:
    *Pointer = OldValue - 1;
    break;
  case SpvOpAtomicIIncrement:
    *Pointer = OldValue + 1;
    break;
  case SpvOpAtomicISub:
    *Pointer = OldValue - Value;
    break;
  case SpvOpAtomicLoad:
    break;
  case SpvOpAtomicOr:
    *Pointer = OldValue | Value;
    break;
  case SpvOpAtomicSMax:
  case SpvOpAtomicUMax:
    *Pointer = std::max(OldValue, Value);
    break;
  case SpvOpAtomicSMin:
  case SpvOpAtomicUMin:
    *Pointer = std::min(OldValue, Value);
    break;
  case SpvOpAtomicStore:
    *Pointer = Value;
    break;
  case SpvOpAtomicXor:
    *Pointer = OldValue ^ Value;
    break;
  default:
    Dev.reportError("Unhandled atomic operation", true);
  }

  UNLOCK_ATOMIC_MUTEX(Address);

  return OldValue;
}

uint32_t Memory::atomicCmpXchg(uint64_t Address, uint32_t Scope,
                               uint32_t EqualSemantics,
                               uint32_t UnequalSemantics, uint32_t Value,
                               uint32_t Comparator)
{
  if (!isAccessValid(Address, 4))
  {
    // Make sure we still report the access for any plugins to observe.
    Dev.reportAtomicAccess(this, Address, 4, SpvOpAtomicCompareExchange, Scope,
                           UnequalSemantics);

    std::stringstream Err;
    Err << "Invalid atomic access of 4 bytes"
        << " at address 0x" << std::hex << Address << " ("
        << scopeToString(this->Scope) << " scope) ";
    Dev.reportError(Err.str());

    return 0;
  }

  // Get pointer to memory location.
  uint64_t Id = (Address >> OFFSET_BITS);
  uint64_t Offset = (Address & (((uint64_t)-1) >> BUFFER_BITS));
  uint32_t *Pointer = (uint32_t *)(Buffers[Id].Data + Offset);

  LOCK_ATOMIC_MUTEX(Address);

  // Compare values and exchange if necessary.
  uint32_t OldValue = *Pointer;
  if (OldValue == Comparator)
  {
    Dev.reportAtomicAccess(this, Address, 4, SpvOpAtomicCompareExchange, Scope,
                           EqualSemantics);
    *Pointer = Value;
  }
  else
  {
    Dev.reportAtomicAccess(this, Address, 4, SpvOpAtomicCompareExchange, Scope,
                           UnequalSemantics);
  }

  UNLOCK_ATOMIC_MUTEX(Address);

  return OldValue;
}

void Memory::dump() const
{
  for (size_t Id = 1; Id < Buffers.size(); Id++)
  {
    if (Buffers[Id].Data)
      dump(Id << OFFSET_BITS);
  }
}

void Memory::dump(uint64_t Address) const
{
  uint64_t Id = (Address >> OFFSET_BITS);

  if (!Buffers[Id].Data)
  {
    std::cerr << "Memory::dump() invalid address: " << Address << std::endl;
    return;
  }

  for (uint64_t i = 0; i < Buffers[Id].NumBytes; i++)
  {
    if (i % 4 == 0)
    {
      std::cout << std::endl
                << std::hex << std::uppercase << std::setw(16)
                << std::setfill(' ') << std::right
                << ((((uint64_t)Id) << OFFSET_BITS) | i) << ":";
    }
    std::cout << " " << std::hex << std::uppercase << std::setw(2)
              << std::setfill('0') << (int)Buffers[Id].Data[i];
  }
  std::cout << std::endl;
}

bool Memory::isAccessValid(uint64_t Address, uint64_t NumBytes) const
{
  uint64_t Id = (Address >> OFFSET_BITS);
  uint64_t Offset = (Address & (((uint64_t)-1) >> BUFFER_BITS));
  if (Id >= Buffers.size())
    return false;
  if (!Buffers[Id].Data)
    return false;
  if ((Offset + NumBytes) > Buffers[Id].NumBytes)
    return false;
  return true;
}

void Memory::load(uint8_t *Data, uint64_t Address, uint64_t NumBytes) const
{
  uint64_t Id = (Address >> OFFSET_BITS);
  uint64_t Offset = (Address & (((uint64_t)-1) >> BUFFER_BITS));

  Dev.reportMemoryLoad(this, Address, NumBytes);

  if (!isAccessValid(Address, NumBytes))
  {
    std::stringstream Err;
    Err << "Invalid load of " << NumBytes << " bytes"
        << " from address 0x" << std::hex << Address << " ("
        << scopeToString(Scope) << " scope) ";
    Dev.reportError(Err.str());

    // Zero output data to conform to robust buffer access feature.
    memset(Data, 0, NumBytes);

    return;
  }

  memcpy(Data, Buffers[Id].Data + Offset, NumBytes);
}

uint8_t *Memory::map(uint64_t Base, uint64_t Offset, uint64_t NumBytes)
{
  uint64_t Id = (Base >> OFFSET_BITS);

  Dev.reportMemoryMap(this, Base, Offset, NumBytes);

  if (!isAccessValid(Base + Offset, NumBytes))
  {
    std::stringstream Err;
    Err << "Invalid mapping of " << NumBytes << " bytes"
        << " from address 0x" << std::hex << (Base + Offset) << " ("
        << scopeToString(Scope) << " scope) ";
    Dev.reportError(Err.str());
    return nullptr;
  }

  return Buffers[Id].Data + Offset;
}

void Memory::release(uint64_t Address)
{
  std::lock_guard<std::mutex> Lock(Mutex);

  uint64_t Id = (Address >> OFFSET_BITS);
  assert(Buffers[Id].Data != nullptr);

  // Release memory used by buffer.
  delete[] Buffers[Id].Data;
  Buffers[Id].Data = nullptr;

  FreeBuffers.push_back(Id);
}

void Memory::store(uint64_t Address, uint64_t NumBytes, const uint8_t *Data)
{
  uint64_t Id = (Address >> OFFSET_BITS);
  uint64_t Offset = (Address & (((uint64_t)-1) >> BUFFER_BITS));

  Dev.reportMemoryStore(this, Address, NumBytes, Data);

  if (!isAccessValid(Address, NumBytes))
  {
    std::stringstream Err;
    Err << "Invalid store of " << NumBytes << " bytes"
        << " to address 0x" << std::hex << Address << " ("
        << scopeToString(Scope) << " scope) ";
    Dev.reportError(Err.str());
    return;
  }

  memcpy(Buffers[Id].Data + Offset, Data, NumBytes);
}

void Memory::unmap(uint64_t Base) { Dev.reportMemoryUnmap(this, Base); }

void Memory::copy(uint64_t DstAddress, Memory &DstMem, uint64_t SrcAddress,
                  const Memory &SrcMem, uint64_t NumBytes)
{
  uint64_t SrcId = (SrcAddress >> OFFSET_BITS);
  uint64_t SrcOffset = (SrcAddress & (((uint64_t)-1) >> BUFFER_BITS));

  SrcMem.Dev.reportMemoryLoad(&SrcMem, SrcAddress, NumBytes);

  if (!SrcMem.isAccessValid(SrcAddress, NumBytes))
  {
    std::stringstream Err;
    Err << "Invalid load of " << NumBytes << " bytes"
        << " from address 0x" << std::hex << SrcAddress << " ("
        << scopeToString(SrcMem.Scope) << " scope) ";
    SrcMem.Dev.reportError(Err.str());
    return;
  }

  DstMem.store(DstAddress, NumBytes, SrcMem.Buffers[SrcId].Data + SrcOffset);
}

// Explicit instantiations for types valid for atomic operations.
template uint32_t Memory::atomic(uint64_t Address, uint32_t Opcode,
                                 uint32_t Scope, uint32_t Semantics,
                                 uint32_t Value);
template int32_t Memory::atomic(uint64_t Address, uint32_t Opcode,
                                uint32_t Scope, uint32_t Semantics,
                                int32_t Value);

} // namespace talvos
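
For reference, the 64-bit addresses handed out by Memory::allocate() pack a buffer identifier into the top BUFFER_BITS bits and a byte offset into the remaining OFFSET_BITS bits. The standalone sketch below mirrors that encoding so it can be compiled and experimented with on its own; the helper names (makeAddress, bufferId, bufferOffset) are illustrative only and are not part of the Talvos API.

#include <cassert>
#include <cstdint>
#include <iostream>

// Mirror of the constants defined at the top of Memory.cpp.
constexpr unsigned BufferBits = 16;
constexpr unsigned OffsetBits = 64 - BufferBits;

// Combine a buffer ID and byte offset into a single 64-bit device address,
// as Memory::allocate() and the load/store helpers do.
constexpr uint64_t makeAddress(uint64_t Id, uint64_t Offset)
{
  return (Id << OffsetBits) | Offset;
}

// Decode the two components again, using the same shift/mask scheme as
// Memory::isAccessValid().
constexpr uint64_t bufferId(uint64_t Address) { return Address >> OffsetBits; }
constexpr uint64_t bufferOffset(uint64_t Address)
{
  return Address & (~uint64_t(0) >> BufferBits);
}

int main()
{
  uint64_t Base = makeAddress(3, 0); // base address of the third buffer
  uint64_t Address = Base + 0x20;    // byte 0x20 within that buffer

  assert(bufferId(Address) == 3);
  assert(bufferOffset(Address) == 0x20);

  std::cout << std::hex << Address << std::endl; // prints 3000000000020
  return 0;
}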