implement MemMMap.mmap_syscall
[openpower-isa.git] / src / openpower / decoder / isa / mem.py
1 # SPDX-License-Identifier: LGPLv3+
2 # Copyright (C) 2020, 2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
3 # Funded by NLnet http://nlnet.nl
4 """core of the python-based POWER9 simulator
5
6 this is part of a cycle-accurate POWER9 simulator. its primary purpose is
7 not speed, it is for both learning and educational purposes, as well as
8 a method of verifying the HDL.
9
10 related bugs:
11
12 * https://bugs.libre-soc.org/show_bug.cgi?id=424
13 """
14
15 from collections import defaultdict
16 from openpower.decoder.selectable_int import SelectableInt
17 from openpower.util import log, LogType
18 import math
19 import enum
20 from cached_property import cached_property
21 import mmap
22 from pickle import PicklingError
23 import ctypes
24 from nmutil import plain_data
25 from pathlib import Path
26 from openpower.syscalls import ppc_flags
27 import os
28
29
def swap_order(x, nbytes):
    """Return integer *x* with its *nbytes* bytes reversed (endian swap)."""
    as_bytes = x.to_bytes(nbytes, byteorder='big')
    return int.from_bytes(as_bytes, byteorder='little', signed=False)
34
35
class MemException(Exception):
    """raised on invalid simulator memory accesses (e.g. misaligned
    load/store, or a non-canonical address); where applicable, the
    `dar` attribute is set to the faulting byte address"""
    pass
38
39
def process_mem(initial_mem, row_bytes=8):
    """Normalise any accepted initial-memory layout to {addr: (val, width)}.

    Accepted input forms (for convenience):
    * list of row values            -> consecutive rows starting at address 0
    * (start_addr, [row values])    -> consecutive rows from start_addr
    * {addr: value} or {addr: (value, width_in_bytes)}

    Values without an explicit width get `row_bytes`.
    """
    if isinstance(initial_mem, list):
        initial_mem = (0, initial_mem)
    if isinstance(initial_mem, tuple):
        startaddr, rows = initial_mem
        initial_mem = {startaddr + row_bytes * i: (row, row_bytes)
                       for i, row in enumerate(rows)}

    normalised = {}
    for addr, entry in initial_mem.items():
        if isinstance(entry, tuple):
            value, width = entry
        else:
            value, width = entry, row_bytes  # assume same width
        # value = swap_order(value, width)
        normalised[addr] = (value, width)

    return normalised
60
61
@enum.unique
class _ReadReason(enum.Enum):
    """why memory is being read -- determines both the default value for
    unmapped words and which page permission the access requires"""
    Read = enum.auto()          # normal data load
    SubWordWrite = enum.auto()  # read-modify-write of a partial word
    Dump = enum.auto()          # memory dump / logging
    Execute = enum.auto()       # instruction fetch

    @cached_property
    def read_default(self):
        # value returned for an unmapped word: 0 where a concrete value is
        # required (RMW merge, dumps), None where callers check for absence
        if self in (self.SubWordWrite, self.Dump):
            return 0
        return None

    @cached_property
    def needed_mmap_page_flag(self):
        # page-permission bit this kind of read requires (X for ifetch)
        if self is self.Execute:
            return MMapPageFlags.X
        return MMapPageFlags.R
80
81
class MemCommon:
    """Abstract base for the simulator's main memory.

    Memory is modelled as an array of `row_bytes`-wide words, addressed
    internally by "word index" (byte address >> word_log2).  Subclasses
    provide the backing store via `_read_word`, `_write_word` and
    `word_idxs`; this class layers byte-addressed loads/stores on top,
    including sub-word accesses and (optionally, for stores) misaligned
    accesses that span a word boundary.
    """

    def __init__(self, row_bytes, initial_mem, misaligned_ok):
        self.bytes_per_word = row_bytes
        self.word_log2 = math.ceil(math.log2(row_bytes))
        self.last_ld_addr = None  # byte address of the most recent load
        self.last_st_addr = None  # byte address of the most recent store
        self.misaligned_ok = misaligned_ok
        log("Sim-Mem", initial_mem, self.bytes_per_word, self.word_log2)
        if not initial_mem:
            return

        self.initialize(row_bytes, initial_mem)

    def initialize(self, row_bytes, initial_mem):
        """store initial contents (any format process_mem accepts),
        without byte-swapping"""
        for addr, (val, width) in process_mem(initial_mem, row_bytes).items():
            # val = swap_order(val, width)
            self.st(addr, val, width, swap=False)

    def _read_word(self, word_idx, reason):
        # subclass responsibility: return the word at word_idx, or a
        # reason-appropriate default (reason.read_default) when unmapped
        raise NotImplementedError

    def _write_word(self, word_idx, value):
        # subclass responsibility: store a full word at word_idx
        raise NotImplementedError

    def word_idxs(self):
        # subclass responsibility: iterate over occupied word indexes.
        # the unreachable `yield` marks this stub as a generator
        raise NotImplementedError
        yield 0

    def _get_shifter_mask(self, wid, remainder):
        """return (shift-in-bits, value-mask) selecting a `wid`-byte field
        at byte offset `remainder` within a word"""
        # NOTE: this first (big-endian style) shifter is deliberately
        # superseded by the little-endian one below, pending bug 377
        shifter = ((self.bytes_per_word - wid) - remainder) * \
            8  # bits per byte
        # XXX https://bugs.libre-soc.org/show_bug.cgi?id=377
        # BE/LE mode?
        shifter = remainder * 8
        mask = (1 << (wid * 8)) - 1
        log("width,rem,shift,mask", wid, remainder, hex(shifter), hex(mask))
        return shifter, mask

    # TODO: Implement ld/st of lesser width
    def ld(self, address, width=8, swap=True, check_in_mem=False,
           instr_fetch=False, reason=None):
        """load `width` bytes from byte address `address`.

        * swap: byte-reverse the loaded value
        * check_in_mem: return None (instead of 0) when unmapped
        * instr_fetch: instruction fetch (selects the Execute read reason)
        * reason: explicit _ReadReason, overrides instr_fetch

        raises MemException (with dar set) on a misaligned access.
        NOTE(review): unlike _st, loads do not honour misaligned_ok --
        confirm that loads are required to always be aligned.
        """
        log("ld from addr 0x%x width %d" % (address, width),
            swap, check_in_mem, instr_fetch)
        self.last_ld_addr = address  # record last load
        ldaddr = address
        remainder = address & (self.bytes_per_word - 1)
        address = address >> self.word_log2
        if remainder & (width - 1) != 0:
            exc = MemException("unaligned",
                               "Unaligned access: remainder %x width %d" %
                               (remainder, width))
            exc.dar = ldaddr
            raise exc
        if reason is None:
            reason = _ReadReason.Execute if instr_fetch else _ReadReason.Read
        val = self._read_word(address, reason)
        if val is None:
            if check_in_mem:
                return None
            else:
                val = 0
        log("ld mem @ 0x%x rem %d : 0x%x" % (ldaddr, remainder, val))

        if width != self.bytes_per_word:
            # extract the sub-word field from the containing word
            shifter, mask = self._get_shifter_mask(width, remainder)
            log("masking", hex(val), hex(mask << shifter), shifter)
            val = val & (mask << shifter)
            val >>= shifter
        if swap:
            val = swap_order(val, width)
        log("Read 0x%x from addr 0x%x" % (val, ldaddr))
        return val

    def _st(self, addr, v, width=8, swap=True):
        """aligned store of `width` bytes at byte address `addr`.
        Sub-word stores read-modify-write the containing word."""
        staddr = addr
        remainder = addr & (self.bytes_per_word - 1)
        addr = addr >> self.word_log2
        log("Writing 0x%x to ST 0x%x memaddr 0x%x/%x swap %s" %
            (v, staddr, addr, remainder, str(swap)))
        if not self.misaligned_ok and remainder & (width - 1) != 0:
            exc = MemException("unaligned",
                               "Unaligned access: remainder %x width %d" %
                               (remainder, width))
            exc.dar = staddr
            raise exc
        if swap:
            v = swap_order(v, width)
        if width != self.bytes_per_word:
            # read-modify-write: merge the new field into the old word
            val = self._read_word(addr, _ReadReason.SubWordWrite)
            shifter, mask = self._get_shifter_mask(width, remainder)
            val &= ~(mask << shifter)
            val |= v << shifter
            self._write_word(addr, val)
        else:
            val = v
            self._write_word(addr, v)
        log("mem @ 0x%x: 0x%x" % (staddr, val))

    def st(self, st_addr, v, width=8, swap=True):
        """store `v` (`width` bytes) at byte address st_addr.  When
        misaligned_ok, a store that crosses a word boundary is split
        into two aligned sub-word stores (lower part first)."""
        self.last_st_addr = st_addr  # record last store
        # misaligned not allowed: pass straight to Mem._st
        if not self.misaligned_ok:
            return self._st(st_addr, v, width, swap)
        remainder = st_addr & (self.bytes_per_word - 1)
        if swap:
            v = swap_order(v, width)
        # not misaligned: pass through to Mem._st but we've swapped already
        misaligned = remainder & (width - 1)
        if misaligned == 0 or (remainder + width <= self.bytes_per_word):
            return self._st(st_addr, v, width, swap=False)
        shifter, mask = self._get_shifter_mask(width, remainder)
        # split into two halves. lower first
        maxmask = (1 << (self.bytes_per_word)*8) - 1
        # low (width-misaligned) bytes fill the rest of the current word
        val1 = ((v << shifter) & maxmask) >> shifter
        self._st(st_addr, val1, width=width-misaligned, swap=False)
        # now upper.
        # NOTE(review): the remaining byte count here is `misaligned`, yet
        # width-misaligned is used -- correct only for the symmetric case
        # (e.g. width==bytes_per_word, remainder==width/2); confirm for
        # asymmetric splits.  the print below looks like a debug leftover.
        val2 = v >> ((width-misaligned)*8)
        addr2 = (st_addr >> self.word_log2) << self.word_log2
        addr2 += self.bytes_per_word
        print("v, val2", hex(v), hex(val2), "ad", addr2)
        self._st(addr2, val2, width=width-misaligned, swap=False)

    def __call__(self, addr, sz):
        """read `sz` bytes at SelectableInt `addr`; returns a SelectableInt"""
        val = self.ld(addr.value, sz, swap=False)
        log("memread", addr, sz, hex(val), kind=LogType.InstrInOuts)
        return SelectableInt(val, sz*8)

    def memassign(self, addr, sz, val):
        """store SelectableInt `val` (`sz` bytes) at SelectableInt `addr`"""
        log("memassign", addr, sz, val, kind=LogType.InstrInOuts)
        self.st(addr.value, val.value, sz, swap=False)

    def dump(self, printout=True, asciidump=False):
        """return [(byte_addr, word_value)] for every occupied word, in
        address order; optionally print a hexdump (asciidump adds the
        printable-character column)"""
        keys = list(self.word_idxs())
        keys.sort()
        res = []
        for k in keys:
            v = self._read_word(k, _ReadReason.Dump)
            # NOTE(review): the *8 and range(8) below assume 8-byte words
            # rather than self.bytes_per_word -- confirm intended
            res.append((k*8, v))
            if not printout:
                continue
            s = ""
            if asciidump:
                for i in range(8):
                    c = chr(v >> (i*8) & 0xff)
                    if not c.isprintable():
                        c = "."
                    s += c
            print("%016x: %016x" % ((k*8) & 0xffffffffffffffff, v), s)
        return res

    def log_fancy(self, *, kind=LogType.Default, name="Memory",
                  log2_line_size=4, log2_column_chunk_size=3, log=log):
        """log a hexdump-style view of all occupied memory: one line per
        2**log2_line_size bytes, grouped into 2**log2_column_chunk_size
        byte columns, with an ASCII sidebar and `*` marking gaps"""
        line_size = 1 << log2_line_size
        subline_mask = line_size - 1
        column_chunk_size = 1 << log2_column_chunk_size

        def make_line():
            return bytearray(line_size)
        mem_lines = defaultdict(make_line)
        subword_range = range(1 << self.word_log2)
        # gather every byte of every occupied word into per-line buffers
        for k in self.word_idxs():
            addr = k << self.word_log2
            for _ in subword_range:
                v = self.ld(addr, width=1, reason=_ReadReason.Dump)
                mem_lines[addr >> log2_line_size][addr & subline_mask] = v
                addr += 1

        lines = []
        last_line_index = None
        for line_index in sorted(mem_lines.keys()):
            line_addr = line_index << log2_line_size
            if last_line_index is not None \
                    and last_line_index + 1 != line_index:
                lines.append("*")  # gap marker, like hexdump(1)
            last_line_index = line_index
            line_bytes = mem_lines[line_index]
            line_str = f"0x{line_addr:08X}:"
            for col_chunk in range(0, line_size,
                                   column_chunk_size):
                line_str += " "
                for i in range(column_chunk_size):
                    line_str += f" {line_bytes[col_chunk + i]:02X}"
            line_str += "  |"
            for i in range(line_size):
                if 0x20 <= line_bytes[i] <= 0x7E:
                    line_str += chr(line_bytes[i])
                else:
                    line_str += "."
            line_str += "|"
            lines.append(line_str)
        lines = "\n".join(lines)
        log(f"\n{name}:\n{lines}\n", kind=kind)
274
275
class Mem(MemCommon):
    """simulator memory backed by a plain dict mapping word-index to
    word-value; sparse, so only words ever written are present"""

    def __init__(self, row_bytes=8, initial_mem=None, misaligned_ok=False):
        # the backing dict must exist before MemCommon.__init__, which
        # may immediately store initial_mem into it
        self.mem = {}
        super().__init__(row_bytes, initial_mem, misaligned_ok)

    def _read_word(self, word_idx, reason):
        # EAFP: absent words fall back to the reason-specific default
        try:
            return self.mem[word_idx]
        except KeyError:
            return reason.read_default

    def _write_word(self, word_idx, value):
        self.mem[word_idx] = value

    def word_idxs(self):
        return self.mem.keys()
289
290
class MMapPageFlags(enum.IntFlag):
    """ flags on each mmap-ped page

    Note: these are *not* PowerISA MMU pages, but instead internal to Mem so
    it can detect invalid accesses and assert rather than segfaulting.
    """
    R = 1
    W = 2
    X = 4
    "readable when instr_fetch=True"

    S = 8
    "shared -- aka. not copy-on-write"

    GROWS_DOWN = 16
    """this memory block will grow when the address one page before the
    beginning is accessed"""

    # convenience aliases
    RWX = R | W | X
    NONE = 0
311
312
# flag sets permitted for ordinary (possibly file-backed) mappings vs
# anonymous stack-style (GROWS_DOWN) mappings
_ALLOWED_MMAP_NORMAL_FLAGS = MMapPageFlags.RWX | MMapPageFlags.S
_ALLOWED_MMAP_STACK_FLAGS = MMapPageFlags.RWX | MMapPageFlags.GROWS_DOWN


MMAP_PAGE_SIZE = 1 << 16  # size of chunk that we track
_PAGE_COUNT = (1 << 48) // MMAP_PAGE_SIZE  # 48-bit address space
_NEG_PG_IDX_START = _PAGE_COUNT // 2  # start of negative half of address space

# code assumes BLOCK_SIZE is a power of two
# BLOCK_SIZE = 1 << 32
BLOCK_SIZE = 1 << 28  # reduced so it works on armv7a

assert BLOCK_SIZE % MMAP_PAGE_SIZE == 0
# default host-mmap block placement: one block at each end of user space
DEFAULT_BLOCK_ADDRS = (
    0,  # low end of user space
    2 ** 47 - BLOCK_SIZE,  # high end of user space
)
330
331
@plain_data.plain_data(frozen=True, unsafe_hash=True)
class MMapEmuBlock:
    """one emulated-mmap mapping: a page-aligned, non-empty address range
    plus its permission flags and optional backing file + file offset.
    Immutable (frozen); use plain_data.replace to derive modified copies."""
    __slots__ = ("addrs", "flags", "file", "file_off")

    def __init__(self, addrs, flags=MMapPageFlags.NONE, file=None, file_off=0):
        # type: (range, MMapPageFlags, Path | str | None, int) -> None
        # validate the address range: contiguous, non-empty, page-aligned,
        # within 64 bits, and contained in a single underlying host block
        if addrs.step != 1:
            raise ValueError("bad address range, step must be 1")
        if len(addrs) <= 0:
            raise ValueError("bad address range, must be non-empty")
        if addrs.start < 0:
            raise ValueError("bad address range, must be non-negative")
        if addrs.stop > 2 ** 64:
            raise ValueError("bad address range -- goes beyond 2 ** 64")
        if addrs.start % MMAP_PAGE_SIZE:
            raise ValueError("bad address range -- start isn't page-aligned")
        if addrs.stop % MMAP_PAGE_SIZE:
            raise ValueError("bad address range -- stop isn't page-aligned")
        if addrs[0] // BLOCK_SIZE != addrs[-1] // BLOCK_SIZE:
            raise ValueError(
                "bad address range -- crosses underlying block boundaries")
        if file is not None:
            # file-backed mapping: validate the offset; GROWS_DOWN and
            # other stack-only flags are not allowed with a file
            if file_off < 0:
                raise ValueError("bad file_off, must be non-negative")
            if file_off % MMAP_PAGE_SIZE:
                raise ValueError("bad file_off, must be page-aligned")
            if flags & ~_ALLOWED_MMAP_NORMAL_FLAGS:
                raise ValueError("invalid flags for mmap with file")
            file = Path(file)
        else:
            if flags & ~_ALLOWED_MMAP_NORMAL_FLAGS:
                if flags & ~_ALLOWED_MMAP_STACK_FLAGS:
                    raise ValueError("invalid flags for anonymous mmap")
            file_off = 0  # no file -- clear offset
        self.addrs = addrs
        self.flags = flags
        self.file = file
        self.file_off = file_off
        self.page_indexes  # check that addresses can be mapped to pages

    def intersects(self, other):
        # type: (MMapEmuBlock) -> bool
        """True if the two blocks' address ranges overlap at all"""
        return (other.addrs.start < self.addrs.stop
                and self.addrs.start < other.addrs.stop)

    @property
    def is_private_mem(self):
        """True for anonymous, non-shared memory"""
        return self.file is None and not self.flags & MMapPageFlags.S

    @property
    def underlying_block_key(self):
        """start address of the mem_blocks entry containing this block"""
        offset = self.addrs.start % BLOCK_SIZE
        return self.addrs.start - offset

    @property
    def underlying_block_offsets(self):
        """byte offsets of this block within its underlying host block"""
        start = self.addrs.start % BLOCK_SIZE
        return range(start, start + len(self.addrs))

    @property
    def page_indexes(self):
        """range of mmap-page indexes covered by this block; raises if the
        block would span the canonical-address sign-change gap"""
        first_page = MemMMap.addr_to_mmap_page_idx(self.addrs[0])
        # can't just use stop, since that may be out-of-range
        last_page = MemMMap.addr_to_mmap_page_idx(self.addrs[-1])
        if first_page < _NEG_PG_IDX_START and last_page >= _NEG_PG_IDX_START:
            raise ValueError(
                "bad address range, crosses transition from positive "
                "canonical addresses to negative canonical addresses")
        return range(first_page, last_page + 1)

    def difference(self, remove):
        # type: (MMapEmuBlock) -> list[MMapEmuBlock]
        """returns the blocks left after removing `remove` from `self`"""
        if not self.intersects(remove):
            return [self]
        retval = []
        # piece below `remove`
        addrs = range(self.addrs.start, remove.addrs.start)
        if len(addrs):
            retval.append(plain_data.replace(self, addrs=addrs))
        # piece above `remove` -- keep the file offset in sync
        addrs = range(remove.addrs.stop, self.addrs.stop)
        if len(addrs):
            file_off = self.file_off + addrs.start - self.addrs.start
            retval.append(plain_data.replace(
                self, addrs=addrs, file_off=file_off))
        return retval
417
418
# stuff marked "not available" is not in the powerpc64le headers on my system
# mask of all the mmap flags that the legacy (non-MAP_SHARED_VALIDATE)
# mmap syscall understands
LEGACY_MAP_MASK = (
    ppc_flags.MAP_SHARED
    | ppc_flags.MAP_PRIVATE
    | ppc_flags.MAP_FIXED
    | ppc_flags.MAP_ANONYMOUS
    | ppc_flags.MAP_DENYWRITE
    | ppc_flags.MAP_EXECUTABLE
    # | ppc_flags.MAP_UNINITIALIZED # not available -- ignored for now
    | ppc_flags.MAP_GROWSDOWN
    | ppc_flags.MAP_LOCKED
    | ppc_flags.MAP_NORESERVE
    | ppc_flags.MAP_POPULATE
    | ppc_flags.MAP_NONBLOCK
    | ppc_flags.MAP_STACK
    | ppc_flags.MAP_HUGETLB
    # | ppc_flags.MAP_32BIT # not available -- ignored for now
    # | ppc_flags.MAP_ABOVE4G # not available -- ignored for now
    # | ppc_flags.MAP_HUGE_2MB # not available -- ignored for now
    # | ppc_flags.MAP_HUGE_1GB # not available -- ignored for now
)

# flags requesting an automatically-growing mapping
_MAP_GROWS = ppc_flags.MAP_GROWSDOWN
# _MAP_GROWS |= ppc_flags.MAP_GROWSUP # not available -- ignored for now
443
def len_(r):
    """ len(), but with fix for len(range(2**64)) raising OverflowError """
    try:
        return len(r)
    except OverflowError:
        # only ranges overflow len(); compute the length arithmetically
        assert isinstance(r, range)
        span = r.stop - r.start
        return 1 + (span - 1) // r.step
451
452
class MemMMap(MemCommon):
    """Simulator memory backed by anonymous host `mmap` blocks, optionally
    emulating the guest mmap/brk syscalls.

    The 48-bit simulated address space is covered by a few large host mmap
    blocks (`mem_blocks`); per-page permissions are tracked at
    MMAP_PAGE_SIZE granularity in `__page_flags` so invalid guest accesses
    raise/assert instead of segfaulting the host process.
    """

    def __init__(self, row_bytes=8, initial_mem=None, misaligned_ok=False,
                 block_addrs=DEFAULT_BLOCK_ADDRS, emulating_mmap=False,
                 mmap_emu_data_block=None):
        # we can't allocate the entire 2 ** 47 byte address space, so split
        # it into smaller blocks
        self.mem_blocks = {
            addr: mmap.mmap(-1, BLOCK_SIZE) for addr in sorted(block_addrs)}
        assert all(addr % BLOCK_SIZE == 0 for addr in self.mem_blocks), \
            "misaligned block address not supported"
        self.__page_flags = {}  # page_idx -> MMapPageFlags
        self.modified_pages = set()  # page_idx of every page ever written
        self.mmap_emu_data_block = mmap_emu_data_block
        self.__mmap_emu_alloc_blocks = set()  # type: set[MMapEmuBlock] | None

        # build the list of unbacked blocks -- those address ranges that have
        # no backing memory so mmap can't allocate there. These are maintained
        # separately from __mmap_emu_alloc_blocks so munmap/mremap can't
        # remove/modify them
        addr_ranges = [
            range(a, a + len(b)) for a, b in self.mem_blocks.items()]
        self.__mmap_emu_unbacked_blocks = tuple(self.__gaps_in(addr_ranges))

        if emulating_mmap:
            if mmap_emu_data_block is not None:
                if not isinstance(mmap_emu_data_block, MMapEmuBlock):
                    raise TypeError(
                        "mmap_emu_data_block must be a MMapEmuBlock")
                if mmap_emu_data_block.file is not None:
                    raise ValueError(
                        "mmap_emu_data_block must be an anonymous mapping")
                if not self.__mmap_emu_map_fixed(block=mmap_emu_data_block,
                                                 replace=False,
                                                 dry_run=False):
                    raise ValueError("invalid mmap_emu_data_block")
        else:
            self.__mmap_emu_alloc_blocks = None
            if mmap_emu_data_block is not None:
                raise ValueError("can't set mmap_emu_data_block "
                                 "without emulating_mmap=True")
            # mark blocks as readable/writable
            for addr, block in self.mem_blocks.items():
                start_page = self.addr_to_mmap_page_idx(addr)
                end_page = start_page + len(block) // MMAP_PAGE_SIZE
                for page_idx in range(start_page, end_page):
                    self.__page_flags[page_idx] = MMapPageFlags.RWX

        super().__init__(row_bytes, initial_mem, misaligned_ok)

    @staticmethod
    def __gaps_in(sorted_ranges, start=0, stop=2 ** 64):
        # type: (list[range] | tuple[range], int, int) -> list[range]
        """return the gaps in [start, stop) not covered by `sorted_ranges`
        (which must be sorted by start and non-overlapping)"""
        # (fixed: a stray `start = 0` used to override the parameter here)
        gaps = []
        for r in sorted_ranges:
            gap = range(start, r.start)
            # len_ rather than len: a gap can exceed sys.maxsize
            if len_(gap):
                gaps.append(gap)
            start = r.stop
        gap = range(start, stop)
        if len_(gap):
            gaps.append(gap)
        return gaps

    @property
    def emulating_mmap(self):
        # mmap emulation is enabled iff the allocation set exists
        return self.__mmap_emu_alloc_blocks is not None

    def __mmap_emu_map_fixed(self, block, replace, dry_run):
        # type: (MMapEmuBlock, bool, bool) -> bool
        """insert the block at the fixed address passed in, replacing the
        parts of any other blocks that overlap if `replace` is `True`.

        If `dry_run`, then don't make any changes, just check if it would
        succeed.

        This function requires the caller to check `block`'s permissions and to
        perform the underlying `mmap` first.
        """
        if block.underlying_block_key not in self.mem_blocks:
            return False  # unbacked block
        # intersecting_blocks must be separate list so we don't iterate while
        # we modify self.__mmap_emu_alloc_blocks
        intersecting_blocks = [
            b for b in self.__mmap_emu_alloc_blocks if block.intersects(b)]
        for b in intersecting_blocks:
            if not replace:
                return False
            if self.mmap_emu_data_block == b:
                # FIXME: what does linux do here?
                raise NotImplementedError(
                    "mmap overlapping the data block isn't implemented")
            if not dry_run:
                self.__mmap_emu_alloc_blocks.remove(b)
                for replacement in b.difference(block):
                    self.__mmap_emu_alloc_blocks.add(replacement)
        if not dry_run:
            self.__mmap_emu_alloc_blocks.add(block)
            for page_idx in block.page_indexes:
                self.__page_flags[page_idx] = block.flags
        return True

    def __mmap_emu_resize_map_fixed(self, block, new_size):
        # type: (MMapEmuBlock, int) -> MMapEmuBlock | None
        """resize `block` in place to `new_size` bytes; returns the new
        block, or None if the needed space isn't free.  Only private
        (anonymous, non-shared) memory is currently supported."""
        assert block in self.__mmap_emu_alloc_blocks, \
            "can't resize unmapped block"
        if new_size == len(block.addrs):
            return block
        addrs = range(block.addrs.start, block.addrs.start + new_size)
        new_block = plain_data.replace(block, addrs=addrs)
        # temporarily remove `block` so the dry-run doesn't collide with it
        self.__mmap_emu_alloc_blocks.remove(block)
        try:
            if not self.__mmap_emu_map_fixed(
                    new_block, replace=False, dry_run=True):
                return None
        finally:
            self.__mmap_emu_alloc_blocks.add(block)
        if not block.is_private_mem:
            # FIXME: implement resizing underlying mapping
            raise NotImplementedError
        else:
            # clear newly mapped bytes
            clear_addrs = range(block.addrs.stop, new_block.addrs.stop)
            if len(clear_addrs):
                clear_block = MMapEmuBlock(clear_addrs)
                mem_block = self.mem_blocks[clear_block.underlying_block_key]
                assert mem_block is not None
                clear_size = len(clear_addrs)
                arr = (ctypes.c_ubyte * clear_size).from_buffer(
                    mem_block, clear_block.underlying_block_offsets.start)
                ctypes.memset(arr, 0, clear_size)
        if self.mmap_emu_data_block == block:
            self.mmap_emu_data_block = new_block
        self.__mmap_emu_alloc_blocks.remove(block)
        self.__mmap_emu_alloc_blocks.add(new_block)

        if new_size < len(block.addrs):
            # shrinking -- unmap pages at end
            r = range(new_block.page_indexes.stop, block.page_indexes.stop)
            for page_idx in r:
                self.__page_flags.pop(page_idx)
                # discard, not remove: the page may never have been written
                self.modified_pages.discard(page_idx)
        else:
            # expanding -- map pages at end, they're cleared already
            r = range(block.page_indexes.stop, new_block.page_indexes.stop)
            for page_idx in r:
                self.__page_flags[page_idx] = block.flags
                # discard, not remove: cleared pages count as unmodified
                # and may not be in the set at all
                self.modified_pages.discard(page_idx)
        return new_block

    def __mmap_emu_find_free_addr(self, block):
        # type: (MMapEmuBlock) -> MMapEmuBlock | None
        """find a spot where `block` will fit, returning the new block"""
        # allocated blocks are MMapEmuBlocks but unbacked blocks are plain
        # `range`s, so collect everything as address ranges
        # (fixed: previously both were sorted via `.addrs`, which ranges
        # don't have, and `len(block)` was used on an MMapEmuBlock)
        ranges = [b.addrs for b in self.__mmap_emu_alloc_blocks]
        ranges.extend(self.__mmap_emu_unbacked_blocks)
        ranges.sort(key=lambda r: r.start)
        biggest_gap = range(0)
        for gap in self.__gaps_in(ranges):
            if len_(biggest_gap) < len_(gap):
                biggest_gap = gap
        extra_size = len_(biggest_gap) - len(block.addrs)
        if extra_size < 0:
            return None  # no space anywhere
        # try to allocate in the middle of the gap, so mmaps can grow later
        offset = extra_size // 2

        # align to page -- this depends on gap being aligned already.
        #
        # rounds down offset, so no need to check size again since it can't
        # ever get closer to the end of the gap
        offset -= offset % MMAP_PAGE_SIZE
        start = biggest_gap.start + offset
        addrs = range(start, start + len(block.addrs))
        return plain_data.replace(block, addrs=addrs)

    def __mmap_emu_try_grow_down(self, addr, needed_flag):
        # type: (int, MMapPageFlags) -> bool
        """ if addr is the page just before a GROW_DOWN block, try to grow it.
        returns True if successful. """
        raise NotImplementedError  # FIXME: implement

    def brk_syscall(self, addr):
        """emulate the brk syscall by resizing the data block (unfinished)"""
        assert self.emulating_mmap, "brk syscall requires emulating_mmap=True"
        assert self.mmap_emu_data_block is not None, \
            "brk syscall requires a data block/segment"

        # round addr up to the nearest page
        addr_div_page_size = -(-addr // MMAP_PAGE_SIZE)  # ceil(addr / size)
        addr = addr_div_page_size * MMAP_PAGE_SIZE

        raise NotImplementedError  # FIXME: finish

    def mmap_syscall(self, addr, length, prot, flags, fd, offset, is_mmap2):
        """emulate the PowerPC mmap/mmap2 syscalls.

        Returns the guest address of the new mapping on success, or a
        negative errno on failure.  The guest mapping is realised by a
        host MAP_FIXED mmap into the corresponding mem_blocks entry.
        """
        if is_mmap2:
            offset *= 4096  # specifically *not* the page size
        prot_read = bool(prot & ppc_flags.PROT_READ)
        prot_write = bool(prot & ppc_flags.PROT_WRITE)
        prot_exec = bool(prot & ppc_flags.PROT_EXEC)
        prot_all = (ppc_flags.PROT_READ | ppc_flags.PROT_WRITE
                    | ppc_flags.PROT_EXEC)
        # checks based off the checks in linux
        if prot & ~prot_all:
            return -ppc_flags.EINVAL
        if offset % MMAP_PAGE_SIZE:
            return -ppc_flags.EINVAL
        if flags & ppc_flags.MAP_HUGETLB:
            # not supported
            return -ppc_flags.EINVAL
        if length <= 0 or offset < 0:
            return -ppc_flags.EINVAL
        if flags & ppc_flags.MAP_FIXED_NOREPLACE:
            flags |= ppc_flags.MAP_FIXED
        if not (flags & ppc_flags.MAP_FIXED):
            # page-align the address hint, rounding down.
            # (fixed: `addr &= MMAP_PAGE_SIZE - 1` kept only the LOW bits,
            # producing an unaligned near-zero address)
            addr &= ~(MMAP_PAGE_SIZE - 1)
        # page-align length, rounding up
        length = (length + MMAP_PAGE_SIZE - 1) & ~(MMAP_PAGE_SIZE - 1)
        if length + offset >= 2 ** 64:
            # overflowed
            return -ppc_flags.ENOMEM
        block_flags = MMapPageFlags.NONE
        if prot_read:
            block_flags |= MMapPageFlags.R
        if prot_write:
            block_flags |= MMapPageFlags.W
        if prot_exec:
            block_flags |= MMapPageFlags.X
        if flags & ppc_flags.MAP_GROWSDOWN:
            block_flags |= MMapPageFlags.GROWS_DOWN
        file = None
        if fd >= 0:
            # resolve the fd to a path so the mapping records its backing file
            try:
                file = os.readlink("/proc/self/fd/%i" % fd)
            except IOError:
                return -ppc_flags.EBADF
        try:
            block = MMapEmuBlock(
                range(addr, addr + length), block_flags, file, offset)
        except (ValueError, MemException):
            return -ppc_flags.EINVAL
        if not (flags & ppc_flags.MAP_FIXED):
            block = self.__mmap_emu_find_free_addr(block)
            if block is None:
                return -ppc_flags.ENOMEM
        if flags & ppc_flags.MAP_LOCKED:
            return -ppc_flags.EPERM
        map_ty = flags & ppc_flags.MAP_TYPE
        if file is not None:
            fallthrough = False
            if map_ty == ppc_flags.MAP_SHARED:
                flags &= LEGACY_MAP_MASK
                fallthrough = True
            if fallthrough or map_ty == ppc_flags.MAP_SHARED_VALIDATE:
                if flags & ~LEGACY_MAP_MASK:
                    return -ppc_flags.EOPNOTSUPP
                raise NotImplementedError("MAP_SHARED on file")
                fallthrough = True  # unreachable until MAP_SHARED works
            if fallthrough or map_ty == ppc_flags.MAP_PRIVATE:
                if flags & _MAP_GROWS:
                    return -ppc_flags.EINVAL
            else:
                return -ppc_flags.EINVAL
        elif map_ty == ppc_flags.MAP_SHARED:
            if flags & _MAP_GROWS:
                return -ppc_flags.EINVAL
            raise NotImplementedError("MAP_SHARED on memory")
        elif map_ty != ppc_flags.MAP_PRIVATE:
            return -ppc_flags.EINVAL
        replace = not (flags & ppc_flags.MAP_FIXED_NOREPLACE)
        if not self.__mmap_emu_map_fixed(block, replace, dry_run=True):
            # failed, was that because there's an existing memory block or
            # that was an invalid address?
            if self.__mmap_emu_map_fixed(block, replace=True, dry_run=True):
                return -ppc_flags.EEXIST  # existing memory block
            else:
                return -ppc_flags.EINVAL  # invalid address
        mblock = self.mem_blocks[block.underlying_block_key]
        offsets = block.underlying_block_offsets
        buf = (ctypes.c_ubyte * len(offsets)).from_buffer(mblock, offsets[0])
        buf_addr = ctypes.addressof(buf)
        # use_errno=True is required for ctypes.get_errno() below to see
        # the errno set by the host syscall
        libc = ctypes.CDLL(None, use_errno=True)
        syscall = libc.syscall
        restype = syscall.restype
        argtypes = syscall.argtypes
        syscall.restype = ctypes.c_long
        syscall.argtypes = (ctypes.c_long,) * 6
        call_no = ctypes.c_long(ppc_flags.host_defines['SYS_mmap'])
        host_prot = ppc_flags.host_defines['PROT_READ']
        # test the block's W permission bit (fixed: `& prot_write` tested
        # bit 0, i.e. the R flag, with a bool), and OR rather than assign
        # so the mapping stays readable
        if block.flags & MMapPageFlags.W:
            host_prot |= ppc_flags.host_defines['PROT_WRITE']
        host_flags = ppc_flags.host_defines['MAP_FIXED']
        host_flags |= ppc_flags.host_defines['MAP_PRIVATE']
        if file is None:
            host_flags |= ppc_flags.host_defines['MAP_ANONYMOUS']
        res = int(syscall(
            call_no, ctypes.c_long(buf_addr), ctypes.c_long(len(offsets)),
            ctypes.c_long(host_prot), ctypes.c_long(host_flags),
            ctypes.c_long(fd), ctypes.c_long(offset)))
        syscall.restype = restype
        syscall.argtypes = argtypes
        if res == -1:
            return -ctypes.get_errno()
        self.__mmap_emu_map_fixed(block, replace=True, dry_run=False)
        return block.addrs.start

    @staticmethod
    def mmap_page_idx_to_addr(page_idx):
        """convert a page index back to its (sign-extended) byte address"""
        assert 0 <= page_idx < _PAGE_COUNT
        if page_idx >= _NEG_PG_IDX_START:
            page_idx -= _PAGE_COUNT
        return (page_idx * MMAP_PAGE_SIZE) % 2 ** 64

    @staticmethod
    def addr_to_mmap_page_idx(addr):
        """convert a 64-bit byte address to its page index, raising
        MemException if the address isn't a canonical (sign-extended
        48-bit) address"""
        page_idx, offset = divmod(addr, MMAP_PAGE_SIZE)
        page_idx %= _PAGE_COUNT
        expected = MemMMap.mmap_page_idx_to_addr(page_idx) + offset
        if addr != expected:
            exc = MemException("not sign extended",
                               ("address not sign extended: 0x%X "
                                "expected 0x%X") % (addr, expected))
            exc.dar = addr
            raise exc
        return page_idx

    def __reduce_ex__(self, protocol):
        # mmap objects hold OS resources and can't be duplicated sensibly
        raise PicklingError("MemMMap can't be deep-copied or pickled")

    def __access_addr_range_err(self, start_addr, size, needed_flag):
        # reads of unmapped memory return a default; writes are fatal
        assert needed_flag != MMapPageFlags.W, \
            "can't write to address 0x%X size 0x%X" % (start_addr, size)
        return None, 0

    def __access_addr_range(self, start_addr, size, needed_flag):
        """check page permissions for [start_addr, start_addr+size) and
        return (underlying mmap block, offset within it), or (None, 0)
        for failed reads.  Accesses must not cross an underlying block
        boundary."""
        assert size > 0, "invalid size"
        page_idx = self.addr_to_mmap_page_idx(start_addr)
        last_addr = start_addr + size - 1
        last_page_idx = self.addr_to_mmap_page_idx(last_addr)
        block_addr = start_addr % BLOCK_SIZE
        block_k = start_addr - block_addr
        last_block_addr = last_addr % BLOCK_SIZE
        last_block_k = last_addr - last_block_addr
        if block_k != last_block_k:
            return self.__access_addr_range_err(start_addr, size, needed_flag)
        for i in range(page_idx, last_page_idx + 1):
            flags = self.__page_flags.get(i, 0)
            if flags & needed_flag == 0:
                if not self.__mmap_emu_try_grow_down(start_addr, needed_flag):
                    return self.__access_addr_range_err(
                        start_addr, size, needed_flag)
            if needed_flag is MMapPageFlags.W:
                # mark each touched page as modified (fixed: previously
                # only the first page, page_idx, was ever added)
                self.modified_pages.add(i)
        return self.mem_blocks[block_k], block_addr

    def get_ctypes(self, start_addr, size, is_write):
        """ returns a ctypes ubyte array referring to the memory at
        `start_addr` with size `size`
        """
        flag = MMapPageFlags.W if is_write else MMapPageFlags.R
        block, block_addr = self.__access_addr_range(start_addr, size, flag)
        assert block is not None, \
            f"can't read from address 0x{start_addr:X} size 0x{size:X}"
        return (ctypes.c_ubyte * size).from_buffer(block, block_addr)

    def _read_word(self, word_idx, reason):
        """read one word; unmapped words yield the reason's default"""
        block, block_addr = self.__access_addr_range(
            word_idx * self.bytes_per_word, self.bytes_per_word,
            reason.needed_mmap_page_flag)
        if block is None:
            return reason.read_default
        bytes_ = block[block_addr:block_addr + self.bytes_per_word]
        return int.from_bytes(bytes_, 'little')

    def _write_word(self, word_idx, value):
        """write one word; asserts on unmapped or read-only pages"""
        block, block_addr = self.__access_addr_range(
            word_idx * self.bytes_per_word, self.bytes_per_word,
            MMapPageFlags.W)
        bytes_ = value.to_bytes(self.bytes_per_word, 'little')
        block[block_addr:block_addr + self.bytes_per_word] = bytes_

    def word_idxs(self):
        """yield the word index of every modified, non-zero word"""
        zeros = bytes(self.bytes_per_word)
        for page_idx in self.modified_pages:
            start = self.mmap_page_idx_to_addr(page_idx)
            block, block_addr = self.__access_addr_range(
                start, MMAP_PAGE_SIZE, MMapPageFlags.R)
            end = start + MMAP_PAGE_SIZE
            for word_idx in range(start // self.bytes_per_word,
                                  end // self.bytes_per_word):
                next_block_addr = block_addr + self.bytes_per_word
                bytes_ = block[block_addr:next_block_addr]
                block_addr = next_block_addr
                if bytes_ != zeros:
                    yield word_idx