1 # SPDX-License-Identifier: LGPLv3+
2 # Copyright (C) 2020, 2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
3 # Funded by NLnet http://nlnet.nl
4 """core of the python-based POWER9 simulator
6 this is part of a cycle-accurate POWER9 simulator. its primary purpose is
7 not speed, it is for both learning and educational purposes, as well as
8 a method of verifying the HDL.
12 * https://bugs.libre-soc.org/show_bug.cgi?id=424
from collections import defaultdict
import ctypes
import enum
import math
import mmap
import os
from pathlib import Path
from pickle import PicklingError

from cached_property import cached_property
from nmutil import plain_data

from openpower.decoder.selectable_int import SelectableInt
from openpower.syscalls import ppc_flags
from openpower.util import log, LogType
def swap_order(x, nbytes):
    """Reverse the byte order of the unsigned integer `x`.

    `x` is serialised as `nbytes` little-endian bytes and re-read as
    big-endian -- i.e. an endianness swap over exactly `nbytes` bytes.
    """
    x = x.to_bytes(nbytes, byteorder='little')
    x = int.from_bytes(x, byteorder='big', signed=False)
    # NOTE(review): truncated source dropped the return -- restored here
    return x
class MemException(Exception):
    """Raised on invalid simulator memory accesses, e.g. unaligned
    load/store or a non-sign-extended (non-canonical) address."""
    pass
def process_mem(initial_mem, row_bytes=8):
    """Normalise `initial_mem` into a dict {addr: (value, width-in-bytes)}.

    Accepted input forms (for convenience):
    * list of values        -> rows of `row_bytes` starting at address 0
    * (startaddr, [values]) -> consecutive rows of `row_bytes` each
    * dict {addr: value} or {addr: (value, width)}
    * None / empty          -> empty result
    """
    res = {}
    if not initial_mem:
        return res
    # different types of memory data structures recognised (for convenience)
    if isinstance(initial_mem, list):
        initial_mem = (0, initial_mem)
    if isinstance(initial_mem, tuple):
        startaddr, mem = initial_mem
        initial_mem = {}
        for i, val in enumerate(mem):
            initial_mem[startaddr + row_bytes * i] = (val, row_bytes)

    for addr, val in initial_mem.items():
        if isinstance(val, tuple):
            (val, width) = val
        else:
            width = row_bytes  # assume same width
        # val = swap_order(val, width)
        res[addr] = (val, width)

    return res
class _ReadReason(enum.Enum):
    """Why a word is being read -- selects the default value for missing
    memory and the mmap page permission required for the access."""
    # NOTE(review): Read/Dump/Execute members restored from truncated
    # source -- all three are referenced elsewhere in this file
    Read = enum.auto()
    SubWordWrite = enum.auto()
    Dump = enum.auto()
    Execute = enum.auto()

    @cached_property
    def read_default(self):
        # sub-word writes and dumps treat missing memory as zero; other
        # reads return None so callers can detect the miss
        if self in (self.SubWordWrite, self.Dump):
            return 0
        return None

    @cached_property
    def needed_mmap_page_flag(self):
        # instruction fetch requires execute permission, everything else
        # requires read permission
        if self is self.Execute:
            return MMapPageFlags.X
        return MMapPageFlags.R
83 def __init__(self
, row_bytes
, initial_mem
, misaligned_ok
):
84 self
.bytes_per_word
= row_bytes
85 self
.word_log2
= math
.ceil(math
.log2(row_bytes
))
86 self
.last_ld_addr
= None
87 self
.last_st_addr
= None
88 self
.misaligned_ok
= misaligned_ok
89 log("Sim-Mem", initial_mem
, self
.bytes_per_word
, self
.word_log2
)
93 self
.initialize(row_bytes
, initial_mem
)
95 def initialize(self
, row_bytes
, initial_mem
):
96 for addr
, (val
, width
) in process_mem(initial_mem
, row_bytes
).items():
97 # val = swap_order(val, width)
98 self
.st(addr
, val
, width
, swap
=False)
    def _read_word(self, word_idx, reason):
        # abstract hook: subclasses (Mem, MemMMap) return the word stored
        # at word-index `word_idx`, or `reason.read_default` when absent
        raise NotImplementedError
    def _write_word(self, word_idx, value):
        # abstract hook: subclasses store `value` at word-index `word_idx`
        raise NotImplementedError
107 raise NotImplementedError
110 def _get_shifter_mask(self
, wid
, remainder
):
111 shifter
= ((self
.bytes_per_word
- wid
) - remainder
) * \
113 # XXX https://bugs.libre-soc.org/show_bug.cgi?id=377
115 shifter
= remainder
* 8
116 mask
= (1 << (wid
* 8)) - 1
117 log("width,rem,shift,mask", wid
, remainder
, hex(shifter
), hex(mask
))
120 # TODO: Implement ld/st of lesser width
121 def ld(self
, address
, width
=8, swap
=True, check_in_mem
=False,
122 instr_fetch
=False, reason
=None):
123 log("ld from addr 0x%x width %d" % (address
, width
),
124 swap
, check_in_mem
, instr_fetch
)
125 self
.last_ld_addr
= address
# record last load
127 remainder
= address
& (self
.bytes_per_word
- 1)
128 address
= address
>> self
.word_log2
129 if remainder
& (width
- 1) != 0:
130 exc
= MemException("unaligned",
131 "Unaligned access: remainder %x width %d" %
136 reason
= _ReadReason
.Execute
if instr_fetch
else _ReadReason
.Read
137 val
= self
._read
_word
(address
, reason
)
143 log("ld mem @ 0x%x rem %d : 0x%x" % (ldaddr
, remainder
, val
))
145 if width
!= self
.bytes_per_word
:
146 shifter
, mask
= self
._get
_shifter
_mask
(width
, remainder
)
147 log("masking", hex(val
), hex(mask
<< shifter
), shifter
)
148 val
= val
& (mask
<< shifter
)
151 val
= swap_order(val
, width
)
152 log("Read 0x%x from addr 0x%x" % (val
, ldaddr
))
155 def _st(self
, addr
, v
, width
=8, swap
=True):
157 remainder
= addr
& (self
.bytes_per_word
- 1)
158 addr
= addr
>> self
.word_log2
159 log("Writing 0x%x to ST 0x%x memaddr 0x%x/%x swap %s" %
160 (v
, staddr
, addr
, remainder
, str(swap
)))
161 if not self
.misaligned_ok
and remainder
& (width
- 1) != 0:
162 exc
= MemException("unaligned",
163 "Unaligned access: remainder %x width %d" %
168 v
= swap_order(v
, width
)
169 if width
!= self
.bytes_per_word
:
170 val
= self
._read
_word
(addr
, _ReadReason
.SubWordWrite
)
171 shifter
, mask
= self
._get
_shifter
_mask
(width
, remainder
)
172 val
&= ~
(mask
<< shifter
)
174 self
._write
_word
(addr
, val
)
177 self
._write
_word
(addr
, v
)
178 log("mem @ 0x%x: 0x%x" % (staddr
, val
))
180 def st(self
, st_addr
, v
, width
=8, swap
=True):
181 self
.last_st_addr
= st_addr
# record last store
182 # misaligned not allowed: pass straight to Mem._st
183 if not self
.misaligned_ok
:
184 return self
._st
(st_addr
, v
, width
, swap
)
185 remainder
= st_addr
& (self
.bytes_per_word
- 1)
187 v
= swap_order(v
, width
)
188 # not misaligned: pass through to Mem._st but we've swapped already
189 misaligned
= remainder
& (width
- 1)
190 if misaligned
== 0 or (remainder
+ width
<= self
.bytes_per_word
):
191 return self
._st
(st_addr
, v
, width
, swap
=False)
192 shifter
, mask
= self
._get
_shifter
_mask
(width
, remainder
)
193 # split into two halves. lower first
194 maxmask
= (1 << (self
.bytes_per_word
)*8) - 1
195 val1
= ((v
<< shifter
) & maxmask
) >> shifter
196 self
._st
(st_addr
, val1
, width
=width
-misaligned
, swap
=False)
198 val2
= v
>> ((width
-misaligned
)*8)
199 addr2
= (st_addr
>> self
.word_log2
) << self
.word_log2
200 addr2
+= self
.bytes_per_word
201 print("v, val2", hex(v
), hex(val2
), "ad", addr2
)
202 self
._st
(addr2
, val2
, width
=width
-misaligned
, swap
=False)
204 def __call__(self
, addr
, sz
):
205 val
= self
.ld(addr
.value
, sz
, swap
=False)
206 log("memread", addr
, sz
, hex(val
), kind
=LogType
.InstrInOuts
)
207 return SelectableInt(val
, sz
*8)
    def memassign(self, addr, sz, val):
        # SelectableInt-based write used by the ISACaller interface;
        # `addr` and `val` are SelectableInts, `sz` is a byte count
        log("memassign", addr, sz, val, kind=LogType.InstrInOuts)
        self.st(addr.value, val.value, sz, swap=False)
213 def dump(self
, printout
=True, asciidump
=False):
214 keys
= list(self
.word_idxs())
218 v
= self
._read
_word
(k
, _ReadReason
.Dump
)
225 c
= chr(v
>> (i
*8) & 0xff)
226 if not c
.isprintable():
229 print("%016x: %016x" % ((k
*8) & 0xffffffffffffffff, v
), s
)
232 def log_fancy(self
, *, kind
=LogType
.Default
, name
="Memory",
233 log2_line_size
=4, log2_column_chunk_size
=3, log
=log
):
234 line_size
= 1 << log2_line_size
235 subline_mask
= line_size
- 1
236 column_chunk_size
= 1 << log2_column_chunk_size
239 return bytearray(line_size
)
240 mem_lines
= defaultdict(make_line
)
241 subword_range
= range(1 << self
.word_log2
)
242 for k
in self
.word_idxs():
243 addr
= k
<< self
.word_log2
244 for _
in subword_range
:
245 v
= self
.ld(addr
, width
=1, reason
=_ReadReason
.Dump
)
246 mem_lines
[addr
>> log2_line_size
][addr
& subline_mask
] = v
250 last_line_index
= None
251 for line_index
in sorted(mem_lines
.keys()):
252 line_addr
= line_index
<< log2_line_size
253 if last_line_index
is not None \
254 and last_line_index
+ 1 != line_index
:
256 last_line_index
= line_index
257 line_bytes
= mem_lines
[line_index
]
258 line_str
= f
"0x{line_addr:08X}:"
259 for col_chunk
in range(0, line_size
,
262 for i
in range(column_chunk_size
):
263 line_str
+= f
" {line_bytes[col_chunk + i]:02X}"
265 for i
in range(line_size
):
266 if 0x20 <= line_bytes
[i
] <= 0x7E:
267 line_str
+= chr(line_bytes
[i
])
271 lines
.append(line_str
)
272 lines
= "\n".join(lines
)
273 log(f
"\n{name}:\n{lines}\n", kind
=kind
)
276 class Mem(MemCommon
):
277 def __init__(self
, row_bytes
=8, initial_mem
=None, misaligned_ok
=False):
279 super().__init
__(row_bytes
, initial_mem
, misaligned_ok
)
281 def _read_word(self
, word_idx
, reason
):
282 return self
.mem
.get(word_idx
, reason
.read_default
)
284 def _write_word(self
, word_idx
, value
):
285 self
.mem
[word_idx
] = value
288 return self
.mem
.keys()
291 class MMapPageFlags(enum
.IntFlag
):
292 """ flags on each mmap-ped page
294 Note: these are *not* PowerISA MMU pages, but instead internal to Mem so
295 it can detect invalid accesses and assert rather than segfaulting.
300 "readable when instr_fetch=True"
303 "shared -- aka. not copy-on-write"
306 """this memory block will grow when the address one page before the
307 beginning is accessed"""
313 _ALLOWED_MMAP_NORMAL_FLAGS
= MMapPageFlags
.RWX | MMapPageFlags
.S
314 _ALLOWED_MMAP_STACK_FLAGS
= MMapPageFlags
.RWX | MMapPageFlags
.GROWS_DOWN
317 MMAP_PAGE_SIZE
= 1 << 16 # size of chunk that we track
318 _PAGE_COUNT
= (1 << 48) // MMAP_PAGE_SIZE
# 48-bit address space
319 _NEG_PG_IDX_START
= _PAGE_COUNT
// 2 # start of negative half of address space
321 # code assumes BLOCK_SIZE is a power of two
322 # BLOCK_SIZE = 1 << 32
323 BLOCK_SIZE
= 1 << 28 # reduced so it works on armv7a
325 assert BLOCK_SIZE
% MMAP_PAGE_SIZE
== 0
326 DEFAULT_BLOCK_ADDRS
= (
327 0, # low end of user space
328 2 ** 47 - BLOCK_SIZE
, # high end of user space
332 @plain_data.plain_data(frozen
=True, unsafe_hash
=True)
334 __slots__
= ("addrs", "flags", "file", "file_off")
336 def __init__(self
, addrs
, flags
=MMapPageFlags
.NONE
, file=None, file_off
=0):
337 # type: (range, MMapPageFlags, Path | str | None, int) -> None
339 raise ValueError("bad address range, step must be 1")
341 raise ValueError("bad address range, must be non-empty")
343 raise ValueError("bad address range, must be non-negative")
344 if addrs
.stop
> 2 ** 64:
345 raise ValueError("bad address range -- goes beyond 2 ** 64")
346 if addrs
.start
% MMAP_PAGE_SIZE
:
347 raise ValueError("bad address range -- start isn't page-aligned")
348 if addrs
.stop
% MMAP_PAGE_SIZE
:
349 raise ValueError("bad address range -- stop isn't page-aligned")
350 if addrs
[0] // BLOCK_SIZE
!= addrs
[-1] // BLOCK_SIZE
:
352 "bad address range -- crosses underlying block boundaries")
355 raise ValueError("bad file_off, must be non-negative")
356 if file_off
% MMAP_PAGE_SIZE
:
357 raise ValueError("bad file_off, must be page-aligned")
358 if flags
& ~_ALLOWED_MMAP_NORMAL_FLAGS
:
359 raise ValueError("invalid flags for mmap with file")
362 if flags
& ~_ALLOWED_MMAP_NORMAL_FLAGS
:
363 if flags
& ~_ALLOWED_MMAP_STACK_FLAGS
:
364 raise ValueError("invalid flags for anonymous mmap")
365 file_off
= 0 # no file -- clear offset
369 self
.file_off
= file_off
370 self
.page_indexes
# check that addresses can be mapped to pages
372 def intersects(self
, other
):
373 # type: (MMapEmuBlock) -> bool
374 return (other
.addrs
.start
< self
.addrs
.stop
375 and self
.addrs
.start
< other
.addrs
.stop
)
378 def is_private_mem(self
):
379 return self
.file is None and not self
.flags
& MMapPageFlags
.S
382 def underlying_block_key(self
):
383 offset
= self
.addrs
.start
% BLOCK_SIZE
384 return self
.addrs
.start
- offset
387 def underlying_block_offsets(self
):
388 start
= self
.addrs
.start
% BLOCK_SIZE
389 return range(start
, start
+ len(self
.addrs
))
392 def page_indexes(self
):
393 first_page
= MemMMap
.addr_to_mmap_page_idx(self
.addrs
[0])
394 # can't just use stop, since that may be out-of-range
395 last_page
= MemMMap
.addr_to_mmap_page_idx(self
.addrs
[-1])
396 if first_page
< _NEG_PG_IDX_START
and last_page
>= _NEG_PG_IDX_START
:
398 "bad address range, crosses transition from positive "
399 "canonical addresses to negative canonical addresses")
400 return range(first_page
, last_page
+ 1)
402 def difference(self
, remove
):
403 # type: (MMapEmuBlock) -> list[MMapEmuBlock]
404 """returns the blocks left after removing `remove` from `self`"""
405 if not self
.intersects(remove
):
408 addrs
= range(self
.addrs
.start
, remove
.addrs
.start
)
410 retval
.append(plain_data
.replace(self
, addrs
=addrs
))
411 addrs
= range(remove
.addrs
.stop
, self
.addrs
.stop
)
413 file_off
= self
.file_off
+ addrs
.start
- self
.addrs
.start
414 retval
.append(plain_data
.replace(
415 self
, addrs
=addrs
, file_off
=file_off
))
419 # stuff marked "not available" is not in the powerpc64le headers on my system
422 | ppc_flags
.MAP_PRIVATE
423 | ppc_flags
.MAP_FIXED
424 | ppc_flags
.MAP_ANONYMOUS
425 | ppc_flags
.MAP_DENYWRITE
426 | ppc_flags
.MAP_EXECUTABLE
427 # | ppc_flags.MAP_UNINITIALIZED # not available -- ignored for now
428 | ppc_flags
.MAP_GROWSDOWN
429 | ppc_flags
.MAP_LOCKED
430 | ppc_flags
.MAP_NORESERVE
431 | ppc_flags
.MAP_POPULATE
432 | ppc_flags
.MAP_NONBLOCK
433 | ppc_flags
.MAP_STACK
434 | ppc_flags
.MAP_HUGETLB
435 # | ppc_flags.MAP_32BIT # not available -- ignored for now
436 # | ppc_flags.MAP_ABOVE4G # not available -- ignored for now
437 # | ppc_flags.MAP_HUGE_2MB # not available -- ignored for now
438 # | ppc_flags.MAP_HUGE_1GB # not available -- ignored for now
441 _MAP_GROWS
= ppc_flags
.MAP_GROWSDOWN
442 # _MAP_GROWS |= ppc_flags.MAP_GROWSUP # not available -- ignored for now
445 """ len(), but with fix for len(range(2**64)) raising OverflowError """
448 except OverflowError:
449 assert isinstance(r
, range)
450 return 1 + (r
.stop
- r
.start
- 1) // r
.step
453 class MemMMap(MemCommon
):
454 def __init__(self
, row_bytes
=8, initial_mem
=None, misaligned_ok
=False,
455 block_addrs
=DEFAULT_BLOCK_ADDRS
, emulating_mmap
=False,
456 mmap_emu_data_block
=None):
457 # we can't allocate the entire 2 ** 47 byte address space, so split
458 # it into smaller blocks
460 addr
: mmap
.mmap(-1, BLOCK_SIZE
) for addr
in sorted(block_addrs
)}
461 assert all(addr
% BLOCK_SIZE
== 0 for addr
in self
.mem_blocks
), \
462 "misaligned block address not supported"
463 self
.__page
_flags
= {}
464 self
.modified_pages
= set()
465 self
.mmap_emu_data_block
= mmap_emu_data_block
466 self
.__mmap
_emu
_alloc
_blocks
= set() # type: set[MMapEmuBlock] | None
468 # build the list of unbacked blocks -- those address ranges that have
469 # no backing memory so mmap can't allocate there. These are maintained
470 # separately from __mmap_emu_alloc_blocks so munmap/mremap can't
473 range(a
, a
+ len(b
)) for a
, b
in self
.mem_blocks
.items()]
474 self
.__mmap
_emu
_unbacked
_blocks
= tuple(self
.__gaps
_in
(addr_ranges
))
477 if mmap_emu_data_block
is not None:
478 if not isinstance(mmap_emu_data_block
, MMapEmuBlock
):
480 "mmap_emu_data_block must be a MMapEmuBlock")
481 if mmap_emu_data_block
.file is not None:
483 "mmap_emu_data_block must be an anonymous mapping")
484 if not self
.__mmap
_emu
_map
_fixed
(block
=mmap_emu_data_block
,
485 replace
=False, dry_run
=False):
486 raise ValueError("invalid mmap_emu_data_block")
488 self
.__mmap
_emu
_alloc
_blocks
= None
489 if mmap_emu_data_block
is not None:
490 raise ValueError("can't set mmap_emu_data_block "
491 "without emulating_mmap=True")
492 # mark blocks as readable/writable
493 for addr
, block
in self
.mem_blocks
.items():
494 start_page
= self
.addr_to_mmap_page_idx(addr
)
495 end_page
= start_page
+ len(block
) // MMAP_PAGE_SIZE
496 for page_idx
in range(start_page
, end_page
):
497 self
.__page
_flags
[page_idx
] = MMapPageFlags
.RWX
499 super().__init
__(row_bytes
, initial_mem
, misaligned_ok
)
502 def __gaps_in(sorted_ranges
, start
=0, stop
=2 ** 64):
503 # type: (list[range] | tuple[range], int, int) -> list[range]
506 for r
in sorted_ranges
:
507 gap
= range(start
, r
.start
)
511 gap
= range(start
, stop
)
517 def emulating_mmap(self
):
518 return self
.__mmap
_emu
_alloc
_blocks
is not None
520 def __mmap_emu_map_fixed(self
, block
, replace
, dry_run
):
521 # type: (MMapEmuBlock, bool, bool) -> bool
522 """insert the block at the fixed address passed in, replacing the
523 parts of any other blocks that overlap if `replace` is `True`.
525 If `dry_run`, then don't make any changes, just check if it would
528 This function requires the caller to check `block`'s permissions and to
529 perform the underlying `mmap` first.
531 if block
.underlying_block_key
not in self
.mem_blocks
:
532 return False # unbacked block
533 # intersecting_blocks must be separate list so we don't iterate while
534 # we modify self.__mmap_emu_alloc_blocks
535 intersecting_blocks
= [
536 b
for b
in self
.__mmap
_emu
_alloc
_blocks
if block
.intersects(b
)]
537 for b
in intersecting_blocks
:
540 if self
.mmap_emu_data_block
== b
:
541 # FIXME: what does linux do here?
542 raise NotImplementedError(
543 "mmap overlapping the data block isn't implemented")
545 self
.__mmap
_emu
_alloc
_blocks
.remove(b
)
546 for replacement
in b
.difference(block
):
547 self
.__mmap
_emu
_alloc
_blocks
.add(replacement
)
549 self
.__mmap
_emu
_alloc
_blocks
.add(block
)
550 for page_idx
in block
.page_indexes
:
551 self
.__page
_flags
[page_idx
] = block
.flags
554 def __mmap_emu_resize_map_fixed(self
, block
, new_size
):
555 # type: (MMapEmuBlock, int) -> MMapEmuBlock | None
556 assert block
in self
.__mmap
_emu
_alloc
_blocks
, \
557 "can't resize unmapped block"
558 if new_size
== len(block
.addrs
):
560 addrs
= range(block
.addrs
.start
, block
.addrs
.start
+ new_size
)
561 new_block
= plain_data
.replace(block
, addrs
=addrs
)
562 self
.__mmap
_emu
_alloc
_blocks
.remove(block
)
564 if not self
.__mmap
_emu
_map
_fixed
(
565 new_block
, replace
=False, dry_run
=True):
568 self
.__mmap
_emu
_alloc
_blocks
.add(block
)
569 if not block
.is_private_mem
:
570 # FIXME: implement resizing underlying mapping
571 raise NotImplementedError
573 # clear newly mapped bytes
574 clear_addrs
= range(block
.addrs
.stop
, new_block
.addrs
.stop
)
576 clear_block
= MMapEmuBlock(clear_addrs
)
577 mem_block
= self
.mem_blocks
[clear_block
.underlying_block_key
]
578 assert mem_block
is not None
579 clear_size
= len(clear_addrs
)
580 arr
= (ctypes
.c_ubyte
* clear_size
).from_buffer(
581 mem_block
, clear_block
.underlying_block_offsets
.start
)
582 ctypes
.memset(arr
, 0, clear_size
)
583 if self
.mmap_emu_data_block
== block
:
584 self
.mmap_emu_data_block
= new_block
585 self
.__mmap
_emu
_alloc
_blocks
.remove(block
)
586 self
.__mmap
_emu
_alloc
_blocks
.add(new_block
)
588 if new_size
< len(block
.addrs
):
589 # shrinking -- unmap pages at end
590 r
= range(new_block
.page_indexes
.stop
, block
.page_indexes
.stop
)
592 self
.__page
_flags
.pop(page_idx
)
593 self
.modified_pages
.remove(page_idx
)
595 # expanding -- map pages at end, they're cleared already
596 r
= range(block
.page_indexes
.stop
, new_block
.page_indexes
.stop
)
598 self
.__page
_flags
[page_idx
] = block
.flags
599 self
.modified_pages
.remove(page_idx
) # cleared page
602 def __mmap_emu_find_free_addr(self
, block
):
603 # type: (MMapEmuBlock) -> MMapEmuBlock | None
604 """find a spot where `block` will fit, returning the new block"""
605 blocks
= [*self
.__mmap
_emu
_alloc
_blocks
,
606 *self
.__mmap
_emu
_unbacked
_blocks
]
607 blocks
.sort(key
=lambda b
: b
.addrs
.start
)
608 biggest_gap
= range(0)
609 for gap
in self
.__gaps
_in
([b
.addrs
for b
in blocks
]):
610 if len(biggest_gap
) < len(gap
):
612 extra_size
= len(biggest_gap
) - len(block
.addrs
)
614 return None # no space anywhere
615 # try to allocate in the middle of the gap, so mmaps can grow later
616 offset
= extra_size
// 2
618 # align to page -- this depends on gap being aligned already.
620 # rounds down offset, so no need to check size again since it can't
621 # ever get closer to the end of the gap
622 offset
-= offset
% MMAP_PAGE_SIZE
623 start
= biggest_gap
.start
+ offset
624 addrs
= range(start
, start
+ len(block
))
625 return plain_data
.replace(block
, addrs
=addrs
)
    def __mmap_emu_try_grow_down(self, addr, needed_flag):
        # type: (int, MMapPageFlags) -> bool
        """ if addr is the page just before a GROW_DOWN block, try to grow it.
        returns True if successful. """
        # unimplemented: __access_addr_range currently hits this whenever
        # an access lacks the needed page flag
        raise NotImplementedError  # FIXME: implement
633 def brk_syscall(self
, addr
):
634 assert self
.emulating_mmap
, "brk syscall requires emulating_mmap=True"
635 assert self
.mmap_emu_data_block
is not None, \
636 "brk syscall requires a data block/segment"
638 # round addr up to the nearest page
639 addr_div_page_size
= -(-addr
// MMAP_PAGE_SIZE
) # ceil(addr / size)
640 addr
= addr_div_page_size
* MMAP_PAGE_SIZE
642 raise NotImplementedError # FIXME: finish
644 def mmap_syscall(self
, addr
, length
, prot
, flags
, fd
, offset
, is_mmap2
):
646 offset
*= 4096 # specifically *not* the page size
647 prot_read
= bool(prot
& ppc_flags
.PROT_READ
)
648 prot_write
= bool(prot
& ppc_flags
.PROT_WRITE
)
649 prot_exec
= bool(prot
& ppc_flags
.PROT_EXEC
)
650 prot_all
= (ppc_flags
.PROT_READ | ppc_flags
.PROT_WRITE
651 | ppc_flags
.PROT_EXEC
)
652 # checks based off the checks in linux
654 return -ppc_flags
.EINVAL
655 if offset
% MMAP_PAGE_SIZE
:
656 return -ppc_flags
.EINVAL
657 if flags
& ppc_flags
.MAP_HUGETLB
:
659 return -ppc_flags
.EINVAL
660 if length
<= 0 or offset
< 0:
661 return -ppc_flags
.EINVAL
662 if flags
& ppc_flags
.MAP_FIXED_NOREPLACE
:
663 flags |
= ppc_flags
.MAP_FIXED
664 if not (flags
& ppc_flags
.MAP_FIXED
):
665 addr
&= MMAP_PAGE_SIZE
- 1 # page-align address, rounding down
666 # page-align length, rounding up
667 length
= (length
+ MMAP_PAGE_SIZE
- 1) & ~
(MMAP_PAGE_SIZE
- 1)
668 if length
+ offset
>= 2 ** 64:
670 return -ppc_flags
.ENOMEM
671 block_flags
= MMapPageFlags
.NONE
673 block_flags |
= MMapPageFlags
.R
675 block_flags |
= MMapPageFlags
.W
677 block_flags |
= MMapPageFlags
.X
678 if flags
& ppc_flags
.MAP_GROWSDOWN
:
679 block_flags |
= MMapPageFlags
.GROWS_DOWN
683 file = os
.readlink("/proc/self/fd/%i" % fd
)
685 return -ppc_flags
.EBADF
687 block
= MMapEmuBlock(
688 range(addr
, addr
+ length
), block_flags
, file, offset
)
689 except (ValueError, MemException
):
690 return -ppc_flags
.EINVAL
691 if not (flags
& ppc_flags
.MAP_FIXED
):
692 block
= self
.__mmap
_emu
_find
_free
_addr
(block
)
694 return -ppc_flags
.ENOMEM
695 if flags
& ppc_flags
.MAP_LOCKED
:
696 return -ppc_flags
.EPERM
697 map_ty
= flags
& ppc_flags
.MAP_TYPE
700 if map_ty
== ppc_flags
.MAP_SHARED
:
701 flags
&= LEGACY_MAP_MASK
703 if fallthrough
or map_ty
== ppc_flags
.MAP_SHARED_VALIDATE
:
704 if flags
& ~LEGACY_MAP_MASK
:
705 return -ppc_flags
.EOPNOTSUPP
706 raise NotImplementedError("MAP_SHARED on file")
708 if fallthrough
or map_ty
== ppc_flags
.MAP_PRIVATE
:
709 if flags
& _MAP_GROWS
:
710 return -ppc_flags
.EINVAL
712 return -ppc_flags
.EINVAL
713 elif map_ty
== ppc_flags
.MAP_SHARED
:
714 if flags
& _MAP_GROWS
:
715 return -ppc_flags
.EINVAL
716 raise NotImplementedError("MAP_SHARED on memory")
717 elif map_ty
!= ppc_flags
.MAP_PRIVATE
:
718 return -ppc_flags
.EINVAL
719 replace
= not (flags
& ppc_flags
.MAP_FIXED_NOREPLACE
)
720 if not self
.__mmap
_emu
_map
_fixed
(block
, replace
, dry_run
=True):
721 # failed, was that because there's an existing memory block or
722 # that was an invalid address?
723 if self
.__mmap
_emu
_map
_fixed
(block
, replace
=True, dry_run
=True):
724 return -ppc_flags
.EEXIST
# existing memory block
726 return -ppc_flags
.EINVAL
# invalid address
727 mblock
= self
.mem_blocks
[block
.underlying_block_key
]
728 offsets
= block
.underlying_block_offsets
729 buf
= (ctypes
.c_ubyte
* len(offsets
)).from_buffer(mblock
, offsets
[0])
730 buf_addr
= ctypes
.addressof(buf
)
731 libc
= ctypes
.CDLL(None)
732 syscall
= libc
.syscall
733 restype
= syscall
.restype
734 argtypes
= syscall
.argtypes
735 syscall
.restype
= ctypes
.c_long
736 syscall
.argtypes
= (ctypes
.c_long
,) * 6
737 call_no
= ctypes
.c_long(ppc_flags
.host_defines
['SYS_mmap'])
738 host_prot
= ppc_flags
.host_defines
['PROT_READ']
739 if block
.flags
& prot_write
:
740 host_prot
= ppc_flags
.host_defines
['PROT_WRITE']
741 host_flags
= ppc_flags
.host_defines
['MAP_FIXED']
742 host_flags |
= ppc_flags
.host_defines
['MAP_PRIVATE']
744 host_flags |
= ppc_flags
.host_defines
['MAP_ANONYMOUS']
746 call_no
, ctypes
.c_long(buf_addr
), ctypes
.c_long(len(offsets
)),
747 ctypes
.c_long(host_prot
), ctypes
.c_long(host_flags
),
748 ctypes
.c_long(fd
), ctypes
.c_long(offset
)))
749 syscall
.restype
= restype
750 syscall
.argtypes
= argtypes
752 return -ctypes
.get_errno()
753 self
.__mmap
_emu
_map
_fixed
(block
, replace
=True, dry_run
=False)
754 return block
.addrs
.start
757 def mmap_page_idx_to_addr(page_idx
):
758 assert 0 <= page_idx
< _PAGE_COUNT
759 if page_idx
>= _NEG_PG_IDX_START
:
760 page_idx
-= _PAGE_COUNT
761 return (page_idx
* MMAP_PAGE_SIZE
) % 2 ** 64
764 def addr_to_mmap_page_idx(addr
):
765 page_idx
, offset
= divmod(addr
, MMAP_PAGE_SIZE
)
766 page_idx
%= _PAGE_COUNT
767 expected
= MemMMap
.mmap_page_idx_to_addr(page_idx
) + offset
769 exc
= MemException("not sign extended",
770 ("address not sign extended: 0x%X "
771 "expected 0x%X") % (addr
, expected
))
776 def __reduce_ex__(self
, protocol
):
777 raise PicklingError("MemMMap can't be deep-copied or pickled")
779 def __access_addr_range_err(self
, start_addr
, size
, needed_flag
):
780 assert needed_flag
!= MMapPageFlags
.W
, \
781 "can't write to address 0x%X size 0x%X" % (start_addr
, size
)
    def __access_addr_range(self, start_addr, size, needed_flag):
        # resolve [start_addr, start_addr+size) to (backing mmap block,
        # offset within it); delegates to __access_addr_range_err when
        # the range is unmapped, lacks `needed_flag` permission, or
        # straddles two backing blocks
        assert size > 0, "invalid size"
        page_idx = self.addr_to_mmap_page_idx(start_addr)
        last_addr = start_addr + size - 1
        last_page_idx = self.addr_to_mmap_page_idx(last_addr)
        block_addr = start_addr % BLOCK_SIZE
        block_k = start_addr - block_addr
        last_block_addr = last_addr % BLOCK_SIZE
        last_block_k = last_addr - last_block_addr
        # accesses must not cross an underlying-block boundary
        if block_k != last_block_k:
            return self.__access_addr_range_err(start_addr, size, needed_flag)
        for i in range(page_idx, last_page_idx + 1):
            flags = self.__page_flags.get(i, 0)
            if flags & needed_flag == 0:
                if not self.__mmap_emu_try_grow_down(start_addr, needed_flag):
                    return self.__access_addr_range_err(
                        start_addr, size, needed_flag)
        if needed_flag is MMapPageFlags.W:
            # NOTE(review): only the first page index is marked modified
            # here -- confirm intent for writes spanning multiple pages
            self.modified_pages.add(page_idx)
        return self.mem_blocks[block_k], block_addr
    def get_ctypes(self, start_addr, size, is_write):
        """ returns a ctypes ubyte array referring to the memory at
        `start_addr` with size `size`
        """
        # writes need W permission, reads need R
        flag = MMapPageFlags.W if is_write else MMapPageFlags.R
        block, block_addr = self.__access_addr_range(start_addr, size, flag)
        assert block is not None, \
            f"can't read from address 0x{start_addr:X} size 0x{size:X}"
        # zero-copy view into the backing mmap block
        return (ctypes.c_ubyte * size).from_buffer(block, block_addr)
815 def _read_word(self
, word_idx
, reason
):
816 block
, block_addr
= self
.__access
_addr
_range
(
817 word_idx
* self
.bytes_per_word
, self
.bytes_per_word
,
818 reason
.needed_mmap_page_flag
)
820 return reason
.read_default
821 bytes_
= block
[block_addr
:block_addr
+ self
.bytes_per_word
]
822 return int.from_bytes(bytes_
, 'little')
824 def _write_word(self
, word_idx
, value
):
825 block
, block_addr
= self
.__access
_addr
_range
(
826 word_idx
* self
.bytes_per_word
, self
.bytes_per_word
,
828 bytes_
= value
.to_bytes(self
.bytes_per_word
, 'little')
829 block
[block_addr
:block_addr
+ self
.bytes_per_word
] = bytes_
832 zeros
= bytes(self
.bytes_per_word
)
833 for page_idx
in self
.modified_pages
:
834 start
= self
.mmap_page_idx_to_addr(page_idx
)
835 block
, block_addr
= self
.__access
_addr
_range
(
836 start
, MMAP_PAGE_SIZE
, MMapPageFlags
.R
)
837 end
= start
+ MMAP_PAGE_SIZE
838 for word_idx
in range(start
// self
.bytes_per_word
,
839 end
// self
.bytes_per_word
):
840 next_block_addr
= block_addr
+ self
.bytes_per_word
841 bytes_
= block
[block_addr
:next_block_addr
]
842 block_addr
= next_block_addr