1 # SPDX-License-Identifier: LGPLv3+
2 # Copyright (C) 2020, 2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
3 # Funded by NLnet http://nlnet.nl
"""core of the python-based POWER9 simulator

this is part of a cycle-accurate POWER9 simulator. its primary purpose is
not speed, it is for both learning and educational purposes, as well as
a method of verifying the HDL.

related bugs:

* https://bugs.libre-soc.org/show_bug.cgi?id=424
"""
15 from collections
import defaultdict
16 from openpower
.decoder
.selectable_int
import SelectableInt
17 from openpower
.util
import log
, LogType
20 from cached_property
import cached_property
22 from pickle
import PicklingError
24 from nmutil
import plain_data
25 from pathlib
import Path
26 from openpower
.syscalls
import ppc_flags
def swap_order(x, nbytes):
    """byte-reverse the integer `x` within a field of `nbytes` bytes.

    e.g. swap_order(0x0102, 2) == 0x0201.  used for endian conversion.

    :param x: non-negative integer to byte-swap
    :param nbytes: width of the field in bytes
    :return: the byte-reversed integer
    """
    x = x.to_bytes(nbytes, byteorder='little')
    x = int.from_bytes(x, byteorder='big', signed=False)
    # restored: the swapped value must be returned to the caller
    return x
class MemException(Exception):
    """raised on invalid memory accesses (e.g. misaligned load/store),
    in place of trapping/segfaulting.
    """
    pass
def process_mem(initial_mem, row_bytes=8):
    """normalise the accepted initial-memory formats into one dict of
    {address: (value, width-in-bytes)}.

    accepted formats:
    * list of values          -> rows of `row_bytes` starting at address 0
    * (startaddr, [values])   -> rows of `row_bytes` starting at startaddr
    * {addr: value} or {addr: (value, width)}
    """
    res = {}
    # different types of memory data structures recognised (for convenience)
    if isinstance(initial_mem, list):
        initial_mem = (0, initial_mem)
    if isinstance(initial_mem, tuple):
        startaddr, mem = initial_mem
        # convert the (startaddr, list) form into the dict form
        initial_mem = {}
        for i, val in enumerate(mem):
            initial_mem[startaddr + row_bytes*i] = (val, row_bytes)

    for addr, val in initial_mem.items():
        if isinstance(val, tuple):
            (val, width) = val
        else:
            width = row_bytes  # assume same width
        # val = swap_order(val, width)
        res[addr] = (val, width)

    return res
class _ReadReason(enum.Enum):
    """why a word is being read -- selects the default value returned for
    unmapped memory and which mmap page permission is required.
    """
    Read = enum.auto()
    SubWordWrite = enum.auto()
    Dump = enum.auto()
    Execute = enum.auto()

    @cached_property
    def read_default(self):
        # sub-word read-modify-write and dumps see zeros for unmapped
        # memory; plain reads/instruction fetches return None so callers
        # can detect "not in memory"
        if self in (self.SubWordWrite, self.Dump):
            return 0
        return None

    @cached_property
    def needed_mmap_page_flag(self):
        # instruction fetch requires eXecute permission; everything else
        # (including sub-word-write's read) only requires Read
        if self is self.Execute:
            return MMapPageFlags.X
        return MMapPageFlags.R
83 def __init__(self
, row_bytes
, initial_mem
, misaligned_ok
):
84 self
.bytes_per_word
= row_bytes
85 self
.word_log2
= math
.ceil(math
.log2(row_bytes
))
86 self
.last_ld_addr
= None
87 self
.last_st_addr
= None
88 self
.misaligned_ok
= misaligned_ok
89 log("Sim-Mem", initial_mem
, self
.bytes_per_word
, self
.word_log2
)
93 self
.initialize(row_bytes
, initial_mem
)
95 def initialize(self
, row_bytes
, initial_mem
):
96 for addr
, (val
, width
) in process_mem(initial_mem
, row_bytes
).items():
97 # val = swap_order(val, width)
98 self
.st(addr
, val
, width
, swap
=False)
    def _read_word(self, word_idx, reason):
        # abstract: return the integer word stored at word-index `word_idx`,
        # or `reason.read_default` for unmapped memory.
        # implemented by Mem (dict-backed) and MemMMap (mmap-backed).
        raise NotImplementedError
    def _write_word(self, word_idx, value):
        # abstract: store integer `value` at word-index `word_idx`.
        # implemented by Mem (dict-backed) and MemMMap (mmap-backed).
        raise NotImplementedError
107 raise NotImplementedError
110 def _get_shifter_mask(self
, wid
, remainder
):
111 shifter
= ((self
.bytes_per_word
- wid
) - remainder
) * \
113 # XXX https://bugs.libre-soc.org/show_bug.cgi?id=377
115 shifter
= remainder
* 8
116 mask
= (1 << (wid
* 8)) - 1
117 log("width,rem,shift,mask", wid
, remainder
, hex(shifter
), hex(mask
))
120 # TODO: Implement ld/st of lesser width
121 def ld(self
, address
, width
=8, swap
=True, check_in_mem
=False,
122 instr_fetch
=False, reason
=None):
123 log("ld from addr 0x%x width %d" % (address
, width
),
124 swap
, check_in_mem
, instr_fetch
)
125 self
.last_ld_addr
= address
# record last load
127 remainder
= address
& (self
.bytes_per_word
- 1)
128 address
= address
>> self
.word_log2
129 if remainder
& (width
- 1) != 0:
130 exc
= MemException("unaligned",
131 "Unaligned access: remainder %x width %d" %
136 reason
= _ReadReason
.Execute
if instr_fetch
else _ReadReason
.Read
137 val
= self
._read
_word
(address
, reason
)
143 log("ld mem @ 0x%x rem %d : 0x%x" % (ldaddr
, remainder
, val
))
145 if width
!= self
.bytes_per_word
:
146 shifter
, mask
= self
._get
_shifter
_mask
(width
, remainder
)
147 log("masking", hex(val
), hex(mask
<< shifter
), shifter
)
148 val
= val
& (mask
<< shifter
)
151 val
= swap_order(val
, width
)
152 log("Read 0x%x from addr 0x%x" % (val
, ldaddr
))
155 def _st(self
, addr
, v
, width
=8, swap
=True):
157 remainder
= addr
& (self
.bytes_per_word
- 1)
158 addr
= addr
>> self
.word_log2
159 log("Writing 0x%x to ST 0x%x memaddr 0x%x/%x swap %s" %
160 (v
, staddr
, addr
, remainder
, str(swap
)))
161 if not self
.misaligned_ok
and remainder
& (width
- 1) != 0:
162 exc
= MemException("unaligned",
163 "Unaligned access: remainder %x width %d" %
168 v
= swap_order(v
, width
)
169 if width
!= self
.bytes_per_word
:
170 val
= self
._read
_word
(addr
, _ReadReason
.SubWordWrite
)
171 shifter
, mask
= self
._get
_shifter
_mask
(width
, remainder
)
172 val
&= ~
(mask
<< shifter
)
174 self
._write
_word
(addr
, val
)
177 self
._write
_word
(addr
, v
)
178 log("mem @ 0x%x: 0x%x" % (staddr
, val
))
180 def st(self
, st_addr
, v
, width
=8, swap
=True):
181 self
.last_st_addr
= st_addr
# record last store
182 # misaligned not allowed: pass straight to Mem._st
183 if not self
.misaligned_ok
:
184 return self
._st
(st_addr
, v
, width
, swap
)
185 remainder
= st_addr
& (self
.bytes_per_word
- 1)
187 v
= swap_order(v
, width
)
188 # not misaligned: pass through to Mem._st but we've swapped already
189 misaligned
= remainder
& (width
- 1)
190 if misaligned
== 0 or (remainder
+ width
<= self
.bytes_per_word
):
191 return self
._st
(st_addr
, v
, width
, swap
=False)
192 shifter
, mask
= self
._get
_shifter
_mask
(width
, remainder
)
193 # split into two halves. lower first
194 maxmask
= (1 << (self
.bytes_per_word
)*8) - 1
195 val1
= ((v
<< shifter
) & maxmask
) >> shifter
196 self
._st
(st_addr
, val1
, width
=width
-misaligned
, swap
=False)
198 val2
= v
>> ((width
-misaligned
)*8)
199 addr2
= (st_addr
>> self
.word_log2
) << self
.word_log2
200 addr2
+= self
.bytes_per_word
201 log("v, val2", hex(v
), hex(val2
), "ad", addr2
)
202 self
._st
(addr2
, val2
, width
=width
-misaligned
, swap
=False)
    def __call__(self, addr, sz):
        # convenience read: `addr` is a SelectableInt; returns the value at
        # that address as a SelectableInt of sz*8 bits, with no byte-swap
        val = self.ld(addr.value, sz, swap=False)
        log("memread", addr, sz, hex(val), kind=LogType.InstrInOuts)
        return SelectableInt(val, sz*8)
    def memassign(self, addr, sz, val):
        # convenience write: `addr` and `val` are SelectableInts; stores
        # `val` (sz bytes) at `addr` with no byte-swap
        log("memassign", addr, sz, val, kind=LogType.InstrInOuts)
        self.st(addr.value, val.value, sz, swap=False)
213 def dump(self
, printout
=True, asciidump
=False):
214 keys
= list(self
.word_idxs())
218 v
= self
._read
_word
(k
, _ReadReason
.Dump
)
225 c
= chr(v
>> (i
*8) & 0xff)
226 if not c
.isprintable():
229 print("%016x: %016x" % ((k
*8) & 0xffffffffffffffff, v
), s
)
232 def log_fancy(self
, *, kind
=LogType
.Default
, name
="Memory",
233 log2_line_size
=4, log2_column_chunk_size
=3, log
=log
):
234 line_size
= 1 << log2_line_size
235 subline_mask
= line_size
- 1
236 column_chunk_size
= 1 << log2_column_chunk_size
239 return bytearray(line_size
)
240 mem_lines
= defaultdict(make_line
)
241 subword_range
= range(1 << self
.word_log2
)
242 for k
in self
.word_idxs():
243 addr
= k
<< self
.word_log2
244 for _
in subword_range
:
245 v
= self
.ld(addr
, width
=1, reason
=_ReadReason
.Dump
)
246 mem_lines
[addr
>> log2_line_size
][addr
& subline_mask
] = v
250 last_line_index
= None
251 for line_index
in sorted(mem_lines
.keys()):
252 line_addr
= line_index
<< log2_line_size
253 if last_line_index
is not None \
254 and last_line_index
+ 1 != line_index
:
256 last_line_index
= line_index
257 line_bytes
= mem_lines
[line_index
]
258 line_str
= f
"0x{line_addr:08X}:"
259 for col_chunk
in range(0, line_size
,
262 for i
in range(column_chunk_size
):
263 line_str
+= f
" {line_bytes[col_chunk + i]:02X}"
265 for i
in range(line_size
):
266 if 0x20 <= line_bytes
[i
] <= 0x7E:
267 line_str
+= chr(line_bytes
[i
])
271 lines
.append(line_str
)
272 lines
= "\n".join(lines
)
273 log(f
"\n{name}:\n{lines}\n", kind
=kind
)
class Mem(MemCommon):
    """simple dictionary-backed simulator memory: {word-index: int value}"""

    def __init__(self, row_bytes=8, initial_mem=None, misaligned_ok=False):
        # restored: the backing dict must exist before super().__init__
        # stores the initial memory contents through _write_word
        self.mem = {}
        super().__init__(row_bytes, initial_mem, misaligned_ok)

    def _read_word(self, word_idx, reason):
        # unmapped words read as the reason's default (0 or None)
        return self.mem.get(word_idx, reason.read_default)

    def _write_word(self, word_idx, value):
        self.mem[word_idx] = value

    def word_idxs(self):
        return self.mem.keys()
class MMapPageFlags(enum.IntFlag):
    """ flags on each mmap-ped page

    Note: these are *not* PowerISA MMU pages, but instead internal to Mem so
    it can detect invalid accesses and assert rather than segfaulting.
    """
    R = 1
    W = 2
    X = 4
    "readable when instr_fetch=True"

    S = 8
    "shared -- aka. not copy-on-write"

    GROWS_DOWN = 16
    """this memory block will grow when the address one page before the
    beginning is accessed"""

    RWX = R | W | X
    NONE = 0
# permitted flag sets for emulated-mmap blocks
_ALLOWED_MMAP_NORMAL_FLAGS = MMapPageFlags.RWX | MMapPageFlags.S
_ALLOWED_MMAP_STACK_FLAGS = MMapPageFlags.RWX | MMapPageFlags.GROWS_DOWN

MMAP_PAGE_SIZE = 1 << 16  # size of chunk that we track
_PAGE_COUNT = (1 << 48) // MMAP_PAGE_SIZE  # 48-bit address space
_NEG_PG_IDX_START = _PAGE_COUNT // 2  # start of negative half of address space

# code assumes BLOCK_SIZE is a power of two
# BLOCK_SIZE = 1 << 32
BLOCK_SIZE = 1 << 28  # reduced so it works on armv7a

assert BLOCK_SIZE % MMAP_PAGE_SIZE == 0
assert MMAP_PAGE_SIZE % mmap.PAGESIZE == 0, "host's page size is too big"
assert 2 ** (mmap.PAGESIZE.bit_length() - 1) == mmap.PAGESIZE, \
    "host's page size isn't a power of 2"

# restored: the tuple was left unterminated in the source
DEFAULT_BLOCK_ADDRS = (
    0,  # low end of user space
    2 ** 47 - BLOCK_SIZE,  # high end of user space
)
336 @plain_data.plain_data(frozen
=True, unsafe_hash
=True)
338 __slots__
= ("addrs", "flags", "file", "file_off")
340 def __init__(self
, addrs
, flags
=MMapPageFlags
.NONE
, file=None, file_off
=0):
341 # type: (range, MMapPageFlags, Path | str | None, int) -> None
343 raise ValueError("bad address range, step must be 1")
345 raise ValueError("bad address range, must be non-empty")
347 raise ValueError("bad address range, must be non-negative")
348 if addrs
.stop
> 2 ** 64:
349 raise ValueError("bad address range -- goes beyond 2 ** 64")
350 if addrs
.start
% MMAP_PAGE_SIZE
:
351 raise ValueError("bad address range -- start isn't page-aligned")
352 if addrs
.stop
% MMAP_PAGE_SIZE
:
353 raise ValueError("bad address range -- stop isn't page-aligned")
354 if addrs
[0] // BLOCK_SIZE
!= addrs
[-1] // BLOCK_SIZE
:
356 "bad address range -- crosses underlying block boundaries")
359 raise ValueError("bad file_off, must be non-negative")
360 if file_off
% MMAP_PAGE_SIZE
:
361 raise ValueError("bad file_off, must be page-aligned")
362 if flags
& ~_ALLOWED_MMAP_NORMAL_FLAGS
:
363 raise ValueError("invalid flags for mmap with file")
366 if flags
& ~_ALLOWED_MMAP_NORMAL_FLAGS
:
367 if flags
& ~_ALLOWED_MMAP_STACK_FLAGS
:
368 raise ValueError("invalid flags for anonymous mmap")
369 file_off
= 0 # no file -- clear offset
373 self
.file_off
= file_off
374 self
.page_indexes
# check that addresses can be mapped to pages
376 def intersects(self
, other
):
377 # type: (MMapEmuBlock) -> bool
378 return (other
.addrs
.start
< self
.addrs
.stop
379 and self
.addrs
.start
< other
.addrs
.stop
)
382 def is_private_mem(self
):
383 return self
.file is None and not self
.flags
& MMapPageFlags
.S
386 def underlying_block_key(self
):
387 offset
= self
.addrs
.start
% BLOCK_SIZE
388 return self
.addrs
.start
- offset
391 def underlying_block_offsets(self
):
392 start
= self
.addrs
.start
% BLOCK_SIZE
393 return range(start
, start
+ len(self
.addrs
))
396 def page_indexes(self
):
397 first_page
= MemMMap
.addr_to_mmap_page_idx(self
.addrs
[0])
398 # can't just use stop, since that may be out-of-range
399 last_page
= MemMMap
.addr_to_mmap_page_idx(self
.addrs
[-1])
400 if first_page
< _NEG_PG_IDX_START
and last_page
>= _NEG_PG_IDX_START
:
402 "bad address range, crosses transition from positive "
403 "canonical addresses to negative canonical addresses")
404 return range(first_page
, last_page
+ 1)
406 def difference(self
, remove
):
407 # type: (MMapEmuBlock) -> list[MMapEmuBlock]
408 """returns the blocks left after removing `remove` from `self`"""
409 if not self
.intersects(remove
):
412 addrs
= range(self
.addrs
.start
, remove
.addrs
.start
)
414 retval
.append(plain_data
.replace(self
, addrs
=addrs
))
415 addrs
= range(remove
.addrs
.stop
, self
.addrs
.stop
)
417 file_off
= self
.file_off
+ addrs
.start
- self
.addrs
.start
418 retval
.append(plain_data
.replace(
419 self
, addrs
=addrs
, file_off
=file_off
))
423 # stuff marked "not available" is not in the powerpc64le headers on my system
426 | ppc_flags
.MAP_PRIVATE
427 | ppc_flags
.MAP_FIXED
428 | ppc_flags
.MAP_ANONYMOUS
429 | ppc_flags
.MAP_DENYWRITE
430 | ppc_flags
.MAP_EXECUTABLE
431 # | ppc_flags.MAP_UNINITIALIZED # not available -- ignored for now
432 | ppc_flags
.MAP_GROWSDOWN
433 | ppc_flags
.MAP_LOCKED
434 | ppc_flags
.MAP_NORESERVE
435 | ppc_flags
.MAP_POPULATE
436 | ppc_flags
.MAP_NONBLOCK
437 | ppc_flags
.MAP_STACK
438 | ppc_flags
.MAP_HUGETLB
439 # | ppc_flags.MAP_32BIT # not available -- ignored for now
440 # | ppc_flags.MAP_ABOVE4G # not available -- ignored for now
441 # | ppc_flags.MAP_HUGE_2MB # not available -- ignored for now
442 # | ppc_flags.MAP_HUGE_1GB # not available -- ignored for now
445 _MAP_GROWS
= ppc_flags
.MAP_GROWSDOWN
446 # _MAP_GROWS |= ppc_flags.MAP_GROWSUP # not available -- ignored for now
def len_(r):
    """ len(), but with fix for len(range(2**64)) raising OverflowError """
    try:
        return len(r)
    except OverflowError:
        # len() of a huge range overflows C ssize_t -- compute it manually
        assert isinstance(r, range)
        return 1 + (r.stop - r.start - 1) // r.step
457 class MemMMap(MemCommon
):
458 def __init__(self
, row_bytes
=8, initial_mem
=None, misaligned_ok
=False,
459 block_addrs
=DEFAULT_BLOCK_ADDRS
, emulating_mmap
=False,
460 mmap_emu_data_block
=None):
461 # we can't allocate the entire 2 ** 47 byte address space, so split
462 # it into smaller blocks
464 addr
: mmap
.mmap(-1, BLOCK_SIZE
) for addr
in sorted(block_addrs
)}
465 assert all(addr
% BLOCK_SIZE
== 0 for addr
in self
.mem_blocks
), \
466 "misaligned block address not supported"
467 self
.__page
_flags
= {}
468 self
.modified_pages
= set()
469 self
.mmap_emu_data_block
= mmap_emu_data_block
470 self
.__mmap
_emu
_alloc
_blocks
= set() # type: set[MMapEmuBlock] | None
472 for addr
, block
in self
.mem_blocks
.items():
473 block_addr
= ctypes
.addressof(ctypes
.c_ubyte
.from_buffer(block
))
474 log("0x%X -> 0x%X len=0x%X" % (addr
, block_addr
, BLOCK_SIZE
))
476 # build the list of unbacked blocks -- those address ranges that have
477 # no backing memory so mmap can't allocate there. These are maintained
478 # separately from __mmap_emu_alloc_blocks so munmap/mremap can't
481 range(a
, a
+ len(b
)) for a
, b
in self
.mem_blocks
.items()]
482 self
.__mmap
_emu
_unbacked
_blocks
= tuple(self
.__gaps
_in
(addr_ranges
))
485 if mmap_emu_data_block
is not None:
486 if not isinstance(mmap_emu_data_block
, MMapEmuBlock
):
488 "mmap_emu_data_block must be a MMapEmuBlock")
489 if mmap_emu_data_block
.file is not None:
491 "mmap_emu_data_block must be an anonymous mapping")
492 if not self
.__mmap
_emu
_map
_fixed
(block
=mmap_emu_data_block
,
493 replace
=False, dry_run
=False):
494 raise ValueError("invalid mmap_emu_data_block")
496 self
.__mmap
_emu
_alloc
_blocks
= None
497 if mmap_emu_data_block
is not None:
498 raise ValueError("can't set mmap_emu_data_block "
499 "without emulating_mmap=True")
500 # mark blocks as readable/writable
501 for addr
, block
in self
.mem_blocks
.items():
502 start_page
= self
.addr_to_mmap_page_idx(addr
)
503 end_page
= start_page
+ len(block
) // MMAP_PAGE_SIZE
504 for page_idx
in range(start_page
, end_page
):
505 self
.__page
_flags
[page_idx
] = MMapPageFlags
.RWX
507 super().__init
__(row_bytes
, initial_mem
, misaligned_ok
)
510 def __gaps_in(sorted_ranges
, start
=0, stop
=2 ** 64):
511 # type: (list[range] | tuple[range], int, int) -> list[range]
514 for r
in sorted_ranges
:
515 gap
= range(start
, r
.start
)
519 gap
= range(start
, stop
)
525 def emulating_mmap(self
):
526 return self
.__mmap
_emu
_alloc
_blocks
is not None
528 def __mmap_emu_map_fixed(self
, block
, replace
, dry_run
):
529 # type: (MMapEmuBlock, bool, bool) -> bool
530 """insert the block at the fixed address passed in, replacing the
531 parts of any other blocks that overlap if `replace` is `True`.
533 If `dry_run`, then don't make any changes, just check if it would
536 This function requires the caller to check `block`'s permissions and to
537 perform the underlying `mmap` first.
539 if block
.underlying_block_key
not in self
.mem_blocks
:
540 return False # unbacked block
541 # intersecting_blocks must be separate list so we don't iterate while
542 # we modify self.__mmap_emu_alloc_blocks
543 intersecting_blocks
= [
544 b
for b
in self
.__mmap
_emu
_alloc
_blocks
if block
.intersects(b
)]
545 for b
in intersecting_blocks
:
548 if self
.mmap_emu_data_block
== b
:
549 # FIXME: what does linux do here?
550 raise NotImplementedError(
551 "mmap overlapping the data block isn't implemented")
553 self
.__mmap
_emu
_alloc
_blocks
.remove(b
)
554 for replacement
in b
.difference(block
):
555 self
.__mmap
_emu
_alloc
_blocks
.add(replacement
)
557 self
.__mmap
_emu
_alloc
_blocks
.add(block
)
558 for page_idx
in block
.page_indexes
:
559 self
.__page
_flags
[page_idx
] = block
.flags
562 def __mmap_emu_resize_map_fixed(self
, block
, new_size
):
563 # type: (MMapEmuBlock, int) -> MMapEmuBlock | None
564 assert block
in self
.__mmap
_emu
_alloc
_blocks
, \
565 "can't resize unmapped block"
566 if new_size
== len(block
.addrs
):
568 addrs
= range(block
.addrs
.start
, block
.addrs
.start
+ new_size
)
569 new_block
= plain_data
.replace(block
, addrs
=addrs
)
570 self
.__mmap
_emu
_alloc
_blocks
.remove(block
)
572 if not self
.__mmap
_emu
_map
_fixed
(
573 new_block
, replace
=False, dry_run
=True):
576 self
.__mmap
_emu
_alloc
_blocks
.add(block
)
577 if not block
.is_private_mem
:
578 # FIXME: implement resizing underlying mapping
579 raise NotImplementedError
581 # clear newly mapped bytes
582 clear_addrs
= range(block
.addrs
.stop
, new_block
.addrs
.stop
)
584 clear_block
= MMapEmuBlock(clear_addrs
)
585 mem_block
= self
.mem_blocks
[clear_block
.underlying_block_key
]
586 assert mem_block
is not None
587 clear_size
= len(clear_addrs
)
588 arr
= (ctypes
.c_ubyte
* clear_size
).from_buffer(
589 mem_block
, clear_block
.underlying_block_offsets
.start
)
590 ctypes
.memset(arr
, 0, clear_size
)
591 if self
.mmap_emu_data_block
== block
:
592 self
.mmap_emu_data_block
= new_block
593 self
.__mmap
_emu
_alloc
_blocks
.remove(block
)
594 self
.__mmap
_emu
_alloc
_blocks
.add(new_block
)
596 if new_size
< len(block
.addrs
):
597 # shrinking -- unmap pages at end
598 r
= range(new_block
.page_indexes
.stop
, block
.page_indexes
.stop
)
600 self
.__page
_flags
.pop(page_idx
)
601 self
.modified_pages
.remove(page_idx
)
603 # expanding -- map pages at end, they're cleared already
604 r
= range(block
.page_indexes
.stop
, new_block
.page_indexes
.stop
)
606 self
.__page
_flags
[page_idx
] = block
.flags
607 self
.modified_pages
.remove(page_idx
) # cleared page
610 def __mmap_emu_find_free_addr(self
, block
):
611 # type: (MMapEmuBlock) -> MMapEmuBlock | None
612 """find a spot where `block` will fit, returning the new block"""
613 blocks
= [*self
.__mmap
_emu
_alloc
_blocks
,
614 *self
.__mmap
_emu
_unbacked
_blocks
]
615 blocks
.sort(key
=lambda b
: b
.addrs
.start
)
616 biggest_gap
= range(0)
617 for gap
in self
.__gaps
_in
([b
.addrs
for b
in blocks
]):
618 if len(biggest_gap
) < len(gap
):
620 extra_size
= len(biggest_gap
) - len(block
.addrs
)
622 return None # no space anywhere
623 # try to allocate in the middle of the gap, so mmaps can grow later
624 offset
= extra_size
// 2
626 # align to page -- this depends on gap being aligned already.
628 # rounds down offset, so no need to check size again since it can't
629 # ever get closer to the end of the gap
630 offset
-= offset
% MMAP_PAGE_SIZE
631 start
= biggest_gap
.start
+ offset
632 addrs
= range(start
, start
+ len(block
))
633 return plain_data
.replace(block
, addrs
=addrs
)
    def __mmap_emu_try_grow_down(self, addr, needed_flag):
        # type: (int, MMapPageFlags) -> bool
        """ if addr is the page just before a GROW_DOWN block, try to grow it.
        returns True if successful. """
        # stub: automatic stack growth is not yet implemented, so the
        # permission check that called us always fails
        return False  # FIXME: implement
641 def brk_syscall(self
, addr
):
642 assert self
.emulating_mmap
, "brk syscall requires emulating_mmap=True"
643 assert self
.mmap_emu_data_block
is not None, \
644 "brk syscall requires a data block/segment"
646 # round addr up to the nearest page
647 addr_div_page_size
= -(-addr
// MMAP_PAGE_SIZE
) # ceil(addr / size)
648 addr
= addr_div_page_size
* MMAP_PAGE_SIZE
650 raise NotImplementedError # FIXME: finish
652 def mmap_syscall(self
, addr
, length
, prot
, flags
, fd
, offset
, is_mmap2
):
653 assert self
.emulating_mmap
, "mmap syscall requires emulating_mmap=True"
655 offset
*= 4096 # specifically *not* the page size
656 prot_read
= bool(prot
& ppc_flags
.PROT_READ
)
657 prot_write
= bool(prot
& ppc_flags
.PROT_WRITE
)
658 prot_exec
= bool(prot
& ppc_flags
.PROT_EXEC
)
659 prot_all
= (ppc_flags
.PROT_READ | ppc_flags
.PROT_WRITE
660 | ppc_flags
.PROT_EXEC
)
661 # checks based off the checks in linux
663 return -ppc_flags
.EINVAL
664 if offset
% MMAP_PAGE_SIZE
:
665 return -ppc_flags
.EINVAL
666 if flags
& ppc_flags
.MAP_HUGETLB
:
668 return -ppc_flags
.EINVAL
669 if length
<= 0 or offset
< 0:
670 return -ppc_flags
.EINVAL
671 if flags
& ppc_flags
.MAP_FIXED_NOREPLACE
:
672 flags |
= ppc_flags
.MAP_FIXED
673 if not (flags
& ppc_flags
.MAP_FIXED
):
674 addr
&= MMAP_PAGE_SIZE
- 1 # page-align address, rounding down
675 # page-align length, rounding up
676 length
= (length
+ MMAP_PAGE_SIZE
- 1) & ~
(MMAP_PAGE_SIZE
- 1)
677 if length
+ offset
>= 2 ** 64:
679 return -ppc_flags
.ENOMEM
680 block_flags
= MMapPageFlags
.NONE
682 block_flags |
= MMapPageFlags
.R
684 block_flags |
= MMapPageFlags
.W
686 block_flags |
= MMapPageFlags
.X
687 if flags
& ppc_flags
.MAP_GROWSDOWN
:
688 block_flags |
= MMapPageFlags
.GROWS_DOWN
692 file = os
.readlink("/proc/self/fd/%i" % fd
)
694 return -ppc_flags
.EBADF
696 block
= MMapEmuBlock(
697 range(addr
, addr
+ length
), block_flags
, file, offset
)
698 except (ValueError, MemException
):
699 return -ppc_flags
.EINVAL
700 if not (flags
& ppc_flags
.MAP_FIXED
):
701 block
= self
.__mmap
_emu
_find
_free
_addr
(block
)
703 return -ppc_flags
.ENOMEM
704 if flags
& ppc_flags
.MAP_LOCKED
:
705 return -ppc_flags
.EPERM
706 map_ty
= flags
& ppc_flags
.MAP_TYPE
709 if map_ty
== ppc_flags
.MAP_SHARED
:
710 flags
&= LEGACY_MAP_MASK
712 if fallthrough
or map_ty
== ppc_flags
.MAP_SHARED_VALIDATE
:
713 if flags
& ~LEGACY_MAP_MASK
:
714 return -ppc_flags
.EOPNOTSUPP
715 raise NotImplementedError("MAP_SHARED on file")
717 if fallthrough
or map_ty
== ppc_flags
.MAP_PRIVATE
:
718 if flags
& _MAP_GROWS
:
719 return -ppc_flags
.EINVAL
721 return -ppc_flags
.EINVAL
722 elif map_ty
== ppc_flags
.MAP_SHARED
:
723 if flags
& _MAP_GROWS
:
724 return -ppc_flags
.EINVAL
725 raise NotImplementedError("MAP_SHARED on memory")
726 elif map_ty
!= ppc_flags
.MAP_PRIVATE
:
727 return -ppc_flags
.EINVAL
728 replace
= not (flags
& ppc_flags
.MAP_FIXED_NOREPLACE
)
729 if not self
.__mmap
_emu
_map
_fixed
(block
, replace
, dry_run
=True):
730 # failed, was that because there's an existing memory block or
731 # that was an invalid address?
732 if self
.__mmap
_emu
_map
_fixed
(block
, replace
=True, dry_run
=True):
733 return -ppc_flags
.EEXIST
# existing memory block
735 return -ppc_flags
.EINVAL
# invalid address
736 mblock
= self
.mem_blocks
[block
.underlying_block_key
]
737 offsets
= block
.underlying_block_offsets
738 buf
= (ctypes
.c_ubyte
* len(offsets
)).from_buffer(mblock
, offsets
[0])
739 buf_addr
= ctypes
.addressof(buf
)
740 libc
= ctypes
.CDLL(None)
741 syscall
= libc
.syscall
742 restype
= syscall
.restype
743 argtypes
= syscall
.argtypes
744 syscall
.restype
= ctypes
.c_long
745 syscall
.argtypes
= (ctypes
.c_long
,) * 6
746 call_no
= ctypes
.c_long(ppc_flags
.host_defines
['SYS_mmap'])
747 host_prot
= ppc_flags
.host_defines
['PROT_READ']
748 if block
.flags
& MMapPageFlags
.W
:
749 host_prot |
= ppc_flags
.host_defines
['PROT_WRITE']
750 host_flags
= ppc_flags
.host_defines
['MAP_FIXED']
751 host_flags |
= ppc_flags
.host_defines
['MAP_PRIVATE']
752 length
= len(offsets
)
753 extra_zeros_length
= 0
754 extra_zeros_start
= 0
756 host_flags |
= ppc_flags
.host_defines
['MAP_ANONYMOUS']
758 file_sz
= os
.fstat(fd
).st_size
759 # host-page-align file_sz, rounding up
760 file_sz
= (file_sz
+ mmap
.PAGESIZE
- 1) & ~
(mmap
.PAGESIZE
- 1)
761 extra_zeros_length
= max(0, length
- (file_sz
- offset
))
762 extra_zeros_start
= buf_addr
+ (file_sz
- offset
)
763 length
-= extra_zeros_length
765 call_no
, ctypes
.c_long(buf_addr
), ctypes
.c_long(length
),
766 ctypes
.c_long(host_prot
), ctypes
.c_long(host_flags
),
767 ctypes
.c_long(fd
), ctypes
.c_long(offset
)))
769 return -ctypes
.get_errno()
770 self
.__mmap
_emu
_map
_fixed
(block
, replace
=True, dry_run
=False)
771 if extra_zeros_length
!= 0:
772 host_flags
= ppc_flags
.host_defines
['MAP_ANONYMOUS']
773 host_flags |
= ppc_flags
.host_defines
['MAP_FIXED']
774 host_flags |
= ppc_flags
.host_defines
['MAP_PRIVATE']
775 if -1 == int(syscall(
776 call_no
, ctypes
.c_long(extra_zeros_start
),
777 ctypes
.c_long(extra_zeros_length
),
778 ctypes
.c_long(host_prot
), ctypes
.c_long(host_flags
),
779 ctypes
.c_long(-1), ctypes
.c_long(0))):
780 return -ctypes
.get_errno()
782 # memory could be non-zero, mark as modified
783 for page_idx
in block
.page_indexes
:
784 self
.modified_pages
.add(page_idx
)
785 return block
.addrs
.start
788 def mmap_page_idx_to_addr(page_idx
):
789 assert 0 <= page_idx
< _PAGE_COUNT
790 if page_idx
>= _NEG_PG_IDX_START
:
791 page_idx
-= _PAGE_COUNT
792 return (page_idx
* MMAP_PAGE_SIZE
) % 2 ** 64
795 def addr_to_mmap_page_idx(addr
):
796 page_idx
, offset
= divmod(addr
, MMAP_PAGE_SIZE
)
797 page_idx
%= _PAGE_COUNT
798 expected
= MemMMap
.mmap_page_idx_to_addr(page_idx
) + offset
800 exc
= MemException("not sign extended",
801 ("address not sign extended: 0x%X "
802 "expected 0x%X") % (addr
, expected
))
    def __reduce_ex__(self, protocol):
        # the backing mmap.mmap blocks hold OS resources that cannot be
        # serialized, so refuse pickling/deep-copying with a clear error
        raise PicklingError("MemMMap can't be deep-copied or pickled")
810 def __access_addr_range_err(self
, start_addr
, size
, needed_flag
):
811 assert needed_flag
!= MMapPageFlags
.W
, \
812 "can't write to address 0x%X size 0x%X" % (start_addr
, size
)
    def __access_addr_range(self, start_addr, size, needed_flag):
        # resolve the byte range [start_addr, start_addr+size) to
        # (backing mmap block, offset-within-block), checking that every
        # touched page grants `needed_flag` permission
        assert size > 0, "invalid size"
        page_idx = self.addr_to_mmap_page_idx(start_addr)
        last_addr = start_addr + size - 1
        last_page_idx = self.addr_to_mmap_page_idx(last_addr)
        block_addr = start_addr % BLOCK_SIZE
        block_k = start_addr - block_addr
        last_block_addr = last_addr % BLOCK_SIZE
        last_block_k = last_addr - last_block_addr
        if block_k != last_block_k:
            # access spans two underlying blocks -- can't hand back a
            # single contiguous buffer
            return self.__access_addr_range_err(start_addr, size, needed_flag)
        for i in range(page_idx, last_page_idx + 1):
            flags = self.__page_flags.get(i, 0)
            if flags & needed_flag == 0:
                # permission missing -- last chance: GROWS_DOWN stack growth
                if not self.__mmap_emu_try_grow_down(start_addr, needed_flag):
                    return self.__access_addr_range_err(
                        start_addr, size, needed_flag)
        if needed_flag is MMapPageFlags.W:
            # NOTE(review): only the first page of the range is marked
            # modified here -- confirm multi-page writes are intended
            self.modified_pages.add(page_idx)
        return self.mem_blocks[block_k], block_addr
    def get_ctypes(self, start_addr, size, is_write):
        """ returns a ctypes ubyte array referring to the memory at
        `start_addr` with size `size`
        """
        # writes require W permission, reads only R
        flag = MMapPageFlags.W if is_write else MMapPageFlags.R
        block, block_addr = self.__access_addr_range(start_addr, size, flag)
        assert block is not None, \
            f"can't read from address 0x{start_addr:X} size 0x{size:X}"
        # zero-copy view into the backing mmap block
        return (ctypes.c_ubyte * size).from_buffer(block, block_addr)
846 def _read_word(self
, word_idx
, reason
):
847 block
, block_addr
= self
.__access
_addr
_range
(
848 word_idx
* self
.bytes_per_word
, self
.bytes_per_word
,
849 reason
.needed_mmap_page_flag
)
851 return reason
.read_default
852 bytes_
= block
[block_addr
:block_addr
+ self
.bytes_per_word
]
853 return int.from_bytes(bytes_
, 'little')
855 def _write_word(self
, word_idx
, value
):
856 block
, block_addr
= self
.__access
_addr
_range
(
857 word_idx
* self
.bytes_per_word
, self
.bytes_per_word
,
859 bytes_
= value
.to_bytes(self
.bytes_per_word
, 'little')
860 block
[block_addr
:block_addr
+ self
.bytes_per_word
] = bytes_
863 zeros
= bytes(self
.bytes_per_word
)
864 for page_idx
in self
.modified_pages
:
865 start
= self
.mmap_page_idx_to_addr(page_idx
)
866 block
, block_addr
= self
.__access
_addr
_range
(
867 start
, MMAP_PAGE_SIZE
, MMapPageFlags
.R
)
868 end
= start
+ MMAP_PAGE_SIZE
869 for word_idx
in range(start
// self
.bytes_per_word
,
870 end
// self
.bytes_per_word
):
871 next_block_addr
= block_addr
+ self
.bytes_per_word
872 bytes_
= block
[block_addr
:next_block_addr
]
873 block_addr
= next_block_addr