1 # SPDX-License-Identifier: LGPLv3+
2 # Copyright (C) 2020, 2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
3 # Funded by NLnet http://nlnet.nl
4 """core of the python-based POWER9 simulator
6 this is part of a cycle-accurate POWER9 simulator. its primary purpose is
7 not speed, it is for both learning and educational purposes, as well as
8 a method of verifying the HDL.
12 * https://bugs.libre-soc.org/show_bug.cgi?id=424
15 from collections
import defaultdict
16 from openpower
.decoder
.selectable_int
import SelectableInt
17 from openpower
.util
import log
, LogType
20 from cached_property
import cached_property
23 from pickle
import PicklingError
25 from nmutil
import plain_data
26 from pathlib
import Path
27 from openpower
.syscalls
import ppc_flags
29 from elftools
.elf
.elffile
import ELFFile
30 from elftools
.elf
.constants
import P_FLAGS
def swap_order(x, nbytes):
    """Reverse the byte order of integer `x` over `nbytes` bytes.

    e.g. swap_order(0x01020304, 4) == 0x04030201

    The visible original computed the swapped value but never returned it;
    the `return` is restored here so callers get the swapped integer.
    """
    x = x.to_bytes(nbytes, byteorder='little')
    x = int.from_bytes(x, byteorder='big', signed=False)
    return x
39 class MemException(Exception):
43 def process_mem(initial_mem
, row_bytes
=8):
45 # different types of memory data structures recognised (for convenience)
46 if isinstance(initial_mem
, list):
47 initial_mem
= (0, initial_mem
)
48 if isinstance(initial_mem
, tuple):
49 startaddr
, mem
= initial_mem
51 for i
, val
in enumerate(mem
):
52 initial_mem
[startaddr
+ row_bytes
*i
] = (val
, row_bytes
)
54 for addr
, val
in initial_mem
.items():
55 if isinstance(val
, tuple):
58 width
= row_bytes
# assume same width
59 # val = swap_order(val, width)
60 res
[addr
] = (val
, width
)
66 class _ReadReason(enum
.Enum
):
68 SubWordWrite
= enum
.auto()
73 def read_default(self
):
74 if self
in (self
.SubWordWrite
, self
.Dump
):
def needed_mmap_page_flag(self):
    """page-permission flag that this read reason requires"""
    wants_exec = self is self.Execute
    return MMapPageFlags.X if wants_exec else MMapPageFlags.R
86 return self
is not self
.Dump
90 def __init__(self
, row_bytes
, initial_mem
, misaligned_ok
):
91 self
.bytes_per_word
= row_bytes
92 self
.word_log2
= math
.ceil(math
.log2(row_bytes
))
93 self
.last_ld_addr
= None
94 self
.last_st_addr
= None
95 self
.misaligned_ok
= misaligned_ok
96 log("Sim-Mem", initial_mem
, self
.bytes_per_word
, self
.word_log2
)
100 self
.initialize(row_bytes
, initial_mem
)
def initialize(self, row_bytes, initial_mem):
    """Populate memory from `initial_mem`.

    `initial_mem` may be an ELFFile (delegated to load_elf) or any of the
    dict/list/tuple forms that process_mem understands; processed entries
    are stored with st() without byte-swapping.
    """
    if isinstance(initial_mem, ELFFile):
        return load_elf(self, initial_mem)
    for addr, (val, width) in process_mem(initial_mem, row_bytes).items():
        # val = swap_order(val, width)
        self.st(addr, val, width, swap=False)
def _read_word(self, word_idx, reason):
    """Return the word stored at `word_idx` -- subclasses must override."""
    raise NotImplementedError
def _write_word(self, word_idx, value):
    """Store `value` at `word_idx` -- subclasses must override."""
    raise NotImplementedError
116 raise NotImplementedError
119 def make_sim_state_dict(self
):
120 """ returns a dict equivalent to:
122 for k in list(self.word_idxs()):
123 data = self.ld(k*8, 8, False)
127 for k
in list(self
.word_idxs()):
128 data
= self
.ld(k
*8, 8, False, reason
=_ReadReason
.Dump
)
132 def _get_shifter_mask(self
, wid
, remainder
, do_log
=True):
133 shifter
= ((self
.bytes_per_word
- wid
) - remainder
) * \
135 # XXX https://bugs.libre-soc.org/show_bug.cgi?id=377
137 shifter
= remainder
* 8
138 mask
= (1 << (wid
* 8)) - 1
140 log("width,rem,shift,mask",
141 wid
, remainder
, hex(shifter
), hex(mask
))
144 # TODO: Implement ld/st of lesser width
145 def ld(self
, address
, width
=8, swap
=True, check_in_mem
=False,
146 instr_fetch
=False, reason
=None):
147 do_log
= reason
is not None and reason
.ld_logs
149 log("ld from addr 0x%x width %d" % (address
, width
),
150 swap
, check_in_mem
, instr_fetch
)
151 self
.last_ld_addr
= address
# record last load
153 remainder
= address
& (self
.bytes_per_word
- 1)
154 address
= address
>> self
.word_log2
155 if remainder
& (width
- 1) != 0:
156 exc
= MemException("unaligned",
157 "Unaligned access: remainder %x width %d" %
162 reason
= _ReadReason
.Execute
if instr_fetch
else _ReadReason
.Read
163 val
= self
._read
_word
(address
, reason
)
170 log("ld mem @ 0x%x rem %d : 0x%x" % (ldaddr
, remainder
, val
))
172 if width
!= self
.bytes_per_word
:
173 shifter
, mask
= self
._get
_shifter
_mask
(width
, remainder
, do_log
)
175 log("masking", hex(val
), hex(mask
<< shifter
), shifter
)
176 val
= val
& (mask
<< shifter
)
179 val
= swap_order(val
, width
)
181 log("Read 0x%x from addr 0x%x" % (val
, ldaddr
))
184 def _st(self
, addr
, v
, width
=8, swap
=True):
186 remainder
= addr
& (self
.bytes_per_word
- 1)
187 addr
= addr
>> self
.word_log2
188 log("Writing 0x%x to ST 0x%x memaddr 0x%x/%x swap %s" %
189 (v
, staddr
, addr
, remainder
, str(swap
)))
190 if not self
.misaligned_ok
and remainder
& (width
- 1) != 0:
191 exc
= MemException("unaligned",
192 "Unaligned access: remainder %x width %d" %
197 v
= swap_order(v
, width
)
198 if width
!= self
.bytes_per_word
:
199 val
= self
._read
_word
(addr
, _ReadReason
.SubWordWrite
)
200 shifter
, mask
= self
._get
_shifter
_mask
(width
, remainder
)
201 val
&= ~
(mask
<< shifter
)
203 self
._write
_word
(addr
, val
)
206 self
._write
_word
(addr
, v
)
207 log("mem @ 0x%x: 0x%x" % (staddr
, val
))
209 def st(self
, st_addr
, v
, width
=8, swap
=True):
210 self
.last_st_addr
= st_addr
# record last store
211 # misaligned not allowed: pass straight to Mem._st
212 if not self
.misaligned_ok
:
213 return self
._st
(st_addr
, v
, width
, swap
)
214 remainder
= st_addr
& (self
.bytes_per_word
- 1)
216 v
= swap_order(v
, width
)
217 # not misaligned: pass through to Mem._st but we've swapped already
218 misaligned
= remainder
& (width
- 1)
219 if misaligned
== 0 or (remainder
+ width
<= self
.bytes_per_word
):
220 return self
._st
(st_addr
, v
, width
, swap
=False)
221 shifter
, mask
= self
._get
_shifter
_mask
(width
, remainder
)
222 # split into two halves. lower first
223 maxmask
= (1 << (self
.bytes_per_word
)*8) - 1
224 val1
= ((v
<< shifter
) & maxmask
) >> shifter
225 self
._st
(st_addr
, val1
, width
=width
-misaligned
, swap
=False)
227 val2
= v
>> ((width
-misaligned
)*8)
228 addr2
= (st_addr
>> self
.word_log2
) << self
.word_log2
229 addr2
+= self
.bytes_per_word
230 log("v, val2", hex(v
), hex(val2
), "ad", addr2
)
231 self
._st
(addr2
, val2
, width
=width
-misaligned
, swap
=False)
233 def __call__(self
, addr
, sz
):
234 val
= self
.ld(addr
.value
, sz
, swap
=False)
235 log("memread", addr
, sz
, hex(val
), kind
=LogType
.InstrInOuts
)
236 return SelectableInt(val
, sz
*8)
238 def memassign(self
, addr
, sz
, val
):
239 log("memassign", addr
, sz
, val
, kind
=LogType
.InstrInOuts
)
240 self
.st(addr
.value
, val
.value
, sz
, swap
=False)
242 def dump(self
, printout
=True, asciidump
=False):
243 keys
= list(self
.word_idxs())
247 v
= self
._read
_word
(k
, _ReadReason
.Dump
)
254 c
= chr(v
>> (i
*8) & 0xff)
255 if not c
.isprintable():
258 print("%016x: %016x" % ((k
*8) & 0xffffffffffffffff, v
), s
)
261 def log_fancy(self
, *, kind
=LogType
.Default
, name
="Memory",
262 log2_line_size
=4, log2_column_chunk_size
=3, log
=log
):
263 line_size
= 1 << log2_line_size
264 subline_mask
= line_size
- 1
265 column_chunk_size
= 1 << log2_column_chunk_size
268 return bytearray(line_size
)
269 mem_lines
= defaultdict(make_line
)
270 subword_range
= range(1 << self
.word_log2
)
271 words
= self
.make_sim_state_dict()
272 for addr
, word
in words
.items():
273 for i
in subword_range
:
274 v
= (word
>> i
* 8) & 0xFF
275 mem_lines
[addr
>> log2_line_size
][addr
& subline_mask
] = v
279 last_line_index
= None
280 for line_index
in sorted(mem_lines
.keys()):
281 line_addr
= line_index
<< log2_line_size
282 if last_line_index
is not None \
283 and last_line_index
+ 1 != line_index
:
285 last_line_index
= line_index
286 line_bytes
= mem_lines
[line_index
]
287 line_str
= f
"0x{line_addr:08X}:"
288 for col_chunk
in range(0, line_size
,
291 for i
in range(column_chunk_size
):
292 line_str
+= f
" {line_bytes[col_chunk + i]:02X}"
294 for i
in range(line_size
):
295 if 0x20 <= line_bytes
[i
] <= 0x7E:
296 line_str
+= chr(line_bytes
[i
])
300 lines
.append(line_str
)
301 lines
= "\n".join(lines
)
302 log(f
"\n{name}:\n{lines}\n", kind
=kind
)
304 def read_cstr(self
, addr
):
305 """ returns a `bytearray` for the c string starting at addr, the
306 returned `bytearray` doesn't contain the nul terminator.
308 modifying the returned `bytearray` doesn't modify bytes in `self`
313 b
= self
.ld(addr
, width
=1)
322 class Mem(MemCommon
):
323 def __init__(self
, row_bytes
=8, initial_mem
=None, misaligned_ok
=False):
325 super().__init
__(row_bytes
, initial_mem
, misaligned_ok
)
327 def _read_word(self
, word_idx
, reason
):
328 return self
.mem
.get(word_idx
, reason
.read_default
)
330 def _write_word(self
, word_idx
, value
):
331 self
.mem
[word_idx
] = value
334 return self
.mem
.keys()
337 class MMapPageFlags(enum
.IntFlag
):
338 """ flags on each mmap-ped page
340 Note: these are *not* PowerISA MMU pages, but instead internal to Mem so
341 it can detect invalid accesses and assert rather than segfaulting.
346 "readable when instr_fetch=True"
349 "shared -- aka. not copy-on-write"
352 """this memory block will grow when the address one page before the
353 beginning is accessed"""
360 _ALLOWED_MMAP_NORMAL_FLAGS
= MMapPageFlags
.RWX | MMapPageFlags
.S
361 _ALLOWED_MMAP_STACK_FLAGS
= MMapPageFlags
.RWX | MMapPageFlags
.GROWS_DOWN
364 MMAP_PAGE_SIZE
= 1 << 16 # size of chunk that we track
365 _PAGE_COUNT
= (1 << 48) // MMAP_PAGE_SIZE
# 48-bit address space
366 _NEG_PG_IDX_START
= _PAGE_COUNT
// 2 # start of negative half of address space
367 _USER_SPACE_SIZE
= _NEG_PG_IDX_START
* MMAP_PAGE_SIZE
369 # code assumes BLOCK_SIZE is a power of two
370 # BLOCK_SIZE = 1 << 32
371 BLOCK_SIZE
= 1 << 28 # reduced so it works on armv7a
373 assert BLOCK_SIZE
% MMAP_PAGE_SIZE
== 0
374 assert MMAP_PAGE_SIZE
% mmap
.PAGESIZE
== 0, "host's page size is too big"
375 assert 2 ** (mmap
.PAGESIZE
.bit_length() - 1) == mmap
.PAGESIZE
, \
376 "host's page size isn't a power of 2"
378 def _make_default_block_addrs():
379 needed_page_addrs
= (
380 0, # low end of user space
381 0x10000000, # default ELF load address
382 _USER_SPACE_SIZE
- MMAP_PAGE_SIZE
, # high end of user space
385 for page_addr
in needed_page_addrs
:
386 offset
= page_addr
% BLOCK_SIZE
387 block_addrs
.add(page_addr
- offset
)
388 return tuple(sorted(block_addrs
))
390 DEFAULT_BLOCK_ADDRS
= _make_default_block_addrs()
393 @plain_data.plain_data(frozen
=True, unsafe_hash
=True, repr=False)
395 __slots__
= ("addrs", "flags", "file", "file_off")
397 def __init__(self
, addrs
, flags
=MMapPageFlags
.NONE
, file=None, file_off
=0):
398 # type: (range, MMapPageFlags, Path | str | None, int) -> None
400 raise ValueError("bad address range, step must be 1")
402 raise ValueError("bad address range, must be non-empty")
404 raise ValueError("bad address range, must be non-negative")
405 if addrs
.stop
> 2 ** 64:
406 raise ValueError("bad address range -- goes beyond 2 ** 64")
407 if addrs
.start
% MMAP_PAGE_SIZE
:
408 raise ValueError("bad address range -- start isn't page-aligned")
409 if addrs
.stop
% MMAP_PAGE_SIZE
:
410 raise ValueError("bad address range -- stop isn't page-aligned")
411 if addrs
[0] // BLOCK_SIZE
!= addrs
[-1] // BLOCK_SIZE
:
413 "bad address range -- crosses underlying block boundaries")
416 raise ValueError("bad file_off, must be non-negative")
417 if file_off
% MMAP_PAGE_SIZE
:
418 raise ValueError("bad file_off, must be page-aligned")
419 if flags
& ~_ALLOWED_MMAP_NORMAL_FLAGS
:
420 raise ValueError("invalid flags for mmap with file")
423 if flags
& ~_ALLOWED_MMAP_NORMAL_FLAGS
:
424 if flags
& ~_ALLOWED_MMAP_STACK_FLAGS
:
425 raise ValueError("invalid flags for anonymous mmap")
426 file_off
= 0 # no file -- clear offset
430 self
.file_off
= file_off
431 self
.page_indexes
# check that addresses can be mapped to pages
433 def intersects(self
, other
):
434 # type: (MMapEmuBlock | range) -> bool
435 if isinstance(other
, MMapEmuBlock
):
439 return other
.start
< self
.addrs
.stop
and self
.addrs
.start
< other
.stop
442 def is_private_anon(self
):
443 return self
.file is None and not self
.flags
& MMapPageFlags
.S
446 def underlying_block_key(self
):
447 offset
= self
.addrs
.start
% BLOCK_SIZE
448 return self
.addrs
.start
- offset
451 def underlying_block_offsets(self
):
452 start
= self
.addrs
.start
% BLOCK_SIZE
453 return range(start
, start
+ len(self
.addrs
))
456 def page_indexes(self
):
457 first_page
= MemMMap
.addr_to_mmap_page_idx(self
.addrs
[0])
458 # can't just use stop, since that may be out-of-range
459 last_page
= MemMMap
.addr_to_mmap_page_idx(self
.addrs
[-1])
460 if first_page
< _NEG_PG_IDX_START
and last_page
>= _NEG_PG_IDX_START
:
462 "bad address range, crosses transition from positive "
463 "canonical addresses to negative canonical addresses")
464 return range(first_page
, last_page
+ 1)
466 def difference(self
, remove
):
467 # type: (MMapEmuBlock) -> list[MMapEmuBlock]
468 """returns the blocks left after removing `remove` from `self`"""
469 if not self
.intersects(remove
):
472 addrs
= range(self
.addrs
.start
, remove
.addrs
.start
)
474 retval
.append(plain_data
.replace(self
, addrs
=addrs
))
475 addrs
= range(remove
.addrs
.stop
, self
.addrs
.stop
)
477 file_off
= self
.file_off
+ addrs
.start
- self
.addrs
.start
478 retval
.append(plain_data
.replace(
479 self
, addrs
=addrs
, file_off
=file_off
))
483 parts
= ["MMapEmuBlock(range(0x%X, 0x%X)"
484 % (self
.addrs
.start
, self
.addrs
.stop
)]
485 if self
.flags
!= MMapPageFlags
.NONE
:
486 parts
.append(", flags=%r" % (self
.flags
, ))
487 if self
.file is not None:
488 parts
.append(", file=%r" % (self
.file, ))
489 if self
.file_off
!= 0:
490 parts
.append(", file_off=0x%X" % (self
.file_off
, ))
492 return "".join(parts
)
495 # stuff marked "not available" is not in the powerpc64le headers on my system
498 | ppc_flags
.MAP_PRIVATE
499 | ppc_flags
.MAP_FIXED
500 | ppc_flags
.MAP_ANONYMOUS
501 | ppc_flags
.MAP_DENYWRITE
502 | ppc_flags
.MAP_EXECUTABLE
503 # | ppc_flags.MAP_UNINITIALIZED # not available -- ignored for now
504 | ppc_flags
.MAP_GROWSDOWN
505 | ppc_flags
.MAP_LOCKED
506 | ppc_flags
.MAP_NORESERVE
507 | ppc_flags
.MAP_POPULATE
508 | ppc_flags
.MAP_NONBLOCK
509 | ppc_flags
.MAP_STACK
510 | ppc_flags
.MAP_HUGETLB
511 # | ppc_flags.MAP_32BIT # not available -- ignored for now
512 # | ppc_flags.MAP_ABOVE4G # not available -- ignored for now
513 # | ppc_flags.MAP_HUGE_2MB # not available -- ignored for now
514 # | ppc_flags.MAP_HUGE_1GB # not available -- ignored for now
517 _MAP_GROWS
= ppc_flags
.MAP_GROWSDOWN
518 # _MAP_GROWS |= ppc_flags.MAP_GROWSUP # not available -- ignored for now
521 """ len(), but with fix for len(range(2**64)) raising OverflowError """
524 except OverflowError:
525 assert isinstance(r
, range)
526 return 1 + (r
.stop
- r
.start
- 1) // r
.step
529 class MemMMap(MemCommon
):
530 def __init__(self
, row_bytes
=8, initial_mem
=None, misaligned_ok
=False,
531 block_addrs
=DEFAULT_BLOCK_ADDRS
, emulating_mmap
=False):
532 # we can't allocate the entire 2 ** 47 byte address space, so split
533 # it into smaller blocks
535 addr
: mmap
.mmap(-1, BLOCK_SIZE
) for addr
in sorted(block_addrs
)}
536 assert all(addr
% BLOCK_SIZE
== 0 for addr
in self
.mem_blocks
), \
537 "misaligned block address not supported"
538 self
.__page
_flags
= {}
539 self
.modified_pages
= set()
540 self
.__heap
_range
= None
541 self
.__mmap
_emu
_alloc
_blocks
= set() # type: set[MMapEmuBlock] | None
543 for addr
, block
in self
.mem_blocks
.items():
544 block_addr
= ctypes
.addressof(ctypes
.c_ubyte
.from_buffer(block
))
545 log("0x%X -> 0x%X len=0x%X" % (addr
, block_addr
, BLOCK_SIZE
))
547 # build the list of unbacked blocks -- those address ranges that have
548 # no backing memory so mmap can't allocate there. These are maintained
549 # separately from __mmap_emu_alloc_blocks so munmap/mremap can't
552 range(a
, a
+ len(b
)) for a
, b
in self
.mem_blocks
.items()]
553 self
.__mmap
_emu
_unbacked
_blocks
= tuple(self
.__gaps
_in
(addr_ranges
))
555 if not emulating_mmap
:
556 self
.__mmap
_emu
_alloc
_blocks
= None
557 # mark blocks as readable/writable
558 for addr
, block
in self
.mem_blocks
.items():
559 start_page
= self
.addr_to_mmap_page_idx(addr
)
560 end_page
= start_page
+ len(block
) // MMAP_PAGE_SIZE
561 for page_idx
in range(start_page
, end_page
):
562 self
.__page
_flags
[page_idx
] = MMapPageFlags
.RWX
564 super().__init
__(row_bytes
, initial_mem
, misaligned_ok
)
def heap_range(self):
    # type: () -> range | None
    """current brk/heap address range, or None when no heap is set"""
    return self.__heap_range
572 def heap_range(self
, value
):
573 # type: (range | None) -> None
575 self
.__heap
_range
= value
577 if not self
.emulating_mmap
:
579 "can't set heap_range without emulating_mmap=True")
580 if not isinstance(value
, range):
581 raise TypeError("heap_range must be a range or None")
582 if value
.step
!= 1 or value
.start
> value
.stop
:
583 raise ValueError("heap_range is not a suitable range")
584 if value
.start
% MMAP_PAGE_SIZE
!= 0:
585 raise ValueError("heap_range.start must be aligned")
586 if value
.stop
% MMAP_PAGE_SIZE
!= 0:
587 raise ValueError("heap_range.stop must be aligned")
588 self
.__heap
_range
= value
591 def __gaps_in(sorted_ranges
, start
=0, stop
=2 ** 64):
592 # type: (list[range] | tuple[range], int, int) -> list[range]
595 for r
in sorted_ranges
:
596 gap
= range(start
, r
.start
)
600 gap
= range(start
, stop
)
def emulating_mmap(self):
    # mmap emulation is enabled iff the alloc-blocks set wasn't discarded
    # (it is set to None in __init__ when emulating_mmap=False)
    return self.__mmap_emu_alloc_blocks is not None
609 def __mmap_emu_map_fixed(self
, block
, replace
, dry_run
):
610 # type: (MMapEmuBlock, bool, bool) -> bool
611 """insert the block at the fixed address passed in, replacing the
612 parts of any other blocks that overlap if `replace` is `True`.
614 If `dry_run`, then don't make any changes, just check if it would
617 This function requires the caller to check `block`'s permissions and to
618 perform the underlying `mmap` first.
620 if block
.underlying_block_key
not in self
.mem_blocks
:
621 return False # unbacked block
622 # intersecting_blocks must be separate list so we don't iterate while
623 # we modify self.__mmap_emu_alloc_blocks
624 intersecting_blocks
= [
625 b
for b
in self
.__mmap
_emu
_alloc
_blocks
if block
.intersects(b
)]
626 for b
in intersecting_blocks
:
630 self
.__mmap
_emu
_alloc
_blocks
.remove(b
)
631 for replacement
in b
.difference(block
):
632 self
.__mmap
_emu
_alloc
_blocks
.add(replacement
)
634 self
.__mmap
_emu
_alloc
_blocks
.add(block
)
635 for page_idx
in block
.page_indexes
:
636 self
.__page
_flags
[page_idx
] = block
.flags
639 def __mmap_emu_unmap(self
, block
):
640 # type: (MMapEmuBlock) -> int
641 """unmap `block`, return 0 if no error, otherwise return -errno"""
642 assert block
in self
.__mmap
_emu
_alloc
_blocks
, \
643 "can't unmap already unmapped block"
645 # replace mapping with zeros
646 retval
= self
.__mmap
_emu
_zero
_block
(block
)
650 self
.__mmap
_emu
_alloc
_blocks
.remove(block
)
651 # mark pages as empty
652 for page_idx
in block
.page_indexes
:
653 self
.__page
_flags
.pop(page_idx
)
654 self
.modified_pages
.discard(page_idx
)
657 def __mmap_emu_zero_block(self
, block
):
658 # type: (MMapEmuBlock) -> int
659 """ mmap zeros over block, return 0 if no error,
660 otherwise return -errno
662 mblock
= self
.mem_blocks
[block
.underlying_block_key
]
663 offsets
= block
.underlying_block_offsets
664 buf
= (ctypes
.c_ubyte
* len(offsets
)).from_buffer(mblock
, offsets
[0])
665 buf_addr
= ctypes
.addressof(buf
)
666 libc
= ctypes
.CDLL(None)
667 syscall
= libc
.syscall
668 syscall
.restype
= ctypes
.c_long
669 syscall
.argtypes
= (ctypes
.c_long
,) * 6
670 call_no
= ctypes
.c_long(ppc_flags
.host_defines
['SYS_mmap'])
671 host_prot
= ppc_flags
.host_defines
['PROT_READ']
672 host_prot |
= ppc_flags
.host_defines
['PROT_WRITE']
673 host_flags
= ppc_flags
.host_defines
['MAP_ANONYMOUS']
674 host_flags |
= ppc_flags
.host_defines
['MAP_FIXED']
675 host_flags |
= ppc_flags
.host_defines
['MAP_PRIVATE']
676 # map a block of zeros over it
677 if -1 == int(syscall(
678 call_no
, ctypes
.c_long(buf_addr
),
679 ctypes
.c_long(len(offsets
)),
680 ctypes
.c_long(host_prot
), ctypes
.c_long(host_flags
),
681 ctypes
.c_long(-1), ctypes
.c_long(0))):
682 return -ctypes
.get_errno()
685 def __mmap_emu_resize_map_fixed(self
, block
, new_size
):
686 # type: (MMapEmuBlock, int) -> MMapEmuBlock | None
687 assert block
in self
.__mmap
_emu
_alloc
_blocks
, \
688 "can't resize unmapped block"
689 if new_size
== len(block
.addrs
):
691 addrs
= range(block
.addrs
.start
, block
.addrs
.start
+ new_size
)
692 new_block
= plain_data
.replace(block
, addrs
=addrs
)
693 self
.__mmap
_emu
_alloc
_blocks
.remove(block
)
695 if not self
.__mmap
_emu
_map
_fixed
(
696 new_block
, replace
=False, dry_run
=True):
699 self
.__mmap
_emu
_alloc
_blocks
.add(block
)
700 if not block
.is_private_anon
:
701 # FIXME: implement resizing underlying mapping
702 raise NotImplementedError
704 # clear newly mapped bytes
705 clear_addrs
= range(block
.addrs
.stop
, new_block
.addrs
.stop
)
706 if len_(clear_addrs
):
707 clear_block
= MMapEmuBlock(clear_addrs
)
708 if self
.__mmap
_emu
_zero
_block
(clear_block
) < 0:
711 if new_size
< len(block
.addrs
):
712 # shrinking -- unmap pages at end
713 r
= range(new_block
.page_indexes
.stop
, block
.page_indexes
.stop
)
714 clear_block
= MMapEmuBlock(r
)
715 if self
.__mmap
_emu
_zero
_block
(clear_block
) < 0:
718 self
.__page
_flags
.pop(page_idx
)
719 self
.modified_pages
.discard(page_idx
)
721 # expanding -- map pages at end, they're cleared already
722 r
= range(block
.page_indexes
.stop
, new_block
.page_indexes
.stop
)
724 self
.__page
_flags
[page_idx
] = block
.flags
725 self
.modified_pages
.discard(page_idx
) # cleared page
726 self
.__mmap
_emu
_alloc
_blocks
.remove(block
)
727 self
.__mmap
_emu
_alloc
_blocks
.add(new_block
)
730 def __mmap_emu_find_free_addr(self
, block
):
731 # type: (MMapEmuBlock) -> MMapEmuBlock | None
732 """find a spot where `block` will fit, returning the new block"""
733 blocks
= [*self
.__mmap
_emu
_alloc
_blocks
,
734 *self
.__mmap
_emu
_unbacked
_blocks
]
735 blocks
.sort(key
=lambda b
: b
.addrs
.start
)
736 biggest_gap
= range(0)
737 for gap
in self
.__gaps
_in
([b
.addrs
for b
in blocks
]):
738 if len(biggest_gap
) < len(gap
):
740 extra_size
= len(biggest_gap
) - len(block
.addrs
)
742 return None # no space anywhere
743 # try to allocate in the middle of the gap, so mmaps can grow later
744 offset
= extra_size
// 2
746 # align to page -- this depends on gap being aligned already.
748 # rounds down offset, so no need to check size again since it can't
749 # ever get closer to the end of the gap
750 offset
-= offset
% MMAP_PAGE_SIZE
751 start
= biggest_gap
.start
+ offset
752 addrs
= range(start
, start
+ len(block
))
753 return plain_data
.replace(block
, addrs
=addrs
)
def __mmap_emu_try_grow_down(self, addr, needed_flag):
    # type: (int, MMapPageFlags) -> bool
    """ if addr is the page just before a GROW_DOWN block, try to grow it.
    returns True if successful. """
    # stub: growing stack mappings downwards is not implemented yet, so
    # every permission-miss that reaches here is treated as a failure
    return False  # FIXME: implement
761 def brk_syscall(self
, addr
):
762 assert self
.emulating_mmap
, "brk syscall requires emulating_mmap=True"
763 assert self
.heap_range
is not None, "brk syscall requires a heap"
765 if addr
< self
.heap_range
.start
:
766 # can't shrink heap to negative size
767 return self
.heap_range
.stop
# don't change heap
769 # round addr up to the nearest page
770 addr_div_page_size
= -(-addr
// MMAP_PAGE_SIZE
) # ceil(addr / size)
771 addr
= addr_div_page_size
* MMAP_PAGE_SIZE
773 # something else could be mmap-ped in the middle of the heap,
777 if len_(self
.heap_range
) != 0:
778 for b
in self
.__mmap
_emu
_alloc
_blocks
:
779 # we check for the end matching so we get the last heap block
780 # if the heap was split.
781 # the heap must not be a file mapping.
782 # the heap must not be shared, and must be RW
783 if b
.addrs
.stop
== self
.heap_range
.stop
and b
.file is None \
784 and b
.flags
== MMapPageFlags
.RW
:
788 if block
is not None and addr
< block
.addrs
.start
:
789 # heap was split by something, we can't shrink beyond
790 # the start of the last heap block
791 return self
.heap_range
.stop
# don't change heap
793 if block
is not None and addr
== block
.addrs
.start
:
795 if self
.__mmap
_emu
_unmap
(block
) < 0:
796 block
= None # can't unmap heap block
797 elif addr
> self
.heap_range
.stop
and block
is None:
800 addrs
= range(self
.heap_range
.stop
, addr
)
801 block
= MMapEmuBlock(addrs
, flags
=MMapPageFlags
.RW
)
802 if not self
.__mmap
_emu
_map
_fixed
(block
,
803 replace
=False, dry_run
=True):
805 elif 0 != self
.__mmap
_emu
_zero
_block
(block
):
808 self
.__mmap
_emu
_map
_fixed
(block
,
809 replace
=False, dry_run
=False)
810 except (MemException
, ValueError):
811 # caller could pass in invalid size, catch that
813 elif block
is not None: # resize block
815 block
= self
.__mmap
_emu
_resize
_map
_fixed
(
816 block
, addr
- block
.addrs
.start
)
817 except (MemException
, ValueError):
818 # caller could pass in invalid size, catch that
821 if block
is None and addr
!= self
.heap_range
.start
:
822 # can't resize heap block
823 return self
.heap_range
.stop
# don't change heap
825 # success! assign new heap_range
826 self
.heap_range
= range(self
.heap_range
.start
, addr
)
827 return self
.heap_range
.stop
# return new brk address
829 def mmap_syscall(self
, addr
, length
, prot
, flags
, fd
, offset
, is_mmap2
):
830 assert self
.emulating_mmap
, "mmap syscall requires emulating_mmap=True"
832 offset
*= 4096 # specifically *not* the page size
833 prot_read
= bool(prot
& ppc_flags
.PROT_READ
)
834 prot_write
= bool(prot
& ppc_flags
.PROT_WRITE
)
835 prot_exec
= bool(prot
& ppc_flags
.PROT_EXEC
)
836 prot_all
= (ppc_flags
.PROT_READ | ppc_flags
.PROT_WRITE
837 | ppc_flags
.PROT_EXEC
)
838 # checks based off the checks in linux
840 return -ppc_flags
.EINVAL
841 if offset
% MMAP_PAGE_SIZE
:
842 return -ppc_flags
.EINVAL
843 if flags
& ppc_flags
.MAP_HUGETLB
:
845 return -ppc_flags
.EINVAL
846 if length
<= 0 or offset
< 0:
847 return -ppc_flags
.EINVAL
848 if flags
& ppc_flags
.MAP_FIXED_NOREPLACE
:
849 flags |
= ppc_flags
.MAP_FIXED
850 if not (flags
& ppc_flags
.MAP_FIXED
):
851 addr
&= MMAP_PAGE_SIZE
- 1 # page-align address, rounding down
852 # page-align length, rounding up
853 length
= (length
+ MMAP_PAGE_SIZE
- 1) & ~
(MMAP_PAGE_SIZE
- 1)
854 if length
+ offset
>= 2 ** 64:
856 return -ppc_flags
.ENOMEM
857 block_flags
= MMapPageFlags
.NONE
859 block_flags |
= MMapPageFlags
.R
861 block_flags |
= MMapPageFlags
.W
863 block_flags |
= MMapPageFlags
.X
864 if flags
& ppc_flags
.MAP_GROWSDOWN
:
865 block_flags |
= MMapPageFlags
.GROWS_DOWN
869 file = os
.readlink("/proc/self/fd/%i" % fd
)
871 return -ppc_flags
.EBADF
873 block
= MMapEmuBlock(
874 range(addr
, addr
+ length
), block_flags
, file, offset
)
875 except (ValueError, MemException
):
876 return -ppc_flags
.EINVAL
877 if not (flags
& ppc_flags
.MAP_FIXED
):
878 block
= self
.__mmap
_emu
_find
_free
_addr
(block
)
880 return -ppc_flags
.ENOMEM
881 if flags
& ppc_flags
.MAP_LOCKED
:
882 return -ppc_flags
.EPERM
883 map_ty
= flags
& ppc_flags
.MAP_TYPE
886 if map_ty
== ppc_flags
.MAP_SHARED
:
887 flags
&= LEGACY_MAP_MASK
889 if fallthrough
or map_ty
== ppc_flags
.MAP_SHARED_VALIDATE
:
890 if flags
& ~LEGACY_MAP_MASK
:
891 return -ppc_flags
.EOPNOTSUPP
892 raise NotImplementedError("MAP_SHARED on file")
894 if fallthrough
or map_ty
== ppc_flags
.MAP_PRIVATE
:
895 if flags
& _MAP_GROWS
:
896 return -ppc_flags
.EINVAL
898 return -ppc_flags
.EINVAL
899 elif map_ty
== ppc_flags
.MAP_SHARED
:
900 if flags
& _MAP_GROWS
:
901 return -ppc_flags
.EINVAL
902 raise NotImplementedError("MAP_SHARED on memory")
903 elif map_ty
!= ppc_flags
.MAP_PRIVATE
:
904 return -ppc_flags
.EINVAL
905 replace
= not (flags
& ppc_flags
.MAP_FIXED_NOREPLACE
)
906 if not self
.__mmap
_emu
_map
_fixed
(block
, replace
, dry_run
=True):
907 # failed, was that because there's an existing memory block or
908 # that was an invalid address?
909 if self
.__mmap
_emu
_map
_fixed
(block
, replace
=True, dry_run
=True):
910 return -ppc_flags
.EEXIST
# existing memory block
912 return -ppc_flags
.EINVAL
# invalid address
913 mblock
= self
.mem_blocks
[block
.underlying_block_key
]
914 offsets
= block
.underlying_block_offsets
915 buf
= (ctypes
.c_ubyte
* len(offsets
)).from_buffer(mblock
, offsets
[0])
916 buf_addr
= ctypes
.addressof(buf
)
917 libc
= ctypes
.CDLL(None)
918 syscall
= libc
.syscall
919 syscall
.restype
= ctypes
.c_long
920 syscall
.argtypes
= (ctypes
.c_long
,) * 6
921 call_no
= ctypes
.c_long(ppc_flags
.host_defines
['SYS_mmap'])
922 host_prot
= ppc_flags
.host_defines
['PROT_READ']
923 if block
.flags
& MMapPageFlags
.W
:
924 host_prot |
= ppc_flags
.host_defines
['PROT_WRITE']
925 host_flags
= ppc_flags
.host_defines
['MAP_FIXED']
926 host_flags |
= ppc_flags
.host_defines
['MAP_PRIVATE']
927 length
= len(offsets
)
928 extra_zeros_length
= 0
929 extra_zeros_start
= 0
931 host_flags |
= ppc_flags
.host_defines
['MAP_ANONYMOUS']
932 # don't remove check, since we'll eventually have shared memory
933 if host_flags
& ppc_flags
.host_defines
['MAP_PRIVATE']:
934 # always map private memory read/write,
935 # so we can clear it if needed
936 host_prot |
= ppc_flags
.host_defines
['PROT_WRITE']
938 file_sz
= os
.fstat(fd
).st_size
939 # host-page-align file_sz, rounding up
940 file_sz
= (file_sz
+ mmap
.PAGESIZE
- 1) & ~
(mmap
.PAGESIZE
- 1)
941 extra_zeros_length
= max(0, length
- (file_sz
- offset
))
942 extra_zeros_start
= buf_addr
+ (file_sz
- offset
)
943 length
-= extra_zeros_length
945 call_no
, ctypes
.c_long(buf_addr
), ctypes
.c_long(length
),
946 ctypes
.c_long(host_prot
), ctypes
.c_long(host_flags
),
947 ctypes
.c_long(fd
), ctypes
.c_long(offset
)))
949 return -ctypes
.get_errno()
950 self
.__mmap
_emu
_map
_fixed
(block
, replace
=True, dry_run
=False)
951 if extra_zeros_length
!= 0:
952 host_flags
= ppc_flags
.host_defines
['MAP_ANONYMOUS']
953 host_flags |
= ppc_flags
.host_defines
['MAP_FIXED']
954 host_flags |
= ppc_flags
.host_defines
['MAP_PRIVATE']
955 if -1 == int(syscall(
956 call_no
, ctypes
.c_long(extra_zeros_start
),
957 ctypes
.c_long(extra_zeros_length
),
958 ctypes
.c_long(host_prot
), ctypes
.c_long(host_flags
),
959 ctypes
.c_long(-1), ctypes
.c_long(0))):
960 return -ctypes
.get_errno()
962 # memory could be non-zero, mark as modified
963 for page_idx
in block
.page_indexes
:
964 self
.modified_pages
.add(page_idx
)
965 log("mmap block=%s" % (block
,), kind
=LogType
.InstrInOuts
)
966 return block
.addrs
.start
def mmap_page_idx_to_addr(page_idx):
    """convert an internal page index back into its canonical 64-bit address"""
    assert 0 <= page_idx < _PAGE_COUNT
    # indexes in the upper half stand for negative (sign-extended) addresses
    if page_idx >= _NEG_PG_IDX_START:
        signed_idx = page_idx - _PAGE_COUNT
    else:
        signed_idx = page_idx
    return (signed_idx * MMAP_PAGE_SIZE) % 2 ** 64
976 def addr_to_mmap_page_idx(addr
):
977 page_idx
, offset
= divmod(addr
, MMAP_PAGE_SIZE
)
978 page_idx
%= _PAGE_COUNT
979 expected
= MemMMap
.mmap_page_idx_to_addr(page_idx
) + offset
981 exc
= MemException("not sign extended",
982 ("address not sign extended: 0x%X "
983 "expected 0x%X") % (addr
, expected
))
def __reduce_ex__(self, protocol):
    # mmap-backed memory can't survive serialization -- refuse explicitly
    raise PicklingError("MemMMap can't be deep-copied or pickled")
991 def __access_addr_range_err(self
, start_addr
, size
, needed_flag
):
992 assert needed_flag
!= MMapPageFlags
.W
, \
993 "can't write to address 0x%X size 0x%X" % (start_addr
, size
)
994 if self
.emulating_mmap
:
995 exc
= MemException("access not allowed",
996 "memory access not allowed: addr=0x%X: %s"
997 % (start_addr
, needed_flag
))
def __access_addr_range(self, start_addr, size, needed_flag):
    """Check that [start_addr, start_addr + size) is mapped with
    `needed_flag` permission on every touched page; on success return
    (backing mmap block, byte offset within it), otherwise delegate to
    __access_addr_range_err.
    """
    assert size > 0, "invalid size"
    page_idx = self.addr_to_mmap_page_idx(start_addr)
    last_addr = start_addr + size - 1
    last_page_idx = self.addr_to_mmap_page_idx(last_addr)
    block_addr = start_addr % BLOCK_SIZE
    block_k = start_addr - block_addr
    last_block_addr = last_addr % BLOCK_SIZE
    last_block_k = last_addr - last_block_addr
    # accesses must not straddle two underlying mmap blocks, since a
    # single (block, offset) pair is returned
    if block_k != last_block_k:
        return self.__access_addr_range_err(start_addr, size, needed_flag)
    for i in range(page_idx, last_page_idx + 1):
        flags = self.__page_flags.get(i, 0)
        if flags & needed_flag == 0:
            # page lacks the permission -- maybe it's just before a
            # GROWS_DOWN block that can be extended
            if not self.__mmap_emu_try_grow_down(start_addr, needed_flag):
                return self.__access_addr_range_err(
                    start_addr, size, needed_flag)
    if needed_flag is MMapPageFlags.W:
        # NOTE(review): only the first page is marked modified even when
        # the range spans two pages -- confirm that is intended
        self.modified_pages.add(page_idx)
    return self.mem_blocks[block_k], block_addr
def get_ctypes(self, start_addr, size, is_write):
    """ returns a ctypes ubyte array referring to the memory at
    `start_addr` with size `size`
    """
    flag = MMapPageFlags.W if is_write else MMapPageFlags.R
    block, block_addr = self.__access_addr_range(start_addr, size, flag)
    assert block is not None, \
        f"can't read from address 0x{start_addr:X} size 0x{size:X}"
    return (ctypes.c_ubyte * size).from_buffer(block, block_addr)
1033 def _read_word(self
, word_idx
, reason
):
1034 block
, block_addr
= self
.__access
_addr
_range
(
1035 word_idx
* self
.bytes_per_word
, self
.bytes_per_word
,
1036 reason
.needed_mmap_page_flag
)
1038 return reason
.read_default
1039 bytes_
= block
[block_addr
:block_addr
+ self
.bytes_per_word
]
1040 return int.from_bytes(bytes_
, 'little')
1042 def _write_word(self
, word_idx
, value
):
1043 block
, block_addr
= self
.__access
_addr
_range
(
1044 word_idx
* self
.bytes_per_word
, self
.bytes_per_word
,
1046 bytes_
= value
.to_bytes(self
.bytes_per_word
, 'little')
1047 block
[block_addr
:block_addr
+ self
.bytes_per_word
] = bytes_
    def word_idxs(self):
        """Yield the indexes of all modified, non-zero memory words.

        Only pages recorded in `self.modified_pages` are scanned; each
        word in a modified page is compared against an all-zero pattern
        so untouched words are skipped.
        """
        zeros = bytes(self.bytes_per_word)  # comparison pattern, built once
        for page_idx in self.modified_pages:
            start = self.mmap_page_idx_to_addr(page_idx)
            block, block_addr = self.__access_addr_range(
                start, MMAP_PAGE_SIZE, MMapPageFlags.R)
            end = start + MMAP_PAGE_SIZE
            # walk the page one word at a time, advancing block_addr in step
            for word_idx in range(start // self.bytes_per_word,
                                  end // self.bytes_per_word):
                next_block_addr = block_addr + self.bytes_per_word
                bytes_ = block[block_addr:next_block_addr]
                block_addr = next_block_addr
                # NOTE(review): the final lines were lost in extraction;
                # reconstructed from the `zeros` pattern above — only
                # non-zero words are yielded. Confirm against upstream.
                if bytes_ != zeros:
                    yield word_idx
    def make_sim_state_dict(self):
        """ returns a dict equivalent to:

        for k in list(self.word_idxs()):
            data = self.ld(k*8, 8, False)
            retval[k*8] = data
        """
        # the fast path below assumes the standard 8-byte word size;
        # otherwise defer to the generic base-class implementation
        if self.bytes_per_word != 8:
            return super().make_sim_state_dict()
        # NOTE(review): initializer lost in extraction; reconstructed
        retval = {}
        # one pre-compiled struct covering a whole page of little-endian u64s
        page_struct = struct.Struct("<%dQ" % (MMAP_PAGE_SIZE // 8,))
        assert page_struct.size == MMAP_PAGE_SIZE, "got wrong format"
        for page_idx in self.modified_pages:
            start = self.mmap_page_idx_to_addr(page_idx)
            block, block_addr = self.__access_addr_range(
                start, MMAP_PAGE_SIZE, MMapPageFlags.R)
            # written this way to avoid unnecessary allocations
            words = page_struct.unpack_from(block, block_addr)
            for i, v in zip(range(start, start + MMAP_PAGE_SIZE, 8), words):
                # NOTE(review): loop body and return lost in extraction;
                # reconstructed to match the docstring: only non-zero
                # words are recorded, keyed by byte address
                if v != 0:
                    retval[i] = v
        return retval
@plain_data.plain_data()
class LoadedELF:
    """Result of `load_elf`: the parsed ELF plus the initial CPU state
    (program counter, general-purpose registers and FPSCR)."""
    __slots__ = "elf_file", "pc", "gprs", "fpscr"

    def __init__(self, elf_file, pc, gprs, fpscr):
        # NOTE(review): all but the first assignment were lost in
        # extraction; reconstructed one-to-one from the parameter list
        self.elf_file = elf_file
        self.pc = pc
        self.gprs = gprs
        self.fpscr = fpscr
def raise_if_syscall_err(result):
    """Raise `OSError` if `result` is an error return from a Linux syscall.

    Linux syscalls signal failure by returning the negated errno, which
    always lies in the range -4095..-1; any other value is a success.
    """
    if not (-4096 < result < 0):
        return
    err = -result
    raise OSError(err, os.strerror(err))
# TODO: change to much smaller size once GROWSDOWN is implemented
DEFAULT_INIT_STACK_SZ = 4 << 20  # 4 MiB initial user-stack allocation
def load_elf(mem, elf_file, args=(), env=(), stack_size=DEFAULT_INIT_STACK_SZ):
    """Load a static ELF executable into emulated memory.

    Maps every PT_LOAD segment of `elf_file` into `mem` via the emulated
    mmap syscall, allocates an initial stack at the top of user space, and
    returns a `LoadedELF` carrying the entry-point pc and initial GPRs.

    Parameters:
    * mem: MemMMap -- target emulated memory (required)
    * elf_file: ELFFile -- the executable; must be ET_EXEC (static)
    * args, env: must currently be empty (argv/envp allocation is TODO)
    * stack_size: initial stack size in bytes, rounded up to a page

    Raises TypeError on wrong argument types, NotImplementedError for
    dynamic binaries / unusual alignment, OSError on emulated-mmap failure.
    """
    if not isinstance(mem, MemMMap):
        raise TypeError("MemMMap required to load ELFs")
    if not isinstance(elf_file, ELFFile):
        # NOTE(review): message lost in extraction; reconstructed
        raise TypeError("elf_file must be an ELFFile")
    if elf_file.header['e_type'] != 'ET_EXEC':
        raise NotImplementedError("dynamic binaries aren't implemented")
    fd = elf_file.stream.fileno()
    # NOTE(review): initializer lost in extraction; reconstructed — it is
    # raised by max() below to just past the highest loaded segment
    heap_start = 0
    for segment in elf_file.iter_segments():
        if segment.header['p_type'] in ('PT_DYNAMIC', 'PT_INTERP'):
            raise NotImplementedError("dynamic binaries aren't implemented")
        elif segment.header['p_type'] == 'PT_LOAD':
            flags = segment.header['p_flags']
            offset = segment.header['p_offset']
            vaddr = segment.header['p_vaddr']
            filesz = segment.header['p_filesz']
            memsz = segment.header['p_memsz']
            align = segment.header['p_align']
            if align != 0x10000:
                raise NotImplementedError("non-default ELF segment alignment")
            if align < MMAP_PAGE_SIZE:
                raise NotImplementedError("align less than MMAP_PAGE_SIZE")
            # translate ELF p_flags into mmap protection bits
            prot = ppc_flags.PROT_NONE
            if flags & P_FLAGS.PF_R:
                prot |= ppc_flags.PROT_READ
            if flags & P_FLAGS.PF_W:
                prot |= ppc_flags.PROT_WRITE
            if flags & P_FLAGS.PF_X:
                prot |= ppc_flags.PROT_EXEC
            # align start to page
            adj = offset % MMAP_PAGE_SIZE
            # NOTE(review): the four adjustment lines were lost in
            # extraction; reconstructed so offset/vaddr stay congruent
            # modulo the page size while covering the same bytes
            offset -= adj
            vaddr -= adj
            filesz += adj
            memsz += adj
            # page-align, rounding up
            filesz_aligned = (
                filesz + MMAP_PAGE_SIZE - 1) & ~(MMAP_PAGE_SIZE - 1)
            # page-align, rounding up
            memsz_aligned = (
                memsz + MMAP_PAGE_SIZE - 1) & ~(MMAP_PAGE_SIZE - 1)
            # does the last file-backed page need its tail zero-filled?
            page_end_init_needed = filesz < memsz and filesz < filesz_aligned
            # are whole anonymous zero pages needed beyond the file data?
            zero_pages_needed = memsz > filesz_aligned
            adj_prot = prot  # adjust prot for initialization
            if page_end_init_needed:
                # we need to initialize trailing bytes to zeros,
                # so we need write access
                adj_prot |= ppc_flags.PROT_WRITE
            flags = ppc_flags.MAP_FIXED_NOREPLACE | ppc_flags.MAP_PRIVATE
            result = mem.mmap_syscall(
                vaddr, filesz, adj_prot, flags, fd, offset, is_mmap2=False)
            raise_if_syscall_err(result)
            if page_end_init_needed:
                page_end = mem.get_ctypes(
                    vaddr + filesz, filesz_aligned - filesz, True)
                ctypes.memset(page_end, 0, len(page_end))
            if zero_pages_needed:
                result = mem.mmap_syscall(
                    vaddr + filesz_aligned, memsz - filesz_aligned,
                    prot, flags, fd=-1, offset=0, is_mmap2=False)
                raise_if_syscall_err(result)
            # heap begins just past the highest loaded segment
            heap_start = max(heap_start, vaddr + memsz_aligned)
        else:
            # NOTE(review): `else:` line lost in extraction; reconstructed
            log("ignoring ELF segment of type " + segment.header['p_type'])
    # page-align stack_size, rounding up
    stack_size = (stack_size + MMAP_PAGE_SIZE - 1) & ~(MMAP_PAGE_SIZE - 1)
    stack_top = _USER_SPACE_SIZE
    stack_low = stack_top - stack_size
    prot = ppc_flags.PROT_READ | ppc_flags.PROT_WRITE
    flags = ppc_flags.MAP_FIXED_NOREPLACE | ppc_flags.MAP_PRIVATE
    result = mem.mmap_syscall(
        stack_low, stack_size, prot, flags, fd=-1, offset=0, is_mmap2=False)
    raise_if_syscall_err(result)
    # NOTE(review): the guards around these raises were lost in
    # extraction; reconstructed — they must be conditional, otherwise
    # load_elf could never succeed
    if len(args):
        raise NotImplementedError("allocate argv on the stack")
    if len(env):
        raise NotImplementedError("allocate envp on the stack")
    mem.heap_range = range(heap_start, heap_start)  # empty heap to start
    # FIXME: incorrect, should point to the aux vector allocated on the stack
    # NOTE(review): this assignment lost in extraction; reconstructed
    auxv_ptr = stack_top
    # make space for red zone, 512 bytes specified in
    # 64-bit ELF V2 ABI Specification v1.5 section 2.2.3.4
    # https://files.openpower.foundation/s/cfA2oFPXbbZwEBK
    stack_top -= 512
    # the ABI requires quadword (16-byte) stack alignment
    stack_top -= stack_top % 16
    # TODO: dynamically-linked binaries need to use the entry-point of ld.so
    pc = elf_file.header['e_entry']
    # NOTE(review): parts of the initial-GPR setup were lost in
    # extraction; reconstructed following the ELF V2 ABI entry conventions
    # (r1=stack pointer, r3=argc, r4=argv, r5=envp, r6=auxv, r7=term fn)
    gprs = {}
    gprs[1] = stack_top  # stack pointer
    gprs[3] = len(args)  # argc
    gprs[4] = 0  # argv -- args guaranteed empty by the check above
    gprs[5] = 0  # envp -- env guaranteed empty by the check above
    gprs[6] = auxv_ptr  # auxv
    gprs[7] = 0  # termination function pointer
    fpscr = 0  # NOTE(review): reconstructed default initial FPSCR
    return LoadedELF(elf_file, pc, gprs, fpscr)