+ # something else could be mmap-ped in the middle of the heap,
+ # be careful...
+
+ # Tail of a brk()-style routine (presumably — the enclosing def is above
+ # this hunk): find the mmap-emu block that currently ends the heap, then
+ # either refuse, unmap, create, or resize it so the heap ends at `addr`.
+ # On any failure the old break (self.heap_range.stop) is returned
+ # unchanged, matching Linux brk semantics of "return current break".
+ #
+ # NOTE(review): `len_` below looks like a typo for the builtin `len` —
+ # verify it is actually defined/imported elsewhere in this file.
+ block = None
+ if len_(self.heap_range) != 0:
+ for b in self.__mmap_emu_alloc_blocks:
+ # we check for the end matching so we get the last heap block
+ # if the heap was split.
+ # the heap must not be a file mapping.
+ # the heap must not be shared, and must be RW
+ if b.addrs.stop == self.heap_range.stop and b.file is None \
+ and b.flags == MMapPageFlags.RW:
+ block = b
+ break
+
+ if block is not None and addr < block.addrs.start:
+ # heap was split by something, we can't shrink beyond
+ # the start of the last heap block
+ return self.heap_range.stop # don't change heap
+
+ if block is not None and addr == block.addrs.start:
+ # unmap heap block
+ # (shrinking the heap to exactly the block's start means the whole
+ # final block goes away; a negative return from unmap means failure,
+ # in which case block is cleared so the "can't resize" path runs)
+ if self.__mmap_emu_unmap(block) < 0:
+ block = None # can't unmap heap block
+ elif addr > self.heap_range.stop and block is None:
+ # map new heap block
+ # (growing the heap but no existing trailing heap block to extend:
+ # validate with a dry run first, then zero-fill, then commit — the
+ # second map_fixed call should not fail after a successful dry run)
+ try:
+ addrs = range(self.heap_range.stop, addr)
+ block = MMapEmuBlock(addrs, flags=MMapPageFlags.RW)
+ if not self.__mmap_emu_map_fixed(block,
+ replace=False, dry_run=True):
+ block = None
+ elif 0 != self.__mmap_emu_zero_block(block):
+ block = None
+ else:
+ self.__mmap_emu_map_fixed(block,
+ replace=False, dry_run=False)
+ except (MemException, ValueError):
+ # caller could pass in invalid size, catch that
+ block = None
+ elif block is not None: # resize block
+ # grow or shrink the existing trailing heap block in place; the new
+ # size is measured from the block's own start, not the heap's start
+ try:
+ block = self.__mmap_emu_resize_map_fixed(
+ block, addr - block.addrs.start)
+ except (MemException, ValueError):
+ # caller could pass in invalid size, catch that
+ block = None
+
+ if block is None and addr != self.heap_range.start:
+ # can't resize heap block
+ # (the addr == heap_range.start exception: shrinking the heap to
+ # empty needs no block, so a None block is not a failure there)
+ return self.heap_range.stop # don't change heap
+
+ # success! assign new heap_range
+ self.heap_range = range(self.heap_range.start, addr)
+ return self.heap_range.stop # return new brk address