public:
MultiLevelPageTable(const std::string &__name, uint64_t _pid,
- System *_sys, Addr pageSize) :
- EmulationPageTable(__name, _pid, pageSize), system(_sys)
+ System *_sys, Addr _pageSize) :
+ EmulationPageTable(__name, _pid, _pageSize), system(_sys)
{}
~MultiLevelPageTable() {}
if (shared)
return;
- _basePtr = prepTopTable<EntryTypes...>(system, pageSize);
+ _basePtr = prepTopTable<EntryTypes...>(system, _pageSize);
}
Addr basePtr() { return _basePtr; }
Final entry;
- for (int64_t offset = 0; offset < size; offset += pageSize) {
- walk<EntryTypes...>(system, pageSize, _basePtr,
+ for (int64_t offset = 0; offset < size; offset += _pageSize) {
+ walk<EntryTypes...>(system, _pageSize, _basePtr,
vaddr + offset, true, &entry);
entry.reset(paddr + offset, true, flags & Uncacheable,
Final old_entry, new_entry;
- for (int64_t offset = 0; offset < size; offset += pageSize) {
+ for (int64_t offset = 0; offset < size; offset += _pageSize) {
// Unmap the original mapping.
- walk<EntryTypes...>(system, pageSize, _basePtr, vaddr + offset,
+ walk<EntryTypes...>(system, _pageSize, _basePtr, vaddr + offset,
false, &old_entry);
old_entry.present(false);
old_entry.write(system->physProxy);
// Map the new one.
- walk<EntryTypes...>(system, pageSize, _basePtr, new_vaddr + offset,
- true, &new_entry);
+ walk<EntryTypes...>(system, _pageSize, _basePtr,
+ new_vaddr + offset, true, &new_entry);
new_entry.reset(old_entry.paddr(), true, old_entry.uncacheable(),
old_entry.readonly());
new_entry.write(system->physProxy);
Final entry;
- for (int64_t offset = 0; offset < size; offset += pageSize) {
- walk<EntryTypes...>(system, pageSize, _basePtr,
+ for (int64_t offset = 0; offset < size; offset += _pageSize) {
+ walk<EntryTypes...>(system, _pageSize, _basePtr,
vaddr + offset, false, &entry);
fatal_if(!entry.present(),
"PageTable::unmap: Address %#x not mapped.", vaddr);
pTable.emplace(vaddr, Entry(paddr, flags));
}
- size -= pageSize;
- vaddr += pageSize;
- paddr += pageSize;
+ size -= _pageSize;
+ vaddr += _pageSize;
+ paddr += _pageSize;
}
}
pTable.emplace(new_vaddr, old_it->second);
pTable.erase(old_it);
- size -= pageSize;
- vaddr += pageSize;
- new_vaddr += pageSize;
+ size -= _pageSize;
+ vaddr += _pageSize;
+ new_vaddr += _pageSize;
}
}
auto it = pTable.find(vaddr);
assert(it != pTable.end());
pTable.erase(it);
- size -= pageSize;
- vaddr += pageSize;
+ size -= _pageSize;
+ vaddr += _pageSize;
}
}
// starting address must be page aligned
assert(pageOffset(vaddr) == 0);
- for (int64_t offset = 0; offset < size; offset += pageSize)
+ for (int64_t offset = 0; offset < size; offset += _pageSize)
if (pTable.find(vaddr + offset) != pTable.end())
return false;
if (!translate(req->getVaddr(), paddr))
return Fault(new GenericPageTableFault(req->getVaddr()));
req->setPaddr(paddr);
- if ((paddr & (pageSize - 1)) + req->getSize() > pageSize) {
+ if ((paddr & (_pageSize - 1)) + req->getSize() > _pageSize) {
panic("Request spans page boundaries!\n");
return NoFault;
}
typedef PTable::iterator PTableItr;
PTable pTable;
- const Addr pageSize;
+ const Addr _pageSize;
const Addr offsetMask;
const uint64_t _pid;
EmulationPageTable(
const std::string &__name, uint64_t _pid, Addr _pageSize) :
- pageSize(_pageSize), offsetMask(mask(floorLog2(_pageSize))),
+ _pageSize(_pageSize), offsetMask(mask(floorLog2(_pageSize))),
_pid(_pid), _name(__name), shared(false)
{
- assert(isPowerOf2(pageSize));
+ assert(isPowerOf2(_pageSize));
}
uint64_t pid() const { return _pid; };
Addr pageAlign(Addr a) { return (a & ~offsetMask); }
Addr pageOffset(Addr a) { return (a & offsetMask); }
+ // Page size can technically vary based on the virtual address, but we'll
+ // ignore that for now.
+ Addr pageSize() { return _pageSize; }
/**
* Maps a virtual memory region to a physical memory region.
Addr max_stack_size, Addr next_thread_stack_base,
Addr mmap_end)
: _ownerProcess(owner),
- _pageBytes(owner->system->getPageBytes()), _brkPoint(brk_point),
+ _pageBytes(owner->pTable->pageSize()), _brkPoint(brk_point),
_stackBase(stack_base), _stackSize(max_stack_size),
_maxStackSize(max_stack_size), _stackMin(stack_base - max_stack_size),
_nextThreadStackBase(next_thread_stack_base),
void
Process::allocateMem(Addr vaddr, int64_t size, bool clobber)
{
- int npages = divCeil(size, (int64_t)system->getPageBytes());
+ int npages = divCeil(size, (int64_t)pTable->pageSize());
Addr paddr = system->allocPhysPages(npages);
pTable->map(vaddr, paddr, size,
clobber ? EmulationPageTable::Clobber :
new_paddr = system->allocPhysPages(1);
// Read from old physical page.
- uint8_t *buf_p = new uint8_t[system->getPageBytes()];
- old_tc->getVirtProxy().readBlob(vaddr, buf_p, system->getPageBytes());
+ uint8_t buf_p[pTable->pageSize()];
+ old_tc->getVirtProxy().readBlob(vaddr, buf_p, sizeof(buf_p));
// Create new mapping in process address space by clobbering existing
// mapping (if any existed) and then write to the new physical page.
bool clobber = true;
- pTable->map(vaddr, new_paddr, system->getPageBytes(), clobber);
- new_tc->getVirtProxy().writeBlob(vaddr, buf_p, system->getPageBytes());
- delete[] buf_p;
+ pTable->map(vaddr, new_paddr, sizeof(buf_p), clobber);
+ new_tc->getVirtProxy().writeBlob(vaddr, buf_p, sizeof(buf_p));
}
bool
// Determine how large the interpreter's footprint will be in the process
// address space.
- Addr interp_mapsize = roundUp(interp->mapSize(), system->getPageBytes());
+ Addr interp_mapsize = roundUp(interp->mapSize(), pTable->pageSize());
// We are allocating the memory area; set the bias to the lowest address
// in the allocated memory region.
SyscallReturn
getpagesizeFunc(SyscallDesc *desc, ThreadContext *tc)
{
- return (int)tc->getSystemPtr()->getPageBytes();
+ return (int)tc->getProcessPtr()->pTable->pageSize();
}
// access them again.
auto p = tc->getProcessPtr();
- if (start & (tc->getSystemPtr()->getPageBytes() - 1) || !length) {
+ if (p->pTable->pageOffset(start) || !length)
return -EINVAL;
- }
- length = roundUp(length, tc->getSystemPtr()->getPageBytes());
+ length = roundUp(length, p->pTable->pageSize());
p->memState->unmapRegion(start, length);
GuestABI::VarArgs<uint64_t> varargs)
{
auto p = tc->getProcessPtr();
- Addr page_bytes = tc->getSystemPtr()->getPageBytes();
+ Addr page_bytes = p->pTable->pageSize();
uint64_t provided_address = 0;
bool use_provided_address = flags & OS::TGT_MREMAP_FIXED;
int tgt_flags, int tgt_fd, typename OS::off_t offset)
{
auto p = tc->getProcessPtr();
- Addr page_bytes = tc->getSystemPtr()->getPageBytes();
+ Addr page_bytes = p->pTable->pageSize();
if (start & (page_bytes - 1) ||
offset & (page_bytes - 1) ||
Addr start, typename OS::size_t length, int prot,
int tgt_flags, int tgt_fd, typename OS::off_t offset)
{
+ auto page_size = tc->getProcessPtr()->pTable->pageSize();
return mmapFunc<OS>(desc, tc, start, length, prot, tgt_flags,
- tgt_fd, offset * tc->getSystemPtr()->getPageBytes());
+ tgt_fd, offset * page_size);
}
/// Target getrlimit() handler.