/*
- * Copyright (c) 2010-2012 ARM Limited
+ * Copyright (c) 2010-2012,2017 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* Andreas Hansson
*/
+#include "mem/abstract_mem.hh"
+
#include <vector>
+#include "arch/locked_mem.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/LLSC.hh"
#include "debug/MemoryAccess.hh"
-#include "mem/abstract_mem.hh"
#include "mem/packet_access.hh"
#include "sim/system.hh"
AbstractMemory::AbstractMemory(const Params *p) :
MemObject(p), range(params()->range), pmemAddr(NULL),
confTableReported(p->conf_table_reported), inAddrMap(p->in_addr_map),
- _system(NULL)
+ // NOTE(review): kvm_map presumably marks this memory as eligible for
+ // mapping into a KVM guest's address space -- confirm against the
+ // AbstractMemory SimObject's Python parameter declaration.
+ kvmMap(p->kvm_map), _system(NULL)
{
}
void
AbstractMemory::regStats()
{
+ MemObject::regStats();
+
using namespace Stats;
assert(system());
if (i->addr == paddr) {
DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
i->contextId, paddr);
- // For ARM, a spinlock would typically include a Wait
- // For Event (WFE) to conserve energy. The ARMv8
- // architecture specifies that an event is
- // automatically generated when clearing the exclusive
- // monitor to wake up the processor in WFE.
- system()->getThreadContext(i->contextId)->getCpuPtr()->wakeup();
+ ContextID owner_cid = i->contextId;
+ ContextID requester_cid = pkt->req->contextId();
+ if (owner_cid != requester_cid) {
+ ThreadContext* ctx = system()->getThreadContext(owner_cid);
+ TheISA::globalClearExclusive(ctx);
+ }
i = lockedAddrList.erase(i);
} else {
i++;
void
AbstractMemory::access(PacketPtr pkt)
{
- assert(AddrRange(pkt->getAddr(),
- pkt->getAddr() + pkt->getSize() - 1).isSubset(range));
-
- if (pkt->memInhibitAsserted()) {
- DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
+ if (pkt->cacheResponding()) {
+ DPRINTF(MemoryAccess, "Cache responding to %#llx: not responding\n",
pkt->getAddr());
return;
}
+ if (pkt->cmd == MemCmd::CleanEvict || pkt->cmd == MemCmd::WritebackClean) {
+ DPRINTF(MemoryAccess, "CleanEvict on 0x%x: not responding\n",
+ pkt->getAddr());
+ return;
+ }
+
+ assert(AddrRange(pkt->getAddr(),
+ pkt->getAddr() + (pkt->getSize() - 1)).isSubset(range));
+
uint8_t *hostAddr = pmemAddr + pkt->getAddr() - range.start();
if (pkt->cmd == MemCmd::SwapReq) {
- std::vector<uint8_t> overwrite_val(pkt->getSize());
- uint64_t condition_val64;
- uint32_t condition_val32;
-
- if (!pmemAddr)
- panic("Swap only works if there is real memory (i.e. null=False)");
-
- bool overwrite_mem = true;
- // keep a copy of our possible write value, and copy what is at the
- // memory address into the packet
- std::memcpy(&overwrite_val[0], pkt->getConstPtr<uint8_t>(),
- pkt->getSize());
- std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
-
- if (pkt->req->isCondSwap()) {
- if (pkt->getSize() == sizeof(uint64_t)) {
- condition_val64 = pkt->req->getExtraData();
- overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
- sizeof(uint64_t));
- } else if (pkt->getSize() == sizeof(uint32_t)) {
- condition_val32 = (uint32_t)pkt->req->getExtraData();
- overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
- sizeof(uint32_t));
- } else
- panic("Invalid size for conditional read/write\n");
- }
+ if (pkt->isAtomicOp()) {
+ if (pmemAddr) {
+ memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
+ (*(pkt->getAtomicOp()))(hostAddr);
+ }
+ } else {
+ std::vector<uint8_t> overwrite_val(pkt->getSize());
+ uint64_t condition_val64;
+ uint32_t condition_val32;
+
+ if (!pmemAddr)
+ panic("Swap only works if there is real memory (i.e. null=False)");
+
+ bool overwrite_mem = true;
+ // keep a copy of our possible write value, and copy what is at the
+ // memory address into the packet
+ std::memcpy(&overwrite_val[0], pkt->getConstPtr<uint8_t>(),
+ pkt->getSize());
+ std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
+
+ if (pkt->req->isCondSwap()) {
+ if (pkt->getSize() == sizeof(uint64_t)) {
+ condition_val64 = pkt->req->getExtraData();
+ overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
+ sizeof(uint64_t));
+ } else if (pkt->getSize() == sizeof(uint32_t)) {
+ condition_val32 = (uint32_t)pkt->req->getExtraData();
+ overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
+ sizeof(uint32_t));
+ } else
+ panic("Invalid size for conditional read/write\n");
+ }
- if (overwrite_mem)
- std::memcpy(hostAddr, &overwrite_val[0], pkt->getSize());
+ if (overwrite_mem)
+ std::memcpy(hostAddr, &overwrite_val[0], pkt->getSize());
- assert(!pkt->req->isInstFetch());
- TRACE_PACKET("Read/Write");
- numOther[pkt->req->masterId()]++;
+ assert(!pkt->req->isInstFetch());
+ TRACE_PACKET("Read/Write");
+ numOther[pkt->req->masterId()]++;
+ }
} else if (pkt->isRead()) {
assert(!pkt->isWrite());
if (pkt->isLLSC()) {
+ assert(!pkt->fromCache());
+ // if the packet is not coming from a cache then we have
+ // to do the LL/SC tracking here
trackLoadLocked(pkt);
}
if (pmemAddr)
bytesRead[pkt->req->masterId()] += pkt->getSize();
if (pkt->req->isInstFetch())
bytesInstRead[pkt->req->masterId()] += pkt->getSize();
- } else if (pkt->isInvalidate()) {
+ } else if (pkt->isInvalidate() || pkt->isClean()) {
+ assert(!pkt->isWrite());
+ // in a fastmem system invalidating and/or cleaning packets
+ // can be seen due to cache maintenance requests
+
// no need to do anything
- // this clause is intentionally before the write clause: the only
- // transaction that is both a write and an invalidate is
- // WriteInvalidate, and for the sake of consistency, it does not
- // write to memory. in a cacheless system, there are no WriteInv's
- // because the Write -> WriteInvalidate rewrite happens in the cache.
} else if (pkt->isWrite()) {
if (writeOK(pkt)) {
if (pmemAddr) {
memcpy(hostAddr, pkt->getConstPtr<uint8_t>(), pkt->getSize());
- DPRINTF(MemoryAccess, "%s wrote %x bytes to address %x\n",
+ DPRINTF(MemoryAccess, "%s wrote %i bytes to address %x\n",
__func__, pkt->getSize(), pkt->getAddr());
}
assert(!pkt->req->isInstFetch());
bytesWritten[pkt->req->masterId()] += pkt->getSize();
}
} else {
- panic("unimplemented");
+ panic("Unexpected packet %s", pkt->print());
}
if (pkt->needsResponse()) {