uint8 getByte(int whichByte) const;
const uint8* getData(int offset, int len) const;
void setByte(int whichByte, uint8 data);
+ const uint8* getBlock() const;
+ uint8* copyData(uint8* dest, int offset, int size) const;
+ void setBlock(uint8* data) { setData(data, 0, System::getBlockSizeBytes()); }
void setData(uint8* data, int offset, int len);
void copyPartial(const DataBlock & dblk, int offset, int len);
bool equal(const DataBlock& obj) const;
setData(&dblk.m_data[offset], offset, len);
}
+// Return a read-only pointer to the block's raw backing store without
+// copying. The pointer aliases internal state: callers must not write
+// through it or hold it past the lifetime of this DataBlock.
+inline
+const uint8* DataBlock::getBlock() const
+{
+  return m_data;
+}
+
+// Copy `size` bytes of this block, starting at byte `offset`, into the
+// caller-owned buffer `dest` (which must hold at least `size` bytes).
+// Returns `dest` for call chaining.
+inline
+uint8* DataBlock::copyData(uint8* dest, int offset, int size) const
+{
+  // NOTE(review): this bounds check calls RubySystem::getBlockSizeBytes()
+  // while the setBlock() inline added above calls System::getBlockSizeBytes().
+  // One of the two spellings is presumably wrong for this tree — confirm
+  // which class exists and make both call sites consistent.
+  assert(offset + size <= RubySystem::getBlockSizeBytes());
+  memcpy(dest, m_data + offset, size);
+  return dest;
+}
+
+
// ******************* Definitions *******************
// Output operator definition
return "LD";
case RubyRequestType_ST:
return "ST";
- case RubyRequestType_RMW:
- return "RMW";
+ case RubyRequestType_RMW_Read:
+ return "RMW_Read";
+ case RubyRequestType_RMW_Write:
+ return "RMW_Write";
case RubyRequestType_NULL:
default:
assert(0);
return RubyRequestType_LD;
else if (str == "ST")
return RubyRequestType_ST;
- else if (str == "RMW")
- return RubyRequestType_RMW;
+ else if (str == "RMW_Read")
+ return RubyRequestType_RMW_Read;
+ else if (str == "RMW_Write")
+ return RubyRequestType_RMW_Write;
else
assert(0);
return RubyRequestType_NULL;
RubyRequestType_IFETCH,
RubyRequestType_LD,
RubyRequestType_ST,
- RubyRequestType_RMW
+ RubyRequestType_RMW_Read,
+ RubyRequestType_RMW_Write
};
enum RubyAccessMode {
void setMemoryValue(const Address& addr, char* value,
unsigned int size_in_bytes );
+ void setLocked (const Address& addr, int context);
+ void clearLocked (const Address& addr);
+ bool isLocked (const Address& addr, int context);
// Print cache contents
void print(ostream& out) const;
void printData(ostream& out) const;
// The first index is the # of cache lines.
// The second index is the the amount associativity.
Vector<Vector<AbstractCacheEntry*> > m_cache;
+ Vector<Vector<int> > m_locked;
AbstractReplacementPolicy *m_replacementPolicy_ptr;
assert(false);
m_cache.setSize(m_cache_num_sets);
+ m_locked.setSize(m_cache_num_sets);
for (int i = 0; i < m_cache_num_sets; i++) {
m_cache[i].setSize(m_cache_assoc);
+ m_locked[i].setSize(m_cache_assoc);
for (int j = 0; j < m_cache_assoc; j++) {
m_cache[i][j] = NULL;
+ m_locked[i][j] = -1;
}
}
}
m_cache[cacheSet][i] = entry; // Init entry
m_cache[cacheSet][i]->m_Address = address;
m_cache[cacheSet][i]->m_Permission = AccessPermission_Invalid;
+ m_locked[cacheSet][i] = -1;
m_replacementPolicy_ptr->touch(cacheSet, i, g_eventQueue_ptr->getTime());
if (location != -1){
delete m_cache[cacheSet][location];
m_cache[cacheSet][location] = NULL;
+ m_locked[cacheSet][location] = -1;
}
}
{
assert(address == line_address(address));
lookup(address).m_Permission = new_perm;
+ m_locked[cacheSet][loc] = -1;
assert(getPermission(address) == new_perm);
}
// entry = lookup(line_address(addr));
}
+// Mark the resident cache line for `address` as locked by `context`
+// (LL/SC-style lock taken on an RMW_Read; see Sequencer). `address` must
+// be a line address and the line must currently be present in the cache —
+// both are asserted. Any previous lock owner for the line is overwritten.
+inline
+void
+CacheMemory::setLocked(const Address& address, int context)
+{
+  assert(address == line_address(address));
+  Index cacheSet = addressToCacheSet(address);
+  int loc = findTagInSet(cacheSet, address);
+  assert(loc != -1);
+  m_locked[cacheSet][loc] = context;
+}
+
+// Release the lock on the resident cache line for `address`, restoring the
+// "unlocked" sentinel (-1) used throughout m_locked. `address` must be a
+// line address and the line must currently be present — both are asserted.
+// Callers are expected to invoke this only after isLocked() succeeded.
+inline
+void
+CacheMemory::clearLocked(const Address& address)
+{
+  assert(address == line_address(address));
+  Index cacheSet = addressToCacheSet(address);
+  int loc = findTagInSet(cacheSet, address);
+  assert(loc != -1);
+  m_locked[cacheSet][loc] = -1;
+}
+
+// Return true iff the cache line for `address` is currently locked by
+// `context`. `address` must be a line address (asserted).
+//
+// A line that is no longer resident — evicted or replaced since the
+// RMW_Read that took the lock (deallocate/cacheProbe paths reset m_locked
+// to -1) — is reported as unlocked instead of tripping an assert, so a
+// racing RMW_Write simply fails its atomic check rather than crashing.
+inline
+bool
+CacheMemory::isLocked(const Address& address, int context)
+{
+  assert(address == line_address(address));
+  Index cacheSet = addressToCacheSet(address);
+  int loc = findTagInSet(cacheSet, address);
+  if (loc == -1)
+    return false;  // line lost its lock along with its residency
+  return m_locked[cacheSet][loc] == context;
+}
+
#endif //CACHEMEMORY_H
Address line_addr(ruby_request.paddr);
line_addr.makeLineAddress();
if ((ruby_request.type == RubyRequestType_ST) ||
- (ruby_request.type == RubyRequestType_RMW)) {
+ (ruby_request.type == RubyRequestType_RMW_Read) ||
+ (ruby_request.type == RubyRequestType_RMW_Write)) {
m_writeRequestTable.deallocate(line_addr);
} else {
m_readRequestTable.deallocate(line_addr);
removeRequest(request);
assert((request->ruby_request.type == RubyRequestType_ST) ||
- (request->ruby_request.type == RubyRequestType_RMW));
+ (request->ruby_request.type == RubyRequestType_RMW_Read) ||
+ (request->ruby_request.type == RubyRequestType_RMW_Write));
+ // NOTE(polina): assumes RMW/atomic requests are issued only to the data
+ // cache, never the instruction cache — verify the protocol enforces this.
+ if (request->ruby_request.type == RubyRequestType_RMW_Read) {
+ m_dataCache_ptr->setLocked(address, m_version);
+ }
+ else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
+ if (m_dataCache_ptr->isLocked(address, m_version)) {
+ // if we are holding the lock for this
+ request->ruby_request.atomic_success = true;
+ m_dataCache_ptr->clearLocked(address);
+ }
+ else {
+ // if we are not holding the lock for this
+ request->ruby_request.atomic_success = false;
+ }
+
+ // NOTE: can livelock — a failed RMW_Write is retried, and two contexts
+ // repeatedly stealing each other's line lock may never make progress;
+ // confirm a backoff or forward-progress guarantee exists upstream.
+ }
hitCallback(request, data);
}
case RubyRequestType_ST:
ctype = CacheRequestType_ST;
break;
- case RubyRequestType_RMW:
+ case RubyRequestType_RMW_Read:
+ ctype = CacheRequestType_ATOMIC;
+ break;
+ case RubyRequestType_RMW_Write:
ctype = CacheRequestType_ATOMIC;
break;
default: