Check*
CheckTable::getCheck(const Address& address)
{
- DEBUG_MSG(TESTER_COMP, MedPrio, "Looking for check by address");
- DEBUG_EXPR(TESTER_COMP, MedPrio, address);
+ DPRINTF(RubyTest, "Looking for check by address: %s\n", address);
m5::hash_map<Address, Check*>::iterator i = m_lookup_map.find(address);
TraceFlag('LLSC')
TraceFlag('MMU')
TraceFlag('MemoryAccess')
-TraceFlag('Ruby')
+
+TraceFlag('RubyCache')
+TraceFlag('RubyDma')
+TraceFlag('RubyGenerated')
+TraceFlag('RubyMemory')
+TraceFlag('RubyNetwork')
+TraceFlag('RubyQueue')
+TraceFlag('RubyPort')
+TraceFlag('RubySlicc')
+TraceFlag('RubyStorebuffer')
+TraceFlag('RubyTester')
+
+CompoundFlag('Ruby', [ 'RubyQueue', 'RubyNetwork', 'RubyTester',
+ 'RubyGenerated', 'RubySlicc', 'RubyStorebuffer', 'RubyCache',
+ 'RubyMemory', 'RubyDma'])
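+
+# The compound 'Ruby' flag enables all of the component flags above at
+# once (e.g., via the trace-flags command line option), while an
+# individual flag such as 'RubyQueue' limits tracing to that component.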
out_msg.Requestor := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
- DEBUG_EXPR(address);
- //DEBUG_EXPR(out_msg.Destination);
+ DPRINTF(RubySlicc, "address: %s, destination: %s\n",
+ address, out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
out_msg.Requestor := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
- DEBUG_EXPR(address);
- //DEBUG_EXPR(out_msg.Destination);
+ DPRINTF(RubySlicc, "address: %s, destination: %s\n",
+ address, out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
- //DEBUG_EXPR(machineID);
+ DPRINTF(RubySlicc, "%s\n", machineID);
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
- DEBUG_EXPR(address);
- //DEBUG_EXPR(out_msg.Destination);
+ DPRINTF(RubySlicc, "address: %s, destination: %s\n",
+ address, out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
out_msg.Requestor := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
- DEBUG_EXPR(address);
- //DEBUG_EXPR(out_msg.Destination);
+ DPRINTF(RubySlicc, "address: %s, destination: %s\n",
+ address, out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
out_msg.MessageSize := MessageSizeType:Response_Control;
- DEBUG_EXPR(address);
+ DPRINTF(RubySlicc, "%s\n", address);
}
}
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits));
out_msg.MessageSize := MessageSizeType:Response_Control;
- DEBUG_EXPR(address);
+ DPRINTF(RubySlicc, "%s\n", address);
}
}
action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
- //DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getL1CacheEntry(address).DataBlk);
sequencer.readCallback(address, getL1CacheEntry(address).DataBlk);
}
action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
- //DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getL1CacheEntry(address).DataBlk);
sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk);
getL1CacheEntry(address).Dirty := true;
}
}
void addSharer(Address addr, MachineID requestor) {
- //DEBUG_EXPR(machineID);
- //DEBUG_EXPR(requestor);
- //DEBUG_EXPR(addr);
+ DPRINTF(RubySlicc, "machineID: %s, requestor: %s, address: %s\n",
+ machineID, requestor, addr);
getL2CacheEntry(addr).Sharers.add(requestor);
}
return Event:L1_PUTX_old;
}
} else {
- DEBUG_EXPR(addr);
- DEBUG_EXPR(type);
+ DPRINTF(RubySlicc, "address: %s, Request Type: %s\n", addr, type);
error("Invalid L1 forwarded request type");
}
}
in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache) {
if(L1unblockNetwork_in.isReady()) {
peek(L1unblockNetwork_in, ResponseMsg) {
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(getState(in_msg.Address));
- DEBUG_EXPR(in_msg.Sender);
- DEBUG_EXPR(in_msg.Type);
- DEBUG_EXPR(in_msg.Destination);
+ DPRINTF(RubySlicc, "Addr: %s State: %s Sender: %s Type: %s Dest: %s\n",
+ in_msg.Address, getState(in_msg.Address), in_msg.Sender,
+ in_msg.Type, in_msg.Destination);
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:EXCLUSIVE_UNBLOCK) {
in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache) {
if(L1RequestIntraChipL2Network_in.isReady()) {
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- DEBUG_EXPR(in_msg.Address);
- //DEBUG_EXPR(id);
- DEBUG_EXPR(getState(in_msg.Address));
- //DEBUG_EXPR(in_msg.Requestor);
- DEBUG_EXPR(in_msg.Type);
- //DEBUG_EXPR(in_msg.Destination);
+ DPRINTF(RubySlicc, "Addr: %s State: %s Req: %s Type: %s Dest: %s\n",
+ in_msg.Address, getState(in_msg.Address), in_msg.Requestor,
+ in_msg.Type, in_msg.Destination);
assert(machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache);
assert(in_msg.Destination.isElement(machineID));
if (L2cacheMemory.isTagPresent(in_msg.Address)) {
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID);
- //DEBUG_EXPR(out_msg.Destination);
+ DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
out_msg.Dirty := getL2CacheEntry(address).Dirty;
- DEBUG_EXPR(out_msg.Address);
- //DEBUG_EXPR(out_msg.Destination);
- //DEBUG_EXPR(out_msg.DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Destination: %s, DataBlock: %s\n",
+ out_msg.Address, out_msg.Destination, out_msg.DataBlk);
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address));
} else {
- DEBUG_EXPR(in_msg);
+ DPRINTF(RubySlicc, "%s\n", in_msg);
error("Invalid message");
}
}
} else if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:CleanReplacement, in_msg.Address);
} else {
- DEBUG_EXPR(in_msg.Type);
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
}
}
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.Address);
} else {
- DEBUG_EXPR(in_msg.Type);
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
}
}
out_msg.Prefetch := in_msg.Prefetch;
out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
peek(responseNetwork_in, ResponseMsg) {
getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ in_msg.Address, in_msg.DataBlk);
}
}
//added by SS for dma
out_msg.OriginalRequestorMachId := machineID;
out_msg.MessageSize := in_msg.MessageSize;
out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
}
action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk);
sequencer.readCallback(address,
GenericMachineType:L1Cache,
getCacheEntry(address).DataBlk);
action(rx_load_hit, "rx", desc="External load completed.") {
peek(responseNetwork_in, ResponseMsg) {
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk);
sequencer.readCallback(address,
getNondirectHitMachType(in_msg.Sender),
getCacheEntry(address).DataBlk);
}
action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk);
sequencer.writeCallback(address,
GenericMachineType:L1Cache,
getCacheEntry(address).DataBlk);
action(sx_store_hit, "sx", desc="External store completed.") {
peek(responseNetwork_in, ResponseMsg) {
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk);
sequencer.writeCallback(address,
getNondirectHitMachType(in_msg.Sender),
getCacheEntry(address).DataBlk);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.Address);
} else {
- DEBUG_EXPR(in_msg.Type);
+ DPRINTF(RubySlicc,"%s\n", in_msg.Type);
error("Invalid message");
}
}
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.MessageSize := in_msg.MessageSize;
out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc,"%s\n", out_msg);
}
}
}
//out_msg.OriginalRequestorMachId := machineID;
out_msg.MessageSize := in_msg.MessageSize;
out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc,"%s\n", out_msg);
}
}
}
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc,"%s\n", out_msg);
}
}
}
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc,"%s\n", out_msg);
}
}
}
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc,"%s\n", out_msg);
}
}
}
if (requestNetwork_in.isReady()) {
peek(requestNetwork_in, RequestMsg, block_on="Address") {
assert(in_msg.Destination.isElement(machineID));
- DEBUG_EXPR("MRM_DEBUG: L1 received");
- DEBUG_EXPR(in_msg.Type);
+ DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
trigger(Event:Own_GETX, in_msg.Address);
out_msg.Acks := in_msg.Acks;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- DEBUG_EXPR("Sending data to L2");
- DEBUG_EXPR(in_msg.Address);
+ DPRINTF(RubySlicc, "Sending data to L2: %s\n", in_msg.Address);
}
else {
enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Acks := in_msg.Acks;
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
}
- DEBUG_EXPR("Sending data to L1");
+ DPRINTF(RubySlicc, "Sending data to L1\n");
}
}
}
out_msg.Acks := in_msg.Acks;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- DEBUG_EXPR("Sending exclusive data to L2");
+ DPRINTF(RubySlicc, "Sending exclusive data to L2\n");
}
else {
enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Acks := in_msg.Acks;
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
}
- DEBUG_EXPR("Sending exclusive data to L1");
+ DPRINTF(RubySlicc, "Sending exclusive data to L1\n");
}
}
}
}
action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
sequencer.readCallback(address, getCacheEntry(address).DataBlk);
}
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
sequencer.writeCallback(address, getCacheEntry(address).DataBlk);
getCacheEntry(address).Dirty := true;
}
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
peek(responseToL1Cache_in, ResponseMsg) {
- DEBUG_EXPR("MRM_DEBUG: L1 decrementNumberOfMessages");
- DEBUG_EXPR(in_msg.Acks);
+ DPRINTF(RubySlicc, "L1 decrementNumberOfMessages: %d\n", in_msg.Acks);
TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
}
}
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- DEBUG_EXPR(address);
- DEBUG_EXPR(L2_TBEs[address].DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, L2_TBEs[address].DataBlk);
}
action(c_sendDataFromTBEToL1GETX, "\c", desc="Send data from TBE to L1 requestors in TBE") {
out_msg.Acks := L2_TBEs[address].Local_GETX_IntAcks;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- DEBUG_EXPR(address);
- DEBUG_EXPR(L2_TBEs[address].DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, L2_TBEs[address].DataBlk);
}
action(c_sendExclusiveDataFromTBEToL1GETS, "\cc", desc="Send data from TBE to L1 requestors in TBE") {
out_msg.Acks := L2_TBEs[address].Fwd_GETX_ExtAcks;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- DEBUG_EXPR(address);
- DEBUG_EXPR(L2_TBEs[address].DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, L2_TBEs[address].DataBlk);
}
action(c_sendExclusiveDataFromTBEToFwdGETS, "\ccc", desc="Send data from TBE to external GETX") {
out_msg.Acks := L2_TBEs[address].Fwd_GETX_ExtAcks;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
- DEBUG_EXPR(address);
- DEBUG_EXPR(L2_TBEs[address].DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, L2_TBEs[address].DataBlk);
}
action(d_sendDataToL1GETS, "d", desc="Send data directly to L1 requestor") {
out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
}
}
- DEBUG_EXPR(address);
- DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, getL2CacheEntry(address).DataBlk);
}
action(d_sendDataToL1GETX, "\d", desc="Send data and a token from TBE to L1 requestor") {
out_msg.Acks := L2_TBEs[address].Local_GETX_IntAcks;
}
}
- DEBUG_EXPR(address);
- DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, getL2CacheEntry(address).DataBlk);
}
action(dd_sendDataToFwdGETX, "dd", desc="send data") {
out_msg.Acks := in_msg.Acks;
}
}
- DEBUG_EXPR(address);
- DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, getL2CacheEntry(address).DataBlk);
}
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
- DEBUG_EXPR(address);
- DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, getL2CacheEntry(address).DataBlk);
}
action(dd_sendExclusiveDataToFwdGETS, "\d\d", desc="send data") {
action(ee_sendLocalInv, "\ee", desc="Send local invalidates") {
L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address);
- DEBUG_EXPR(address);
- DEBUG_EXPR(getLocalSharers(address));
- DEBUG_EXPR(L2_TBEs[address].NumIntPendingAcks);
+ DPRINTF(RubySlicc, "Address: %s, Local Sharers: %s, Pending Acks: %d\n",
+ address, getLocalSharers(address),
+ L2_TBEs[address].NumIntPendingAcks);
if (isLocalOwnerValid(address)) {
L2_TBEs[address].NumIntPendingAcks := L2_TBEs[address].NumIntPendingAcks + 1;
- DEBUG_EXPR(getLocalOwner(address));
+ DPRINTF(RubySlicc, "%s\n", getLocalOwner(address));
}
enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.Address);
} else {
- DEBUG_EXPR(in_msg.Type);
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
}
}
assert(in_msg.Dirty);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ in_msg.Address, in_msg.DataBlk);
}
}
action(p_writeFwdDataToMemory, "p", desc="Write Response data to memory") {
peek(unblockNetwork_in, ResponseMsg) {
getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ in_msg.Address, in_msg.DataBlk);
}
}
if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
out_msg.Acks := out_msg.Acks - 1;
}
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
// Not used:
out_msg.ReadX := false;
out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
// Not used:
out_msg.ReadX := false;
out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
assert(in_msg.Dirty);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
directory[in_msg.Address].DataBlk := in_msg.DataBlk;
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ in_msg.Address, in_msg.DataBlk);
}
}
int averageLatencyCounter, default="(500 << (*m_L1Cache_averageLatencyHysteresis_ptr))";
int averageLatencyEstimate() {
- DEBUG_EXPR( (averageLatencyCounter >> averageLatencyHysteresis) );
+ DPRINTF(RubySlicc, "%d\n",
+ (averageLatencyCounter >> averageLatencyHysteresis));
//profile_average_latency_estimate( (averageLatencyCounter >> averageLatencyHysteresis) );
return averageLatencyCounter >> averageLatencyHysteresis;
}
void updateAverageLatencyEstimate(int latency) {
- DEBUG_EXPR( latency );
+ DPRINTF(RubySlicc, "%d\n", latency);
assert(latency >= 0);
// By subtracting the current average and then adding the most
// Increment IssueCount
L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
- DEBUG_EXPR("incremented issue count");
- DEBUG_EXPR(L1_TBEs[address].IssueCount);
+ DPRINTF(RubySlicc, "incremented issue count to %d\n",
+ L1_TBEs[address].IssueCount);
// Set a wakeup timer
if (dynamic_timeout_enabled) {
action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
- DEBUG_EXPR(address);
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, getCacheEntry(address).DataBlk);
sequencer.readCallback(address,
GenericMachineType:L1Cache,
}
action(x_external_load_hit, "x", desc="Notify sequencer the load completed.") {
- DEBUG_EXPR(address);
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, getCacheEntry(address).DataBlk);
peek(responseNetwork_in, ResponseMsg) {
sequencer.readCallback(address,
}
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
- DEBUG_EXPR(address);
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, getCacheEntry(address).DataBlk);
sequencer.writeCallback(address,
GenericMachineType:L1Cache,
getCacheEntry(address).DataBlk);
getCacheEntry(address).Dirty := true;
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
}
action(xx_external_store_hit, "\x", desc="Notify sequencer that store completed.") {
- DEBUG_EXPR(address);
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ address, getCacheEntry(address).DataBlk);
peek(responseNetwork_in, ResponseMsg) {
sequencer.writeCallback(address,
}
getCacheEntry(address).Dirty := true;
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
}
action(i_allocateTBE, "i", desc="Allocate TBE") {
action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Tokens != 0);
- DEBUG_EXPR("MRM_DEBUG L1 received tokens");
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.Tokens);
+ DPRINTF(RubySlicc, "L1 received tokens for address: %s, tokens: %d\n",
+ in_msg.Address, in_msg.Tokens);
getCacheEntry(address).Tokens := getCacheEntry(address).Tokens + in_msg.Tokens;
- DEBUG_EXPR(getCacheEntry(address).Tokens);
+ DPRINTF(RubySlicc, "%d\n", getCacheEntry(address).Tokens);
if (getCacheEntry(address).Dirty == false && in_msg.Dirty) {
getCacheEntry(address).Dirty := true;
} else if(type == CoherenceRequestType:GETX) {
return GenericRequestType:GETX;
} else {
- DEBUG_EXPR(type);
+ DPRINTF(RubySlicc, "%s\n", type);
error("invalid CoherenceRequestType");
}
}
} else if (in_msg.Type == CoherenceResponseType:INV) {
trigger(Event:L1_INV, in_msg.Address);
} else {
- DEBUG_EXPR(in_msg.Type);
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Unexpected message");
}
}
peek(requestNetwork_in, RequestMsg) {
if (filtering_enabled == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Address) == false) {
//profile_filter_action(1);
- DEBUG_EXPR("filtered message");
- DEBUG_EXPR(in_msg.RetryNum);
+ DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
+ in_msg.RetryNum);
}
else {
enqueue(localRequestNetwork_out, RequestMsg, latency=l2_response_latency ) {
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.Address);
} else {
- DEBUG_EXPR(in_msg.Type);
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
}
}
} else if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack_All_Tokens, in_msg.Address);
} else {
- DEBUG_EXPR(in_msg.Type);
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
}
} else {
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
trigger(Event:Ack_Owner, in_msg.Address);
} else {
- DEBUG_EXPR(in_msg.Type);
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
}
}
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.MessageSize := in_msg.MessageSize;
out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
out_msg.OriginalRequestorMachId := persistentTable.findSmallest(address);
out_msg.MessageSize := MessageSizeType:Request_Control;
out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.MessageSize := in_msg.MessageSize;
out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
out_msg.DataBlk := TBEs[address].DataBlk;
// then add the dma write data
out_msg.DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
out_msg.Type := MemoryRequestType:MEMORY_WB;
// first, initialize the data blk to the current version of system memory
out_msg.DataBlk := TBEs[address].DataBlk;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
peek(responseNetwork_in, ResponseMsg) {
getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ in_msg.Address, in_msg.DataBlk);
}
}
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DataBlk := getCacheEntry(address).DataBlk;
- DEBUG_EXPR(out_msg.DataBlk);
+ DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
out_msg.Dirty := getCacheEntry(address).Dirty;
if (in_msg.DirectedProbe) {
out_msg.Acks := machineCount(MachineType:L1Cache);
out_msg.Sender := machineID;
out_msg.Destination := in_msg.MergedRequestors;
out_msg.DataBlk := getCacheEntry(address).DataBlk;
- DEBUG_EXPR(out_msg.DataBlk);
+ DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
out_msg.Dirty := getCacheEntry(address).Dirty;
out_msg.Acks := machineCount(MachineType:L1Cache);
out_msg.MessageSize := MessageSizeType:Response_Data;
}
action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
sequencer.readCallback(address,
testAndClearLocalHit(address),
}
action(hx_external_load_hit, "hx", desc="load required external msgs") {
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
peek(responseToCache_in, ResponseMsg) {
sequencer.readCallback(address,
}
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
peek(mandatoryQueue_in, CacheMsg) {
sequencer.writeCallback(address,
testAndClearLocalHit(address),
}
action(sx_external_store_hit, "sx", desc="store required external msgs.") {
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
peek(responseToCache_in, ResponseMsg) {
sequencer.writeCallback(address,
}
action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
sequencer.writeCallback(address,
getNondirectHitMachType(address,
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
peek(responseToCache_in, ResponseMsg) {
assert(in_msg.Acks > 0);
- DEBUG_EXPR(TBEs[address].NumPendingMsgs);
+ DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
- DEBUG_EXPR(TBEs[address].NumPendingMsgs);
+ DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
TBEs[address].LastResponder := in_msg.Sender;
if (TBEs[address].InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
assert(TBEs[address].InitialRequestTime == in_msg.InitialRequestTime);
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
- DEBUG_EXPR(out_msg.Destination);
+ DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
out_msg.DataBlk := TBEs[address].DataBlk;
out_msg.Dirty := TBEs[address].Dirty;
if (in_msg.DirectedProbe) {
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.Destination := in_msg.MergedRequestors;
- DEBUG_EXPR(out_msg.Destination);
+ DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
out_msg.DataBlk := TBEs[address].DataBlk;
out_msg.Dirty := TBEs[address].Dirty;
out_msg.Acks := machineCount(MachineType:L1Cache);
action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
peek(responseToCache_in, ResponseMsg) {
- DEBUG_EXPR(getCacheEntry(address).DataBlk);
- DEBUG_EXPR(in_msg.DataBlk);
+ DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
+ getCacheEntry(address).DataBlk, in_msg.DataBlk);
assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
getCacheEntry(address).DataBlk := in_msg.DataBlk;
getCacheEntry(address).Dirty := in_msg.Dirty || getCacheEntry(address).Dirty;
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.Address);
} else {
- DEBUG_EXPR(in_msg.Type);
+ DPRINTF(RubySlicc, "%d\n", in_msg.Type);
error("Invalid message");
}
}
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
peek(responseToDir_in, ResponseMsg) {
assert(in_msg.Acks > 0);
- DEBUG_EXPR(TBEs[address].NumPendingMsgs);
+ DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
//
// Note that cache data responses will have an ack count of 2. However,
// directory DMA requests must wait for acks from all LLC caches, so
// only decrement by 1.
//
TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - 1;
- DEBUG_EXPR(TBEs[address].NumPendingMsgs);
+ DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
}
}
action(mu_decrementNumberOfUnblocks, "mu", desc="Decrement the number of messages for which we're waiting") {
peek(unblockNetwork_in, ResponseMsg) {
assert(in_msg.Type == CoherenceResponseType:UNBLOCKS);
- DEBUG_EXPR(TBEs[address].NumPendingMsgs);
+ DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
//
// Note that cache data responses will have an ack count of 2. However,
// directory DMA requests must wait for acks from all LLC caches, so
// only decrement by 1.
//
TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - 1;
- DEBUG_EXPR(TBEs[address].NumPendingMsgs);
+ DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
}
}
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);
out_msg.DataBlk := in_msg.DataBlk;
- DEBUG_EXPR(out_msg.DataBlk);
+ DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
out_msg.Dirty := false; // By definition, the block is now clean
out_msg.Acks := TBEs[address].Acks;
- DEBUG_EXPR(out_msg.Acks);
+ DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
assert(out_msg.Acks > 0);
out_msg.MessageSize := MessageSizeType:Response_Data;
}
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.MessageSize := in_msg.MessageSize;
out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.MessageSize := in_msg.MessageSize;
out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
action(wr_writeResponseDataToMemory, "wr", desc="Write response data to memory") {
peek(responseToDir_in, ResponseMsg) {
getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ in_msg.Address, in_msg.DataBlk);
}
}
assert(in_msg.Dirty);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.DataBlk);
+ DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
+ in_msg.Address, in_msg.DataBlk);
}
}
action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
- DEBUG_EXPR(getDirectoryEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
- DEBUG_EXPR(getDirectoryEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
- DEBUG_EXPR(getDirectoryEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
}
action(wdt_writeDataFromTBE, "wdt", desc="DMA Write data to memory from TBE") {
- DEBUG_EXPR(getDirectoryEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
- DEBUG_EXPR(getDirectoryEntry(address).DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
}
action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
out_msg.DataBlk := TBEs[address].DataBlk;
// then add the dma write data
out_msg.DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
- DEBUG_EXPR(out_msg);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
sticky_vars.AddVariables(
BoolVariable('NO_VECTOR_BOUNDS_CHECKS', "Don't do bounds checks", True),
- BoolVariable('RUBY_DEBUG', "Add debugging stuff to Ruby", False),
('GEMS_ROOT', "Add debugging stuff to Ruby", Dir('..').srcnode().abspath),
)
-export_vars += [ 'NO_VECTOR_BOUNDS_CHECKS', 'RUBY_DEBUG', 'GEMS_ROOT' ]
+export_vars += [ 'NO_VECTOR_BOUNDS_CHECKS', 'GEMS_ROOT' ]
if (current_size + n <= m_max_size) {
return true;
} else {
- DEBUG_MSG(QUEUE_COMP, MedPrio, n);
- DEBUG_MSG(QUEUE_COMP, MedPrio, current_size);
- DEBUG_MSG(QUEUE_COMP, MedPrio, m_size);
- DEBUG_MSG(QUEUE_COMP, MedPrio, m_max_size);
+ DPRINTF(RubyQueue, "n: %d, current_size: %d, m_size: %d, "
+ "m_max_size: %d\n",
+ n, current_size, m_size, m_max_size);
m_not_avail_count++;
return false;
}
const Message*
MessageBuffer::peekAtHeadOfQueue() const
{
- DEBUG_NEWLINE(QUEUE_COMP, MedPrio);
-
- DEBUG_MSG(QUEUE_COMP, MedPrio,
- csprintf("Peeking at head of queue %s time: %d.",
- m_name, g_eventQueue_ptr->getTime()));
+ DPRINTF(RubyQueue, "Peeking at head of queue %s time: %lld\n",
+ m_name, g_eventQueue_ptr->getTime());
assert(isReady());
const Message* msg_ptr = m_prio_heap.front().m_msgptr.get();
assert(msg_ptr);
- DEBUG_EXPR(QUEUE_COMP, MedPrio, *msg_ptr);
- DEBUG_NEWLINE(QUEUE_COMP, MedPrio);
+ DPRINTF(RubyQueue, "Message: %s\n", (*msg_ptr));
return msg_ptr;
}
void
MessageBuffer::enqueue(MsgPtr message, Time delta)
{
- DEBUG_NEWLINE(QUEUE_COMP, HighPrio);
- DEBUG_MSG(QUEUE_COMP, HighPrio,
- csprintf("enqueue %s time: %d.", m_name,
- g_eventQueue_ptr->getTime()));
- DEBUG_EXPR(QUEUE_COMP, MedPrio, message);
- DEBUG_NEWLINE(QUEUE_COMP, HighPrio);
+ DPRINTF(RubyQueue, "Enqueue %s time: %lld, message: %s.\n",
+ m_name, g_eventQueue_ptr->getTime(), (*(message.get())));
m_msg_counter++;
m_size++;
push_heap(m_prio_heap.begin(), m_prio_heap.end(),
greater<MessageBufferNode>());
- DEBUG_NEWLINE(QUEUE_COMP, HighPrio);
- DEBUG_MSG(QUEUE_COMP, HighPrio,
- csprintf("enqueue %s with arrival_time %d cur_time: %d.",
- m_name, arrival_time, g_eventQueue_ptr->getTime()));
- DEBUG_EXPR(QUEUE_COMP, MedPrio, message);
- DEBUG_NEWLINE(QUEUE_COMP, HighPrio);
+ DPRINTF(RubyQueue, "Enqueue %s with arrival_time %lld cur_time: %lld, "
+ "message: %s.\n",
+ m_name, arrival_time, g_eventQueue_ptr->getTime(),
+ (*(message.get())));
// Schedule the wakeup
if (m_consumer_ptr != NULL) {
void
MessageBuffer::dequeue(MsgPtr& message)
{
- DEBUG_MSG(QUEUE_COMP, MedPrio, "dequeue from " + m_name);
+ DPRINTF(RubyQueue, "Dequeue from %s\n", m_name);
message = m_prio_heap.front().m_msgptr;
pop();
- DEBUG_EXPR(QUEUE_COMP, MedPrio, message);
+ DPRINTF(RubyQueue, "Enqueue message is %s\n", (*(message.get())));
}
int
void
MessageBuffer::pop()
{
- DEBUG_MSG(QUEUE_COMP, MedPrio, "pop from " + m_name);
+ DPRINTF(RubyQueue, "Pop from %s\n", m_name);
assert(isReady());
pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
greater<MessageBufferNode>());
void
MessageBuffer::recycle()
{
- DEBUG_MSG(QUEUE_COMP, MedPrio, "recycling " + m_name);
+ DPRINTF(RubyQueue, "Recycling %s\n", m_name);
assert(isReady());
MessageBufferNode node = m_prio_heap.front();
pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
void
MessageBuffer::reanalyzeMessages(const Address& addr)
{
- DEBUG_MSG(QUEUE_COMP, MedPrio, "reanalyzeMessages " + m_name);
+ DPRINTF(RubyQueue, "ReanalyzeMessages %s\n", m_name);
assert(m_stall_msg_map.count(addr) > 0);
//
void
MessageBuffer::stallMessage(const Address& addr)
{
- DEBUG_MSG(QUEUE_COMP, MedPrio, "stalling " + m_name);
+ DPRINTF(RubyQueue, "Stalling %s\n", m_name);
assert(isReady());
assert(addr.getOffset() == 0);
MsgPtr message = m_prio_heap.front().m_msgptr;
return false; // no error
}
- if (RUBY_DEBUG == false) {
- cerr << "Error: User specified set of debug components, but the "
- << "RUBY_DEBUG compile-time flag is false." << endl
- << "Solution: Re-compile with RUBY_DEBUG set to true." << endl;
- return true; // error
- }
-
if (string(filter_str) == "all") {
return false; // no error
}
#include <string>
#include <vector>
-#include "config/ruby_debug.hh"
#include "mem/ruby/common/Global.hh"
#include "sim/sim_object.hh"
} \
} while (0)
-#define DEBUG_MSG(module, priority, MESSAGE) do { \
- using namespace std; \
- if (RUBY_DEBUG) { \
- if (g_debug_ptr->validDebug(module, priority)) { \
- (* debug_cout_ptr) << "Debug: in fn " \
- << __PRETTY_FUNCTION__ \
- << " in " << __FILE__ << ":" \
- << __LINE__ << ": " \
- << (MESSAGE) << endl << flush; \
- } \
- } \
-} while (0)
-
-#define DEBUG_EXPR(module, priority, EXPR) do { \
- using namespace std; \
- if (RUBY_DEBUG) { \
- if (g_debug_ptr->validDebug(module, priority)) { \
- (* debug_cout_ptr) << "Debug: in fn " \
- << __PRETTY_FUNCTION__ \
- << " in " << __FILE__ << ":" \
- << __LINE__ << ": " \
- << #EXPR << " is " \
- << (EXPR) << endl << flush; \
- } \
- } \
-} while (0)
-
-#define DEBUG_NEWLINE(module, priority) do { \
- using namespace std; \
- if (RUBY_DEBUG) { \
- if (g_debug_ptr->validDebug(module, priority)) { \
- (* debug_cout_ptr) << endl << flush; \
- } \
- } \
-} while (0)
-
-#define DEBUG_SLICC(priority, LINE, MESSAGE) do { \
- using namespace std; \
- if (RUBY_DEBUG) { \
- if (g_debug_ptr->validDebug(SLICC_COMP, priority)) { \
- (* debug_cout_ptr) << (LINE) << (MESSAGE) << endl << flush; \
- } \
- } \
-} while (0)
-
-#define DEBUG_OUT(rest... ) do { \
- using namespace std; \
- if (RUBY_DEBUG) { \
- cout << "Debug: in fn " \
- << __PRETTY_FUNCTION__ \
- << " in " << __FILE__ << ":" \
- << __LINE__ << ": "; \
- g_debug_ptr->debugMsg(rest); \
- } \
-} while (0)
-
#define ERROR_OUT( rest... ) do { \
using namespace std; \
if (ERROR_MESSAGE_FLAG) { \
~NetDest()
{
- DEBUG_MSG(MEMORY_COMP, LowPrio, "NetDest Destructor");
+ DPRINTF(RubyMemory, "NetDest Destructor\n");
}
void add(MachineID newElement);
void
NetworkInterface_d::wakeup()
{
- DEBUG_EXPR(NETWORK_COMP, MedPrio, m_id);
- DEBUG_MSG(NETWORK_COMP, MedPrio, "NI WOKE UP");
- DEBUG_EXPR(NETWORK_COMP, MedPrio, g_eventQueue_ptr->getTime());
+ DPRINTF(RubyNetwork, "m_id: %d woke up at time: %lld",
+ m_id, g_eventQueue_ptr->getTime());
MsgPtr msg_ptr;
void
Switch_d::wakeup()
{
- DEBUG_MSG(NETWORK_COMP, HighPrio, "Switch woke up");
- DEBUG_EXPR(NETWORK_COMP, HighPrio, g_eventQueue_ptr->getTime());
+ DPRINTF(RubyNetwork, "Switch woke up at time: %lld\n",
+ g_eventQueue_ptr->getTime());
for (int inport = 0; inport < m_num_inports; inport++) {
if (!m_switch_buffer[inport]->isReady())
if (inNetLink->isReady()) {
flit *t_flit = inNetLink->consumeLink();
if (t_flit->get_type() == TAIL_ || t_flit->get_type() == HEAD_TAIL_) {
- DEBUG_EXPR(NETWORK_COMP, HighPrio, m_id);
- DEBUG_MSG(NETWORK_COMP, HighPrio, "Message got delivered");
- DEBUG_EXPR(NETWORK_COMP, HighPrio, g_eventQueue_ptr->getTime());
+ DPRINTF(RubyNetwork, "m_id: %d, Message delivered at time: %lld\n",
+ m_id, g_eventQueue_ptr->getTime());
// When we are doing network only testing, the messages do not
// have to be buffered into the message buffers of the protocol
// checking the incoming link
if (m_in_link[incoming_port]->isReady()) {
- DEBUG_EXPR(NETWORK_COMP, HighPrio, m_id);
- DEBUG_EXPR(NETWORK_COMP, HighPrio, g_eventQueue_ptr->getTime());
+ DPRINTF(RubyNetwork, "m_id: %d, Time: %lld\n",
+ m_id, g_eventQueue_ptr->getTime());
t_flit = m_in_link[incoming_port]->peekLink();
routeCompute(t_flit, incoming_port);
m_in_link[incoming_port]->consumeLink();
void
PerfectSwitch::wakeup()
{
- DEBUG_EXPR(NETWORK_COMP, MedPrio, m_switch_id);
+ DPRINTF(RubyNetwork, "m_switch_id: %d\n",m_switch_id);
MsgPtr msg_ptr;
// Is there a message waiting?
while (m_in[incoming][vnet]->isReady()) {
- DEBUG_EXPR(NETWORK_COMP, MedPrio, incoming);
+ DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
// Peek at message
msg_ptr = m_in[incoming][vnet]->peekMsgPtr();
net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get());
- DEBUG_EXPR(NETWORK_COMP, MedPrio, *net_msg_ptr);
+ DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
output_links.clear();
output_link_destinations.clear();
// pick the next link to look at
int link = m_link_order[i].m_link;
NetDest dst = m_routing_table[link];
- DEBUG_EXPR(NETWORK_COMP, MedPrio, dst);
+ DPRINTF(RubyNetwork, "dst: %s\n", dst);
if (!msg_dsts.intersectionIsNotEmpty(dst))
continue;
int outgoing = output_links[i];
if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
enough = false;
- DEBUG_MSG(NETWORK_COMP, HighPrio,
- "checking if node is blocked");
- DEBUG_EXPR(NETWORK_COMP, HighPrio, outgoing);
- DEBUG_EXPR(NETWORK_COMP, HighPrio, vnet);
- DEBUG_EXPR(NETWORK_COMP, HighPrio, enough);
+ DPRINTF(RubyNetwork, "Checking if node is blocked\n"
+ "outgoing: %d, vnet: %d, enough: %d\n",
+ outgoing, vnet, enough);
}
// There were not enough resources
if (!enough) {
g_eventQueue_ptr->scheduleEvent(this, 1);
- DEBUG_MSG(NETWORK_COMP, HighPrio,
- "Can't deliver message since a node is blocked");
- DEBUG_EXPR(NETWORK_COMP, HighPrio, *net_msg_ptr);
+ DPRINTF(RubyNetwork, "Can't deliver message since a node "
+ "is blocked\n"
+ "Message: %s\n", (*net_msg_ptr));
break; // go to next incoming port
}
output_link_destinations[i];
// Enqeue msg
- DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
- DEBUG_MSG(NETWORK_COMP, HighPrio,
- csprintf("switch: %d enqueuing net msg from "
- "inport[%d][%d] to outport [%d][%d] time: %d.",
+ DPRINTF(RubyNetwork, "Switch: %d enqueuing net msg from "
+ "inport[%d][%d] to outport [%d][%d] time: %lld.\n",
m_switch_id, incoming, vnet, outgoing, vnet,
- g_eventQueue_ptr->getTime()));
- DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
+ g_eventQueue_ptr->getTime());
m_out[outgoing][vnet]->enqueue(msg_ptr);
}
m_units_remaining[vnet] +=
network_message_to_size(net_msg_ptr);
- DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
- DEBUG_MSG(NETWORK_COMP, HighPrio,
- csprintf("throttle: %d my bw %d bw spent enqueueing "
- "net msg %d time: %d.",
+ DPRINTF(RubyNetwork, "throttle: %d my bw %d bw spent "
+ "enqueueing net msg %d time: %lld.\n",
m_node, getLinkBandwidth(), m_units_remaining[vnet],
- g_eventQueue_ptr->getTime()));
+ g_eventQueue_ptr->getTime());
// Move the message
m_out[vnet]->enqueue(m_in[vnet]->peekMsgPtr(), m_link_latency);
// Count the message
m_message_counters[net_msg_ptr->getMessageSize()][vnet]++;
- DEBUG_MSG(NETWORK_COMP,LowPrio,*m_out[vnet]);
- DEBUG_NEWLINE(NETWORK_COMP,HighPrio);
+ DPRINTF(RubyNetwork, "%s\n", *m_out[vnet]);
}
// Calculate the amount of bandwidth we spent on this message
if (bw_remaining > 0 &&
(m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
!m_out[vnet]->areNSlotsAvailable(1)) {
- DEBUG_MSG(NETWORK_COMP,LowPrio,vnet);
+ DPRINTF(RubyNetwork, "vnet: %d", vnet);
// schedule me to wakeup again because I'm waiting for my
// output queue to become available
schedule_wakeup = true;
// We have extra bandwidth and our output buffer was
// available, so we must not have anything else to do until
// another message arrives.
- DEBUG_MSG(NETWORK_COMP, LowPrio, *this);
- DEBUG_MSG(NETWORK_COMP, LowPrio, "not scheduled again");
+ DPRINTF(RubyNetwork, "%s not scheduled again\n", *this);
} else {
- DEBUG_MSG(NETWORK_COMP, LowPrio, *this);
- DEBUG_MSG(NETWORK_COMP, LowPrio, "scheduled again");
+ DPRINTF(RubyNetwork, "%s scheduled again\n", *this);
// We are out of bandwidth for this cycle, so wakeup next
// cycle and continue
}
}
- DEBUG_MSG(NETWORK_COMP, MedPrio, "returning shortest path");
- DEBUG_EXPR(NETWORK_COMP, MedPrio, (src-(2*max_machines)));
- DEBUG_EXPR(NETWORK_COMP, MedPrio, (next-(2*max_machines)));
- DEBUG_EXPR(NETWORK_COMP, MedPrio, src);
- DEBUG_EXPR(NETWORK_COMP, MedPrio, next);
- DEBUG_EXPR(NETWORK_COMP, MedPrio, result);
- DEBUG_NEWLINE(NETWORK_COMP, MedPrio);
+ DPRINTF(RubyNetwork, "Returning shortest path\n"
+ "(src-(2*max_machines)): %d, (next-(2*max_machines)): %d, "
+ "src: %d, next: %d, result: %s\n",
+ (src-(2*max_machines)), (next-(2*max_machines)),
+ src, next, result);
return result;
}
{
if (request_map.find(id) == request_map.end()) {
ERROR_OUT("Request ID not found in the map");
- DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
+ DPRINTF(RubyStorebuffer, "id: %lld\n", id);
ASSERT(0);
} else {
request_map[id]->complete(id);
if (m_storebuffer_size > 0){
m_use_storebuffer = true;
}
-
-#ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("*******storebuffer_t::Using Write Buffer? %d\n",
- m_use_storebuffer);
-#endif
}
StoreBuffer::~StoreBuffer()
uint64_t id = libruby_issue_request(m_port, request);
if (request_map.find(id) != request_map.end()) {
ERROR_OUT("Request ID is already in the map");
- DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
+ DPRINTF(RubyStorebuffer, "id: %lld\n", id);
ASSERT(0);
} else {
request_map.insert(make_pair(id, this));
}
-#ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("\n***StoreBuffer: addToStoreBuffer BEGIN, contents:\n");
- DEBUG_OUT("\n");
- DEBUG_OUT("\t INSERTING new request\n");
-#endif
-
buffer.push_front(SBEntry(request, NULL));
m_buffer_size++;
}
iseq++;
-
-#ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("***StoreBuffer: addToStoreBuffer END, contents:\n");
- DEBUG_OUT("\n");
-#endif
}
uint64_t id = libruby_issue_request(m_port, request);
if (request_map.find(id) != request_map.end()) {
ERROR_OUT("Request ID is already in the map");
- DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
+ DPRINTF(RubyStorebuffer, "id: %lld\n", id);
ASSERT(0);
} else {
request_map.insert(make_pair(id, this));
return;
}
-#ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("\n***StoreBuffer: flushStoreBuffer BEGIN, contents:\n");
- DEBUG_OUT("\n");
-#endif
-
m_storebuffer_flushing = (m_buffer_size > 0);
}
physical_address_t physical_address =
outstanding_requests.find(id)->second.paddr;
RubyRequestType type = outstanding_requests.find(id)->second.type;
-#ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("\n***StoreBuffer: complete BEGIN, contents:\n");
- DEBUG_OUT("\n");
-#endif
if (type == RubyRequestType_ST) {
physical_address_t lineaddr = physical_address & m_block_mask;
ASSERT(0);
}
-#ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("***StoreBuffer: complete END, contents:\n");
- DEBUG_OUT("\n");
-#endif
} else if (type == RubyRequestType_LD) {
m_hit_callback(id);
}
void
StoreBuffer::print()
{
- DEBUG_OUT("[%d] StoreBuffer: Total entries: %d Outstanding: %d\n",
- m_id, m_buffer_size);
+ DPRINTF(RubyStorebuffer, "[%d] StoreBuffer: Total entries: %d "
+ "Outstanding: %d\n",
+ m_id, m_storebuffer_size, m_buffer_size);
if (!m_use_storebuffer)
- DEBUG_OUT("\t WRITE BUFFER NOT USED\n");
+ DPRINTF(RubyStorebuffer, "\t WRITE BUFFER NOT USED\n");
}
-
-
-
-
DataBlock*& data_ptr)
{
assert(address == line_address(address));
- DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+ DPRINTF(RubyCache, "address: %s\n", address);
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (loc != -1) {
DataBlock*& data_ptr)
{
assert(address == line_address(address));
- DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+ DPRINTF(RubyCache, "address: %s\n", address);
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (loc == -1) {
// We didn't find the tag
- DEBUG_EXPR(CACHE_COMP, LowPrio, address);
- DEBUG_MSG(CACHE_COMP, LowPrio, "No tag match");
+ DPRINTF(RubyCache, "No tag match for address: %s\n", address);
return false;
}
- DEBUG_EXPR(CACHE_COMP, LowPrio, address);
- DEBUG_MSG(CACHE_COMP, LowPrio, "found");
+ DPRINTF(RubyCache, "address: %s found\n", address);
return true;
}
assert(address == line_address(address));
assert(!isTagPresent(address));
assert(cacheAvail(address));
- DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+ DPRINTF(RubyCache, "address: %s\n", address);
// Find the first open slot
Index cacheSet = addressToCacheSet(address);
{
assert(address == line_address(address));
assert(isTagPresent(address));
- DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+ DPRINTF(RubyCache, "address: %s\n", address);
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (loc != -1) {
assert(isPresent(address));
Directory_Entry* entry;
uint64 idx;
- DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+ DPRINTF(RubyCache, "address: %s\n", address);
if (m_use_map) {
if (m_sparseMemory->exist(address)) {
Source('Sequencer.cc', Werror=False)
Source('System.cc')
Source('TimerTable.cc')
-
-TraceFlag('RubyCache')
-TraceFlag('RubyDma')
int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
int lowBit;
assert(address == line_address(address));
- DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+ DPRINTF(RubyCache, "address: %s\n", address);
for (int level = 0; level < m_number_of_levels; level++) {
// Create the appropriate sub address for this level
lowBit = highBit - m_number_of_bits_per_level[level];
curAddress.setAddress(address.bitSelect(lowBit, highBit - 1));
- DEBUG_EXPR(CACHE_COMP, HighPrio, level);
- DEBUG_EXPR(CACHE_COMP, HighPrio, lowBit);
- DEBUG_EXPR(CACHE_COMP, HighPrio, highBit - 1);
- DEBUG_EXPR(CACHE_COMP, HighPrio, curAddress);
+ DPRINTF(RubyCache, "level: %d, lowBit: %d, highBit - 1: %d, "
+ "curAddress: %s\n",
+ level, lowBit, highBit - 1, curAddress);
// Adjust the highBit value for the next level
highBit -= m_number_of_bits_per_level[level];
if (curTable->count(curAddress) != 0) {
curTable = (SparseMapType*)(((*curTable)[curAddress]).entry);
} else {
- DEBUG_MSG(CACHE_COMP, HighPrio, "Not found");
+ DPRINTF(RubyCache, "Not found\n");
return false;
}
}
- DEBUG_MSG(CACHE_COMP, HighPrio, "Entry found");
+ DPRINTF(RubyCache, "Entry found\n");
return true;
}
curAddress.setAddress(address.bitSelect(curInfo.lowBit,
curInfo.highBit - 1));
- DEBUG_EXPR(CACHE_COMP, HighPrio, address);
- DEBUG_EXPR(CACHE_COMP, HighPrio, curInfo.level);
- DEBUG_EXPR(CACHE_COMP, HighPrio, curInfo.lowBit);
- DEBUG_EXPR(CACHE_COMP, HighPrio, curInfo.highBit - 1);
- DEBUG_EXPR(CACHE_COMP, HighPrio, curAddress);
+ DPRINTF(RubyCache, "address: %s, curInfo.level: %d, curInfo.lowBit: %d, "
+ "curInfo.highBit - 1: %d, curAddress: %s\n",
+ address, curInfo.level, curInfo.lowBit,
+ curInfo.highBit - 1, curAddress);
assert(curInfo.curTable->count(curAddress) != 0);
assert(exist(address));
assert(address == line_address(address));
- DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+ DPRINTF(RubyCache, "address: %s\n", address);
Address curAddress;
SparseMapType* curTable = m_map_head;
lowBit = highBit - m_number_of_bits_per_level[level];
curAddress.setAddress(address.bitSelect(lowBit, highBit - 1));
- DEBUG_EXPR(CACHE_COMP, HighPrio, level);
- DEBUG_EXPR(CACHE_COMP, HighPrio, lowBit);
- DEBUG_EXPR(CACHE_COMP, HighPrio, highBit - 1);
- DEBUG_EXPR(CACHE_COMP, HighPrio, curAddress);
+ DPRINTF(RubyCache, "level: %d, lowBit: %d, highBit - 1: %d, "
+ "curAddress: %s\n",
+ level, lowBit, highBit - 1, curAddress);
// Adjust the highBit value for the next level
highBit -= m_number_of_bits_per_level[level];
void RaceyPseudoThread::performCallback(int proc, Address address, uint8_t * data ) {
assert(proc == m_proc_id);
- DEBUG_EXPR(TESTER_COMP, LowPrio, proc);
- DEBUG_EXPR(TESTER_COMP, LowPrio, address);
-
+ DPRINTF(RubyTester, "proc: %d, address: %s\n", proc, address);
m_last_progress = m_driver.eventQueue->getTime();
def generate(self, code):
machine = self.state_machine
- # DEBUG_EXPR is strange since it takes parameters of multiple types
- if self.proc_name == "DEBUG_EXPR":
- # FIXME - check for number of parameters
- code('DEBUG_SLICC(MedPrio, "$0: ", $1)',
- self.exprs[0].location, self.exprs[0].inline())
+ if self.proc_name == "DPRINTF":
+ # Code for inserting the location of the DPRINTF()
+ # statement in the .sm file into the statement itself.
+ # 'self.exprs[0].location' represents the location.
+ # 'format' represents the second argument of the
+ # original DPRINTF() call. It is left unmodified.
+ # str_list is used for concatenating the argument
+ # list following the format specifier. A DPRINTF()
+ # call may or may not contain any arguments following
+ # the format specifier. These two cases need to be
+ # handled differently. Hence the check whether or not
+ # the str_list is empty.
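+ # As a purely illustrative example (hypothetical .sm file and
+ # line number), a call such as DPRINTF("Addr: %s\n", addr) at
+ # line 42 of L1Cache.sm would be emitted roughly as
+ # DPRINTF(RubySlicc, "L1Cache.sm:42: Addr: %s\n", addr).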
+
+ format = "%s" % (self.exprs[1].inline())
+ format_length = len(format)
+ str_list = []
+
+ for i in range(2, len(self.exprs)):
+ str_list.append("%s" % self.exprs[i].inline())
+
+ if len(str_list) == 0:
+ code('DPRINTF(RubySlicc, "$0: $1")',
+ self.exprs[0].location, format[2:format_length-2])
+ else:
+ code('DPRINTF(RubySlicc, "$0: $1", $2)',
+ self.exprs[0].location, format[2:format_length-2],
+ ', '.join(str_list))
return self.symtab.find("void", Type)
void
$c_ident::${{action.ident}}(const Address& addr)
{
- DEBUG_MSG(GENERATED_COMP, HighPrio, "executing");
+ DPRINTF(RubyGenerated, "executing\\n");
${{action["c_code"]}}
}
break; // If we got this far, we have nothing left todo
}
// g_eventQueue_ptr->scheduleEvent(this, 1);
- // DEBUG_NEWLINE(GENERATED_COMP, MedPrio);
}
''')
{
${ident}_State next_state = state;
- DEBUG_NEWLINE(GENERATED_COMP, MedPrio);
- DEBUG_MSG(GENERATED_COMP, MedPrio, *this);
- DEBUG_EXPR(GENERATED_COMP, MedPrio, g_eventQueue_ptr->getTime());
- DEBUG_EXPR(GENERATED_COMP, MedPrio,state);
- DEBUG_EXPR(GENERATED_COMP, MedPrio,event);
- DEBUG_EXPR(GENERATED_COMP, MedPrio,addr);
+ DPRINTF(RubyGenerated, "%s, Time: %lld, state: %s, event: %s, addr: %s\\n",
+ *this,
+ g_eventQueue_ptr->getTime(),
+ ${ident}_State_to_string(state),
+ ${ident}_Event_to_string(event),
+ addr);
TransitionResult result =
doTransitionWorker(event, state, next_state, addr);
if (result == TransitionResult_Valid) {
- DEBUG_EXPR(GENERATED_COMP, MedPrio, next_state);
- DEBUG_NEWLINE(GENERATED_COMP, MedPrio);
+ DPRINTF(RubyGenerated, "next_state: %s\\n",
+ ${ident}_State_to_string(next_state));
m_profiler.countTransition(state, event);
if (Debug::getProtocolTrace()) {
g_system_ptr->getProfiler()->profileTransition("${ident}",
"Resource Stall");
}
} else if (result == TransitionResult_ProtocolStall) {
- DEBUG_MSG(GENERATED_COMP, HighPrio, "stalling");
- DEBUG_NEWLINE(GENERATED_COMP, MedPrio);
+ DPRINTF(RubyGenerated, "stalling\\n");
if (Debug::getProtocolTrace()) {
g_system_ptr->getProfiler()->profileTransition("${ident}",
m_version, addr,