There is another type named Time defined in src/base, which conflicts with Ruby's own Time typedef; the hunks below therefore replace Ruby's uses of Time with Cycles or Tick as appropriate and remove the typedef.
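For context, here is a minimal sketch of the replacement type, modelled loosely on gem5's Cycles class in src/base/types.hh (simplified; the real class carries more operators). The constructor is explicit, which is why the hunks below must wrap plain integers as Cycles(0) and Cycles(INFINITE_) instead of assigning them directly.

#include <cstdint>

// Simplified stand-in for gem5's Cycles (src/base/types.hh); the explicit
// constructor is the detail that matters for the hunks below.
class Cycles
{
  private:
    uint64_t c;

  public:
    explicit Cycles(uint64_t _c = 0) : c(_c) { }   // no implicit int -> Cycles
    operator uint64_t() const { return c; }        // decays back to a count
    Cycles operator-(const Cycles &b) const { return Cycles(c - b.c); }
};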
}
void
-Check::performCallback(NodeID proc, SubBlock* data, Time curTime)
+Check::performCallback(NodeID proc, SubBlock* data, Cycles curTime)
{
Address address = data->getAddress();
int _num_readers, RubyTester* _tester);
 void initiate(); // Does Action or Check or neither
- void performCallback(NodeID proc, SubBlock* data, Time curTime);
+ void performCallback(NodeID proc, SubBlock* data, Cycles curTime);
const Address& getAddress() { return m_address; }
void changeAddress(const Address& address);
m_last_progress_vector.resize(m_num_cpus);
for (int i = 0; i < m_last_progress_vector.size(); i++) {
- m_last_progress_vector[i] = 0;
+ m_last_progress_vector[i] = Cycles(0);
}
m_num_writers = writePorts.size();
RubyTester::checkForDeadlock()
{
int size = m_last_progress_vector.size();
- Time current_time = curCycle();
+ Cycles current_time = curCycle();
for (int processor = 0; processor < size; processor++) {
if ((current_time - m_last_progress_vector[processor]) >
m_deadlock_threshold) {
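Since Cycles keeps subtraction and comparison, the deadlock test reads exactly as it did with the old integer Time. A usage sketch with made-up numbers:

// Subtraction stays within Cycles; the comparison falls through to the
// underlying count via the conversion operator.
Cycles current_time(100);
Cycles last_progress(40);
Cycles deadlock_threshold(50);
bool deadlocked = (current_time - last_progress) > deadlock_threshold;  // true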
RubyTester& operator=(const RubyTester& obj);
CheckTable* m_checkTable_ptr;
- std::vector<Time> m_last_progress_vector;
+ std::vector<Cycles> m_last_progress_vector;
int m_num_cpus;
uint64 m_checks_completed;
typedef unsigned long long uint64;
typedef long long int64;
-typedef int64 Time;
typedef uint64 physical_address_t;
typedef int64 Index; // what the address bit ripper returns
// instantiating the NI flit buffers
for (int i = 0; i < m_num_vcs; i++) {
m_ni_buffers[i] = new flitBuffer_d();
- m_ni_enqueue_time[i] = INFINITE_;
+ m_ni_enqueue_time[i] = Cycles(INFINITE_);
}
m_vc_allocator.resize(m_virtual_networks); // 1 allocator per vnet
if (t_flit->get_type() == TAIL_ ||
t_flit->get_type() == HEAD_TAIL_) {
- m_ni_enqueue_time[vc] = INFINITE_;
+ m_ni_enqueue_time[vc] = Cycles(INFINITE_);
}
return;
}
// Input Flit Buffers
// The flit buffers which will serve the Consumer
std::vector<flitBuffer_d *> m_ni_buffers;
- std::vector<Time> m_ni_enqueue_time;
+ std::vector<Cycles> m_ni_enqueue_time;
// The Message buffers that takes messages from the protocol
std::vector<MessageBuffer *> inNode_ptr;
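INFINITE_ is Garnet's sentinel for a virtual channel with no flit waiting, and the explicit constructor means the sentinel must be wrapped as well. A small sketch of the pattern (the value 10000 is only a placeholder, not Garnet's actual definition):

#include <vector>

const int INFINITE_ = 10000;   // placeholder value, not Garnet's definition

std::vector<Cycles> ni_enqueue_time(4, Cycles(INFINITE_));  // all VCs idle
// ni_enqueue_time[vc] = Cycles(7);  // flit enqueued at cycle 7
// ni_enqueue_time[vc] = 7;          // would not compile: ctor is explicit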
StoreTrace::downgrade(NodeID node)
{
if (node == m_last_writer) {
- Time current = curTick();
+ Tick current = curTick();
assert(m_stores_this_interval != 0);
assert(m_last_store != 0);
assert(m_first_store != 0);
#include <iostream>
+#include "base/types.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Histogram.hh"
Address m_addr;
NodeID m_last_writer;
- Time m_first_store;
- Time m_last_store;
+ Tick m_first_store;
+ Tick m_last_store;
int m_stores_this_interval;
int64 m_total_samples; // Total number of store lifetimes of this line
}
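StoreTrace stamps stores with curTick() rather than curCycle(), so its fields become Tick instead of Cycles: in gem5, Tick is the global simulated-time count from src/base/types.hh, while Cycles counts the clock of one ClockedObject. A minimal self-contained sketch (the local curTick() is a hypothetical stand-in for gem5's global one):

#include <cstdint>

typedef uint64_t Tick;              // mirrors src/base/types.hh

static Tick _now = 0;               // stand-in clock
static Tick curTick() { return _now; }

static void
recordStore()
{
    // StoreTrace-style bookkeeping: raw ticks, no per-object clock involved.
    Tick first_store = curTick();
    Tick last_store = curTick();
    (void)first_store; (void)last_store;
}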
inline Cycles zero_time() { return Cycles(0); }
-inline Cycles TimeToCycles(Time t) { return Cycles(t); }
inline NodeID
intToID(int nodenum)
void
CacheRecorder::addRecord(int cntrl, const physical_address_t data_addr,
const physical_address_t pc_addr,
- RubyRequestType type, Time time, DataBlock& data)
+ RubyRequestType type, Tick time, DataBlock& data)
{
TraceRecord* rec = (TraceRecord*)malloc(sizeof(TraceRecord) +
m_block_size_bytes);
#include <vector>
#include "base/hashmap.hh"
+#include "base/types.hh"
#include "mem/protocol/RubyRequestType.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/DataBlock.hh"
class TraceRecord {
public:
int m_cntrl_id;
- Time m_time;
+ Tick m_time;
physical_address_t m_data_address;
physical_address_t m_pc_address;
RubyRequestType m_type;
uint64_t block_size_bytes);
void addRecord(int cntrl, const physical_address_t data_addr,
const physical_address_t pc_addr, RubyRequestType type,
- Time time, DataBlock& data);
+ Tick time, DataBlock& data);
uint64 aggregateRecords(uint8_t** data, uint64 size);
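Callers of addRecord accordingly pass a Tick, typically the current simulated time. A hedged call sketch; every identifier here except addRecord and RubyRequestType_LD is a stand-in for whatever the caller has in scope:

// Hypothetical caller: records a load at the current tick.
recorder->addRecord(cntrl_id, data_addr, pc_addr,
                    RubyRequestType_LD, curTick(), datablock);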