{
DEBUG_MSG(QUEUE_COMP,MedPrio,"pop from " + m_name);
assert(isReady());
- Time ready_time = m_prio_heap.extractMin().m_time;
+ m_prio_heap.extractMin();
// record previous size and time so the current buffer size isn't adjusted until next cycle
if (m_time_last_time_pop < g_eventQueue_ptr->getTime()) {
m_size_at_cycle_start = m_size;
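The snapshot above is what keeps the buffer's apparent occupancy stable within a cycle: the size is recorded on the first pop of the cycle so later queries in the same cycle are not affected by that pop. A minimal sketch of the pattern; only m_size, m_size_at_cycle_start and m_time_last_time_pop are taken from the hunk, the struct and method names are illustrative.

#include <cstdint>

struct TinyBuffer {
    int m_size = 0;                     // live message count
    int m_size_at_cycle_start = 0;      // occupancy frozen on the first pop of a cycle
    uint64_t m_time_last_time_pop = 0;  // cycle of the most recent pop

    void pop(uint64_t now) {
        if (m_time_last_time_pop < now) {
            // first pop this cycle: record the size before any removals
            m_size_at_cycle_start = m_size;
            m_time_last_time_pop = now;
        }
        --m_size;
    }

    // occupancy used for flow control; unaffected by pops made earlier this cycle
    int effective_size(uint64_t now) const {
        return (m_time_last_time_pop == now) ? m_size_at_cycle_start : m_size;
    }
};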
//crossbar
double SIM_crossbar_stat_energy(power_crossbar *crsbar, double n_data)
{
- double Eavg = 0, Eatomic, Estatic;
+ double Eavg = 0, Eatomic;
if (n_data > crsbar->n_out) {
n_data = crsbar->n_out;
int highest_prio_vnet = m_virtual_networks-1;
int lowest_prio_vnet = 0;
int decrementer = 1;
- bool schedule_wakeup = false;
NetworkMessage* net_msg_ptr = NULL;
// invert priorities to avoid starvation seen in the component network
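The decrementer and the vnet bounds above let the arbitration loop be walked in either direction; flipping the direction periodically is what avoids the starvation mentioned in the comment. A minimal sketch of that flip, with the wakeup counter and PRIORITY_SWITCH_LIMIT as illustrative names:

#include <algorithm>

void scan_virtual_networks(int m_virtual_networks, int& wakeups_wo_switch)
{
    const int PRIORITY_SWITCH_LIMIT = 128;  // illustrative value
    int highest_prio_vnet = m_virtual_networks - 1;
    int lowest_prio_vnet = 0;
    int decrementer = 1;

    if (++wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
        // walk the vnets in the opposite order this time so the normally
        // low-priority ones are not starved indefinitely
        std::swap(highest_prio_vnet, lowest_prio_vnet);
        decrementer = -1;
        wakeups_wo_switch = 0;
    }

    for (int vnet = highest_prio_vnet;
         vnet * decrementer >= lowest_prio_vnet * decrementer;
         vnet -= decrementer) {
        // ... service messages on virtual network 'vnet' ...
    }
}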
// Helper functions based on chapter 29 of Cormen et al.
-static Matrix extend_shortest_path(const Matrix& current_dist, Matrix& latencies, Matrix& inter_switches);
+static void extend_shortest_path(Matrix& current_dist, Matrix& latencies, Matrix& inter_switches);
static Matrix shortest_path(const Matrix& weights, Matrix& latencies, Matrix& inter_switches);
static bool link_is_shortest_path_to_node(SwitchID src, SwitchID next, SwitchID final, const Matrix& weights, const Matrix& dist);
static NetDest shortest_path_to_node(SwitchID src, SwitchID next, const Matrix& weights, const Matrix& dist);
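The new void signature turns the extend step into an in-place update of the distance matrix rather than returning a copy. A minimal sketch of that relaxation step, assuming Matrix is a vector of int vectors with non-negative entries; the latency and intermediate-switch bookkeeping of the real helpers is omitted:

#include <vector>

typedef std::vector<std::vector<int> > Matrix;

static void extend_shortest_path_sketch(Matrix& dist)
{
    const int nodes = dist.size();
    bool change = true;
    while (change) {                 // repeat until no distance improves
        change = false;
        for (int i = 0; i < nodes; i++) {
            for (int j = 0; j < nodes; j++) {
                for (int k = 0; k < nodes; k++) {
                    // min-plus relaxation: is i -> k -> j shorter than i -> j?
                    int through_k = dist[i][k] + dist[k][j];
                    if (through_k < dist[i][j]) {
                        dist[i][j] = through_k;
                        change = true;
                    }
                }
            }
        }
    }
}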
assert(addr == line_address(addr));
Time last_time = m_ruby_start;
if (m_conflicting_map_ptr->exist(addr)) {
- Time last_time = m_conflicting_map_ptr->lookup(addr);
+ last_time = m_conflicting_map_ptr->lookup(addr);
}
Time current_time = g_eventQueue_ptr->getTime();
assert (current_time - last_time > 0);
void profileOverflow(const Address & addr, MachineID mach)
{
+#if 0
if(mach.type == MACHINETYPE_L1CACHE_ENUM){
// for L1 overflows
int proc_num = L1CacheMachIDToProcessorNum(mach);
// g_system_ptr->getChip(chip_num)->m_L1Cache_xact_mgr_vec[p]->profileOverflow(addr, false);
}
}
+#endif
}
Time arrival_time = memRef.m_time;
uint64 at = arrival_time;
bool is_mem_read = memRef.m_is_mem_read;
- bool dirtyWB = memRef.m_is_dirty_wb;
physical_address_t addr = memRef.m_addr;
int bank = getBank(addr);
if (m_debug) {
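getBank() maps the request's physical address to a DRAM bank before it is queued. A minimal sketch of one common mapping, interleaving banks on the low-order line-address bits; the bit positions and parameters here are assumptions, not the simulator's actual scheme:

#include <cstdint>

typedef uint64_t physical_address_t;

// Illustrative bank interleaving: strip the cache-line offset, then take
// log2(banks) bits as the bank index so consecutive lines hit different banks.
int get_bank_sketch(physical_address_t addr,
                    int line_offset_bits /* e.g. 6 for 64B lines */,
                    int total_banks      /* assumed power of two */)
{
    return (int)((addr >> line_offset_bits) & (physical_address_t)(total_banks - 1));
}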
int NodePersistentTable::countReadStarvingForAddress(const Address& address) const
{
- int count = 0;
if (m_map_ptr->exist(address)) {
NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
return (entry.m_starving.count() - entry.m_request_to_write.count());
int PersistentTable::countReadStarvingForAddress(const Address& address) const
{
- int count = 0;
if (m_map_ptr->exist(address)) {
PersistentTableEntry& entry = m_map_ptr->lookup(address);
return (entry.m_starving.count() - entry.m_request_to_write.count());
assert(line_address(m_pending_address) == addr);
assert(line_address(peek().m_subblock.getAddress()) == addr);
CacheRequestType type = peek().m_type;
- int threadID = peek().m_thread;
+ //int threadID = peek().m_thread;
assert((type == CacheRequestType_ST) || (type == CacheRequestType_ATOMIC));
m_pending = false;
if (m_status == DetermInvGeneratorStatus_Load_Pending) {
m_driver.recordLoadLatency(g_eventQueue_ptr->getTime() - m_last_transition);
- NodeID firstByte = data.readByte(); // dummy read
-
m_driver.loadCompleted(m_node, data.getAddress());
if (!m_driver.isStoreReady(m_node, m_address)) { // if we don't have to store, we are done for this transaction
for (int processor=0; processor<size; processor++) {
if ((current_time - m_last_progress_vector[processor]) > g_DEADLOCK_THRESHOLD) {
WARN_EXPR(processor);
+#ifndef NDEBUG
Sequencer* seq_ptr = g_system_ptr->getChip(processor/RubyConfig::numberOfProcsPerChip())->getSequencer(processor%RubyConfig::numberOfProcsPerChip());
+#endif
assert(seq_ptr != NULL);
// if (seq_ptr->isRequestPending()) {
// WARN_EXPR(seq_ptr->pendingAddress());
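The loop above is a progress watchdog: each processor's last-progress timestamp is compared against a deadlock threshold on every check. A self-contained sketch of the same check; only m_last_progress_vector and g_DEADLOCK_THRESHOLD are taken from the hunk, the reporting is illustrative:

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

typedef uint64_t Time;

void check_for_deadlock(const std::vector<Time>& m_last_progress_vector,
                        Time current_time, Time g_DEADLOCK_THRESHOLD)
{
    for (size_t processor = 0; processor < m_last_progress_vector.size(); ++processor) {
        Time idle = current_time - m_last_progress_vector[processor];
        if (idle > g_DEADLOCK_THRESHOLD) {
            // a processor with no forward progress for the threshold number of
            // cycles is treated as deadlocked and the run is aborted
            std::fprintf(stderr, "possible deadlock: processor %zu idle for %llu cycles\n",
                         processor, (unsigned long long) idle);
            std::abort();
        }
    }
}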
Address addr;
// FIXME - make this a parameter of the workload
- bool done = false;
int lock_number = 0;
int counter = 0;
while (1) {
for (int processor=0; processor<size; processor++) {
if ((current_time - m_last_progress_vector[processor]) > g_DEADLOCK_THRESHOLD) {
WARN_EXPR(processor);
+#ifndef NDEBUG
Sequencer* seq_ptr = g_system_ptr->getChip(processor/RubyConfig::numberOfProcsPerChip())->getSequencer(processor%RubyConfig::numberOfProcsPerChip());
+#endif
assert(seq_ptr != NULL);
// if (seq_ptr->isRequestPending()) {
// WARN_EXPR(seq_ptr->pendingAddress());