{
m_msg_counter = 0;
m_consumer_ptr = NULL;
- m_clockobj_ptr = NULL;
+ m_sender_ptr = NULL;
+ m_receiver_ptr = NULL;
m_ordering_set = false;
m_strict_fifo = true;
int
MessageBuffer::getSize()
{
- if (m_time_last_time_size_checked == m_clockobj_ptr->curCycle()) {
+ if (m_time_last_time_size_checked == m_receiver_ptr->curCycle()) {
return m_size_last_time_size_checked;
} else {
- m_time_last_time_size_checked = m_clockobj_ptr->curCycle();
+ m_time_last_time_size_checked = m_receiver_ptr->curCycle();
m_size_last_time_size_checked = m_size;
return m_size;
}
// until next cycle, but enqueue operations affect the visible
// size immediately
int current_size = max(m_size_at_cycle_start, m_size);
- if (m_time_last_time_pop < m_clockobj_ptr->curCycle()) {
+ if (m_time_last_time_pop < m_receiver_ptr->curCycle()) {
// no pops this cycle - m_size is correct
current_size = m_size;
} else {
- if (m_time_last_time_enqueue < m_clockobj_ptr->curCycle()) {
+ if (m_time_last_time_enqueue < m_receiver_ptr->curCycle()) {
// no enqueues this cycle - m_size_at_cycle_start is correct
current_size = m_size_at_cycle_start;
} else {
}
void
-MessageBuffer::enqueue(MsgPtr message, Cycles delta)
+MessageBuffer::enqueue(MsgPtr message, Cycles delay)
{
m_msg_counter++;
m_size++;
// record current time in case we have a pop that also adjusts my size
- if (m_time_last_time_enqueue < m_clockobj_ptr->curCycle()) {
+ if (m_time_last_time_enqueue < m_receiver_ptr->curCycle()) {
m_msgs_this_cycle = 0; // first msg this cycle
- m_time_last_time_enqueue = m_clockobj_ptr->curCycle();
+ m_time_last_time_enqueue = m_receiver_ptr->curCycle();
}
m_msgs_this_cycle++;
// Calculate the arrival time of the message, that is, the first
// cycle the message can be dequeued.
- assert(delta>0);
- Cycles current_time(m_clockobj_ptr->curCycle());
+ assert(delay > 0);
+ Cycles delta = m_receiver_ptr->ticksToCycles(delay *
+ m_sender_ptr->clockPeriod());
+
+ Cycles current_time(m_receiver_ptr->curCycle());
Cycles arrival_time(0);
if (!RubySystem::getRandomization() || (m_randomization == false)) {
if (arrival_time < m_last_arrival_time) {
panic("FIFO ordering violated: %s name: %s current time: %d "
"delta: %d arrival_time: %d last arrival_time: %d\n",
- *this, m_name, current_time * m_clockobj_ptr->clockPeriod(),
- delta * m_clockobj_ptr->clockPeriod(),
- arrival_time * m_clockobj_ptr->clockPeriod(),
- m_last_arrival_time * m_clockobj_ptr->clockPeriod());
+ *this, m_name, current_time * m_receiver_ptr->clockPeriod(),
+ delta * m_receiver_ptr->clockPeriod(),
+ arrival_time * m_receiver_ptr->clockPeriod(),
+ m_last_arrival_time * m_receiver_ptr->clockPeriod());
}
}
Message* msg_ptr = message.get();
assert(msg_ptr != NULL);
- assert(m_clockobj_ptr->curCycle() >= msg_ptr->getLastEnqueueTime() &&
+ assert(m_receiver_ptr->clockEdge() >= msg_ptr->getLastEnqueueTime() &&
"ensure we aren't dequeued early");
- msg_ptr->setDelayedCycles(m_clockobj_ptr->curCycle() -
+ msg_ptr->setDelayedTicks(m_receiver_ptr->clockEdge() -
msg_ptr->getLastEnqueueTime() +
- msg_ptr->getDelayedCycles());
- msg_ptr->setLastEnqueueTime(arrival_time);
+ msg_ptr->getDelayedTicks());
+ msg_ptr->setLastEnqueueTime(arrival_time * m_receiver_ptr->clockPeriod());
// Insert the message into the priority heap
MessageBufferNode thisNode(arrival_time, m_msg_counter, message);
greater<MessageBufferNode>());
DPRINTF(RubyQueue, "Enqueue arrival_time: %lld, Message: %s\n",
- arrival_time * m_clockobj_ptr->clockPeriod(), *(message.get()));
+ arrival_time * m_receiver_ptr->clockPeriod(), *(message.get()));
// Schedule the wakeup
if (m_consumer_ptr != NULL) {
// record previous size and time so the current buffer size isn't
// adjusted until next cycle
- if (m_time_last_time_pop < m_clockobj_ptr->curCycle()) {
+ if (m_time_last_time_pop < m_receiver_ptr->curCycle()) {
m_size_at_cycle_start = m_size;
- m_time_last_time_pop = m_clockobj_ptr->curCycle();
+ m_time_last_time_pop = m_receiver_ptr->curCycle();
}
m_size--;
}
pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
greater<MessageBufferNode>());
- node.m_time = m_clockobj_ptr->curCycle() + m_recycle_latency;
+ node.m_time = m_receiver_ptr->curCycle() + m_recycle_latency;
m_prio_heap.back() = node;
push_heap(m_prio_heap.begin(), m_prio_heap.end(),
greater<MessageBufferNode>());
- m_consumer_ptr->scheduleEventAbsolute(m_clockobj_ptr->curCycle() +
+ m_consumer_ptr->scheduleEventAbsolute(m_receiver_ptr->curCycle() +
m_recycle_latency);
}
{
DPRINTF(RubyQueue, "ReanalyzeMessages\n");
assert(m_stall_msg_map.count(addr) > 0);
- Cycles nextCycle = m_clockobj_ptr->curCycle() + Cycles(1);
+ Cycles nextCycle = m_receiver_ptr->curCycle() + Cycles(1);
//
// Put all stalled messages associated with this address back on the
MessageBuffer::reanalyzeAllMessages()
{
DPRINTF(RubyQueue, "ReanalyzeAllMessages %s\n");
- Cycles nextCycle = m_clockobj_ptr->curCycle() + Cycles(1);
+ Cycles nextCycle = m_receiver_ptr->curCycle() + Cycles(1);
//
// Put all stalled messages associated with this address back on the
// this function should only be called on dequeue
// ensure the msg hasn't been enqueued
- assert(msg_ptr->getLastEnqueueTime() <= m_clockobj_ptr->curCycle());
- msg_ptr->setDelayedCycles(m_clockobj_ptr->curCycle() -
- msg_ptr->getLastEnqueueTime() +
- msg_ptr->getDelayedCycles());
+ assert(msg_ptr->getLastEnqueueTime() <= m_receiver_ptr->clockEdge());
+
+ msg_ptr->setDelayedTicks(m_receiver_ptr->clockEdge() -
+ msg_ptr->getLastEnqueueTime() +
+ msg_ptr->getDelayedTicks());
- return msg_ptr->getDelayedCycles();
+ return m_receiver_ptr->ticksToCycles(msg_ptr->getDelayedTicks());
}
void
MessageBuffer::isReady() const
{
return ((m_prio_heap.size() > 0) &&
- (m_prio_heap.front().m_time <= m_clockobj_ptr->curCycle()));
+ (m_prio_heap.front().m_time <= m_receiver_ptr->curCycle()));
}
bool
m_consumer_ptr = consumer_ptr;
}
- void setClockObj(ClockedObject* obj)
+ void setSender(ClockedObject* obj)
{
- assert(m_clockobj_ptr == NULL);
- m_clockobj_ptr = obj;
+ assert(m_sender_ptr == NULL || m_sender_ptr == obj);
+ m_sender_ptr = obj;
+ }
+
+ void setReceiver(ClockedObject* obj)
+ {
+ assert(m_receiver_ptr == NULL || m_receiver_ptr == obj);
+ m_receiver_ptr = obj;
}
void setDescription(const std::string& name) { m_name = name; }
MessageBuffer& operator=(const MessageBuffer& obj);
// Data Members (m_ prefix)
- //! Object used for querying time.
- ClockedObject* m_clockobj_ptr;
+ //! The two ends of the buffer.
+ ClockedObject* m_sender_ptr;
+ ClockedObject* m_receiver_ptr;
+
//! Consumer to signal a wakeup(), can be NULL
Consumer* m_consumer_ptr;
std::vector<MessageBufferNode> m_prio_heap;
// the protocol injects messages into the NI
inNode_ptr[j]->setConsumer(this);
- inNode_ptr[j]->setClockObj(m_net_ptr);
+ inNode_ptr[j]->setReceiver(m_net_ptr);
+
+ outNode_ptr[j]->setSender(m_net_ptr);
}
}
flit_d *fl = new flit_d(i, vc, vnet, num_flits, new_msg_ptr,
m_net_ptr->curCycle());
- fl->set_delay(m_net_ptr->curCycle() - msg_ptr->getTime());
+ fl->set_delay(m_net_ptr->curCycle() -
+ m_net_ptr->ticksToCycles(msg_ptr->getTime()));
m_ni_buffers[vc]->insert(fl);
}
// protocol injects messages into the NI
for (int j = 0; j < m_virtual_networks; j++) {
inNode_ptr[j]->setConsumer(this);
- inNode_ptr[j]->setClockObj(m_net_ptr);
+ inNode_ptr[j]->setReceiver(m_net_ptr);
+
+ outNode_ptr[j]->setSender(m_net_ptr);
}
}
m_net_ptr->increment_injected_flits(vnet);
flit *fl = new flit(i, vc, vnet, num_flits, new_msg_ptr,
m_net_ptr->curCycle());
- fl->set_delay(m_net_ptr->curCycle() - msg_ptr->getTime());
+ fl->set_delay(m_net_ptr->curCycle() -
+ m_net_ptr->ticksToCycles(msg_ptr->getTime()));
m_ni_buffers[vc]->insert(fl);
}
}
void
-PerfectSwitch::addInPort(const vector<MessageBuffer*>& in, Switch *sw)
+PerfectSwitch::addInPort(const vector<MessageBuffer*>& in)
{
assert(in.size() == m_virtual_networks);
NodeID port = m_in.size();
for (int j = 0; j < m_virtual_networks; j++) {
m_in[port][j]->setConsumer(this);
- m_in[port][j]->setClockObj(sw);
string desc = csprintf("[Queue from port %s %s %s to PerfectSwitch]",
to_string(m_switch_id), to_string(port), to_string(j));
{ return csprintf("PerfectSwitch-%i", m_switch_id); }
void init(SimpleNetwork *);
- void addInPort(const std::vector<MessageBuffer*>& in, Switch *);
+ void addInPort(const std::vector<MessageBuffer*>& in);
void addOutPort(const std::vector<MessageBuffer*>& out,
const NetDest& routing_table_entry);
void clearRoutingTables();
void
Switch::addInPort(const vector<MessageBuffer*>& in)
{
- m_perfect_switch_ptr->addInPort(in, this);
+ m_perfect_switch_ptr->addInPort(in);
+
+ for (int i = 0; i < in.size(); i++) {
+ in[i]->setReceiver(this);
+ }
}
void
// Create one buffer per vnet (these are intermediaryQueues)
vector<MessageBuffer*> intermediateBuffers;
for (int i = 0; i < out.size(); i++) {
+ out[i]->setSender(this);
+
MessageBuffer* buffer_ptr = new MessageBuffer;
// Make these queues ordered
buffer_ptr->setOrdering(true);
if (m_network_ptr->getBufferSize() > 0) {
buffer_ptr->resize(m_network_ptr->getBufferSize());
}
+
intermediateBuffers.push_back(buffer_ptr);
m_buffers_to_free.push_back(buffer_ptr);
+
+ buffer_ptr->setSender(this);
+ buffer_ptr->setReceiver(this);
}
// Hook the queues to the PerfectSwitch
m_perfect_switch_ptr->addOutPort(intermediateBuffers, routing_table_entry);
// Hook the queues to the Throttle
- throttle_ptr->addLinks(intermediateBuffers, out, this);
+ throttle_ptr->addLinks(intermediateBuffers, out);
}
void
void
Throttle::addLinks(const std::vector<MessageBuffer*>& in_vec,
- const std::vector<MessageBuffer*>& out_vec, ClockedObject *em)
+ const std::vector<MessageBuffer*>& out_vec)
{
assert(in_vec.size() == out_vec.size());
for (int i=0; i<in_vec.size(); i++) {
- addVirtualNetwork(in_vec[i], out_vec[i], em);
+ addVirtualNetwork(in_vec[i], out_vec[i]);
}
m_message_counters.resize(MessageSizeType_NUM);
}
void
-Throttle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr,
- ClockedObject *em)
+Throttle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr)
{
m_units_remaining.push_back(0);
m_in.push_back(in_ptr);
// Set consumer and description
m_in[m_vnets]->setConsumer(this);
- m_in[m_vnets]->setClockObj(em);
string desc = "[Queue to Throttle " + to_string(m_sID) + " " +
to_string(m_node) + "]";
DPRINTF(RubyNetwork, "throttle: %d my bw %d bw spent "
"enqueueing net msg %d time: %lld.\n",
m_node, getLinkBandwidth(), m_units_remaining[vnet],
- g_system_ptr->getTime());
+ g_system_ptr->curCycle());
// Move the message
m_out[vnet]->enqueue(m_in[vnet]->peekMsgPtr(), m_link_latency);
void
Throttle::clearStats()
{
- m_ruby_start = g_system_ptr->getTime();
+ m_ruby_start = g_system_ptr->curCycle();
m_links_utilized = 0.0;
for (int i = 0; i < m_message_counters.size(); i++) {
Throttle::getUtilization() const
{
return 100.0 * double(m_links_utilized) /
- double(g_system_ptr->getTime()-m_ruby_start);
+ double(g_system_ptr->curCycle()-m_ruby_start);
}
void
{ return csprintf("Throttle-%i", m_sID); }
void addLinks(const std::vector<MessageBuffer*>& in_vec,
- const std::vector<MessageBuffer*>& out_vec, ClockedObject *em);
+ const std::vector<MessageBuffer*>& out_vec);
void wakeup();
void printStats(std::ostream& out) const;
private:
void init(NodeID node, Cycles link_latency, int link_bandwidth_multiplier,
int endpoint_bandwidth);
- void addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr,
- ClockedObject *em);
+ void addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr);
void linkUtilized(double ratio) { m_links_utilized += ratio; }
// Private copy constructor and assignment operator
for (int i = 0; i < m_num_of_sequencers; i++) {
perProcCycleCount[i] =
- g_system_ptr->getTime() - m_cycles_executed_at_start[i] + 1;
+ g_system_ptr->curCycle() - m_cycles_executed_at_start[i] + 1;
// The +1 allows us to avoid division by zero
}
ostream &out = *m_periodic_output_file_ptr;
- out << "ruby_cycles: " << g_system_ptr->getTime()-m_ruby_start << endl
+ out << "ruby_cycles: " << g_system_ptr->curCycle()-m_ruby_start << endl
<< "mbytes_resident: " << process_memory_resident() << endl
<< "mbytes_total: " << process_memory_total() << endl;
double minutes = seconds / 60.0;
double hours = minutes / 60.0;
double days = hours / 24.0;
- Cycles ruby_cycles = g_system_ptr->getTime()-m_ruby_start;
+ Cycles ruby_cycles = g_system_ptr->curCycle()-m_ruby_start;
if (!short_stats) {
out << "Elapsed_time_in_seconds: " << seconds << endl;
out << "Virtual_time_in_days: " << days << endl;
out << endl;
- out << "Ruby_current_time: " << g_system_ptr->getTime() << endl;
+ out << "Ruby_current_time: " << g_system_ptr->curCycle() << endl;
out << "Ruby_start_time: " << m_ruby_start << endl;
out << "Ruby_cycles: " << ruby_cycles << endl;
out << endl;
for (int i = 0; i < m_num_of_sequencers; i++) {
perProcCycleCount[i] =
- g_system_ptr->getTime() - m_cycles_executed_at_start[i] + 1;
+ g_system_ptr->curCycle() - m_cycles_executed_at_start[i] + 1;
// The +1 allows us to avoid division by zero
}
void
Profiler::clearStats()
{
- m_ruby_start = g_system_ptr->getTime();
+ m_ruby_start = g_system_ptr->curCycle();
m_real_time_start_time = time(NULL);
m_cycles_executed_at_start.resize(m_num_of_sequencers);
if (g_system_ptr == NULL) {
m_cycles_executed_at_start[i] = 0;
} else {
- m_cycles_executed_at_start[i] = g_system_ptr->getTime();
+ m_cycles_executed_at_start[i] = g_system_ptr->curCycle();
}
}
//g_eventQueue_ptr->triggerAllEvents();
// update the start time
- m_ruby_start = g_system_ptr->getTime();
+ m_ruby_start = g_system_ptr->curCycle();
}
void
uint64 tr = 0;
Address watch_address = Address(tr);
- DPRINTFN("%7s %3s RUBY WATCH %d\n", g_system_ptr->getTime(), id,
+ DPRINTFN("%7s %3s RUBY WATCH %d\n", g_system_ptr->curCycle(), id,
watch_address);
// don't care about success or failure
class Message : public RefCounted
{
public:
- Message(Cycles curTime)
+ Message(Tick curTime)
: m_time(curTime),
m_LastEnqueueTime(curTime),
- m_DelayedCycles(0)
+ m_DelayedTicks(0)
{ }
Message(const Message &other)
: m_time(other.m_time),
m_LastEnqueueTime(other.m_LastEnqueueTime),
- m_DelayedCycles(other.m_DelayedCycles)
+ m_DelayedTicks(other.m_DelayedTicks)
{ }
virtual ~Message() { }
virtual bool functionalWrite(Packet *pkt) = 0;
//{ fatal("Write functional access not implemented!"); }
- void setDelayedCycles(const Cycles cycles) { m_DelayedCycles = cycles; }
- const Cycles getDelayedCycles() const {return m_DelayedCycles;}
+ void setDelayedTicks(const Tick ticks) { m_DelayedTicks = ticks; }
+ const Tick getDelayedTicks() const {return m_DelayedTicks;}
- void setLastEnqueueTime(const Cycles& time) { m_LastEnqueueTime = time; }
- const Cycles getLastEnqueueTime() const {return m_LastEnqueueTime;}
+ void setLastEnqueueTime(const Tick& time) { m_LastEnqueueTime = time; }
+ const Tick getLastEnqueueTime() const {return m_LastEnqueueTime;}
- const Cycles& getTime() const { return m_time; }
- void setTime(const Cycles& new_time) { m_time = new_time; }
+ const Tick& getTime() const { return m_time; }
+ void setTime(const Tick& new_time) { m_time = new_time; }
private:
- Cycles m_time;
- Cycles m_LastEnqueueTime; // my last enqueue time
- Cycles m_DelayedCycles; // my delayed cycles
+ Tick m_time;
+ Tick m_LastEnqueueTime; // my last enqueue time
+ Tick m_DelayedTicks; // my delayed ticks
};
inline std::ostream&
class NetworkMessage : public Message
{
public:
- NetworkMessage(Cycles curTime)
+ NetworkMessage(Tick curTime)
: Message(curTime), m_internal_dest_valid(false)
{ }
PacketPtr pkt;
unsigned m_contextId;
- RubyRequest(Cycles curTime, uint64_t _paddr, uint8_t* _data, int _len,
+ RubyRequest(Tick curTime, uint64_t _paddr, uint8_t* _data, int _len,
uint64_t _pc, RubyRequestType _type, RubyAccessMode _access_mode,
PacketPtr _pkt, PrefetchBit _pb = PrefetchBit_No,
unsigned _proc_id = 100)
m_LineAddress.makeLineAddress();
}
- RubyRequest(Cycles curTime) : Message(curTime)
- {
- }
-
- RubyRequest*
- clone() const
- {
- return new RubyRequest(*this);
- }
-
- const Address&
- getLineAddress() const
- {
- return m_LineAddress;
- }
-
- const Address&
- getPhysicalAddress() const
- {
- return m_PhysicalAddress;
- }
-
- const RubyRequestType&
- getType() const
- {
- return m_Type;
- }
+ RubyRequest(Tick curTime) : Message(curTime) {}
+ RubyRequest* clone() const { return new RubyRequest(*this); }
- const Address&
- getProgramCounter() const
- {
- return m_ProgramCounter;
- }
-
- const RubyAccessMode&
- getAccessMode() const
- {
- return m_AccessMode;
- }
-
- const int&
- getSize() const
- {
- return m_Size;
- }
-
- const PrefetchBit&
- getPrefetch() const
- {
- return m_Prefetch;
- }
+ const Address& getLineAddress() const { return m_LineAddress; }
+ const Address& getPhysicalAddress() const { return m_PhysicalAddress; }
+ const RubyRequestType& getType() const { return m_Type; }
+ const Address& getProgramCounter() const { return m_ProgramCounter; }
+ const RubyAccessMode& getAccessMode() const { return m_AccessMode; }
+ const int& getSize() const { return m_Size; }
+ const PrefetchBit& getPrefetch() const { return m_Prefetch; }
void print(std::ostream& out) const;
-
bool functionalRead(Packet *pkt);
bool functionalWrite(Packet *pkt);
};
active_request.bytes_issued = 0;
active_request.pkt = pkt;
- SequencerMsg *msg = new SequencerMsg(curCycle());
+ SequencerMsg *msg = new SequencerMsg(clockEdge());
msg->getPhysicalAddress() = Address(paddr);
msg->getLineAddress() = line_address(msg->getPhysicalAddress());
msg->getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD;
return;
}
- SequencerMsg *msg = new SequencerMsg(curCycle());
+ SequencerMsg *msg = new SequencerMsg(clockEdge());
msg->getPhysicalAddress() = Address(active_request.start_paddr +
active_request.bytes_completed);
void
RubyMemoryControl::enqueue(const MsgPtr& message, Cycles latency)
{
- Cycles arrival_time = g_system_ptr->getTime() + latency;
+ Cycles arrival_time = curCycle() + latency;
const MemoryMsg* memMess = safe_cast<const MemoryMsg*>(message.get());
physical_address_t addr = memMess->getAddress().getAddress();
MemoryRequestType type = memMess->getType();
RubyMemoryControl::isReady()
{
return ((!m_response_queue.empty()) &&
- (m_response_queue.front().m_time <= g_system_ptr->getTime()));
+ (m_response_queue.front().m_time <= g_system_ptr->curCycle()));
}
void
{
assert(m_controller != NULL);
m_mandatory_q_ptr = m_controller->getMandatoryQueue();
+ m_mandatory_q_ptr->setSender(this);
}
BaseMasterPort &
#include <string>
#include "mem/protocol/RequestStatus.hh"
+#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/mem_object.hh"
#include "mem/physical.hh"
#include "mem/tport.hh"
#include "params/RubyPort.hh"
-class MessageBuffer;
class AbstractController;
class RubyPort : public MemObject
#include "debug/RubyStats.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
-#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
pc = pkt->req->getPC();
}
- RubyRequest *msg = new RubyRequest(curCycle(), pkt->getAddr(),
+ RubyRequest *msg = new RubyRequest(clockEdge(), pkt->getAddr(),
pkt->getPtr<uint8_t>(true),
pkt->getSize(), pc, secondary_type,
RubyAccessMode_Supervisor, pkt,
static uint32_t getBlockSizeBits() { return m_block_size_bits; }
static uint64_t getMemorySizeBytes() { return m_memory_size_bytes; }
static uint32_t getMemorySizeBits() { return m_memory_size_bits; }
- Cycles getTime() const { return curCycle(); }
// Public Methods
Network*
WireBuffer::enqueue(MsgPtr message, Cycles latency)
{
m_msg_counter++;
- Cycles current_time = g_system_ptr->getTime();
+ Cycles current_time = g_system_ptr->curCycle();
Cycles arrival_time = current_time + latency;
assert(arrival_time > current_time);
pop_heap(m_message_queue.begin(), m_message_queue.end(),
greater<MessageBufferNode>());
- node.m_time = g_system_ptr->getTime() + Cycles(1);
+ node.m_time = g_system_ptr->curCycle() + Cycles(1);
m_message_queue.back() = node;
push_heap(m_message_queue.begin(), m_message_queue.end(),
greater<MessageBufferNode>());
WireBuffer::isReady()
{
return ((!m_message_queue.empty()) &&
- (m_message_queue.front().m_time <= g_system_ptr->getTime()));
+ (m_message_queue.front().m_time <= g_system_ptr->curCycle()));
}
void
# Declare message
code("${{msg_type.ident}} *out_msg = "\
- "new ${{msg_type.ident}}(curCycle());")
+ "new ${{msg_type.ident}}(clockEdge());")
# The other statements
t = self.statements.generate(code, None)
code('m_num_controllers++;')
for var in self.objects:
if var.ident.find("mandatoryQueue") >= 0:
- code('m_${{var.c_ident}}_ptr = new ${{var.type.c_ident}}();')
+ code('''
+m_${{var.c_ident}}_ptr = new ${{var.type.c_ident}}();
+m_${{var.c_ident}}_ptr->setReceiver(this);
+''')
code.dedent()
code('''
code('*$vid = ${{vtype["default"]}}; // $comment')
# Set ordering
- if "ordered" in var and "trigger_queue" not in var:
+ if "ordered" in var:
# A buffer
code('$vid->setOrdering(${{var["ordered"]}});')
code('$vid->setRandomization(${{var["random"]}});')
# Set Priority
- if vtype.isBuffer and \
- "rank" in var and "trigger_queue" not in var:
+ if vtype.isBuffer and "rank" in var:
code('$vid->setPriority(${{var["rank"]}});')
+ # Set sender and receiver for trigger queue
+ if var.ident.find("triggerQueue") >= 0:
+ code('$vid->setSender(this);')
+ code('$vid->setReceiver(this);')
+ elif vtype.c_ident == "TimerTable":
+ code('$vid->setClockObj(this);')
+
else:
# Network port object
network = var["network"]
code('assert($vid != NULL);')
+ # Set the end
+ if network == "To":
+ code('$vid->setSender(this);')
+ else:
+ code('$vid->setReceiver(this);')
+
# Set ordering
if "ordered" in var:
# A buffer
code('${{port.code}}.setConsumer(this);')
# Set the queue descriptions
code('${{port.code}}.setDescription("[Version " + to_string(m_version) + ", $ident, $port]");')
- # Set the clock object
- code('${{port.code}}.setClockObj(this);')
# Initialize the transition profiling
code()
''', klass="class")
if self.isMessage:
- code('(Cycles curTime) : %s(curTime) {' % self["interface"])
+ code('(Tick curTime) : %s(curTime) {' % self["interface"])
else:
code('()\n\t\t{')
params = ', '.join(params)
if self.isMessage:
- params = "const Cycles curTime, " + params
+ params = "const Tick curTime, " + params
code('${{self.c_ident}}($params)')