no_mig_atomic = not \
options.allow_atomic_migration)
+ if options.recycle_latency:
+ l1_cntrl.recycle_latency = options.recycle_latency
+
exec("system.l1_cntrl%d = l1_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
probe_filter_enabled = \
options.pf_on)
+ if options.recycle_latency:
+ dir_cntrl.recycle_latency = options.recycle_latency
+
exec("system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
dma_cntrl.dma_sequencer.port = dma_device.dma
dma_cntrl_nodes.append(dma_cntrl)
+ if options.recycle_latency:
+ dma_cntrl.recycle_latency = options.recycle_latency
+
all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
return (cpu_sequencers, dir_cntrl_nodes, all_cntrls)
parser.add_option("--ruby-debug", action="store_true", default=False)
parser.add_option("--ruby-debug-cycle", type="int", default=1)
+ parser.add_option("--recycle-latency", type="int", default=10,
+ help="Recycle latency for ruby controller input buffers")
+
protocol = buildEnv['PROTOCOL']
exec "import %s" % protocol
eval("%s.define_options(parser)" % protocol)
MessageBuffer unblockToDir, network="From", virtual_network="5", ordered="false";
MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false";
- MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
+ // Per-buffer recycle_latency overrides the controller-wide m_recycle_latency
+ // default applied by the generated code when no explicit value is given.
+ MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", recycle_latency="1";
MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true";
// STATES
}
}
+ // Off-chip memory response queue: a completed MEMORY_READ triggers
+ // Event:Memory_Data and a completed MEMORY_WB triggers Event:Memory_Ack;
+ // any other message type is a protocol error.
+ in_port(memQueue_in, MemoryMsg, memBuffer) {
+ if (memQueue_in.isReady()) {
+ peek(memQueue_in, MemoryMsg) {
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.Address);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.Address);
+ } else {
+ DEBUG_EXPR(in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
in_port(requestQueue_in, RequestMsg, requestToDir) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
}
}
- // off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer) {
- if (memQueue_in.isReady()) {
- peek(memQueue_in, MemoryMsg) {
- if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.Address);
- } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.Address);
- } else {
- DEBUG_EXPR(in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
// Actions
action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry" ) {
}
action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
+ // Record the unblock message's sender in the transition comment before
+ // dequeueing, so protocol traces show who sent the unblock.
+ peek(unblockNetwork_in, ResponseMsg) {
+ APPEND_TRANSITION_COMMENT(in_msg.Sender);
+ }
unblockNetwork_in.dequeue();
}
}
action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
+ // Record which requestor's message is being recycled in the transition
+ // comment, for debugging recycle storms in protocol traces.
+ peek(requestQueue_in, RequestMsg) {
+ APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+ }
requestQueue_in.recycle();
}
if vtype.isBuffer and \
"rank" in var and "trigger_queue" not in var:
code('$vid->setPriority(${{var["rank"]}});')
+
else:
# Network port object
network = var["network"]
''')
+ if vtype.isBuffer:
+ if "recycle_latency" in var:
+ code('$vid->setRecycleLatency(${{var["recycle_latency"]}});')
+ else:
+ code('$vid->setRecycleLatency(m_recycle_latency);')
+
+
# Set the queue consumers
code.insert_newline()
for port in self.in_ports:
event = "%s_Event_%s" % (self.ident, trans.event.ident)
code('m_profiler.possibleTransition($state, $event);')
- # added by SS to initialize recycle_latency of message buffers
- for buf in self.message_buffer_names:
- code("$buf->setRecycleLatency(m_recycle_latency);")
-
code.dedent()
code('}')