("lpddr2_s4_1066_x32", "LPDDR2_S4_1066_x32"),
("lpddr3_1600_x32", "LPDDR3_1600_x32"),
("wio_200_x128", "WideIO_200_x128"),
- ("dramsim2", "DRAMSim2")
+ ("dramsim2", "DRAMSim2"),
+ ("ruby_memory", "RubyMemoryControl")
]
# Filtered list of aliases. Only aliases for existing memory controllers are kept.
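The new "ruby_memory" alias exposes RubyMemoryControl through the common
MemConfig machinery, so it can be picked like any other --mem-type. A minimal
sketch of resolving a mem-type string (assuming MemConfig.get() resolves
aliases, as it does for the other entries in this list):

    import MemConfig
    # Resolve a --mem-type string (alias or class name) to its SimObject
    # class; "ruby_memory" is valid only once the alias above is registered.
    cls = MemConfig.get("ruby_memory")   # -> RubyMemoryControl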
Ruby.create_system(options, True, test_sys, test_sys.iobus,
test_sys._dma_ports)
- test_sys.physmem = [SimpleMemory(range = r, null = True)
- for r in test_sys.mem_ranges]
# Create a separate clock domain for Ruby
test_sys.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
-parser.add_option("-l", "--requests", metavar="N", default=100,
+parser.add_option("--requests", metavar="N", default=100,
help="Stop after N requests")
parser.add_option("-f", "--wakeup_freq", metavar="N", default=10,
help="Wakeup every N cycles")
print "Error: unknown direct test generator"
sys.exit(1)
-#
-# Create the M5 system. Note that the Memory Object isn't
-# actually used by the rubytester, but is included to support the
-# M5 memory size == Ruby memory size checks
-#
-system = System(physmem = SimpleMemory(),
- mem_ranges = [AddrRange(options.mem_size)])
+# Create the M5 system.
+system = System(mem_ranges = [AddrRange(options.mem_size)])
# Create a top-level voltage domain and clock domain
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = system.voltage_domain)
-#
# Create the ruby random tester
-#
-system.cpu = RubyDirectedTester(requests_to_complete = \
- options.requests,
- generator = generator)
+system.cpu = RubyDirectedTester(requests_to_complete = options.requests,
+ generator = generator)
Ruby.create_system(options, False, system)
#
# Tie the ruby tester ports to the ruby cpu ports
#
- system.tester.cpuPort = ruby_port.slave
+ system.cpu.cpuPort = ruby_port.slave
# -----------------------
# run simulation
system = System(cpu = cpus,
funcmem = SimpleMemory(in_addr_map = False),
funcbus = NoncoherentXBar(),
- physmem = SimpleMemory(),
clk_domain = SrcClockDomain(clock = options.sys_clock),
mem_ranges = [AddrRange(options.mem_size)])
# actually used by the rubytester, but is included to support the
# M5 memory size == Ruby memory size checks
#
-system = System(cpu = tester, physmem = SimpleMemory(),
- mem_ranges = [AddrRange(options.mem_size)])
+system = System(cpu = tester, mem_ranges = [AddrRange(options.mem_size)])
# Create a top-level voltage domain and clock domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
sys.exit(1)
- # Use SimpleMemory with the null option since this memory is only used
- # for determining which addresses are within the range of the memory.
- # No space allocation is required.
- system.physmem = SimpleMemory(range=AddrRange(options.mem_size),
- null = True)
options.use_map = True
Ruby.create_system(options, False, system)
assert(options.num_cpus == len(system.ruby._cpu_ports))
#
# Create the Ruby objects associated with the directory controller
#
-
- mem_cntrl = RubyMemoryControl(
- clk_domain = ruby_system.memctrl_clk_domain,
- version = i,
- ruby_system = ruby_system)
-
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
dir_cntrl = Directory_Controller(version = i,
- directory = \
- RubyDirectoryMemory(version = i,
- size = dir_size,
- use_map =
- options.use_map),
- memBuffer = mem_cntrl,
+ directory = RubyDirectoryMemory(
+ version = i, size = dir_size),
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
clk_divider=3)
for i in xrange(options.num_dirs):
- #
- # Create the Ruby objects associated with the directory controller
- #
-
- mem_cntrl = RubyMemoryControl(
- clk_domain = ruby_system.memctrl_clk_domain,
- version = i,
- ruby_system = ruby_system)
-
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
dir_cntrl = Directory_Controller(version = i,
- directory = \
- RubyDirectoryMemory(version = i,
- size = dir_size,
- use_map =
- options.use_map),
- memBuffer = mem_cntrl,
+ directory = RubyDirectoryMemory(
+ version = i, size = dir_size),
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
clk_divider=3)
for i in xrange(options.num_dirs):
- #
- # Create the Ruby objects associated with the directory controller
- #
-
- mem_cntrl = RubyMemoryControl(
- clk_domain = ruby_system.memctrl_clk_domain,
- version = i,
- ruby_system = ruby_system)
-
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
-
dir_cntrl = Directory_Controller(version = i,
- directory = \
- RubyDirectoryMemory( \
- version = i,
- size = dir_size,
- use_map = options.use_map,
- map_levels = \
- options.map_levels),
- memBuffer = mem_cntrl,
+ directory = RubyDirectoryMemory(
+ version = i, size = dir_size),
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
clk_divider=3)
for i in xrange(options.num_dirs):
- #
- # Create the Ruby objects associated with the directory controller
- #
-
- mem_cntrl = RubyMemoryControl(
- clk_domain = ruby_system.memctrl_clk_domain,
- version = i,
- ruby_system = ruby_system)
-
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
dir_cntrl = Directory_Controller(version = i,
- directory = \
- RubyDirectoryMemory(version = i,
- size = dir_size,
- use_map = options.use_map),
- memBuffer = mem_cntrl,
+ directory = RubyDirectoryMemory(
+ version = i, size = dir_size),
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
clk_divider=3)
for i in xrange(options.num_dirs):
- #
- # Create the Ruby objects associated with the directory controller
- #
-
- mem_cntrl = RubyMemoryControl(
- clk_domain = ruby_system.memctrl_clk_domain,
- version = i,
- ruby_system = ruby_system)
-
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
dir_cntrl = Directory_Controller(version = i,
- directory = \
- RubyDirectoryMemory(version = i,
- use_map = options.use_map,
- size = dir_size),
- memBuffer = mem_cntrl,
+ directory = RubyDirectoryMemory(
+ version = i, size = dir_size),
l2_select_num_bits = l2_bits,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
clk_divider=3)
for i in xrange(options.num_dirs):
- #
- # Create the Ruby objects associated with the directory controller
- #
-
- mem_cntrl = RubyMemoryControl(
- clk_domain = ruby_system.memctrl_clk_domain,
- version = i,
- ruby_system = ruby_system)
-
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
start_index_bit = pf_start_bit)
dir_cntrl = Directory_Controller(version = i,
- directory = \
- RubyDirectoryMemory( \
- version = i,
- size = dir_size,
- use_map = options.use_map,
- map_levels = \
- options.map_levels,
- numa_high_bit = \
- options.numa_high_bit),
+ directory = RubyDirectoryMemory(
+ version = i, size = dir_size),
probeFilter = pf,
- memBuffer = mem_cntrl,
probe_filter_enabled = options.pf_on,
full_bit_dir_enabled = options.dir_on,
transitions_per_cycle = options.ports,
from m5.defines import buildEnv
from m5.util import addToPath, fatal
+import MemConfig
addToPath('../topologies')
def define_options(parser):
help="high order address bit to use for numa mapping. " \
"0 = highest bit, not specified = lowest bit")
- # ruby sparse memory options
- parser.add_option("--use-map", action="store_true", default=False)
- parser.add_option("--map-levels", type="int", default=4)
-
parser.add_option("--recycle-latency", type="int", default=10,
help="Recycle latency for ruby controller input buffers")
parser.add_option("--random_seed", type="int", default=1234,
help="Used for seeding the random number generator")
- parser.add_option("--ruby_stats", type="string", default="ruby.stats")
-
protocol = buildEnv['PROTOCOL']
exec "import %s" % protocol
eval("%s.define_options(parser)" % protocol)
+def setup_memory_controllers(system, ruby, dir_cntrls, options):
+ ruby.block_size_bytes = options.cacheline_size
+ ruby.memory_size_bits = 48
+ block_size_bits = int(math.log(options.cacheline_size, 2))
+
+ if options.numa_high_bit:
+ numa_bit = options.numa_high_bit
+ else:
+ # if the numa_bit is not specified, set the directory bits as the
+ # lowest bits above the block offset bits, and the numa_bit as the
+ # highest of those directory bits
+ dir_bits = int(math.log(options.num_dirs, 2))
+ numa_bit = block_size_bits + dir_bits - 1
+
+ index = 0
+ mem_ctrls = []
+ crossbars = []
+
+ # Sets bits to be used for interleaving. Creates memory controllers
+ # attached to a directory controller. A separate controller is created
+ # for each address range as the abstract memory can handle only one
+ # contiguous address range as of now.
+ for dir_cntrl in dir_cntrls:
+ dir_cntrl.directory.numa_high_bit = numa_bit
+
+ crossbar = None
+ if len(system.mem_ranges) > 1:
+ crossbar = NoncoherentXBar()
+ crossbars.append(crossbar)
+ dir_cntrl.memory = crossbar.slave
+
+ for r in system.mem_ranges:
+ mem_ctrl = MemConfig.create_mem_ctrl(
+ MemConfig.get(options.mem_type), r, index, options.num_dirs,
+ int(math.log(options.num_dirs, 2)), options.cacheline_size)
+
+ mem_ctrls.append(mem_ctrl)
+
+ if crossbar != None:
+ mem_ctrl.port = crossbar.master
+ else:
+ mem_ctrl.port = dir_cntrl.memory
+
+ index += 1
+
+ system.mem_ctrls = mem_ctrls
+
+ if len(crossbars) > 0:
+ ruby.crossbars = crossbars
+
+
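A worked example of the default interleaving math used above (a standalone
sketch; numa_bit_for is a hypothetical helper, not part of this patch):

    import math

    def numa_bit_for(cacheline_size, num_dirs, numa_high_bit=None):
        # Default from setup_memory_controllers: the directory-select bits
        # sit directly above the block offset, and the numa bit is the
        # highest of those directory bits.
        if numa_high_bit:
            return numa_high_bit
        block_size_bits = int(math.log(cacheline_size, 2))
        dir_bits = int(math.log(num_dirs, 2))
        return block_size_bits + dir_bits - 1

    # 64-byte lines and 4 directories: offset bits 0-5, directory-select
    # bits 6-7, so the numa bit defaults to bit 7.
    assert numa_bit_for(64, 4) == 7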
def create_topology(controllers, options):
""" Called from create_system in configs/ruby/<protocol>.py
Must return an object which is a subclass of BaseTopology
def create_system(options, full_system, system, piobus = None, dma_ports = []):
- system.ruby = RubySystem(no_mem_vec = options.use_map)
+ system.ruby = RubySystem()
ruby = system.ruby
# Set the network classes based on the command line options
network.enable_fault_model = True
network.fault_model = FaultModel()
- # Loop through the directory controlers.
- # Determine the total memory size of the ruby system and verify it is equal
- # to physmem. However, if Ruby memory is using sparse memory in SE
- # mode, then the system should not back-up the memory state with
- # the Memory Vector and thus the memory size bytes should stay at 0.
- # Also set the numa bits to the appropriate values.
- total_mem_size = MemorySize('0B')
-
- ruby.block_size_bytes = options.cacheline_size
- block_size_bits = int(math.log(options.cacheline_size, 2))
-
- if options.numa_high_bit:
- numa_bit = options.numa_high_bit
- else:
- # if the numa_bit is not specified, set the directory bits as the
- # lowest bits above the block offset bits, and the numa_bit as the
- # highest of those directory bits
- dir_bits = int(math.log(options.num_dirs, 2))
- numa_bit = block_size_bits + dir_bits - 1
-
- for dir_cntrl in dir_cntrls:
- total_mem_size.value += dir_cntrl.directory.size.value
- dir_cntrl.directory.numa_high_bit = numa_bit
-
- phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
- assert(total_mem_size.value == phys_mem_size)
- ruby.mem_size = total_mem_size
+ setup_memory_controllers(system, ruby, dir_cntrls, options)
# Connect the cpu sequencers and the piobus
if piobus != None:
machine(Directory, "MESI Two Level directory protocol")
: DirectoryMemory * directory;
- MemoryControl * memBuffer;
Cycles to_mem_ctrl_latency := 1;
Cycles directory_latency := 6;
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
- memBuffer.functionalRead(pkt);
+ functionalMemoryRead(pkt);
}
}
int functionalWrite(Address addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
- testAndWrite(addr, tbe.DataBlk, pkt);
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
}
- return memBuffer.functionalWrite(pkt);
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
}
void setAccessPermission(Address addr, State state) {
// ** OUT_PORTS **
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
- out_port(memQueue_out, MemoryMsg, memBuffer);
// ** IN_PORTS **
}
// off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer, rank = 2) {
+ in_port(memQueue_in, MemoryMsg, responseFromMemory, rank = 2) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestNetwork_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, to_mem_ctrl_latency) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.Prefetch := in_msg.Prefetch;
-
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_mem_ctrl_latency);
}
}
action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
peek(responseNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, to_mem_ctrl_latency) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Sender;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := in_msg.Prefetch;
-
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryWrite(in_msg.Sender, address, to_mem_ctrl_latency,
+ in_msg.DataBlk);
}
}
//added by SS for dma
action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
peek(requestNetwork_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, to_mem_ctrl_latency) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := machineID;
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_mem_ctrl_latency);
}
}
}
}
- action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, to_mem_ctrl_latency) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.OriginalRequestorMachId := machineID;
- out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(address), in_msg.Len);
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ action(qw_queueMemoryWBRequest_partial, "qwp",
+ desc="Queue off-chip writeback request") {
+ peek(requestNetwork_in, RequestMsg) {
+ queueMemoryWritePartial(machineID, address, to_mem_ctrl_latency,
+ in_msg.DataBlk, in_msg.Len);
}
}
}
}
- action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
+ action(qw_queueMemoryWBRequest_partialTBE, "qwt",
+ desc="Queue off-chip writeback request") {
peek(responseNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, to_mem_ctrl_latency) {
- assert(is_valid(tbe));
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.OriginalRequestorMachId := in_msg.Sender;
- //out_msg.DataBlk := in_msg.DataBlk;
- //out_msg.DataBlk.copyPartial(tbe.DataBlk, tbe.Offset, tbe.Len);
- out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
-
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := in_msg.Prefetch;
-
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryWritePartial(in_msg.Sender, tbe.PhysicalAddress,
+ to_mem_ctrl_latency, tbe.DataBlk, tbe.Len);
}
}
machine(Directory, "Directory protocol")
: DirectoryMemory * directory;
- MemoryControl * memBuffer;
Cycles directory_latency := 12;
+ Cycles to_memory_controller_latency := 1;
MessageBuffer * forwardFromDir, network="To", virtual_network="3",
ordered="false", vnet_type="forward";
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
- memBuffer.functionalRead(pkt);
+ functionalMemoryRead(pkt);
}
}
int functionalWrite(Address addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
- testAndWrite(addr, tbe.DataBlk, pkt);
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
}
- return memBuffer.functionalWrite(pkt);
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
}
// ** OUT_PORTS **
out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
-//added by SS
- out_port(memQueue_out, MemoryMsg, memBuffer);
// ** IN_PORTS **
-
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, DMARequestMsg) {
//added by SS
// off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer) {
+ in_port(memQueue_in, MemoryMsg, responseFromMemory) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
TBE tbe := TBEs[in_msg.Addr];
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc,"%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
}
}
action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- //out_msg.OriginalRequestorMachId := machineID;
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc,"%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
}
}
action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.DataBlk.copyPartial(
- in_msg.DataBlk, addressOffset(in_msg.PhysicalAddress), in_msg.Len);
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc,"%s\n", out_msg);
- }
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ queueMemoryWritePartial(in_msg.Requestor, address,
+ to_memory_controller_latency, in_msg.DataBlk,
+ in_msg.Len);
}
}
action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- assert(is_valid(tbe));
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
-
- // get incoming data
- out_msg.DataBlk.copyPartial(
- tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc,"%s\n", out_msg);
- }
+ queueMemoryWritePartial(in_msg.Requestor, address,
+ to_memory_controller_latency, tbe.DataBlk,
+ tbe.Len);
}
}
-
action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.MessageSize := in_msg.MessageSize;
-
- DPRINTF(RubySlicc,"%s\n", out_msg);
- }
+ queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency,
+ in_msg.DataBlk);
}
}
machine(Directory, "Directory protocol")
: DirectoryMemory * directory;
- MemoryControl * memBuffer;
Cycles directory_latency := 6;
+ Cycles to_memory_controller_latency := 1;
// Message Queues
MessageBuffer * requestToDir, network="From", virtual_network="1",
}
void functionalRead(Address addr, Packet *pkt) {
- memBuffer.functionalRead(pkt);
+ functionalMemoryRead(pkt);
}
int functionalWrite(Address addr, Packet *pkt) {
- return memBuffer.functionalWrite(pkt);
+ int num_functional_writes := 0;
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
}
// if no sharers, then directory can be considered
// ** OUT_PORTS **
out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
- out_port(memQueue_out, MemoryMsg, memBuffer);
// ** IN_PORTS **
}
// off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer) {
+ in_port(memQueue_in, MemoryMsg, responseFromMemory) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := false;
- // These are not used by memory but are passed back here with the read data:
- out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS &&
- getDirectoryEntry(address).Sharers.count() == 0);
- out_msg.Acks := getDirectoryEntry(address).Sharers.count();
- if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
- out_msg.Acks := out_msg.Acks - 1;
- }
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
}
}
action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
peek(unblockNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.Sender := machineID;
- if (is_valid(tbe)) {
- out_msg.OriginalRequestorMachId := tbe.Requestor;
- }
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := false;
- // Not used:
- out_msg.ReadX := false;
- out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
- DPRINTF(RubySlicc, "%s\n", out_msg);
+ if (is_valid(tbe)) {
+ queueMemoryWrite(tbe.Requestor, address, to_memory_controller_latency,
+ in_msg.DataBlk);
+ } else {
+ queueMemoryWrite(in_msg.Sender, address, to_memory_controller_latency,
+ in_msg.DataBlk);
}
}
}
action(qw_queueMemoryWBRequestFromMessageAndTBE, "qwmt",
desc="Queue off-chip writeback request") {
peek(unblockNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.Sender := machineID;
- if (is_valid(tbe)) {
- out_msg.OriginalRequestorMachId := tbe.Requestor;
- }
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.DataBlk.copyPartial(tbe.DataBlk,
- addressOffset(tbe.PhysicalAddress), tbe.Len);
-
- out_msg.MessageSize := in_msg.MessageSize;
- // Not used:
- out_msg.ReadX := false;
- out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ DataBlock DataBlk := in_msg.DataBlk;
+ DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress),
+ tbe.Len);
+ queueMemoryWrite(tbe.Requestor, address, to_memory_controller_latency,
+ DataBlk);
}
}
action(qw_queueMemoryWBRequest2, "/qw", desc="Queue off-chip writeback request") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := false;
- // Not used:
- out_msg.ReadX := false;
- out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency,
+ in_msg.DataBlk);
}
}
machine(Directory, "Token protocol")
: DirectoryMemory * directory;
- MemoryControl * memBuffer;
int l2_select_num_bits;
Cycles directory_latency := 5;
bool distributed_persistent := "True";
Cycles fixed_timeout_latency := 100;
Cycles reissue_wakeup_latency := 10;
+ Cycles to_memory_controller_latency := 1;
// Message Queues from dir to other controllers / network
MessageBuffer * dmaResponseFromDir, network="To", virtual_network="5",
structure(TBE, desc="TBE entries for outstanding DMA requests") {
Address PhysicalAddress, desc="physical address";
State TBEState, desc="Transient State";
- DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to merged with system memory";
- DataBlock DataBlk, desc="The current view of system memory";
+ DataBlock DataBlk, desc="Current view of the associated address range";
int Len, desc="...";
MachineID DmaRequestor, desc="DMA requestor";
bool WentPersistent, desc="Did the DMA request require a persistent request";
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
- memBuffer.functionalRead(pkt);
+ functionalMemoryRead(pkt);
}
}
int functionalWrite(Address addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
- testAndWrite(addr, tbe.DataBlk, pkt);
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
}
- return memBuffer.functionalWrite(pkt);
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
}
// ** OUT_PORTS **
out_port(requestNetwork_out, RequestMsg, requestFromDir);
out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
- //
- // Memory buffer for memory controller to DIMM communication
- //
- out_port(memQueue_out, MemoryMsg, memBuffer);
-
// ** IN_PORTS **
-
// off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer) {
+ in_port(memQueue_in, MemoryMsg, responseFromMemory) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestNetwork_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
}
}
action(qp_queueMemoryForPersistent, "qp", desc="Queue off-chip fetch request") {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := persistentTable.findSmallest(address);
- out_msg.MessageSize := MessageSizeType:Request_Control;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(persistentTable.findSmallest(address), address,
+ to_memory_controller_latency);
}
action(fd_memoryDma, "fd", desc="Queue off-chip fetch request") {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
}
}
action(lq_queueMemoryWbRequest, "lq", desc="Write data to memory") {
peek(responseNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.DataBlk := in_msg.DataBlk;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryWrite(in_msg.Sender, address, to_memory_controller_latency,
+ in_msg.DataBlk);
}
}
action(ld_queueMemoryDmaWriteFromTbe, "ld", desc="Write DMA data to memory") {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- // first, initialize the data blk to the current version of system memory
- out_msg.DataBlk := tbe.DataBlk;
- // then add the dma write data
- out_msg.DataBlk.copyPartial(
- tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryWritePartial(tbe.DmaRequestor, address,
+ to_memory_controller_latency, tbe.DataBlk,
+ tbe.Len);
}
- action(lr_queueMemoryDmaReadWriteback, "lr", desc="Write DMA data from read to memory") {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- // first, initialize the data blk to the current version of system memory
- out_msg.DataBlk := tbe.DataBlk;
- DPRINTF(RubySlicc, "%s\n", out_msg);
+ action(lr_queueMemoryDmaReadWriteback, "lr",
+ desc="Write DMA data from read to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ queueMemoryWrite(machineID, address, to_memory_controller_latency,
+ in_msg.DataBlk);
}
}
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
set_tbe(TBEs[address]);
- tbe.DmaDataBlk := in_msg.DataBlk;
+ tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.PhysicalAddress;
tbe.Len := in_msg.Len;
tbe.DmaRequestor := in_msg.Requestor;
action(rd_recordDataInTbe, "rd", desc="Record data in TBE") {
peek(responseNetwork_in, ResponseMsg) {
+ DataBlock DataBlk := tbe.DataBlk;
tbe.DataBlk := in_msg.DataBlk;
+ tbe.DataBlk.copyPartial(DataBlk, addressOffset(tbe.PhysicalAddress),
+ tbe.Len);
}
}
machine(Directory, "AMD Hammer-like protocol")
: DirectoryMemory * directory;
CacheMemory * probeFilter;
- MemoryControl * memBuffer;
- Cycles memory_controller_latency := 2;
+ Cycles from_memory_controller_latency := 2;
+ Cycles to_memory_controller_latency := 1;
bool probe_filter_enabled := "False";
bool full_bit_dir_enabled := "False";
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
- memBuffer.functionalRead(pkt);
+ functionalMemoryRead(pkt);
}
}
int functionalWrite(Address addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
- testAndWrite(addr, tbe.DataBlk, pkt);
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
}
- return memBuffer.functionalWrite(pkt);
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
}
Event cache_request_to_event(CoherenceRequestType type) {
out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
out_port(triggerQueue_out, TriggerMsg, triggerQueue);
- //
- // Memory buffer for memory controller to DIMM communication
- //
- out_port(memQueue_out, MemoryMsg, memBuffer);
-
// ** IN_PORTS **
// Trigger Queue
}
// off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer, rank=2) {
+ in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=2) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.Addr);
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.Requestor;
action(oc_sendBlockAck, "oc", desc="Send block ack to the owner") {
peek(requestQueue_in, RequestMsg) {
if (((probe_filter_enabled || full_bit_dir_enabled) && (in_msg.Requestor == cache_entry.Owner)) || machineCount(MachineType:L1Cache) == 1) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:BLOCK_ACK;
out_msg.Requestor := in_msg.Requestor;
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := in_msg.Requestor;
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
}
}
action(qd_queueMemoryRequestFromDmaRead, "qd", desc="Queue off-chip fetch request") {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
}
}
fwd_set := cache_entry.Sharers;
fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
if (fwd_set.count() > 0) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
}
} else {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
if (full_bit_dir_enabled) {
assert(cache_entry.Sharers.count() > 0);
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
}
}
} else {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
action(io_invalidateOwnerRequest, "io", desc="invalidate all copies") {
if (machineCount(MachineType:L1Cache) > 1) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:INV;
fwd_set := cache_entry.Sharers;
fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
if (fwd_set.count() > 0) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
}
}
} else {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
// decouple the two.
//
peek(unblockNetwork_in, ResponseMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:MERGED_GETS;
assert(machineCount(MachineType:L1Cache) > 1);
if (probe_filter_enabled || full_bit_dir_enabled) {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
}
} else {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
if (probe_filter_enabled || full_bit_dir_enabled) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Requestor != cache_entry.Owner) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
}
} else {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
assert(is_valid(tbe));
if (tbe.NumPendingMsgs > 0) {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:GETX;
//
assert(is_valid(tbe));
if (tbe.NumPendingMsgs > 0) {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:GETS;
//
action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
peek(unblockNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- assert(in_msg.Dirty);
- assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.DataBlk := in_msg.DataBlk;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryWrite(in_msg.Sender, address, to_memory_controller_latency,
+ in_msg.DataBlk);
}
}
action(ld_queueMemoryDmaWrite, "ld", desc="Write DMA data to memory") {
- enqueue(memQueue_out, MemoryMsg, 1) {
- assert(is_valid(tbe));
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- // first, initialize the data blk to the current version of system memory
- out_msg.DataBlk := tbe.DataBlk;
- // then add the dma write data
- out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ assert(is_valid(tbe));
+ queueMemoryWritePartial(tbe.DmaRequestor, tbe.PhysicalAddress,
+ to_memory_controller_latency, tbe.DmaDataBlk,
+ tbe.Len);
}
action(ly_queueMemoryWriteFromTBE, "ly", desc="Write data to memory from TBE") {
- enqueue(memQueue_out, MemoryMsg, 1) {
- assert(is_valid(tbe));
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.DataBlk := tbe.DataBlk;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryWrite(machineID, address, to_memory_controller_latency,
+ tbe.DataBlk);
}
action(ll_checkIncomingWriteback, "\l", desc="Check PUTX/PUTO response message") {
NodeID version;
MachineID machineID;
NodeID clusterID;
+MessageBuffer responseFromMemory, ordered="false";
+
+// Functions implemented in the AbstractController class for
+// making timing access to the memory maintained by the
+// memory controllers.
+void queueMemoryRead(MachineID id, Address addr, Cycles latency);
+void queueMemoryWrite(MachineID id, Address addr, Cycles latency,
+ DataBlock block);
+void queueMemoryWritePartial(MachineID id, Address addr, Cycles latency,
+ DataBlock block, int size);
+
+// Functions implemented in the AbstractController class for
+// making functional access to the memory maintained by the
+// memory controllers.
+void functionalMemoryRead(Packet *pkt);
+bool functionalMemoryWrite(Packet *pkt);
}
-structure (MemoryControl, inport="yes", outport="yes", external = "yes") {
- void recordRequestType(CacheRequestType);
- void functionalRead(Packet *pkt);
- int functionalWrite(Packet *pkt);
-}
-
structure (DMASequencer, external = "yes") {
void ackCallback();
void dataCallback(DataBlock);
MakeInclude('structures/CacheMemory.hh')
MakeInclude('system/DMASequencer.hh')
MakeInclude('structures/DirectoryMemory.hh')
-MakeInclude('structures/MemoryControl.hh')
MakeInclude('structures/WireBuffer.hh')
MakeInclude('structures/PerfectCacheMemory.hh')
MakeInclude('structures/PersistentTable.hh')
void
MessageBuffer::enqueue(MsgPtr message, Cycles delta)
{
- m_msg_counter++;
+ assert(m_ordering_set);
// record the current time in case we have a pop that also adjusts my size
if (m_time_last_time_enqueue < m_sender->curCycle()) {
m_msgs_this_cycle = 0; // first msg this cycle
m_time_last_time_enqueue = m_sender->curCycle();
}
- m_msgs_this_cycle++;
- assert(m_ordering_set);
+ m_msg_counter++;
+ m_msgs_this_cycle++;
// Calculate the arrival time of the message, that is, the first
// cycle the message can be dequeued.
/*
- * Copyright (c) 2011 Mark D. Hill and David A. Wood
+ * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include "mem/protocol/MemoryMsg.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
+#include "sim/system.hh"
AbstractController::AbstractController(const Params *p)
- : ClockedObject(p), Consumer(this)
+ : MemObject(p), Consumer(this), m_version(p->version),
+ m_clusterID(p->cluster_id),
+ m_masterId(p->system->getMasterId(name())), m_is_blocking(false),
+ m_number_of_TBEs(p->number_of_TBEs),
+ m_transitions_per_cycle(p->transitions_per_cycle),
+ m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
+ memoryPort(csprintf("%s.memory", name()), this, ""),
+ m_responseFromMemory_ptr(new MessageBuffer())
{
- m_version = p->version;
- m_clusterID = p->cluster_id;
-
- m_transitions_per_cycle = p->transitions_per_cycle;
- m_buffer_size = p->buffer_size;
- m_recycle_latency = p->recycle_latency;
- m_number_of_TBEs = p->number_of_TBEs;
- m_is_blocking = false;
+ // Set the sender and receiver pointers of the message buffer that
+ // carries responses from the memory controller; the sender pointer
+ // is used for querying the current time.
+ m_responseFromMemory_ptr->setSender(this);
+ m_responseFromMemory_ptr->setReceiver(this);
+ m_responseFromMemory_ptr->setOrdering(false);
if (m_version == 0) {
// Combine the statistics from all controllers
m_is_blocking = false;
}
}
+
+BaseMasterPort &
+AbstractController::getMasterPort(const std::string &if_name,
+ PortID idx)
+{
+ return memoryPort;
+}
+
+void
+AbstractController::queueMemoryRead(const MachineID &id, Address addr,
+ Cycles latency)
+{
+ RequestPtr req = new Request(addr.getAddress(),
+ RubySystem::getBlockSizeBytes(), 0,
+ m_masterId);
+
+ PacketPtr pkt = Packet::createRead(req);
+ uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
+ pkt->dataDynamic(newData);
+
+ SenderState *s = new SenderState(id);
+ pkt->pushSenderState(s);
+
+ memoryPort.schedTimingReq(pkt, clockEdge(latency));
+}
+
+void
+AbstractController::queueMemoryWrite(const MachineID &id, Address addr,
+ Cycles latency, const DataBlock &block)
+{
+ RequestPtr req = new Request(addr.getAddress(),
+ RubySystem::getBlockSizeBytes(), 0,
+ m_masterId);
+
+ PacketPtr pkt = Packet::createWrite(req);
+ uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
+ pkt->dataDynamic(newData);
+ memcpy(newData, block.getData(0, RubySystem::getBlockSizeBytes()),
+ RubySystem::getBlockSizeBytes());
+
+ SenderState *s = new SenderState(id);
+ pkt->pushSenderState(s);
+
+ // Schedule the request to be issued to memory after the given latency.
+ memoryPort.schedTimingReq(pkt, clockEdge(latency));
+}
+
+void
+AbstractController::queueMemoryWritePartial(const MachineID &id, Address addr,
+ Cycles latency,
+ const DataBlock &block, int size)
+{
+ RequestPtr req = new Request(addr.getAddress(),
+ RubySystem::getBlockSizeBytes(), 0,
+ m_masterId);
+
+ PacketPtr pkt = Packet::createWrite(req);
+ uint8_t *newData = new uint8_t[size];
+ pkt->dataDynamic(newData);
+ memcpy(newData, block.getData(addr.getOffset(), size), size);
+
+ SenderState *s = new SenderState(id);
+ pkt->pushSenderState(s);
+
+ // Schedule the request to be issued to memory after the given latency.
+ memoryPort.schedTimingReq(pkt, clockEdge(latency));
+}
+
+void
+AbstractController::functionalMemoryRead(PacketPtr pkt)
+{
+ memoryPort.sendFunctional(pkt);
+}
+
+int
+AbstractController::functionalMemoryWrite(PacketPtr pkt)
+{
+ int num_functional_writes = 0;
+
+ // Check the message buffer that runs from the memory to the controller.
+ num_functional_writes += m_responseFromMemory_ptr->functionalWrite(pkt);
+
+ // Check the buffer from the controller to the memory.
+ if (memoryPort.checkFunctional(pkt)) {
+ num_functional_writes++;
+ }
+
+ // Update memory itself.
+ memoryPort.sendFunctional(pkt);
+ return num_functional_writes + 1;
+}
+
+void
+AbstractController::recvTimingResp(PacketPtr pkt)
+{
+ assert(pkt->isResponse());
+
+ std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
+ (*msg).m_Addr.setAddress(pkt->getAddr());
+ (*msg).m_Sender = m_machineID;
+
+ SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
+ (*msg).m_OriginalRequestorMachId = s->id;
+ delete s;
+
+ if (pkt->isRead()) {
+ (*msg).m_Type = MemoryRequestType_MEMORY_READ;
+ (*msg).m_MessageSize = MessageSizeType_Response_Data;
+
+ // Copy data from the packet
+ (*msg).m_DataBlk.setData(pkt->getPtr<uint8_t>(), 0,
+ RubySystem::getBlockSizeBytes());
+ } else if (pkt->isWrite()) {
+ (*msg).m_Type = MemoryRequestType_MEMORY_WB;
+ (*msg).m_MessageSize = MessageSizeType_Writeback_Control;
+ } else {
+ panic("Incorrect packet type received from memory controller!");
+ }
+
+ m_responseFromMemory_ptr->enqueue(msg);
+ delete pkt;
+}
+
+bool
+AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
+{
+ controller->recvTimingResp(pkt);
+ return true;
+}
+
+AbstractController::MemoryPort::MemoryPort(const std::string &_name,
+ AbstractController *_controller,
+ const std::string &_label)
+ : QueuedMasterPort(_name, _controller, _queue),
+ _queue(*_controller, *this, _label), controller(_controller)
+{
+}
/*
- * Copyright (c) 2009 Mark D. Hill and David A. Wood
+ * Copyright (c) 2009-2014 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/system/CacheRecorder.hh"
#include "mem/packet.hh"
+#include "mem/qport.hh"
#include "params/RubyController.hh"
-#include "sim/clocked_object.hh"
+#include "mem/mem_object.hh"
class Network;
-class AbstractController : public ClockedObject, public Consumer
+class AbstractController : public MemObject, public Consumer
{
public:
typedef RubyControllerParams Params;
//! These functions are used by ruby system to read/write the data blocks
//! that exist with in the controller.
virtual void functionalRead(const Address &addr, PacketPtr) = 0;
+ void functionalMemoryRead(PacketPtr);
//! The return value indicates the number of messages written with the
//! data from the packet.
- virtual uint32_t functionalWriteBuffers(PacketPtr&) = 0;
+ virtual int functionalWriteBuffers(PacketPtr&) = 0;
virtual int functionalWrite(const Address &addr, PacketPtr) = 0;
+ int functionalMemoryWrite(PacketPtr);
//! Function for enqueuing a prefetch request
virtual void enqueuePrefetch(const Address&, const RubyRequestType&)
//! Set the message buffer with given name.
virtual void setNetQueue(const std::string& name, MessageBuffer *b) = 0;
+ /** A function used to return the port associated with this bus object. */
+ BaseMasterPort& getMasterPort(const std::string& if_name,
+ PortID idx = InvalidPortID);
+
+ void queueMemoryRead(const MachineID &id, Address addr, Cycles latency);
+ void queueMemoryWrite(const MachineID &id, Address addr, Cycles latency,
+ const DataBlock &block);
+ void queueMemoryWritePartial(const MachineID &id, Address addr, Cycles latency,
+ const DataBlock &block, int size);
+ void recvTimingResp(PacketPtr pkt);
+
public:
MachineID getMachineID() const { return m_machineID; }
MachineID m_machineID;
NodeID m_clusterID;
+ // MasterID used by some components of gem5.
+ MasterID m_masterId;
+
Network* m_net_ptr;
bool m_is_blocking;
std::map<Address, MessageBuffer*> m_block_map;
StatsCallback(AbstractController *_ctr) : ctr(_ctr) {}
void process() {ctr->collateStats();}
};
+
+ /**
+ * Port that forwards requests and receives responses from the
+ * memory controller. It has a queue of packets not yet sent.
+ */
+ class MemoryPort : public QueuedMasterPort
+ {
+ private:
+ // Packet queue used to store outgoing requests and responses.
+ MasterPacketQueue _queue;
+
+ // Controller that operates this port.
+ AbstractController *controller;
+
+ public:
+ MemoryPort(const std::string &_name, AbstractController *_controller,
+ const std::string &_label);
+
+ // Function for receiving a timing response from the peer port.
+ // Currently the pkt is handed to the coherence controller
+ // associated with this port.
+ bool recvTimingResp(PacketPtr pkt);
+ };
+
+ /* Master port to the memory controller. */
+ MemoryPort memoryPort;
+
+ // Message Buffer for storing the response received from the
+ // memory controller.
+ MessageBuffer *m_responseFromMemory_ptr;
+
+ // State that is stored in packets sent to the memory controller.
+ struct SenderState : public Packet::SenderState
+ {
+ // Id of the machine from which the request originated.
+ MachineID id;
+
+ SenderState(MachineID _id) : id(_id)
+ {}
+ };
};
#endif // __MEM_RUBY_SLICC_INTERFACE_ABSTRACTCONTROLLER_HH__
# Brad Beckmann
from m5.params import *
-from ClockedObject import ClockedObject
+from m5.proxy import *
+from MemObject import MemObject
-class RubyController(ClockedObject):
+class RubyController(MemObject):
type = 'RubyController'
cxx_class = 'AbstractController'
cxx_header = "mem/ruby/slicc_interface/AbstractController.hh"
number_of_TBEs = Param.Int(256, "")
ruby_system = Param.RubySystem("")
- peer = Param.RubyController(NULL, "")
+ memory = MasterPort("Port for attaching a memory controller")
+ system = Param.System(Parent.any, "system object parameter")
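With the memory MasterPort in place, a directory controller attaches directly
to a classic memory controller instead of owning a RubyMemoryControl. A
minimal wiring sketch (assumes an existing system and dir_cntrl, and a
DDR3_1600_x64 controller; real configs do this via setup_memory_controllers()
in Ruby.py):

    from m5.objects import DDR3_1600_x64
    # One controller per contiguous address range; the controller's
    # slave port pairs with the directory's new master port.
    mem_ctrl = DDR3_1600_x64(range = system.mem_ranges[0])
    dir_cntrl.memory = mem_ctrl.port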
from m5.params import *
from m5.SimObject import SimObject
-from Controller import RubyController
class RubyCache(SimObject):
type = 'RubyCache'
cxx_header = "mem/ruby/structures/DirectoryMemory.hh"
version = Param.Int(0, "")
size = Param.MemorySize("1GB", "capacity in bytes")
- use_map = Param.Bool(False, "enable sparse memory")
- map_levels = Param.Int(4, "sparse memory map levels")
# the default value of the numa high bit is specified in the command line
# option and must be passed into the directory memory sim object
numa_high_bit = Param.Int("numa high bit")
+++ /dev/null
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * Copyright (c) 2012 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "debug/RubyStats.hh"
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
-#include "mem/ruby/structures/MemoryControl.hh"
-#include "mem/ruby/system/System.hh"
-
-using namespace std;
-MemoryControl::MemoryControl(const Params *p)
- : ClockedObject(p), Consumer(this), m_event(this)
-{
- g_system_ptr->registerMemController(this);
-}
-
-MemoryControl::~MemoryControl() {};
-
-void
-MemoryControl::recordRequestType(MemoryControlRequestType request) {
- DPRINTF(RubyStats, "Recorded request: %s\n",
- MemoryControlRequestType_to_string(request));
-}
+++ /dev/null
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * Copyright (c) 2012 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __MEM_RUBY_STRUCTURES_ABSTRACT_MEMORY_CONTROL_HH__
-#define __MEM_RUBY_STRUCTURES_ABSTRACT_MEMORY_CONTROL_HH__
-
-#include <iostream>
-#include <list>
-#include <string>
-
-#include "mem/protocol/MemoryControlRequestType.hh"
-#include "mem/ruby/common/Consumer.hh"
-#include "mem/ruby/slicc_interface/Message.hh"
-#include "mem/ruby/structures/MemoryNode.hh"
-#include "params/MemoryControl.hh"
-#include "sim/clocked_object.hh"
-
-//////////////////////////////////////////////////////////////////////////////
-
-class MemoryControl : public ClockedObject, public Consumer
-{
- public:
- typedef MemoryControlParams Params;
- const Params *params() const
- { return dynamic_cast<const Params *>(_params); }
-
- MemoryControl(const Params *p);
- virtual void init() = 0;
- virtual void reset() = 0;
-
- ~MemoryControl();
-
- virtual void wakeup() = 0;
-
- virtual void setConsumer(Consumer* consumer_ptr) = 0;
- virtual Consumer* getConsumer() = 0;
- virtual void setClockObj(ClockedObject* consumer_ptr) {}
-
- virtual void setDescription(const std::string& name) = 0;
- virtual std::string getDescription() = 0;
-
- // Called from the directory:
- virtual void enqueue(const MsgPtr& message, Cycles latency) = 0;
- virtual void enqueueMemRef(MemoryNode *memRef) = 0;
- virtual void dequeue() = 0;
- virtual const Message* peek() = 0;
- virtual MemoryNode *peekNode() = 0;
- virtual bool isReady() = 0;
- virtual bool areNSlotsAvailable(int n) = 0; // infinite queue length
-
- virtual void print(std::ostream& out) const = 0;
- virtual void regStats() {};
-
- virtual const int getChannel(const physical_address_t addr) const = 0;
- virtual const int getBank(const physical_address_t addr) const = 0;
- virtual const int getRank(const physical_address_t addr) const = 0;
- virtual const int getRow(const physical_address_t addr) const = 0;
-
- //added by SS
- virtual int getBanksPerRank() = 0;
- virtual int getRanksPerDimm() = 0;
- virtual int getDimmsPerChannel() = 0;
-
- virtual void recordRequestType(MemoryControlRequestType requestType);
-
- virtual bool functionalRead(Packet *pkt)
- { fatal("Functional read access not implemented!");}
- virtual uint32_t functionalWrite(Packet *pkt)
- { fatal("Functional read access not implemented!");}
-
-protected:
- class MemCntrlEvent : public Event
- {
- public:
- MemCntrlEvent(MemoryControl* _mem_cntrl)
- {
- mem_cntrl = _mem_cntrl;
- }
- private:
- void process() { mem_cntrl->wakeup(); }
-
- MemoryControl* mem_cntrl;
- };
-
- MemCntrlEvent m_event;
-};
-
-#endif // __MEM_RUBY_STRUCTURES_ABSTRACT_MEMORY_CONTROL_HH__
+++ /dev/null
-# Copyright (c) 2009 Advanced Micro Devices, Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met: redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer;
-# redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution;
-# neither the name of the copyright holders nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Authors: Steve Reinhardt
-# Brad Beckmann
-
-from m5.params import *
-from ClockedObject import ClockedObject
-
-class MemoryControl(ClockedObject):
- abstract = True
- type = 'MemoryControl'
- cxx_class = 'MemoryControl'
- cxx_header = "mem/ruby/structures/MemoryControl.hh"
- version = Param.Int("");
- ruby_system = Param.RubySystem("")
out << "[";
out << m_time << ", ";
out << m_msg_counter << ", ";
- out << m_msgptr << "; ";
+ out << pkt << "; ";
out << "]";
}
{
public:
// old constructor
- MemoryNode(const Cycles& time, int counter, const MsgPtr& msgptr,
+ MemoryNode(const Cycles& time, int counter, const PacketPtr p,
const physical_address_t addr, const bool is_mem_read)
- : m_time(time)
+ : m_time(time), pkt(p)
{
m_msg_counter = counter;
- m_msgptr = msgptr;
m_addr = addr;
m_is_mem_read = is_mem_read;
m_is_dirty_wb = !is_mem_read;
}
// new constructor
- MemoryNode(const Cycles& time, const MsgPtr& msgptr,
+ MemoryNode(const Cycles& time, const PacketPtr p,
const physical_address_t addr, const bool is_mem_read,
const bool is_dirty_wb)
- : m_time(time)
+ : m_time(time), pkt(p)
{
m_msg_counter = 0;
- m_msgptr = msgptr;
m_addr = addr;
m_is_mem_read = is_mem_read;
m_is_dirty_wb = is_dirty_wb;
Cycles m_time;
int m_msg_counter;
- MsgPtr m_msgptr;
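+    // Packet being serviced; replaces the old MsgPtr now that requests
+    // arrive as classic packets.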
+ PacketPtr pkt;
physical_address_t m_addr;
bool m_is_mem_read;
bool m_is_dirty_wb;
+++ /dev/null
-/*
- * Copyright (c) 2009 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __MEM_RUBY_STRUCTURES_MEMORYVECTOR_HH__
-#define __MEM_RUBY_STRUCTURES_MEMORYVECTOR_HH__
-
-#include "base/trace.hh"
-#include "debug/RubyCacheTrace.hh"
-#include "mem/ruby/common/Address.hh"
-
-class DirectoryMemory;
-
-/**
- * MemoryVector holds memory data (DRAM only)
- */
-class MemoryVector
-{
- public:
- MemoryVector();
- MemoryVector(uint64 size);
- ~MemoryVector();
- friend class DirectoryMemory;
-
- void resize(uint64 size); // destructive
-
- void write(const Address & paddr, uint8_t *data, int len);
- uint8_t *read(const Address & paddr, uint8_t *data, int len);
- uint32_t collatePages(uint8_t *&raw_data);
- void populatePages(uint8_t *raw_data);
-
- private:
- uint8_t *getBlockPtr(const PhysAddress & addr);
-
- uint64 m_size;
- uint8_t **m_pages;
- uint32_t m_num_pages;
- const uint32_t m_page_offset_mask;
- static const uint32_t PAGE_SIZE = 4096;
-};
-
-inline
-MemoryVector::MemoryVector()
- : m_page_offset_mask(4095)
-{
- m_size = 0;
- m_num_pages = 0;
- m_pages = NULL;
-}
-
-inline
-MemoryVector::MemoryVector(uint64 size)
- : m_page_offset_mask(4095)
-{
- resize(size);
-}
-
-inline
-MemoryVector::~MemoryVector()
-{
- for (int i = 0; i < m_num_pages; i++) {
- if (m_pages[i] != 0) {
- delete [] m_pages[i];
- }
- }
- delete [] m_pages;
-}
-
-inline void
-MemoryVector::resize(uint64 size)
-{
- if (m_pages != NULL){
- for (int i = 0; i < m_num_pages; i++) {
- if (m_pages[i] != 0) {
- delete [] m_pages[i];
- }
- }
- delete [] m_pages;
- }
- m_size = size;
- assert(size%PAGE_SIZE == 0);
- m_num_pages = size >> 12;
- m_pages = new uint8_t*[m_num_pages];
- memset(m_pages, 0, m_num_pages * sizeof(uint8_t*));
-}
-
-inline void
-MemoryVector::write(const Address & paddr, uint8_t *data, int len)
-{
- assert(paddr.getAddress() + len <= m_size);
- uint32_t page_num = paddr.getAddress() >> 12;
- if (m_pages[page_num] == 0) {
- bool all_zeros = true;
- for (int i = 0; i < len;i++) {
- if (data[i] != 0) {
- all_zeros = false;
- break;
- }
- }
- if (all_zeros)
- return;
- m_pages[page_num] = new uint8_t[PAGE_SIZE];
- memset(m_pages[page_num], 0, PAGE_SIZE);
- uint32_t offset = paddr.getAddress() & m_page_offset_mask;
- memcpy(&m_pages[page_num][offset], data, len);
- } else {
- memcpy(&m_pages[page_num][paddr.getAddress()&m_page_offset_mask],
- data, len);
- }
-}
-
-inline uint8_t*
-MemoryVector::read(const Address & paddr, uint8_t *data, int len)
-{
- assert(paddr.getAddress() + len <= m_size);
- uint32_t page_num = paddr.getAddress() >> 12;
- if (m_pages[page_num] == 0) {
- memset(data, 0, len);
- } else {
- memcpy(data, &m_pages[page_num][paddr.getAddress()&m_page_offset_mask],
- len);
- }
- return data;
-}
-
-inline uint8_t*
-MemoryVector::getBlockPtr(const PhysAddress & paddr)
-{
- uint32_t page_num = paddr.getAddress() >> 12;
- if (m_pages[page_num] == 0) {
- m_pages[page_num] = new uint8_t[PAGE_SIZE];
- memset(m_pages[page_num], 0, PAGE_SIZE);
- }
- return &m_pages[page_num][paddr.getAddress()&m_page_offset_mask];
-}
-
-/*!
- * Function for collating all the pages of the physical memory together.
- * In case a pointer for a page is NULL, this page needs only a single byte
- * to represent that the pointer is NULL. Otherwise, it needs 1 + PAGE_SIZE
- * bytes. The first represents that the page pointer is not NULL, and rest of
- * the bytes represent the data on the page.
- */
-
-inline uint32_t
-MemoryVector::collatePages(uint8_t *&raw_data)
-{
- uint32_t num_zero_pages = 0;
- uint32_t data_size = 0;
-
- for (uint32_t i = 0;i < m_num_pages; ++i)
- {
- if (m_pages[i] == 0) num_zero_pages++;
- }
-
- raw_data = new uint8_t[sizeof(uint32_t) /* number of pages*/ +
- m_num_pages /* whether the page is all zeros */ +
- PAGE_SIZE * (m_num_pages - num_zero_pages)];
-
- /* Write the number of pages to be stored. */
- memcpy(raw_data, &m_num_pages, sizeof(uint32_t));
- data_size = sizeof(uint32_t);
-
- DPRINTF(RubyCacheTrace, "collating %d pages\n", m_num_pages);
-
- for (uint32_t i = 0;i < m_num_pages; ++i)
- {
- if (m_pages[i] == 0) {
- raw_data[data_size] = 0;
- } else {
- raw_data[data_size] = 1;
- memcpy(raw_data + data_size + 1, m_pages[i], PAGE_SIZE);
- data_size += PAGE_SIZE;
- }
- data_size += 1;
- }
-
- return data_size;
-}
-
-/*!
- * Function for populating the pages of the memory using the available raw
- * data. Each page has a byte associate with it, which represents whether the
- * page was NULL or not, when all the pages were collated. The function assumes
- * that the number of pages in the memory are same as those that were recorded
- * in the checkpoint.
- */
-inline void
-MemoryVector::populatePages(uint8_t *raw_data)
-{
- uint32_t data_size = 0;
- uint32_t num_pages = 0;
-
- /* Read the number of pages that were stored. */
- memcpy(&num_pages, raw_data, sizeof(uint32_t));
- data_size = sizeof(uint32_t);
- assert(num_pages == m_num_pages);
-
- DPRINTF(RubyCacheTrace, "Populating %d pages\n", num_pages);
-
- for (uint32_t i = 0;i < m_num_pages; ++i)
- {
- assert(m_pages[i] == 0);
- if (raw_data[data_size] != 0) {
- m_pages[i] = new uint8_t[PAGE_SIZE];
- memcpy(m_pages[i], raw_data + data_size + 1, PAGE_SIZE);
- data_size += PAGE_SIZE;
- }
- data_size += 1;
- }
-}
-
-#endif // __MEM_RUBY_STRUCTURES_MEMORYVECTOR_HH__
// CONSTRUCTOR
RubyMemoryControl::RubyMemoryControl(const Params *p)
- : MemoryControl(p)
+ : AbstractMemory(p), Consumer(this), port(name() + ".port", *this),
+ m_event(this)
{
m_banks_per_rank = p->banks_per_rank;
m_ranks_per_dimm = p->ranks_per_dimm;
void
RubyMemoryControl::init()
{
- m_ram = g_system_ptr->getMemoryVector();
m_msg_counter = 0;
-
assert(m_tFaw <= 62); // must fit in a uint64 shift register
m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
}
}
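+// Expose our single slave port under the name "port"; defer any other
+// name to the base class.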
+BaseSlavePort&
+RubyMemoryControl::getSlavePort(const string &if_name, PortID idx)
+{
+ if (if_name != "port") {
+ return MemObject::getSlavePort(if_name, idx);
+ } else {
+ return port;
+ }
+}
+
void
RubyMemoryControl::reset()
{
}
// enqueue new request from directory
-void
-RubyMemoryControl::enqueue(const MsgPtr& message, Cycles latency)
+bool
+RubyMemoryControl::recvTimingReq(PacketPtr pkt)
{
- Cycles arrival_time = curCycle() + latency;
- const MemoryMsg* memMess = safe_cast<const MemoryMsg*>(message.get());
- physical_address_t addr = memMess->getAddr().getAddress();
- MemoryRequestType type = memMess->getType();
- bool is_mem_read = (type == MemoryRequestType_MEMORY_READ);
-
- if (is_mem_read) {
- m_ram->read(memMess->getAddr(), const_cast<uint8_t *>(
- memMess->getDataBlk().getData(0,
- RubySystem::getBlockSizeBytes())),
- RubySystem::getBlockSizeBytes());
- } else {
- m_ram->write(memMess->getAddr(), const_cast<uint8_t *>(
- memMess->getDataBlk().getData(0,
- RubySystem::getBlockSizeBytes())),
- RubySystem::getBlockSizeBytes());
- }
-
- MemoryNode *thisReq = new MemoryNode(arrival_time, message, addr,
+ Cycles arrival_time = curCycle();
+ physical_address_t addr = pkt->getAddr();
+ bool is_mem_read = pkt->isRead();
+
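+    // Perform the access on the backing store right away; access() also
+    // turns the packet into a response (checked in enqueueToDirectory).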
+ access(pkt);
+ MemoryNode *thisReq = new MemoryNode(arrival_time, pkt, addr,
is_mem_read, !is_mem_read);
enqueueMemRef(thisReq);
+ return true;
}
// Alternate entry point used when we already have a MemoryNode
}
}
-// dequeue, peek, and isReady are used to transfer completed requests
-// back to the directory
-void
-RubyMemoryControl::dequeue()
-{
- assert(isReady());
- MemoryNode *req = m_response_queue.front();
- m_response_queue.pop_front();
- delete req;
-}
-
-const Message*
-RubyMemoryControl::peek()
-{
- MemoryNode *node = peekNode();
- Message* msg_ptr = node->m_msgptr.get();
- assert(msg_ptr != NULL);
- return msg_ptr;
-}
-
-MemoryNode *
-RubyMemoryControl::peekNode()
-{
- assert(isReady());
- MemoryNode *req = m_response_queue.front();
- DPRINTF(RubyMemory, "Peek: memory request%7d: %#08x %c sched %c\n",
- req->m_msg_counter, req->m_addr, req->m_is_mem_read ? 'R':'W',
- m_event.scheduled() ? 'Y':'N');
-
- return req;
-}
-
-bool
-RubyMemoryControl::isReady()
-{
- return ((!m_response_queue.empty()) &&
- (m_response_queue.front()->m_time <= g_system_ptr->curCycle()));
-}
-
-void
-RubyMemoryControl::setConsumer(Consumer* consumer_ptr)
-{
- m_consumer_ptr = consumer_ptr;
-}
-
void
RubyMemoryControl::print(ostream& out) const
{
RubyMemoryControl::enqueueToDirectory(MemoryNode *req, Cycles latency)
{
Tick arrival_time = clockEdge(latency);
- Cycles ruby_arrival_time = g_system_ptr->ticksToCycles(arrival_time);
- req->m_time = ruby_arrival_time;
- m_response_queue.push_back(req);
+ PacketPtr pkt = req->pkt;
+
+ // access already turned the packet into a response
+ assert(pkt->isResponse());
+
+ // queue the packet in the response queue to be sent out after
+ // the static latency has passed
+ port.schedTimingResp(pkt, arrival_time);
DPRINTF(RubyMemory, "Enqueueing msg %#08x %c back to directory at %15d\n",
req->m_addr, req->m_is_mem_read ? 'R':'W', arrival_time);
-
- // schedule the wake up
- m_consumer_ptr->scheduleEventAbsolute(arrival_time);
}
// getBank returns an integer that is unique for each
req->m_is_mem_read? 'R':'W',
bank, m_event.scheduled() ? 'Y':'N');
- if (req->m_msgptr) { // don't enqueue L3 writebacks
- enqueueToDirectory(req, Cycles(m_mem_ctl_latency + m_mem_fixed_delay));
- }
+ enqueueToDirectory(req, Cycles(m_mem_ctl_latency + m_mem_fixed_delay));
+
m_oldRequest[bank] = 0;
markTfaw(rank);
m_bankBusyCounter[bank] = m_bank_busy_time;
{
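+    // Look for the request in packets still queued inside the controller
+    // before falling back to the backing store.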
for (std::list<MemoryNode *>::iterator it = m_input_queue.begin();
it != m_input_queue.end(); ++it) {
- Message* msg_ptr = (*it)->m_msgptr.get();
- if (msg_ptr->functionalRead(pkt)) {
+ PacketPtr msg = (*it)->pkt;
+ if (pkt->checkFunctional(msg)) {
return true;
}
}
for (std::list<MemoryNode *>::iterator it = m_response_queue.begin();
it != m_response_queue.end(); ++it) {
- Message* msg_ptr = (*it)->m_msgptr.get();
- if (msg_ptr->functionalRead(pkt)) {
+ PacketPtr msg = (*it)->pkt;
+ if (pkt->checkFunctional(msg)) {
return true;
}
}
for (uint32_t bank = 0; bank < m_total_banks; ++bank) {
for (std::list<MemoryNode *>::iterator it = m_bankQueues[bank].begin();
it != m_bankQueues[bank].end(); ++it) {
- Message* msg_ptr = (*it)->m_msgptr.get();
- if (msg_ptr->functionalRead(pkt)) {
+ PacketPtr msg = (*it)->pkt;
+ if (pkt->checkFunctional(msg)) {
return true;
}
}
}
- m_ram->read(Address(pkt->getAddr()), pkt->getPtr<uint8_t>(true),
- pkt->getSize());
-
+ functionalAccess(pkt);
return true;
}
for (std::list<MemoryNode *>::iterator it = m_input_queue.begin();
it != m_input_queue.end(); ++it) {
- Message* msg_ptr = (*it)->m_msgptr.get();
- if (msg_ptr->functionalWrite(pkt)) {
+ PacketPtr msg = (*it)->pkt;
+ if (pkt->checkFunctional(msg)) {
num_functional_writes++;
}
}
for (std::list<MemoryNode *>::iterator it = m_response_queue.begin();
it != m_response_queue.end(); ++it) {
- Message* msg_ptr = (*it)->m_msgptr.get();
- if (msg_ptr->functionalWrite(pkt)) {
+ PacketPtr msg = (*it)->pkt;
+ if (pkt->checkFunctional(msg)) {
num_functional_writes++;
}
}
for (uint32_t bank = 0; bank < m_total_banks; ++bank) {
for (std::list<MemoryNode *>::iterator it = m_bankQueues[bank].begin();
it != m_bankQueues[bank].end(); ++it) {
- Message* msg_ptr = (*it)->m_msgptr.get();
- if (msg_ptr->functionalWrite(pkt)) {
+ PacketPtr msg = (*it)->pkt;
+ if (pkt->checkFunctional(msg)) {
num_functional_writes++;
}
}
}
- m_ram->write(Address(pkt->getAddr()), pkt->getPtr<uint8_t>(true),
- pkt->getSize());
+ functionalAccess(pkt);
num_functional_writes++;
-
return num_functional_writes;
}
RubyMemoryControl::regStats()
{
m_profiler_ptr->regStats();
+ AbstractMemory::regStats();
}
RubyMemoryControl *
{
return new RubyMemoryControl(this);
}
+
+RubyMemoryControl::MemoryPort::MemoryPort(const std::string& name,
+ RubyMemoryControl& _memory)
+ : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
+ memory(_memory)
+{ }
+
+AddrRangeList
+RubyMemoryControl::MemoryPort::getAddrRanges() const
+{
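+    // The controller serves the single range configured on its
+    // AbstractMemory base.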
+ AddrRangeList ranges;
+ ranges.push_back(memory.getAddrRange());
+ return ranges;
+}
+
+void
+RubyMemoryControl::MemoryPort::recvFunctional(PacketPtr pkt)
+{
+ pkt->pushLabel(memory.name());
+
+ if (!queue.checkFunctional(pkt)) {
+ // Default implementation of SimpleTimingPort::recvFunctional()
+ // calls recvAtomic() and throws away the latency; we can save a
+ // little here by just not calculating the latency.
+ memory.functionalWrite(pkt);
+ }
+
+ pkt->popLabel();
+}
+
+Tick
+RubyMemoryControl::MemoryPort::recvAtomic(PacketPtr pkt)
+{
+ panic("This controller does not support recv atomic!\n");
+}
+
+bool
+RubyMemoryControl::MemoryPort::recvTimingReq(PacketPtr pkt)
+{
+ // pass it to the memory controller
+ return memory.recvTimingReq(pkt);
+}
#include <list>
#include <string>
+#include "mem/abstract_mem.hh"
#include "mem/protocol/MemoryMsg.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/profiler/MemCntrlProfiler.hh"
-#include "mem/ruby/structures/MemoryControl.hh"
-#include "mem/ruby/structures/MemoryVector.hh"
+#include "mem/ruby/structures/MemoryNode.hh"
#include "mem/ruby/system/System.hh"
#include "params/RubyMemoryControl.hh"
//////////////////////////////////////////////////////////////////////////////
-class RubyMemoryControl : public MemoryControl
+class RubyMemoryControl : public AbstractMemory, public Consumer
{
public:
typedef RubyMemoryControlParams Params;
~RubyMemoryControl();
+ virtual BaseSlavePort& getSlavePort(const std::string& if_name,
+ PortID idx = InvalidPortID);
unsigned int drain(DrainManager *dm);
-
void wakeup();
- void setConsumer(Consumer* consumer_ptr);
- Consumer* getConsumer() { return m_consumer_ptr; };
void setDescription(const std::string& name) { m_description = name; };
std::string getDescription() { return m_description; };
// Called from the directory:
- void enqueue(const MsgPtr& message, Cycles latency);
+ bool recvTimingReq(PacketPtr pkt);
+ void recvFunctional(PacketPtr pkt);
void enqueueMemRef(MemoryNode *memRef);
- void dequeue();
- const Message* peek();
- MemoryNode *peekNode();
- bool isReady();
bool areNSlotsAvailable(int n) { return true; }; // infinite queue length
void print(std::ostream& out) const;
RubyMemoryControl (const RubyMemoryControl& obj);
RubyMemoryControl& operator=(const RubyMemoryControl& obj);
+ private:
+ // For now, make use of a queued slave port to avoid dealing with
+ // flow control for the responses being sent back
+ class MemoryPort : public QueuedSlavePort
+ {
+ SlavePacketQueue queue;
+ RubyMemoryControl& memory;
+
+ public:
+ MemoryPort(const std::string& name, RubyMemoryControl& _memory);
+
+ protected:
+ Tick recvAtomic(PacketPtr pkt);
+
+ void recvFunctional(PacketPtr pkt);
+
+ bool recvTimingReq(PacketPtr);
+
+ virtual AddrRangeList getAddrRanges() const;
+ };
+
+ /**
+ * Our incoming port, for a multi-ported controller add a crossbar
+ * in front of it
+ */
+ MemoryPort port;
+
// data members
- Consumer* m_consumer_ptr; // Consumer to signal a wakeup()
std::string m_description;
int m_msg_counter;
MemCntrlProfiler* m_profiler_ptr;
- // Actual physical memory.
- MemoryVector* m_ram;
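+    // Event wrapper that invokes this controller's wakeup() when it fires.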
+ class MemCntrlEvent : public Event
+ {
+ public:
+ MemCntrlEvent(RubyMemoryControl* _mem_cntrl)
+ {
+ mem_cntrl = _mem_cntrl;
+ }
+ private:
+ void process() { mem_cntrl->wakeup(); }
+
+ RubyMemoryControl* mem_cntrl;
+ };
+
+ MemCntrlEvent m_event;
};
std::ostream& operator<<(std::ostream& out, const RubyMemoryControl& obj);
# Brad Beckmann
from m5.params import *
-from m5.SimObject import SimObject
-from MemoryControl import MemoryControl
+from AbstractMemory import AbstractMemory
-class RubyMemoryControl(MemoryControl):
+class RubyMemoryControl(AbstractMemory):
type = 'RubyMemoryControl'
cxx_class = 'RubyMemoryControl'
cxx_header = "mem/ruby/structures/RubyMemoryControl.hh"
- version = Param.Int("");
banks_per_rank = Param.Int(8, "");
ranks_per_dimm = Param.Int(2, "");
tFaw = Param.Int(0, "");
mem_random_arbitrate = Param.Int(0, "");
mem_fixed_delay = Param.Cycles(0, "");
+
+ # single-ported on the system interface side, instantiate with a
+ # crossbar in front of the controller for multiple ports
+ port = SlavePort("Slave port")
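+
+    # Hypothetical usage sketch (names illustrative only): attach this
+    # controller behind a directory controller's memory-side port:
+    #   mem_ctrl = RubyMemoryControl(range = AddrRange('512MB'))
+    #   dir_cntrl.memory = mem_ctrl.port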
SimObject('Cache.py')
SimObject('DirectoryMemory.py')
-SimObject('MemoryControl.py')
SimObject('RubyMemoryControl.py')
SimObject('RubyPrefetcher.py')
SimObject('WireBuffer.py')
Source('DirectoryMemory.cc')
Source('CacheMemory.cc')
-Source('MemoryControl.cc')
Source('WireBuffer.cc')
Source('RubyMemoryControl.cc')
Source('MemoryNode.cc')
"insert random delays on message enqueue times");
block_size_bytes = Param.UInt32(64,
"default cache block size; must be a power of two");
- mem_size = Param.MemorySize("total memory size of the system");
- no_mem_vec = Param.Bool(False, "do not allocate Ruby's mem vector");
+ memory_size_bits = Param.UInt32(64,
+ "number of bits that a memory address requires");
# Profiler related configuration variables
hot_lines = Param.Bool(False, "")
class RubyPortProxy(RubyPort):
type = 'RubyPortProxy'
cxx_header = "mem/ruby/system/RubyPortProxy.hh"
- access_phys_mem = True
+ access_phys_mem = False
class RubySequencer(RubyPort):
type = 'RubySequencer'
bool RubySystem::m_randomization;
uint32_t RubySystem::m_block_size_bytes;
uint32_t RubySystem::m_block_size_bits;
-uint64_t RubySystem::m_memory_size_bytes;
uint32_t RubySystem::m_memory_size_bits;
RubySystem::RubySystem(const Params *p)
m_block_size_bytes = p->block_size_bytes;
assert(isPowerOf2(m_block_size_bytes));
m_block_size_bits = floorLog2(m_block_size_bytes);
-
- m_memory_size_bytes = p->mem_size;
- if (m_memory_size_bytes == 0) {
- m_memory_size_bits = 0;
- } else {
- m_memory_size_bits = ceilLog2(m_memory_size_bytes);
- }
-
- if (p->no_mem_vec) {
- m_mem_vec = NULL;
- } else {
- m_mem_vec = new MemoryVector;
- m_mem_vec->resize(m_memory_size_bytes);
- }
+ m_memory_size_bits = p->memory_size_bits;
m_warmup_enabled = false;
m_cooldown_enabled = false;
g_abs_controls[id.getType()][id.getNum()] = cntrl;
}
-void
-RubySystem::registerMemController(MemoryControl *mc) {
- m_memory_controller_vec.push_back(mc);
-}
-
RubySystem::~RubySystem()
{
delete m_network;
delete m_profiler;
- if (m_mem_vec)
- delete m_mem_vec;
}
void
// Restore curTick
setCurTick(curtick_original);
- uint8_t *raw_data = NULL;
- uint64 memory_trace_size = m_mem_vec->collatePages(raw_data);
-
- string memory_trace_file = name() + ".memory.gz";
- writeCompressedTrace(raw_data, memory_trace_file,
- memory_trace_size);
-
- SERIALIZE_SCALAR(memory_trace_file);
- SERIALIZE_SCALAR(memory_trace_size);
-
-
// Aggregate the trace entries together into a single array
- raw_data = new uint8_t[4096];
+ uint8_t *raw_data = new uint8_t[4096];
uint64 cache_trace_size = m_cache_recorder->aggregateRecords(&raw_data,
4096);
string cache_trace_file = name() + ".cache.gz";
uint64 block_size_bytes = getBlockSizeBytes();
UNSERIALIZE_OPT_SCALAR(block_size_bytes);
- if (m_mem_vec != NULL) {
- string memory_trace_file;
- uint64 memory_trace_size = 0;
-
- UNSERIALIZE_SCALAR(memory_trace_file);
- UNSERIALIZE_SCALAR(memory_trace_size);
- memory_trace_file = cp->cptDir + "/" + memory_trace_file;
-
- readCompressedTrace(memory_trace_file, uncompressed_trace,
- memory_trace_size);
- m_mem_vec->populatePages(uncompressed_trace);
-
- delete [] uncompressed_trace;
- uncompressed_trace = NULL;
- }
-
string cache_trace_file;
uint64 cache_trace_size = 0;
m_cache_recorder = NULL;
m_warmup_enabled = false;
- // reset DRAM so that it's not waiting for events on the old event
- // queue
- for (int i = 0; i < m_memory_controller_vec.size(); ++i) {
- m_memory_controller_vec[i]->reset();
- }
-
// Restore eventq head
eventq_head = eventq->replaceHead(eventq_head);
// Restore curTick and Ruby System's clock
#include "base/output.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
-#include "mem/ruby/structures/MemoryControl.hh"
-#include "mem/ruby/structures/MemoryVector.hh"
#include "mem/ruby/system/CacheRecorder.hh"
#include "mem/packet.hh"
#include "params/RubySystem.hh"
static int getRandomization() { return m_randomization; }
static uint32_t getBlockSizeBytes() { return m_block_size_bytes; }
static uint32_t getBlockSizeBits() { return m_block_size_bits; }
- static uint64_t getMemorySizeBytes() { return m_memory_size_bytes; }
static uint32_t getMemorySizeBits() { return m_memory_size_bits; }
// Public Methods
return m_profiler;
}
- MemoryVector*
- getMemoryVector()
- {
- assert(m_mem_vec != NULL);
- return m_mem_vec;
- }
-
void regStats() { m_profiler->regStats(name()); }
void collateStats() { m_profiler->collateStats(); }
void resetStats();
void registerNetwork(Network*);
void registerAbstractController(AbstractController*);
- void registerMemController(MemoryControl *mc);
bool eventQueueEmpty() { return eventq->empty(); }
void enqueueRubyEvent(Tick tick)
static bool m_randomization;
static uint32_t m_block_size_bytes;
static uint32_t m_block_size_bits;
- static uint64_t m_memory_size_bytes;
static uint32_t m_memory_size_bits;
Network* m_network;
- std::vector<MemoryControl *> m_memory_controller_vec;
std::vector<AbstractController *> m_abs_cntrl_vec;
public:
Profiler* m_profiler;
- MemoryVector* m_mem_vec;
bool m_warmup_enabled;
bool m_cooldown_enabled;
CacheRecorder* m_cache_recorder;
void recordCacheTrace(int cntrl, CacheRecorder* tr);
Sequencer* getSequencer() const;
- uint32_t functionalWriteBuffers(PacketPtr&);
+ int functionalWriteBuffers(PacketPtr&);
void countTransition(${ident}_State state, ${ident}_Event event);
void possibleTransition(${ident}_State state, ${ident}_Event event);
# Function for functional writes to messages buffered in the controller
code('''
-uint32_t
+int
$c_ident::functionalWriteBuffers(PacketPtr& pkt)
{
- uint32_t num_functional_writes = 0;
+ int num_functional_writes = 0;
''')
for var in self.objects:
vtype = var.type
ac1 = dynamic_cast<AbstractController*>(o1);
ac2 = dynamic_cast<AbstractController*>(o2);
- if (ac1 || ac2) {
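+    // The "memory" connection is now a classic port rather than a message
+    // buffer, so skip it here.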
+ if ((ac1 || ac2) && name1 != "memory" && name2 != "memory") {
MessageBuffer *b = new MessageBuffer();
// set the message buffer associated with the provided names
# system simulated
system = System(cpu = cpus,
funcmem = SimpleMemory(in_addr_map = False),
- physmem = SimpleMemory(null = True),
funcbus = NoncoherentXBar())
# Dummy voltage domain for all our clock domains
system.voltage_domain = VoltageDomain()
cpu.interrupts.int_master = system.ruby._cpu_ports[i].slave
cpu.interrupts.int_slave = system.ruby._cpu_ports[i].master
-system.physmem = [SimpleMemory(range = r, null = True)
- for r in system.mem_ranges]
-
root = Root(full_system = True, system = system)
m5.ticks.setGlobalFrequency('1THz')
# We set the testers as cpu for ruby to find the correct clock domains
# for the L1 Objects.
-system = System(cpu = tester, physmem = SimpleMemory(null = True))
+system = System(cpu = tester)
# Dummy voltage domain for all our clock domains
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
options.num_cpus = nb_cores
# system simulated
-system = System(cpu = cpus, physmem = SimpleMemory(),
- clk_domain = SrcClockDomain(clock = '1GHz'))
+system = System(cpu = cpus, clk_domain = SrcClockDomain(clock = '1GHz'))
# Create a separate clock domain for components that should run at
# CPUs frequency
# this is a uniprocessor only test
options.num_cpus = 1
-
cpu = TimingSimpleCPU(cpu_id=0)
-system = System(cpu = cpu, physmem = SimpleMemory(null = True))
+system = System(cpu = cpu)
+
# Dummy voltage domain for all our clock domains
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
system.clk_domain = SrcClockDomain(clock = '1GHz',