def bp_names():
"""Return a list of valid Branch Predictor names."""
- return _bp_classes.keys()
+ return list(_bp_classes.keys())
# Add all BPs in the object hierarchy.
for name, cls in inspect.getmembers(m5.objects, is_bp_class):
None, 'android-ics')]
}
-benchs = Benchmarks.keys()
+benchs = list(Benchmarks.keys())
benchs.sort()
DefinedBenchmarks = ", ".join(benchs)
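# --- Illustrative aside, not part of the patch: in Python 3, dict.keys()
# returns a view object with no sort() method, so the patch materializes
# it with list() before sorting. A minimal sketch with made-up benchmarks:
Benchmarks_example = {'mcf': None, 'bzip2': None, 'gcc': None}
benchs_example = list(Benchmarks_example.keys())  # view -> real list
benchs_example.sort()                             # in-place sort now works
assert benchs_example == ['bzip2', 'gcc', 'mcf']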
if options.memchecker:
system.memchecker = MemChecker()
- for i in xrange(options.num_cpus):
+ for i in range(options.num_cpus):
if options.caches:
icache = icache_class(size=options.l1i_size,
assoc=options.l1i_assoc)
def cpu_names():
"""Return a list of valid CPU names."""
- return _cpu_classes.keys()
+ return list(_cpu_classes.keys())
def config_etrace(cpu_cls, cpu_list, options):
if issubclass(cpu_cls, m5.objects.DerivO3CPU):
# Set up the Intel MP table
base_entries = []
ext_entries = []
- for i in xrange(numCPUs):
+ for i in range(numCPUs):
bp = X86IntelMPProcessor(
local_apic_id = i,
local_apic_version = 0x14,
def create_TLB_Coalescer(options, my_level, my_index, TLB_name, Coalescer_name):
    # arguments: options, TLB level, number of private structures for this level,
# TLB name and Coalescer name
- for i in xrange(my_index):
+ for i in range(my_index):
TLB_name.append(eval(TLB_constructor(my_level)))
Coalescer_name.append(eval(Coalescer_constructor(my_level)))
    # Create the hierarchy
# Call the appropriate constructors and add objects to the system
- for i in xrange(len(TLB_hierarchy)):
+ for i in range(len(TLB_hierarchy)):
hierarchy_level = TLB_hierarchy[i]
level = i+1
for TLB_type in hierarchy_level:
# Each TLB is connected with its Coalescer through a single port.
# There is a one-to-one mapping of TLBs to Coalescers at a given level
# This won't be modified no matter what the hierarchy looks like.
- for i in xrange(len(TLB_hierarchy)):
+ for i in range(len(TLB_hierarchy)):
hierarchy_level = TLB_hierarchy[i]
level = i+1
for TLB_type in hierarchy_level:
name = TLB_type['name']
num_TLBs = TLB_type['width']
if name == 'l1': # L1 D-TLBs
- tlb_per_cu = num_TLBs / n_cu
+ tlb_per_cu = num_TLBs // n_cu
for cu_idx in range(n_cu):
if tlb_per_cu:
for tlb in range(tlb_per_cu):
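# --- Illustrative aside, not part of the patch: Python 3 makes `/` true
# division (returning a float) and reserves `//` for floor division, so
# any quotient that feeds range() or an index must use `//`, as above.
# Sketch with assumed counts:
num_TLBs_example, n_cu_example = 8, 4
assert num_TLBs_example / n_cu_example == 2.0   # float under Python 3
assert num_TLBs_example // n_cu_example == 2    # int, safe for range()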
num_lanes=opt.num_lanes_per_link,
link_speed=opt.serial_link_speed,
delay=opt.total_ctrl_latency) for i in
- xrange(opt.num_serial_links)]
+ range(opt.num_serial_links)]
system.hmc_host.seriallink = sl
# enable global monitor
if opt.enable_global_monitor:
system.hmc_host.lmonitor = [CommMonitor() for i in
- xrange(opt.num_serial_links)]
+ range(opt.num_serial_links)]
# set the clock frequency for serial link
- for i in xrange(opt.num_serial_links):
+ for i in range(opt.num_serial_links):
clk = opt.link_controller_frequency
vd = VoltageDomain(voltage='1V')
scd = SrcClockDomain(clock=clk, voltage_domain=vd)
hh = system.hmc_host
if opt.arch == "distributed":
mb = system.membus
- for i in xrange(opt.num_links_controllers):
+ for i in range(opt.num_links_controllers):
if opt.enable_global_monitor:
mb.master = hh.lmonitor[i].slave
hh.lmonitor[i].master = hh.seriallink[i].slave
        mb.master = hh.seriallink[i].slave
if opt.arch == "same":
- for i in xrange(opt.num_links_controllers):
+ for i in range(opt.num_links_controllers):
if opt.enable_global_monitor:
hh.lmonitor[i].master = hh.seriallink[i].slave
system.mem_ranges = addr_ranges_vaults
if opt.enable_link_monitor:
- lm = [CommMonitor() for i in xrange(opt.num_links_controllers)]
+ lm = [CommMonitor() for i in range(opt.num_links_controllers)]
system.hmc_dev.lmonitor = lm
    # 4 HMC crossbars located in the device's logic base (LoB)
frontend_latency=opt.xbar_frontend_latency,
forward_latency=opt.xbar_forward_latency,
response_latency=opt.xbar_response_latency) for i in
- xrange(opt.number_mem_crossbar)]
+ range(opt.number_mem_crossbar)]
system.hmc_dev.xbar = xb
- for i in xrange(opt.number_mem_crossbar):
+ for i in range(opt.number_mem_crossbar):
clk = opt.xbar_frequency
vd = VoltageDomain(voltage='1V')
scd = SrcClockDomain(clock=clk, voltage_domain=vd)
system.hmc_dev.xbar[i].clk_domain = scd
    # Attach the 4 serial links to the 4 crossbars
- for i in xrange(opt.num_serial_links):
+ for i in range(opt.num_serial_links):
if opt.enable_link_monitor:
system.hmc_host.seriallink[i].master = \
system.hmc_dev.lmonitor[i].slave
# create a list of buffers
system.hmc_dev.buffers = [Bridge(req_size=opt.xbar_buffer_size_req,
resp_size=opt.xbar_buffer_size_resp)
- for i in xrange(numx*(opt.mem_chunk-1))]
+ for i in range(numx*(opt.mem_chunk-1))]
# Buffer iterator
it = iter(range(len(system.hmc_dev.buffers)))
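# --- Illustrative aside, not part of the patch: wrapping range() in iter()
# gives an explicit iterator whose next() doles out one buffer index per
# call, which is how the list above is likely consumed. Hypothetical sketch:
buffers_example = ['buf0', 'buf1', 'buf2']
it_example = iter(range(len(buffers_example)))
first = buffers_example[next(it_example)]    # 'buf0'
second = buffers_example[next(it_example)]   # 'buf1'
assert (first, second) == ('buf0', 'buf1')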
def mem_names():
"""Return a list of valid memory names."""
- return _mem_classes.keys()
+ return list(_mem_classes.keys())
# Add all memory controllers in the object hierarchy.
for name, cls in inspect.getmembers(m5.objects, is_mem_class):
# array of controllers and set their parameters to match their
# address mapping in the case of a DRAM
for r in system.mem_ranges:
- for i in xrange(nbr_mem_ctrls):
+ for i in range(nbr_mem_ctrls):
mem_ctrl = create_mem_ctrl(cls, r, i, nbr_mem_ctrls, intlv_bits,
intlv_size)
# Set the number of ranks based on the command-line
subsystem.mem_ctrls = mem_ctrls
# Connect the controllers to the membus
- for i in xrange(len(subsystem.mem_ctrls)):
+ for i in range(len(subsystem.mem_ctrls)):
if opt_mem_type == "HMC_2500_1x32":
-            subsystem.mem_ctrls[i].port = xbar[i/4].master
+            subsystem.mem_ctrls[i].port = xbar[i//4].master
# Set memory device size. There is an independent controller for
# System options
parser.add_option("--kernel", action="store", type="string")
parser.add_option("--os-type", action="store", type="choice",
- choices=os_types[buildEnv['TARGET_ISA']], default="linux",
- help="Specifies type of OS to boot")
+ choices=os_types[str(buildEnv['TARGET_ISA'])],
+ default="linux",
+ help="Specifies type of OS to boot")
parser.add_option("--script", action="store", type="string")
parser.add_option("--frame-capture", action="store_true",
help="Stores changed frame buffers from the VNC server to compressed "\
def platform_names():
"""Return a list of valid Platform names."""
- return _platform_classes.keys() + _platform_aliases.keys()
+ return list(_platform_classes.keys()) + list(_platform_aliases.keys())
# Add all Platforms in the object hierarchy.
for name, cls in inspect.getmembers(m5.objects, is_platform_class):
switch_cpus = None
if options.prog_interval:
- for i in xrange(np):
+ for i in range(np):
testsys.cpu[i].progress_interval = options.prog_interval
if options.maxinsts:
- for i in xrange(np):
+ for i in range(np):
testsys.cpu[i].max_insts_any_thread = options.maxinsts
if cpu_class:
switch_cpus = [cpu_class(switched_out=True, cpu_id=(i))
- for i in xrange(np)]
+ for i in range(np)]
- for i in xrange(np):
+ for i in range(np):
if options.fast_forward:
testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
switch_cpus[i].system = testsys
CpuConfig.config_etrace(cpu_class, switch_cpus, options)
testsys.switch_cpus = switch_cpus
- switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]
+ switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in range(np)]
if options.repeat_switch:
switch_class = getCPUClass(options.cpu_type)[0]
sys.exit(1)
repeat_switch_cpus = [switch_class(switched_out=True, \
- cpu_id=(i)) for i in xrange(np)]
+ cpu_id=(i)) for i in range(np)]
- for i in xrange(np):
+ for i in range(np):
repeat_switch_cpus[i].system = testsys
repeat_switch_cpus[i].workload = testsys.cpu[i].workload
repeat_switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain
if cpu_class:
repeat_switch_cpu_list = [(switch_cpus[i], repeat_switch_cpus[i])
- for i in xrange(np)]
+ for i in range(np)]
else:
repeat_switch_cpu_list = [(testsys.cpu[i], repeat_switch_cpus[i])
- for i in xrange(np)]
+ for i in range(np)]
if options.standard_switch:
switch_cpus = [TimingSimpleCPU(switched_out=True, cpu_id=(i))
- for i in xrange(np)]
+ for i in range(np)]
switch_cpus_1 = [DerivO3CPU(switched_out=True, cpu_id=(i))
- for i in xrange(np)]
+ for i in range(np)]
- for i in xrange(np):
+ for i in range(np):
switch_cpus[i].system = testsys
switch_cpus_1[i].system = testsys
switch_cpus[i].workload = testsys.cpu[i].workload
testsys.switch_cpus = switch_cpus
testsys.switch_cpus_1 = switch_cpus_1
- switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]
- switch_cpu_list1 = [(switch_cpus[i], switch_cpus_1[i]) for i in xrange(np)]
+ switch_cpu_list = [
+ (testsys.cpu[i], switch_cpus[i]) for i in range(np)
+ ]
+ switch_cpu_list1 = [
+ (switch_cpus[i], switch_cpus_1[i]) for i in range(np)
+ ]
# set the checkpoint in the cpu before m5.instantiate is called
if options.take_checkpoints != None and \
offset = int(options.take_checkpoints)
# Set an instruction break point
if options.simpoint:
- for i in xrange(np):
+ for i in range(np):
if testsys.cpu[i].workload[0].simpoint == 0:
fatal('no simpoint for testsys.cpu[%d].workload[0]', i)
checkpoint_inst = int(testsys.cpu[i].workload[0].simpoint) + offset
options.take_checkpoints = offset
# Set all test cpus with the right number of instructions
# for the upcoming simulation
- for i in xrange(np):
+ for i in range(np):
testsys.cpu[i].max_insts_any_thread = offset
if options.take_simpoint_checkpoints != None:
#
# Authors: Ali Saidi
+
+from six import string_types
import os, sys
config_path = os.path.dirname(os.path.abspath(__file__))
_sys_paths = None
def __init__(self, subdirs, sys_paths=None):
- if isinstance(subdirs, basestring):
+ if isinstance(subdirs, string_types):
subdirs = [subdirs]
self._subdir = os.path.join(*subdirs)
if sys_paths:
-        paths = filter(os.path.isdir, paths)
+        paths = list(filter(os.path.isdir, paths))
if not paths:
- raise IOError, "Can't find a path to system files."
+ raise IOError("Can't find a path to system files.")
- self._sys_paths = paths
+ self._sys_paths = list(paths)
filepath = os.path.join(self._subdir, filename)
paths = (os.path.join(p, filepath) for p in self._sys_paths)
try:
return next(p for p in paths if os.path.exists(p))
except StopIteration:
- raise IOError, "Can't find file '%s' on path." % filename
+ raise IOError("Can't find file '%s' on path." % filename)
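# --- Illustrative aside, not part of the patch: the Python 2 statement form
# `raise IOError, "msg"` is a syntax error in Python 3; exceptions must be
# raised as call expressions, as the patch does above. A self-contained
# sketch of the same first-match-or-raise pattern (names assumed):
import os
def first_existing_path(candidates):
    try:
        return next(p for p in candidates if os.path.exists(p))
    except StopIteration:
        raise IOError("Can't find file on path.")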
disk = PathSearchFunc('disks')
binary = PathSearchFunc('binaries')
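# --- Illustrative aside, not part of the patch: six.string_types is (str,)
# on Python 3 and (basestring,) on Python 2, so the isinstance(subdirs,
# string_types) check above accepts the native string type on either
# interpreter:
from six import string_types
assert isinstance('disks', string_types)
assert not isinstance(['disks'], string_types)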
defns = []
# Then apply them to the produced new env
- for i in xrange(0, len(bindings)):
+ for i in range(0, len(bindings)):
name, binding_expr = bindings[i]
defns.append(binding_expr(new_env))
try:
func = getattr(self.__class__, input_set)
except AttributeError:
- raise AttributeError, \
- 'The benchmark %s does not have the %s input set' % \
- (self.name, input_set)
+ raise AttributeError(
+ 'The benchmark %s does not have the %s input set' % \
+ (self.name, input_set))
executable = joinpath(spec_dist, 'binaries', isa, os, self.binary)
if not isfile(executable):
- raise AttributeError, '%s not found' % executable
+ raise AttributeError('%s not found' % executable)
self.executable = executable
# root of tree for input & output data files
self.input_set = input_set
if not isdir(inputs_dir):
- raise AttributeError, '%s not found' % inputs_dir
+ raise AttributeError('%s not found' % inputs_dir)
self.inputs_dir = [ inputs_dir ]
if isdir(all_dir):
elif (isa == 'sparc' or isa == 'sparc32'):
self.endian = 'bendian'
else:
- raise AttributeError, "unknown ISA %s" % isa
+ raise AttributeError("unknown ISA %s" % isa)
super(vortex, self).__init__(isa, os, input_set)
sync_repeat = options.dist_sync_repeat,
is_switch = True,
num_nodes = options.dist_size)
- for i in xrange(options.dist_size)]
+ for i in range(options.dist_size)]
for (i, link) in enumerate(switch.portlink):
link.int0 = switch.interface[i]
protolib.encodeMessage(proto_out, header)
# create a list of every single address to touch
- addrs = range(0, max_addr, burst_size)
+ addrs = list(range(0, max_addr, burst_size))
import random
random.shuffle(addrs)
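# --- Illustrative aside, not part of the patch: random.shuffle() mutates
# its argument in place, and a Python 3 range object is immutable, hence
# the list() wrapper above. Sketch with assumed address parameters:
import random
addrs_example = list(range(0, 256, 64))  # shuffle(range(...)) would raise TypeError
random.shuffle(addrs_example)
assert sorted(addrs_example) == [0, 64, 128, 192]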
# We sweep itt max using the multipliers specified by the user.
itt_max_str = args.itt_list.strip().split()
-itt_max_multiples = map(lambda x : int(x), itt_max_str)
+itt_max_multiples = [ int(x) for x in itt_max_str ]
if len(itt_max_multiples) == 0:
fatal("String for itt-max-list detected empty\n")
-itt_max_values = map(lambda m : pd_entry_time * m, itt_max_multiples)
+itt_max_values = [ pd_entry_time * m for m in itt_max_multiples ]
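# --- Illustrative aside, not part of the patch: Python 3's map() returns a
# lazy iterator that supports neither len() nor repeated iteration, so the
# comprehensions above produce real lists instead. Sketch:
itt_strs_example = "2 4 8".split()
itt_multiples_example = [int(x) for x in itt_strs_example]
assert len(itt_multiples_example) == 3                  # len() works on a list
assert [10 * m for m in itt_multiples_example] == [20, 40, 80]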
# Generate request addresses in the entire range, assume we start at 0
max_addr = mem_range.end
# List of compute units; one GPU can have multiple compute units
compute_units = []
-for i in xrange(n_cu):
+for i in range(n_cu):
compute_units.append(ComputeUnit(cu_id = i, perLaneTLB = per_lane,
num_SIMDs = options.simds_per_cu,
wfSize = options.wf_size,
options.outOfOrderDataDelivery))
wavefronts = []
vrfs = []
- for j in xrange(options.simds_per_cu):
- for k in xrange(shader.n_wf):
+ for j in range(options.simds_per_cu):
+ for k in range(shader.n_wf):
wavefronts.append(Wavefront(simdId = j, wf_slot_id = k,
wfSize = options.wf_size))
vrfs.append(VectorRegisterFile(simd_id=j,
future_cpu_list = []
# Initial CPUs to be used during fast-forwarding.
- for i in xrange(options.num_cpus):
+ for i in range(options.num_cpus):
cpu = CpuClass(cpu_id = i,
clk_domain = SrcClockDomain(
clock = options.CPUClock,
MainCpuClass = CpuClass
# CPs to be used throughout the simulation.
-for i in xrange(options.num_cp):
+for i in range(options.num_cp):
cp = MainCpuClass(cpu_id = options.num_cpus + i,
clk_domain = SrcClockDomain(
clock = options.CPUClock,
cp_list.append(cp)
# Main CPUs (to be used after fast-forwarding if fast-forwarding is specified).
-for i in xrange(options.num_cpus):
+for i in range(options.num_cpus):
cpu = MainCpuClass(cpu_id = i,
clk_domain = SrcClockDomain(
clock = options.CPUClock,
cp.workload = host_cpu.workload
if fast_forward:
- for i in xrange(len(future_cpu_list)):
+ for i in range(len(future_cpu_list)):
future_cpu_list[i].workload = cpu_list[i].workload
future_cpu_list[i].createThreads()
# List of CPUs that must be switched when moving between KVM and simulation
if fast_forward:
switch_cpu_list = \
- [(cpu_list[i], future_cpu_list[i]) for i in xrange(options.num_cpus)]
+ [(cpu_list[i], future_cpu_list[i]) for i in range(options.num_cpus)]
# Full list of processing cores in the system. Note that
# dispatcher is also added to cpu_list although it is
have_kvm_support = 'BaseKvmCPU' in globals()
if have_kvm_support and buildEnv['TARGET_ISA'] == "x86":
system.vm = KvmVM()
- for i in xrange(len(host_cpu.workload)):
+ for i in range(len(host_cpu.workload)):
host_cpu.workload[i].useArchPT = True
host_cpu.workload[i].kvmInSE = True
else:
gpu_port_idx = gpu_port_idx - options.num_cp * 2
wavefront_size = options.wf_size
-for i in xrange(n_cu):
+for i in range(n_cu):
# The pipeline issues wavefront_size number of uncoalesced requests
# in one GPU issue cycle. Hence wavefront_size mem ports.
- for j in xrange(wavefront_size):
+ for j in range(wavefront_size):
system.cpu[shader_idx].CUs[i].memory_port[j] = \
system.ruby._cpu_ports[gpu_port_idx].slave[j]
gpu_port_idx += 1
-for i in xrange(n_cu):
+for i in range(n_cu):
if i > 0 and not i % options.cu_per_sqc:
print("incrementing idx on ", i)
gpu_port_idx += 1
gpu_port_idx = gpu_port_idx + 1
# attach CP ports to Ruby
-for i in xrange(options.num_cp):
+for i in range(options.num_cp):
system.cpu[cp_idx].createInterruptController()
system.cpu[cp_idx].dcache_port = \
system.ruby._cpu_ports[gpu_port_idx + i * 2].slave
# For now, assign all the CPUs to the same clock domain
test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
- for i in xrange(np)]
+ for i in range(np)]
if CpuConfig.is_kvm_cpu(TestCPUClass) or CpuConfig.is_kvm_cpu(FutureClass):
test_sys.kvm_vm = KvmVM()
if np > 1:
fatal("SimPoint generation not supported with more than one CPUs")
- for i in xrange(np):
+ for i in range(np):
if options.simpoint_profile:
test_sys.cpu[i].addSimPointProbe(options.simpoint_interval)
if options.checker:
# memory bus
drive_sys.mem_ctrls = [DriveMemClass(range = r)
for r in drive_sys.mem_ranges]
- for i in xrange(len(drive_sys.mem_ctrls)):
+ for i in range(len(drive_sys.mem_ctrls)):
drive_sys.mem_ctrls[i].port = drive_sys.membus.master
drive_sys.init_param = options.init_param
#
Ruby.define_options(parser)
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile(open(os.path.join(config_root, "common", "Options.py")).read(),
+ os.path.join(config_root, "common", "Options.py"), 'exec'))
(options, args) = parser.parse_args()
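# --- Illustrative aside, not part of the patch: Python 3 drops execfile(),
# and exec(compile(open(path).read(), path, 'exec')) is the usual
# replacement; passing the real filename to compile() keeps tracebacks
# pointing at Options.py. A hypothetical helper capturing the idiom:
def execfile_py3(path, globals_dict=None):
    with open(path) as f:
        code = compile(f.read(), path, 'exec')
    exec(code, globals_dict if globals_dict is not None else {})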
inj_vnet=options.inj_vnet,
precision=options.precision,
num_dest=options.num_dirs) \
- for i in xrange(options.num_cpus) ]
+ for i in range(options.num_cpus) ]
# create the desired simulated system
system = System(cpu = cpus, mem_ranges = [AddrRange(options.mem_size)])
system.clk_domain = SrcClockDomain(clock=clk, voltage_domain=vd)
# add traffic generators to the system
system.tgen = [TrafficGen(config_file=options.tgen_cfg_file) for i in
- xrange(options.num_tgen)]
+ range(options.num_tgen)]
# Config memory system with given HMC arch
MemConfig.config_mem(options, system)
# Connect the traffic generators
if options.arch == "distributed":
- for i in xrange(options.num_tgen):
+ for i in range(options.num_tgen):
system.tgen[i].port = system.membus.slave
# connect the system port even if it is not used in this example
system.system_port = system.membus.slave
if options.arch == "mixed":
- for i in xrange(int(options.num_tgen/2)):
+ for i in range(int(options.num_tgen/2)):
system.tgen[i].port = system.membus.slave
hh = system.hmc_host
if options.enable_global_monitor:
system.system_port = system.membus.slave
if options.arch == "same":
hh = system.hmc_host
- for i in xrange(options.num_links_controllers):
+ for i in range(options.num_links_controllers):
if options.enable_global_monitor:
system.tgen[i].port = hh.lmonitor[i].slave
else:
# The levels are indexing backwards through the list
ntesters = testerspec[len(cachespec) - level]
- testers = [proto_tester() for i in xrange(ntesters)]
+ testers = [proto_tester() for i in range(ntesters)]
checkers = [MemCheckerMonitor(memchecker = system.memchecker) \
- for i in xrange(ntesters)]
+ for i in range(ntesters)]
if ntesters:
subsys.tester = testers
subsys.checkers = checkers
# Create and connect the caches, both the ones fanning out
# to create the tree, and the ones used to connect testers
# on this level
- tree_caches = [prototypes[0]() for i in xrange(ncaches[0])]
- tester_caches = [proto_l1() for i in xrange(ntesters)]
+ tree_caches = [prototypes[0]() for i in range(ncaches[0])]
+ tester_caches = [proto_l1() for i in range(ntesters)]
subsys.cache = tester_caches + tree_caches
for cache in tree_caches:
limit = (len(cachespec) - level + 1) * 100000000
testers = [proto_tester(interval = 10 * (level * level + 1),
progress_check = limit) \
- for i in xrange(ntesters)]
+ for i in range(ntesters)]
if ntesters:
subsys.tester = testers
# Create and connect the caches, both the ones fanning out
# to create the tree, and the ones used to connect testers
# on this level
- tree_caches = [prototypes[0]() for i in xrange(ncaches[0])]
- tester_caches = [proto_l1() for i in xrange(ntesters)]
+ tree_caches = [prototypes[0]() for i in range(ncaches[0])]
+ tester_caches = [proto_l1() for i in range(ntesters)]
subsys.cache = tester_caches + tree_caches
for cache in tree_caches:
# Assume that unnamed ports are unconnected
peers = self.config.get_port_peers(object_name, port_name)
- for index, peer in zip(xrange(0, len(peers)), peers):
+ for index, peer in zip(range(0, len(peers)), peers):
parsed_ports.append((
PortConnection(object_name, port.name, index),
PortConnection.from_string(peer)))
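# --- Illustrative aside, not part of the patch: zip(range(0, len(seq)), seq)
# is equivalent to enumerate(seq), which would be the more idiomatic
# Python 3 spelling of the loop above:
peers_example = ['a.port', 'b.port']
assert list(zip(range(0, len(peers_example)), peers_example)) \
    == list(enumerate(peers_example))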
#
Ruby.define_options(parser)
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile( \
+ open(os.path.join(config_root, "common", "Options.py")).read(), \
+ os.path.join(config_root, "common", "Options.py"), 'exec'))
(options, args) = parser.parse_args()
assert(options.num_compute_units >= 1)
n_cu = options.num_compute_units
-options.num_sqc = int((n_cu + options.cu_per_sqc - 1) / options.cu_per_sqc)
+options.num_sqc = int((n_cu + options.cu_per_sqc - 1) // options.cu_per_sqc)
if args:
print("Error: script doesn't take any positional arguments")
#
Ruby.define_options(parser)
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile( \
+ open(os.path.join(config_root, "common", "Options.py")).read(), \
+ os.path.join(config_root, "common", "Options.py"), 'exec'))
(options, args) = parser.parse_args()
percent_uncacheable = 0,
progress_interval = options.progress,
suppress_func_warnings = options.suppress_func_warnings) \
- for i in xrange(options.num_cpus) ]
+ for i in range(options.num_cpus) ]
system = System(cpu = cpus,
clk_domain = SrcClockDomain(clock = options.sys_clock),
progress_interval = options.progress,
suppress_func_warnings =
not options.suppress_func_warnings) \
- for i in xrange(options.num_dmas) ]
+ for i in range(options.num_dmas) ]
system.dma_devices = dmas
else:
dmas = []
#
Ruby.define_options(parser)
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile( \
+ open(os.path.join(config_root, "common", "Options.py")).read(), \
+ os.path.join(config_root, "common", "Options.py"), 'exec'))
(options, args) = parser.parse_args()
fatal("You cannot use SMT with multiple CPUs!")
np = options.num_cpus
-system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
+system = System(cpu = [CPUClass(cpu_id=i) for i in range(np)],
mem_mode = test_mem_mode,
mem_ranges = [AddrRange(options.mem_size)],
cache_line_size = options.cacheline_size)
if np > 1:
fatal("SimPoint generation not supported with more than one CPUs")
-for i in xrange(np):
+for i in range(np):
if options.smt:
system.cpu[i].workload = multiprocesses
elif len(multiprocesses) == 1:
system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = system.voltage_domain)
- for i in xrange(np):
+ for i in range(np):
ruby_port = system.ruby._cpu_ports[i]
# Create the interrupt controller and connect its ports to Ruby
cpu_sequencers = []
cpuCluster = None
cpuCluster = Cluster(name="CPU Cluster", extBW = 8, intBW=8) # 16 GB/s
- for i in xrange((options.num_cpus + 1) / 2):
+ for i in range((options.num_cpus + 1) // 2):
cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system)
block_size_bits = int(math.log(options.cacheline_size, 2))
numa_bit = block_size_bits + dir_bits - 1
- for i in xrange(options.num_dirs):
+ for i in range(options.num_dirs):
dir_ranges = []
for r in system.mem_ranges:
addr_range = m5.objects.AddrRange(r.start, size = r.size(),
# For an odd number of CPUs, still create the right number of controllers
cpuCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
- for i in xrange((options.num_cpus + 1) / 2):
+ for i in range((options.num_cpus + 1) // 2):
cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system)
gpuCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
- for i in xrange(options.num_compute_units):
+ for i in range(options.num_compute_units):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
number_of_TBEs = 2560) # max outstanding requests
gpuCluster.add(tcp_cntrl)
- for i in xrange(options.num_sqc):
+ for i in range(options.num_sqc):
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system)
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
- for i in xrange(options.num_cp):
+ for i in range(options.num_cp):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
number_of_TBEs = 2560) # max outstanding requests
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
- for i in xrange(options.num_tccs):
+ for i in range(options.num_tccs):
tcc_cntrl = TCCCntrl(TCC_select_num_bits = TCC_bits,
number_of_TBEs = options.num_compute_units * 2560)
mainCluster = Cluster(intBW=crossbar_bw)
else:
mainCluster = Cluster(intBW=8) # 16 GB/s
- for i in xrange(options.num_dirs):
+ for i in range(options.num_dirs):
dir_cntrl = DirCntrl(noTCCdir = True, TCC_select_num_bits = TCC_bits)
dir_cntrl.create(options, ruby_system, system)
cpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
else:
cpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
- for i in xrange((options.num_cpus + 1) / 2):
+ for i in range((options.num_cpus + 1) // 2):
cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system)
gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
else:
gpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
- for i in xrange(options.num_compute_units):
+ for i in range(options.num_compute_units):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1,
gpuCluster.add(tcp_cntrl)
- for i in xrange(options.num_sqc):
+ for i in range(options.num_sqc):
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system)
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
- for i in xrange(options.num_cp):
+ for i in range(options.num_cp):
tcp_ID = options.num_compute_units + i
sqc_ID = options.num_sqc + i
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
- for i in xrange(options.num_tccs):
+ for i in range(options.num_tccs):
tcc_cntrl = TCCCntrl(l2_response_latency = options.TCC_latency)
tcc_cntrl.create(options, ruby_system, system)
# Clusters
    crossbar_bw = 16 * options.num_compute_units # Assuming a 2 GHz clock
mainCluster = Cluster(intBW = crossbar_bw)
- for i in xrange(options.num_dirs):
+ for i in range(options.num_dirs):
dir_cntrl = DirCntrl(noTCCdir=True,TCC_select_num_bits = TCC_bits)
dir_cntrl.create(options, ruby_system, system)
mainCluster.add(dir_cntrl)
cpuCluster = Cluster(extBW = crossbar_bw, intBW=crossbar_bw)
- for i in xrange((options.num_cpus + 1) / 2):
+ for i in range((options.num_cpus + 1) // 2):
cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system)
cpuCluster.add(cp_cntrl)
gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
- for i in xrange(options.num_compute_units):
+ for i in range(options.num_compute_units):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1,
gpuCluster.add(tcp_cntrl)
- for i in xrange(options.num_sqc):
+ for i in range(options.num_sqc):
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system)
# Because of wire buffers, num_tccs must equal num_tccdirs
numa_bit = 6
- for i in xrange(options.num_tccs):
+ for i in range(options.num_tccs):
tcc_cntrl = TCCCntrl()
tcc_cntrl.create(options, ruby_system, system)
# For an odd number of CPUs, still create the right number of controllers
    crossbar_bw = 16 * options.num_compute_units # Assuming a 2 GHz clock
cpuCluster = Cluster(extBW = (crossbar_bw), intBW=crossbar_bw)
- for i in xrange((options.num_cpus + 1) / 2):
+ for i in range((options.num_cpus + 1) // 2):
cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system)
cpuCluster.add(rb_cntrl)
gpuCluster = Cluster(extBW = (crossbar_bw), intBW = crossbar_bw)
- for i in xrange(options.num_compute_units):
+ for i in range(options.num_compute_units):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
issue_latency = 1,
gpuCluster.add(tcp_cntrl)
- for i in xrange(options.num_sqc):
+ for i in range(options.num_sqc):
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system)
numa_bit = 6
- for i in xrange(options.num_tccs):
+ for i in range(options.num_tccs):
tcc_cntrl = TCCCntrl()
tcc_cntrl.create(options, ruby_system, system)
# controller constructors are called before the network constructor
#
- for i in xrange(options.num_cpus):
+ for i in range(options.num_cpus):
#
# First create the Ruby objects associated with this cpu
# Only one cache exists for this protocol, so by default use the L1D
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
- for i in xrange(options.num_clusters):
- for j in xrange(num_cpus_per_cluster):
+ for i in range(options.num_clusters):
+ for j in range(num_cpus_per_cluster):
#
# First create the Ruby objects associated with this cpu
#
l1_cntrl.responseFromL2.slave = ruby_system.network.master
- for j in xrange(num_l2caches_per_cluster):
+ for j in range(num_l2caches_per_cluster):
l2_cache = L2Cache(size = options.l2_size,
assoc = options.l2_assoc,
start_index_bit = l2_index_start)
l2_bits = int(math.log(options.num_l2caches, 2))
block_size_bits = int(math.log(options.cacheline_size, 2))
- for i in xrange(options.num_cpus):
+ for i in range(options.num_cpus):
#
# First create the Ruby objects associated with this cpu
#
l2_index_start = block_size_bits + l2_bits
- for i in xrange(options.num_l2caches):
+ for i in range(options.num_l2caches):
#
# First create the Ruby objects associated with this cpu
#
#
block_size_bits = int(math.log(options.cacheline_size, 2))
- for i in xrange(options.num_cpus):
+ for i in range(options.num_cpus):
#
# First create the Ruby objects associated with this cpu
# Only one cache exists for this protocol, so by default use the L1D
block_size_bits = int(math.log(options.cacheline_size, 2))
numa_bit = block_size_bits + dir_bits - 1
- for i in xrange(options.num_dirs):
+ for i in range(options.num_dirs):
dir_ranges = []
for r in system.mem_ranges:
addr_range = m5.objects.AddrRange(r.start, size = r.size(),
# For an odd number of CPUs, still create the right number of controllers
cpuCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
- for i in xrange((options.num_cpus + 1) / 2):
+ for i in range((options.num_cpus + 1) // 2):
cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system)
l2_bits = int(math.log(options.num_l2caches, 2))
block_size_bits = int(math.log(options.cacheline_size, 2))
- for i in xrange(options.num_cpus):
+ for i in range(options.num_cpus):
#
# First create the Ruby objects associated with this cpu
#
l2_index_start = block_size_bits + l2_bits
- for i in xrange(options.num_l2caches):
+ for i in range(options.num_l2caches):
#
# First create the Ruby objects associated with this cpu
#
l2_bits = int(math.log(options.num_l2caches, 2))
block_size_bits = int(math.log(options.cacheline_size, 2))
- for i in xrange(options.num_cpus):
+ for i in range(options.num_cpus):
#
# First create the Ruby objects associated with this cpu
#
l2_index_start = block_size_bits + l2_bits
- for i in xrange(options.num_l2caches):
+ for i in range(options.num_l2caches):
#
# First create the Ruby objects associated with this cpu
#
#
block_size_bits = int(math.log(options.cacheline_size, 2))
- for i in xrange(options.num_cpus):
+ for i in range(options.num_cpus):
#
# First create the Ruby objects associated with this cpu
#
def create_directories(options, bootmem, ruby_system, system):
dir_cntrl_nodes = []
- for i in xrange(options.num_dirs):
+ for i in range(options.num_dirs):
dir_cntrl = Directory_Controller()
dir_cntrl.version = i
dir_cntrl.directory = RubyDirectoryMemory()
all_l1s = []
all_l1buses = []
if options.timing:
- clusters = [ Cluster() for i in xrange(options.numclusters)]
- for j in xrange(options.numclusters):
+ clusters = [ Cluster() for i in range(options.numclusters)]
+ for j in range(options.numclusters):
clusters[j].id = j
for cluster in clusters:
cluster.clusterbus = L2XBar(clock=busFrequency)
all_l1buses += [cluster.clusterbus]
cluster.cpus = [TimingSimpleCPU(cpu_id = i + cluster.id,
clock=options.frequency)
- for i in xrange(cpusPerCluster)]
+ for i in range(cpusPerCluster)]
all_cpus += cluster.cpus
cluster.l1 = L1(size=options.l1size, assoc = 4)
all_l1s += [cluster.l1]
elif options.detailed:
- clusters = [ Cluster() for i in xrange(options.numclusters)]
- for j in xrange(options.numclusters):
+ clusters = [ Cluster() for i in range(options.numclusters)]
+ for j in range(options.numclusters):
clusters[j].id = j
for cluster in clusters:
cluster.clusterbus = L2XBar(clock=busFrequency)
all_l1buses += [cluster.clusterbus]
cluster.cpus = [DerivO3CPU(cpu_id = i + cluster.id,
clock=options.frequency)
- for i in xrange(cpusPerCluster)]
+ for i in range(cpusPerCluster)]
all_cpus += cluster.cpus
cluster.l1 = L1(size=options.l1size, assoc = 4)
all_l1s += [cluster.l1]
else:
- clusters = [ Cluster() for i in xrange(options.numclusters)]
- for j in xrange(options.numclusters):
+ clusters = [ Cluster() for i in range(options.numclusters)]
+ for j in range(options.numclusters):
clusters[j].id = j
for cluster in clusters:
cluster.clusterbus = L2XBar(clock=busFrequency)
all_l1buses += [cluster.clusterbus]
cluster.cpus = [AtomicSimpleCPU(cpu_id = i + cluster.id,
clock=options.frequency)
- for i in xrange(cpusPerCluster)]
+ for i in range(cpusPerCluster)]
all_cpus += cluster.cpus
cluster.l1 = L1(size=options.l1size, assoc = 4)
all_l1s += [cluster.l1]
if options.timing:
cpus = [TimingSimpleCPU(cpu_id = i,
clock=options.frequency)
- for i in xrange(options.numcpus)]
+ for i in range(options.numcpus)]
elif options.detailed:
cpus = [DerivO3CPU(cpu_id = i,
clock=options.frequency)
- for i in xrange(options.numcpus)]
+ for i in range(options.numcpus)]
else:
cpus = [AtomicSimpleCPU(cpu_id = i,
clock=options.frequency)
- for i in xrange(options.numcpus)]
+ for i in range(options.numcpus)]
# ----------------------
# Create a system, and add system wide objects
int_links = []
# East output to West input links (weight = 1)
- for row in xrange(num_rows):
- for col in xrange(num_columns):
+ for row in range(num_rows):
+ for col in range(num_columns):
if (col + 1 < num_columns):
east_out = col + (row * num_columns)
west_in = (col + 1) + (row * num_columns)
link_count += 1
# West output to East input links (weight = 1)
- for row in xrange(num_rows):
- for col in xrange(num_columns):
+ for row in range(num_rows):
+ for col in range(num_columns):
if (col + 1 < num_columns):
east_in = col + (row * num_columns)
west_out = (col + 1) + (row * num_columns)
link_count += 1
# North output to South input links (weight = 2)
- for col in xrange(num_columns):
- for row in xrange(num_rows):
+ for col in range(num_columns):
+ for row in range(num_rows):
if (row + 1 < num_rows):
north_out = col + (row * num_columns)
south_in = col + ((row + 1) * num_columns)
link_count += 1
# South output to North input links (weight = 2)
- for col in xrange(num_columns):
- for row in xrange(num_rows):
+ for col in range(num_columns):
+ for row in range(num_rows):
if (row + 1 < num_rows):
north_in = col + (row * num_columns)
south_out = col + ((row + 1) * num_columns)
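# --- Illustrative aside, not part of the patch: the mesh loops above
# linearize a (row, col) router coordinate as col + row * num_columns, so
# east/west neighbours differ by 1 and north/south neighbours by
# num_columns. Sketch with an assumed 4-column mesh:
num_columns_example = 4
def router_id_example(row, col):
    return col + (row * num_columns_example)
assert router_id_example(1, 2) - router_id_example(1, 1) == 1
assert router_id_example(2, 1) - router_id_example(1, 1) == num_columns_example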
# distributed across the network.
network_nodes = []
remainder_nodes = []
- for node_index in xrange(len(nodes)):
+ for node_index in range(len(nodes)):
if node_index < (len(nodes) - remainder):
network_nodes.append(nodes[node_index])
else:
int_links = []
# East output to West input links (weight = 1)
- for row in xrange(num_rows):
- for col in xrange(num_columns):
+ for row in range(num_rows):
+ for col in range(num_columns):
if (col + 1 < num_columns):
east_out = col + (row * num_columns)
west_in = (col + 1) + (row * num_columns)
link_count += 1
# West output to East input links (weight = 1)
- for row in xrange(num_rows):
- for col in xrange(num_columns):
+ for row in range(num_rows):
+ for col in range(num_columns):
if (col + 1 < num_columns):
east_in = col + (row * num_columns)
west_out = (col + 1) + (row * num_columns)
link_count += 1
# North output to South input links (weight = 2)
- for col in xrange(num_columns):
- for row in xrange(num_rows):
+ for col in range(num_columns):
+ for row in range(num_rows):
if (row + 1 < num_rows):
north_out = col + (row * num_columns)
south_in = col + ((row + 1) * num_columns)
link_count += 1
# South output to North input links (weight = 2)
- for col in xrange(num_columns):
- for row in xrange(num_rows):
+ for col in range(num_columns):
+ for row in range(num_rows):
if (row + 1 < num_rows):
north_in = col + (row * num_columns)
south_out = col + ((row + 1) * num_columns)
# distributed across the network.
network_nodes = []
remainder_nodes = []
- for node_index in xrange(len(nodes)):
+ for node_index in range(len(nodes)):
if node_index < (len(nodes) - remainder):
network_nodes.append(nodes[node_index])
else:
int_links = []
# East output to West input links (weight = 2)
- for row in xrange(num_rows):
- for col in xrange(num_columns):
+ for row in range(num_rows):
+ for col in range(num_columns):
if (col + 1 < num_columns):
east_out = col + (row * num_columns)
west_in = (col + 1) + (row * num_columns)
link_count += 1
# West output to East input links (weight = 1)
- for row in xrange(num_rows):
- for col in xrange(num_columns):
+ for row in range(num_rows):
+ for col in range(num_columns):
if (col + 1 < num_columns):
east_in = col + (row * num_columns)
west_out = (col + 1) + (row * num_columns)
# North output to South input links (weight = 2)
- for col in xrange(num_columns):
- for row in xrange(num_rows):
+ for col in range(num_columns):
+ for row in range(num_rows):
if (row + 1 < num_rows):
north_out = col + (row * num_columns)
south_in = col + ((row + 1) * num_columns)
link_count += 1
# South output to North input links (weight = 2)
- for col in xrange(num_columns):
- for row in xrange(num_rows):
+ for col in range(num_columns):
+ for row in range(num_rows):
if (row + 1 < num_rows):
north_in = col + (row * num_columns)
south_out = col + ((row + 1) * num_columns)
link_count = len(nodes)
int_links = []
- for i in xrange(len(nodes)):
- for j in xrange(len(nodes)):
+ for i in range(len(nodes)):
+ for j in range(len(nodes)):
if (i != j):
link_count += 1
int_links.append(IntLink(link_id=link_count,