configs: Fix Python 3 iterator and exec compatibility issues
author    Andreas Sandberg <andreas.sandberg@arm.com>
          Sat, 26 Jan 2019 10:57:44 +0000 (10:57 +0000)
committer Andreas Sandberg <andreas.sandberg@arm.com>
          Tue, 26 Feb 2019 10:28:00 +0000 (10:28 +0000)
Python 2.7 returned lists from operations such as map and range; in
Python 3 these return iterators instead. To make the configs Python 3
compliant, add explicit conversions from iterators to lists where
needed, replace xrange with range, and update exec statements to the
Python 3 function syntax.

This change doesn't fix import paths since that might require us to
restructure the configs slightly.

Change-Id: Idcea8482b286779fc98b4e144ca8f54069c08024
Signed-off-by: Andreas Sandberg <andreas.sandberg@arm.com>
Reviewed-on: https://gem5-review.googlesource.com/c/16002
Reviewed-by: Gabe Black <gabeblack@google.com>
48 files changed:
configs/common/BPConfig.py
configs/common/Benchmarks.py
configs/common/CacheConfig.py
configs/common/CpuConfig.py
configs/common/FSConfig.py
configs/common/GPUTLBConfig.py
configs/common/HMC.py
configs/common/MemConfig.py
configs/common/Options.py
configs/common/PlatformConfig.py
configs/common/Simulation.py
configs/common/SysPaths.py
configs/common/cores/arm/HPI.py
configs/common/cpu2000.py
configs/dist/sw.py
configs/dram/lat_mem_rd.py
configs/dram/low_power_sweep.py
configs/example/apu_se.py
configs/example/fs.py
configs/example/garnet_synth_traffic.py
configs/example/hmctest.py
configs/example/memcheck.py
configs/example/memtest.py
configs/example/read_config.py
configs/example/ruby_gpu_random_test.py
configs/example/ruby_mem_test.py
configs/example/ruby_random_test.py
configs/example/se.py
configs/ruby/AMD_Base_Constructor.py
configs/ruby/GPU_RfO.py
configs/ruby/GPU_VIPER.py
configs/ruby/GPU_VIPER_Baseline.py
configs/ruby/GPU_VIPER_Region.py
configs/ruby/Garnet_standalone.py
configs/ruby/MESI_Three_Level.py
configs/ruby/MESI_Two_Level.py
configs/ruby/MI_example.py
configs/ruby/MOESI_AMD_Base.py
configs/ruby/MOESI_CMP_directory.py
configs/ruby/MOESI_CMP_token.py
configs/ruby/MOESI_hammer.py
configs/ruby/Ruby.py
configs/splash2/cluster.py
configs/splash2/run.py
configs/topologies/MeshDirCorners_XY.py
configs/topologies/Mesh_XY.py
configs/topologies/Mesh_westfirst.py
configs/topologies/Pt2Pt.py

index 5e5b92f8a7c2b1ec1a1d8abadcac18ec971498d9..c4e40e79168f0fe9253091cac31794f6b45b8e98 100644 (file)
@@ -79,7 +79,7 @@ def print_bp_list():
 
 def bp_names():
     """Return a list of valid Branch Predictor names."""
-    return _bp_classes.keys()
+    return list(_bp_classes.keys())
 
 # Add all BPs in the object hierarchy.
 for name, cls in inspect.getmembers(m5.objects, is_bp_class):
index b7d10b563411e3543f25912454b313641d647619..f7d1b4d1e41e2ea55b1000d20543b4b4595d6f0c 100644 (file)
@@ -141,6 +141,6 @@ Benchmarks = {
                             None, 'android-ics')]
 }
 
-benchs = Benchmarks.keys()
+benchs = list(Benchmarks.keys())
 benchs.sort()
 DefinedBenchmarks = ", ".join(benchs)
index 3fa3676b08dbf1f6a9f10a9395f4b6b833f97f3a..368356f069f43082b882c5b891e9b9b49e15ef45 100644 (file)
@@ -97,7 +97,7 @@ def config_cache(options, system):
     if options.memchecker:
         system.memchecker = MemChecker()
 
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         if options.caches:
             icache = icache_class(size=options.l1i_size,
                                   assoc=options.l1i_assoc)
index 1524b16466bbf28d55bf2ae2cfcb842b1dfa69f8..80e3766ef87531f41a993d3b568691923509246f 100644 (file)
@@ -99,7 +99,7 @@ def print_cpu_list():
 
 def cpu_names():
     """Return a list of valid CPU names."""
-    return _cpu_classes.keys()
+    return list(_cpu_classes.keys())
 
 def config_etrace(cpu_cls, cpu_list, options):
     if issubclass(cpu_cls, m5.objects.DerivO3CPU):
index fc21519ab738884f76b87674182f822f5e38a2d1..13c29ef1c108a5309946bf921c6a409137540764 100644 (file)
@@ -548,7 +548,7 @@ def makeX86System(mem_mode, numCPUs=1, mdesc=None, self=None, Ruby=False):
     # Set up the Intel MP table
     base_entries = []
     ext_entries = []
-    for i in xrange(numCPUs):
+    for i in range(numCPUs):
         bp = X86IntelMPProcessor(
                 local_apic_id = i,
                 local_apic_version = 0x14,
index 3e47f1d3b21e7b974b751e740624eee63650ffb0..80aad0b789c1f0885b54674afd505312c7dedc9e 100644 (file)
@@ -69,7 +69,7 @@ def Coalescer_constructor(level):
 def create_TLB_Coalescer(options, my_level, my_index, TLB_name, Coalescer_name):
     # arguments: options, TLB level, number of private structures for this Level,
     # TLB name and  Coalescer name
-    for i in xrange(my_index):
+    for i in range(my_index):
         TLB_name.append(eval(TLB_constructor(my_level)))
         Coalescer_name.append(eval(Coalescer_constructor(my_level)))
 
@@ -109,7 +109,7 @@ def config_tlb_hierarchy(options, system, shader_idx):
     # Create the hiearchy
     # Call the appropriate constructors and add objects to the system
 
-    for i in xrange(len(TLB_hierarchy)):
+    for i in range(len(TLB_hierarchy)):
         hierarchy_level = TLB_hierarchy[i]
         level = i+1
         for TLB_type in hierarchy_level:
@@ -143,7 +143,7 @@ def config_tlb_hierarchy(options, system, shader_idx):
     # Each TLB is connected with its Coalescer through a single port.
     # There is a one-to-one mapping of TLBs to Coalescers at a given level
     # This won't be modified no matter what the hierarchy looks like.
-    for i in xrange(len(TLB_hierarchy)):
+    for i in range(len(TLB_hierarchy)):
         hierarchy_level = TLB_hierarchy[i]
         level = i+1
         for TLB_type in hierarchy_level:
@@ -159,7 +159,7 @@ def config_tlb_hierarchy(options, system, shader_idx):
         name = TLB_type['name']
         num_TLBs = TLB_type['width']
         if name == 'l1':     # L1 D-TLBs
-            tlb_per_cu = num_TLBs / n_cu
+            tlb_per_cu = num_TLBs // n_cu
             for cu_idx in range(n_cu):
                 if tlb_per_cu:
                     for tlb in range(tlb_per_cu):
index 10d8a7185da2fb4e7ed7c7383ec767cec3559909..61e521d527b54669b28f1df3b393d6d7c2a305f5 100644 (file)
@@ -337,16 +337,16 @@ def config_hmc_host_ctrl(opt, system):
                      num_lanes=opt.num_lanes_per_link,
                      link_speed=opt.serial_link_speed,
                      delay=opt.total_ctrl_latency) for i in
-          xrange(opt.num_serial_links)]
+          range(opt.num_serial_links)]
     system.hmc_host.seriallink = sl
 
     # enable global monitor
     if opt.enable_global_monitor:
         system.hmc_host.lmonitor = [CommMonitor() for i in
-                                    xrange(opt.num_serial_links)]
+                                    range(opt.num_serial_links)]
 
     # set the clock frequency for serial link
-    for i in xrange(opt.num_serial_links):
+    for i in range(opt.num_serial_links):
         clk = opt.link_controller_frequency
         vd = VoltageDomain(voltage='1V')
         scd = SrcClockDomain(clock=clk, voltage_domain=vd)
@@ -357,7 +357,7 @@ def config_hmc_host_ctrl(opt, system):
     hh = system.hmc_host
     if opt.arch == "distributed":
         mb = system.membus
-        for i in xrange(opt.num_links_controllers):
+        for i in range(opt.num_links_controllers):
             if opt.enable_global_monitor:
                 mb.master = hh.lmonitor[i].slave
                 hh.lmonitor[i].master = hh.seriallink[i].slave
@@ -375,7 +375,7 @@ def config_hmc_host_ctrl(opt, system):
             mb.master = hh.seriallink[1].slave
 
     if opt.arch == "same":
-        for i in xrange(opt.num_links_controllers):
+        for i in range(opt.num_links_controllers):
             if opt.enable_global_monitor:
                 hh.lmonitor[i].master = hh.seriallink[i].slave
 
@@ -395,7 +395,7 @@ def config_hmc_dev(opt, system, hmc_host):
     system.mem_ranges = addr_ranges_vaults
 
     if opt.enable_link_monitor:
-        lm = [CommMonitor() for i in xrange(opt.num_links_controllers)]
+        lm = [CommMonitor() for i in range(opt.num_links_controllers)]
         system.hmc_dev.lmonitor = lm
 
     # 4 HMC Crossbars located in its logic-base (LoB)
@@ -403,17 +403,17 @@ def config_hmc_dev(opt, system, hmc_host):
                           frontend_latency=opt.xbar_frontend_latency,
                           forward_latency=opt.xbar_forward_latency,
                           response_latency=opt.xbar_response_latency) for i in
-          xrange(opt.number_mem_crossbar)]
+          range(opt.number_mem_crossbar)]
     system.hmc_dev.xbar = xb
 
-    for i in xrange(opt.number_mem_crossbar):
+    for i in range(opt.number_mem_crossbar):
         clk = opt.xbar_frequency
         vd = VoltageDomain(voltage='1V')
         scd = SrcClockDomain(clock=clk, voltage_domain=vd)
         system.hmc_dev.xbar[i].clk_domain = scd
 
     # Attach 4 serial link to 4 crossbar/s
-    for i in xrange(opt.num_serial_links):
+    for i in range(opt.num_serial_links):
         if opt.enable_link_monitor:
             system.hmc_host.seriallink[i].master = \
                 system.hmc_dev.lmonitor[i].slave
@@ -429,7 +429,7 @@ def config_hmc_dev(opt, system, hmc_host):
         # create a list of buffers
         system.hmc_dev.buffers = [Bridge(req_size=opt.xbar_buffer_size_req,
                                          resp_size=opt.xbar_buffer_size_resp)
-                                  for i in xrange(numx*(opt.mem_chunk-1))]
+                                  for i in range(numx*(opt.mem_chunk-1))]
 
         # Buffer iterator
         it = iter(range(len(system.hmc_dev.buffers)))
index 36035800f75ea36a206335c050782c8873fa0ce8..b6e6663f94d09a8a75806f3357a05380506da08a 100644 (file)
@@ -86,7 +86,7 @@ def print_mem_list():
 
 def mem_names():
     """Return a list of valid memory names."""
-    return _mem_classes.keys()
+    return list(_mem_classes.keys())
 
 # Add all memory controllers in the object hierarchy.
 for name, cls in inspect.getmembers(m5.objects, is_mem_class):
@@ -215,7 +215,7 @@ def config_mem(options, system):
     # array of controllers and set their parameters to match their
     # address mapping in the case of a DRAM
     for r in system.mem_ranges:
-        for i in xrange(nbr_mem_ctrls):
+        for i in range(nbr_mem_ctrls):
             mem_ctrl = create_mem_ctrl(cls, r, i, nbr_mem_ctrls, intlv_bits,
                                        intlv_size)
             # Set the number of ranks based on the command-line
@@ -233,7 +233,7 @@ def config_mem(options, system):
     subsystem.mem_ctrls = mem_ctrls
 
     # Connect the controllers to the membus
-    for i in xrange(len(subsystem.mem_ctrls)):
+    for i in range(len(subsystem.mem_ctrls)):
         if opt_mem_type == "HMC_2500_1x32":
             subsystem.mem_ctrls[i].port = xbar[i/4].master
             # Set memory device size. There is an independent controller for
index 7963013df89f266b691094ca43e403fb263d7206..7b231c7df91cb9a675b01768162d7eb74a97685c 100644 (file)
@@ -339,8 +339,9 @@ def addFSOptions(parser):
     # System options
     parser.add_option("--kernel", action="store", type="string")
     parser.add_option("--os-type", action="store", type="choice",
-            choices=os_types[buildEnv['TARGET_ISA']], default="linux",
-            help="Specifies type of OS to boot")
+                      choices=os_types[str(buildEnv['TARGET_ISA'])],
+                      default="linux",
+                      help="Specifies type of OS to boot")
     parser.add_option("--script", action="store", type="string")
     parser.add_option("--frame-capture", action="store_true",
             help="Stores changed frame buffers from the VNC server to compressed "\
index 306b7320d4b11f2cb58ba3d10917afe2c64329f9..ae55d1a164fceba29b8a902ba4ebe5c8db89d4e4 100644 (file)
@@ -103,7 +103,7 @@ def print_platform_list():
 
 def platform_names():
     """Return a list of valid Platform names."""
-    return _platform_classes.keys() + _platform_aliases.keys()
+    return list(_platform_classes.keys()) + list(_platform_aliases.keys())
 
 # Add all Platforms in the object hierarchy.
 for name, cls in inspect.getmembers(m5.objects, is_platform_class):
index 19bd962e8f5fb5f0155e0913c13b835db6177050..5b1ab01778301ea348c060e6985aa6a62961174d 100644 (file)
@@ -453,18 +453,18 @@ def run(options, root, testsys, cpu_class):
     switch_cpus = None
 
     if options.prog_interval:
-        for i in xrange(np):
+        for i in range(np):
             testsys.cpu[i].progress_interval = options.prog_interval
 
     if options.maxinsts:
-        for i in xrange(np):
+        for i in range(np):
             testsys.cpu[i].max_insts_any_thread = options.maxinsts
 
     if cpu_class:
         switch_cpus = [cpu_class(switched_out=True, cpu_id=(i))
-                       for i in xrange(np)]
+                       for i in range(np)]
 
-        for i in xrange(np):
+        for i in range(np):
             if options.fast_forward:
                 testsys.cpu[i].max_insts_any_thread = int(options.fast_forward)
             switch_cpus[i].system = testsys
@@ -489,7 +489,7 @@ def run(options, root, testsys, cpu_class):
             CpuConfig.config_etrace(cpu_class, switch_cpus, options)
 
         testsys.switch_cpus = switch_cpus
-        switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]
+        switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in range(np)]
 
     if options.repeat_switch:
         switch_class = getCPUClass(options.cpu_type)[0]
@@ -502,9 +502,9 @@ def run(options, root, testsys, cpu_class):
             sys.exit(1)
 
         repeat_switch_cpus = [switch_class(switched_out=True, \
-                                               cpu_id=(i)) for i in xrange(np)]
+                                               cpu_id=(i)) for i in range(np)]
 
-        for i in xrange(np):
+        for i in range(np):
             repeat_switch_cpus[i].system = testsys
             repeat_switch_cpus[i].workload = testsys.cpu[i].workload
             repeat_switch_cpus[i].clk_domain = testsys.cpu[i].clk_domain
@@ -520,18 +520,18 @@ def run(options, root, testsys, cpu_class):
 
         if cpu_class:
             repeat_switch_cpu_list = [(switch_cpus[i], repeat_switch_cpus[i])
-                                      for i in xrange(np)]
+                                      for i in range(np)]
         else:
             repeat_switch_cpu_list = [(testsys.cpu[i], repeat_switch_cpus[i])
-                                      for i in xrange(np)]
+                                      for i in range(np)]
 
     if options.standard_switch:
         switch_cpus = [TimingSimpleCPU(switched_out=True, cpu_id=(i))
-                       for i in xrange(np)]
+                       for i in range(np)]
         switch_cpus_1 = [DerivO3CPU(switched_out=True, cpu_id=(i))
-                        for i in xrange(np)]
+                        for i in range(np)]
 
-        for i in xrange(np):
+        for i in range(np):
             switch_cpus[i].system =  testsys
             switch_cpus_1[i].system =  testsys
             switch_cpus[i].workload = testsys.cpu[i].workload
@@ -572,8 +572,12 @@ def run(options, root, testsys, cpu_class):
 
         testsys.switch_cpus = switch_cpus
         testsys.switch_cpus_1 = switch_cpus_1
-        switch_cpu_list = [(testsys.cpu[i], switch_cpus[i]) for i in xrange(np)]
-        switch_cpu_list1 = [(switch_cpus[i], switch_cpus_1[i]) for i in xrange(np)]
+        switch_cpu_list = [
+            (testsys.cpu[i], switch_cpus[i]) for i in range(np)
+        ]
+        switch_cpu_list1 = [
+            (switch_cpus[i], switch_cpus_1[i]) for i in range(np)
+        ]
 
     # set the checkpoint in the cpu before m5.instantiate is called
     if options.take_checkpoints != None and \
@@ -581,7 +585,7 @@ def run(options, root, testsys, cpu_class):
         offset = int(options.take_checkpoints)
         # Set an instruction break point
         if options.simpoint:
-            for i in xrange(np):
+            for i in range(np):
                 if testsys.cpu[i].workload[0].simpoint == 0:
                     fatal('no simpoint for testsys.cpu[%d].workload[0]', i)
                 checkpoint_inst = int(testsys.cpu[i].workload[0].simpoint) + offset
@@ -592,7 +596,7 @@ def run(options, root, testsys, cpu_class):
             options.take_checkpoints = offset
             # Set all test cpus with the right number of instructions
             # for the upcoming simulation
-            for i in xrange(np):
+            for i in range(np):
                 testsys.cpu[i].max_insts_any_thread = offset
 
     if options.take_simpoint_checkpoints != None:
index 9a234ccece30315af8458fe780d35e9b70ce4ad0..17d5fb8640009a707d24f9bdda29e3229a8efb5a 100644 (file)
@@ -26,6 +26,8 @@
 #
 # Authors: Ali Saidi
 
+
+from six import string_types
 import os, sys
 
 config_path = os.path.dirname(os.path.abspath(__file__))
@@ -35,7 +37,7 @@ class PathSearchFunc(object):
     _sys_paths = None
 
     def __init__(self, subdirs, sys_paths=None):
-        if isinstance(subdirs, basestring):
+        if isinstance(subdirs, string_types):
             subdirs = [subdirs]
         self._subdir = os.path.join(*subdirs)
         if sys_paths:
@@ -55,16 +57,16 @@ class PathSearchFunc(object):
             paths = filter(os.path.isdir, paths)
 
             if not paths:
-                raise IOError, "Can't find a path to system files."
+                raise IOError("Can't find a path to system files.")
 
-            self._sys_paths = paths
+            self._sys_paths = list(paths)
 
         filepath = os.path.join(self._subdir, filename)
         paths = (os.path.join(p, filepath) for p in self._sys_paths)
         try:
             return next(p for p in paths if os.path.exists(p))
         except StopIteration:
-            raise IOError, "Can't find file '%s' on path." % filename
+            raise IOError("Can't find file '%s' on path." % filename)
 
 disk = PathSearchFunc('disks')
 binary = PathSearchFunc('binaries')
index 2efb7dfecd0ebb92f232019d608584dffff2f81f..d105790e3594636d7b1d633521698c0e37ea57c3 100644 (file)
@@ -177,7 +177,7 @@ def let(bindings, expr):
 
         defns = []
         # Then apply them to the produced new env
-        for i in xrange(0, len(bindings)):
+        for i in range(0, len(bindings)):
             name, binding_expr = bindings[i]
             defns.append(binding_expr(new_env))
 
index da87507d967e40c6c982282bb907cf725adb29f4..8143e45dec9e8dd00c5d5cf6a66a82f73c9ec7fa 100644 (file)
@@ -93,13 +93,13 @@ class Benchmark(object):
         try:
             func = getattr(self.__class__, input_set)
         except AttributeError:
-            raise AttributeError, \
-                  'The benchmark %s does not have the %s input set' % \
-                  (self.name, input_set)
+            raise AttributeError(
+                'The benchmark %s does not have the %s input set' % \
+                (self.name, input_set))
 
         executable = joinpath(spec_dist, 'binaries', isa, os, self.binary)
         if not isfile(executable):
-            raise AttributeError, '%s not found' % executable
+            raise AttributeError('%s not found' % executable)
         self.executable = executable
 
         # root of tree for input & output data files
@@ -113,7 +113,7 @@ class Benchmark(object):
         self.input_set = input_set
 
         if not isdir(inputs_dir):
-            raise AttributeError, '%s not found' % inputs_dir
+            raise AttributeError('%s not found' % inputs_dir)
 
         self.inputs_dir = [ inputs_dir ]
         if isdir(all_dir):
@@ -670,7 +670,7 @@ class vortex(Benchmark):
         elif (isa == 'sparc' or isa == 'sparc32'):
             self.endian = 'bendian'
         else:
-            raise AttributeError, "unknown ISA %s" % isa
+            raise AttributeError("unknown ISA %s" % isa)
 
         super(vortex, self).__init__(isa, os, input_set)
 
index e7f31c0f3c36def5b34345075c6626d196831047..8dca62fb4474c67322af8f7bbdc92f3d96cbfb3f 100644 (file)
@@ -57,7 +57,7 @@ def build_switch(options):
                                       sync_repeat = options.dist_sync_repeat,
                                       is_switch = True,
                                       num_nodes = options.dist_size)
-                       for i in xrange(options.dist_size)]
+                       for i in range(options.dist_size)]
 
     for (i, link) in enumerate(switch.portlink):
         link.int0 = switch.interface[i]
index dc80bd287a35c9a00bdb19cd2fbd3bad78d24049..a1aa77df41de4e709aa8871a3103c02679d40f44 100644 (file)
@@ -188,7 +188,7 @@ def create_trace(filename, max_addr, burst_size, itt):
     protolib.encodeMessage(proto_out, header)
 
     # create a list of every single address to touch
-    addrs = range(0, max_addr, burst_size)
+    addrs = list(range(0, max_addr, burst_size))
 
     import random
     random.shuffle(addrs)
index 2aa64906f3deb2c271c3a27b76373aafb0f9b47a..e9714a6dcdecb1b9b6eb4f9d1332d6c955c1a668 100644 (file)
@@ -166,11 +166,11 @@ pd_entry_time = (system.mem_ctrls[0].tRAS.value +
 
 # We sweep itt max using the multipliers specified by the user.
 itt_max_str = args.itt_list.strip().split()
-itt_max_multiples = map(lambda x : int(x), itt_max_str)
+itt_max_multiples = [ int(x) for x in itt_max_str ]
 if len(itt_max_multiples) == 0:
     fatal("String for itt-max-list detected empty\n")
 
-itt_max_values = map(lambda m : pd_entry_time * m, itt_max_multiples)
+itt_max_values = [ pd_entry_time * m for m in itt_max_multiples ]
 
 # Generate request addresses in the entire range, assume we start at 0
 max_addr = mem_range.end
index bba0d0fad0ec0aecc5928a62e30ffab1af3c0e5d..146863d62da47c66bf44d990efceb82cd5514cdd 100644 (file)
@@ -225,7 +225,7 @@ if options.TLB_config == "perLane":
 
 # List of compute units; one GPU can have multiple compute units
 compute_units = []
-for i in xrange(n_cu):
+for i in range(n_cu):
     compute_units.append(ComputeUnit(cu_id = i, perLaneTLB = per_lane,
                                      num_SIMDs = options.simds_per_cu,
                                      wfSize = options.wf_size,
@@ -255,8 +255,8 @@ for i in xrange(n_cu):
                                              options.outOfOrderDataDelivery))
     wavefronts = []
     vrfs = []
-    for j in xrange(options.simds_per_cu):
-        for k in xrange(shader.n_wf):
+    for j in range(options.simds_per_cu):
+        for k in range(shader.n_wf):
             wavefronts.append(Wavefront(simdId = j, wf_slot_id = k,
                                         wfSize = options.wf_size))
         vrfs.append(VectorRegisterFile(simd_id=j,
@@ -311,7 +311,7 @@ if fast_forward:
     future_cpu_list = []
 
     # Initial CPUs to be used during fast-forwarding.
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         cpu = CpuClass(cpu_id = i,
                        clk_domain = SrcClockDomain(
                            clock = options.CPUClock,
@@ -328,7 +328,7 @@ else:
     MainCpuClass = CpuClass
 
 # CPs to be used throughout the simulation.
-for i in xrange(options.num_cp):
+for i in range(options.num_cp):
     cp = MainCpuClass(cpu_id = options.num_cpus + i,
                       clk_domain = SrcClockDomain(
                           clock = options.CPUClock,
@@ -337,7 +337,7 @@ for i in xrange(options.num_cp):
     cp_list.append(cp)
 
 # Main CPUs (to be used after fast-forwarding if fast-forwarding is specified).
-for i in xrange(options.num_cpus):
+for i in range(options.num_cpus):
     cpu = MainCpuClass(cpu_id = i,
                        clk_domain = SrcClockDomain(
                            clock = options.CPUClock,
@@ -400,7 +400,7 @@ for cp in cp_list:
     cp.workload = host_cpu.workload
 
 if fast_forward:
-    for i in xrange(len(future_cpu_list)):
+    for i in range(len(future_cpu_list)):
         future_cpu_list[i].workload = cpu_list[i].workload
         future_cpu_list[i].createThreads()
 
@@ -408,7 +408,7 @@ if fast_forward:
 # List of CPUs that must be switched when moving between KVM and simulation
 if fast_forward:
     switch_cpu_list = \
-        [(cpu_list[i], future_cpu_list[i]) for i in xrange(options.num_cpus)]
+        [(cpu_list[i], future_cpu_list[i]) for i in range(options.num_cpus)]
 
 # Full list of processing cores in the system. Note that
 # dispatcher is also added to cpu_list although it is
@@ -431,7 +431,7 @@ if fast_forward:
     have_kvm_support = 'BaseKvmCPU' in globals()
     if have_kvm_support and buildEnv['TARGET_ISA'] == "x86":
         system.vm = KvmVM()
-        for i in xrange(len(host_cpu.workload)):
+        for i in range(len(host_cpu.workload)):
             host_cpu.workload[i].useArchPT = True
             host_cpu.workload[i].kvmInSE = True
     else:
@@ -479,15 +479,15 @@ gpu_port_idx = len(system.ruby._cpu_ports) \
 gpu_port_idx = gpu_port_idx - options.num_cp * 2
 
 wavefront_size = options.wf_size
-for i in xrange(n_cu):
+for i in range(n_cu):
     # The pipeline issues wavefront_size number of uncoalesced requests
     # in one GPU issue cycle. Hence wavefront_size mem ports.
-    for j in xrange(wavefront_size):
+    for j in range(wavefront_size):
         system.cpu[shader_idx].CUs[i].memory_port[j] = \
                   system.ruby._cpu_ports[gpu_port_idx].slave[j]
     gpu_port_idx += 1
 
-for i in xrange(n_cu):
+for i in range(n_cu):
     if i > 0 and not i % options.cu_per_sqc:
         print("incrementing idx on ", i)
         gpu_port_idx += 1
@@ -496,7 +496,7 @@ for i in xrange(n_cu):
 gpu_port_idx = gpu_port_idx + 1
 
 # attach CP ports to Ruby
-for i in xrange(options.num_cp):
+for i in range(options.num_cp):
     system.cpu[cp_idx].createInterruptController()
     system.cpu[cp_idx].dcache_port = \
                 system.ruby._cpu_ports[gpu_port_idx + i * 2].slave
index 6be9ba2c26fb027ba588c77026a4854d1fad816d..70275a0f666023b02daa2e9a7494573542da9997 100644 (file)
@@ -138,7 +138,7 @@ def build_test_system(np):
 
     # For now, assign all the CPUs to the same clock domain
     test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
-                    for i in xrange(np)]
+                    for i in range(np)]
 
     if CpuConfig.is_kvm_cpu(TestCPUClass) or CpuConfig.is_kvm_cpu(FutureClass):
         test_sys.kvm_vm = KvmVM()
@@ -194,7 +194,7 @@ def build_test_system(np):
             if np > 1:
                 fatal("SimPoint generation not supported with more than one CPUs")
 
-        for i in xrange(np):
+        for i in range(np):
             if options.simpoint_profile:
                 test_sys.cpu[i].addSimPointProbe(options.simpoint_interval)
             if options.checker:
@@ -277,7 +277,7 @@ def build_drive_system(np):
     # memory bus
     drive_sys.mem_ctrls = [DriveMemClass(range = r)
                            for r in drive_sys.mem_ranges]
-    for i in xrange(len(drive_sys.mem_ctrls)):
+    for i in range(len(drive_sys.mem_ctrls)):
         drive_sys.mem_ctrls[i].port = drive_sys.membus.master
 
     drive_sys.init_param = options.init_param
index 92fb3a04756ba24833f1ec40d839dc27bb264857..f5b7690de7371e307773016039c9fd5e88be0fe8 100644 (file)
@@ -87,7 +87,8 @@ parser.add_option("--inj-vnet", type="int", default=-1,
 #
 Ruby.define_options(parser)
 
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile(open(os.path.join(config_root, "common", "Options.py")).read(),
+             os.path.join(config_root, "common", "Options.py"), 'exec'))
 
 (options, args) = parser.parse_args()
 
@@ -112,7 +113,7 @@ cpus = [ GarnetSyntheticTraffic(
                      inj_vnet=options.inj_vnet,
                      precision=options.precision,
                      num_dest=options.num_dirs) \
-         for i in xrange(options.num_cpus) ]
+         for i in range(options.num_cpus) ]
 
 # create the desired simulated system
 system = System(cpu = cpus, mem_ranges = [AddrRange(options.mem_size)])
index c370d0a84942de228646255e8a1b5e355901524d..091ed8b032e0cd44e6dec6272ed7f59361ea7ac1 100644 (file)
@@ -57,17 +57,17 @@ def build_system(options):
     system.clk_domain = SrcClockDomain(clock=clk, voltage_domain=vd)
     # add traffic generators to the system
     system.tgen = [TrafficGen(config_file=options.tgen_cfg_file) for i in
-                   xrange(options.num_tgen)]
+                   range(options.num_tgen)]
     # Config memory system with given HMC arch
     MemConfig.config_mem(options, system)
     # Connect the traffic generatiors
     if options.arch == "distributed":
-        for i in xrange(options.num_tgen):
+        for i in range(options.num_tgen):
             system.tgen[i].port = system.membus.slave
         # connect the system port even if it is not used in this example
         system.system_port = system.membus.slave
     if options.arch == "mixed":
-        for i in xrange(int(options.num_tgen/2)):
+        for i in range(int(options.num_tgen/2)):
             system.tgen[i].port = system.membus.slave
         hh = system.hmc_host
         if options.enable_global_monitor:
@@ -82,7 +82,7 @@ def build_system(options):
         system.system_port = system.membus.slave
     if options.arch == "same":
         hh = system.hmc_host
-        for i in xrange(options.num_links_controllers):
+        for i in range(options.num_links_controllers):
             if options.enable_global_monitor:
                 system.tgen[i].port = hh.lmonitor[i].slave
             else:
index c2eed1959d8fbd7d49d6a022a6102b31e8616509..1dae86fc3f0e9bccda7b6635f95ae8a00aff271b 100644 (file)
@@ -246,9 +246,9 @@ def make_cache_level(ncaches, prototypes, level, next_cache):
      # The levels are indexing backwards through the list
      ntesters = testerspec[len(cachespec) - level]
 
-     testers = [proto_tester() for i in xrange(ntesters)]
+     testers = [proto_tester() for i in range(ntesters)]
      checkers = [MemCheckerMonitor(memchecker = system.memchecker) \
-                      for i in xrange(ntesters)]
+                      for i in range(ntesters)]
      if ntesters:
           subsys.tester = testers
           subsys.checkers = checkers
@@ -264,8 +264,8 @@ def make_cache_level(ncaches, prototypes, level, next_cache):
           # Create and connect the caches, both the ones fanning out
           # to create the tree, and the ones used to connect testers
           # on this level
-          tree_caches = [prototypes[0]() for i in xrange(ncaches[0])]
-          tester_caches = [proto_l1() for i in xrange(ntesters)]
+          tree_caches = [prototypes[0]() for i in range(ncaches[0])]
+          tester_caches = [proto_l1() for i in range(ntesters)]
 
           subsys.cache = tester_caches + tree_caches
           for cache in tree_caches:
index d293164ce8f53fff9d12200bf31869f4713d44ee..81c826a41bb2d119dfcae69c58f8202ec5fc20fd 100644 (file)
@@ -257,7 +257,7 @@ def make_cache_level(ncaches, prototypes, level, next_cache):
      limit = (len(cachespec) - level + 1) * 100000000
      testers = [proto_tester(interval = 10 * (level * level + 1),
                              progress_check = limit) \
-                     for i in xrange(ntesters)]
+                     for i in range(ntesters)]
      if ntesters:
           subsys.tester = testers
 
@@ -272,8 +272,8 @@ def make_cache_level(ncaches, prototypes, level, next_cache):
           # Create and connect the caches, both the ones fanning out
           # to create the tree, and the ones used to connect testers
           # on this level
-          tree_caches = [prototypes[0]() for i in xrange(ncaches[0])]
-          tester_caches = [proto_l1() for i in xrange(ntesters)]
+          tree_caches = [prototypes[0]() for i in range(ncaches[0])]
+          tester_caches = [proto_l1() for i in range(ntesters)]
 
           subsys.cache = tester_caches + tree_caches
           for cache in tree_caches:
index 3c17d4b9c8350841f75dbe07a821fb7e7063b958..0d60ec4cbcdd800eb569c3ee0e7ae3b19a109bca 100644 (file)
@@ -280,7 +280,7 @@ class ConfigManager(object):
             # Assume that unnamed ports are unconnected
             peers = self.config.get_port_peers(object_name, port_name)
 
-            for index, peer in zip(xrange(0, len(peers)), peers):
+            for index, peer in zip(range(0, len(peers)), peers):
                 parsed_ports.append((
                     PortConnection(object_name, port.name, index),
                     PortConnection.from_string(peer)))
index 162d3ff4fb4bd661c04f87cc77a540f81a7f338b..1757177013a0c9ad0773acdc294681f1c8dea5da 100644 (file)
@@ -76,7 +76,9 @@ parser.add_option("--wfs-per-simd", type="int", default=10, help="Number of " \
 #
 Ruby.define_options(parser)
 
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile( \
+    open(os.path.join(config_root, "common", "Options.py")).read(), \
+    os.path.join(config_root, "common", "Options.py"), 'exec'))
 
 (options, args) = parser.parse_args()
 
@@ -97,7 +99,7 @@ options.l3_assoc=2
 assert(options.num_compute_units >= 1)
 n_cu = options.num_compute_units
 
-options.num_sqc = int((n_cu + options.cu_per_sqc - 1) / options.cu_per_sqc)
+options.num_sqc = int((n_cu + options.cu_per_sqc - 1) // options.cu_per_sqc)
 
 if args:
      print("Error: script doesn't take any positional arguments")
index 68ad1ca66b133b6b6eb2f2085359699aa70ed1de..880a150cd5bb47c8cff8554ff5daf005289e824b 100644 (file)
@@ -65,7 +65,9 @@ parser.add_option("--suppress-func-warnings", action="store_true",
 #
 Ruby.define_options(parser)
 
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile( \
+    open(os.path.join(config_root, "common", "Options.py")).read(), \
+    os.path.join(config_root, "common", "Options.py"), 'exec'))
 
 (options, args) = parser.parse_args()
 
@@ -101,7 +103,7 @@ cpus = [ MemTest(max_loads = options.maxloads,
                  percent_uncacheable = 0,
                  progress_interval = options.progress,
                  suppress_func_warnings = options.suppress_func_warnings) \
-         for i in xrange(options.num_cpus) ]
+         for i in range(options.num_cpus) ]
 
 system = System(cpu = cpus,
                 clk_domain = SrcClockDomain(clock = options.sys_clock),
@@ -114,7 +116,7 @@ if options.num_dmas > 0:
                      progress_interval = options.progress,
                      suppress_func_warnings =
                                         not options.suppress_func_warnings) \
-             for i in xrange(options.num_dmas) ]
+             for i in range(options.num_dmas) ]
     system.dma_devices = dmas
 else:
     dmas = []
index d6b53cf3e85065ed99fa5afacd71dd36738a8afe..15d474cec9409dfd20ee9471dd24e4a4bd438323 100644 (file)
@@ -59,7 +59,9 @@ parser.add_option("-f", "--wakeup_freq", metavar="N", default=10,
 #
 Ruby.define_options(parser)
 
-execfile(os.path.join(config_root, "common", "Options.py"))
+exec(compile( \
+    open(os.path.join(config_root, "common", "Options.py")).read(), \
+    os.path.join(config_root, "common", "Options.py"), 'exec'))
 
 (options, args) = parser.parse_args()
 
index fa9e897453457d2436b6c79b724191a82f78fbb4..59af888e0de9ee04a440b7530c443b680983ad5f 100644 (file)
@@ -171,7 +171,7 @@ if options.smt and options.num_cpus > 1:
     fatal("You cannot use SMT with multiple CPUs!")
 
 np = options.num_cpus
-system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
+system = System(cpu = [CPUClass(cpu_id=i) for i in range(np)],
                 mem_mode = test_mem_mode,
                 mem_ranges = [AddrRange(options.mem_size)],
                 cache_line_size = options.cacheline_size)
@@ -220,7 +220,7 @@ if options.simpoint_profile:
     if np > 1:
         fatal("SimPoint generation not supported with more than one CPUs")
 
-for i in xrange(np):
+for i in range(np):
     if options.smt:
         system.cpu[i].workload = multiprocesses
     elif len(multiprocesses) == 1:
@@ -246,7 +246,7 @@ if options.ruby:
 
     system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
                                         voltage_domain = system.voltage_domain)
-    for i in xrange(np):
+    for i in range(np):
         ruby_port = system.ruby._cpu_ports[i]
 
         # Create the interrupt controller and connect its ports to Ruby
index 96f65758950ee9b70ca650cf22fa5c3dcb3d52ed..a8a0e0dded70ef4aea65b7436d7d4c73c5c2cb3a 100644 (file)
@@ -115,7 +115,7 @@ def construct(options, system, ruby_system):
     cpu_sequencers = []
     cpuCluster = None
     cpuCluster = Cluster(name="CPU Cluster", extBW = 8, intBW=8) # 16 GB/s
-    for i in xrange((options.num_cpus + 1) / 2):
+    for i in range((options.num_cpus + 1) // 2):
 
         cp_cntrl = CPCntrl()
         cp_cntrl.create(options, ruby_system, system)
index fea5e5af473a57e2c29300b70a583ca151ba688c..afe9614c0dff0f3790a30669afcdaef33a4fbb69 100644 (file)
@@ -470,7 +470,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
         block_size_bits = int(math.log(options.cacheline_size, 2))
         numa_bit = block_size_bits + dir_bits - 1
 
-    for i in xrange(options.num_dirs):
+    for i in range(options.num_dirs):
         dir_ranges = []
         for r in system.mem_ranges:
             addr_range = m5.objects.AddrRange(r.start, size = r.size(),
@@ -511,7 +511,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
 
     # For an odd number of CPUs, still create the right number of controllers
     cpuCluster = Cluster(extBW = 512, intBW = 512)  # 1 TB/s
-    for i in xrange((options.num_cpus + 1) / 2):
+    for i in range((options.num_cpus + 1) // 2):
 
         cp_cntrl = CPCntrl()
         cp_cntrl.create(options, ruby_system, system)
@@ -545,7 +545,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
 
     gpuCluster = Cluster(extBW = 512, intBW = 512)  # 1 TB/s
 
-    for i in xrange(options.num_compute_units):
+    for i in range(options.num_compute_units):
 
         tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
                              number_of_TBEs = 2560) # max outstanding requests
@@ -578,7 +578,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
 
         gpuCluster.add(tcp_cntrl)
 
-    for i in xrange(options.num_sqc):
+    for i in range(options.num_sqc):
 
         sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
         sqc_cntrl.create(options, ruby_system, system)
@@ -610,7 +610,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
         # SQC also in GPU cluster
         gpuCluster.add(sqc_cntrl)
 
-    for i in xrange(options.num_cp):
+    for i in range(options.num_cp):
 
         tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
                              number_of_TBEs = 2560) # max outstanding requests
@@ -673,7 +673,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
         # SQC also in GPU cluster
         gpuCluster.add(sqc_cntrl)
 
-    for i in xrange(options.num_tccs):
+    for i in range(options.num_tccs):
 
         tcc_cntrl = TCCCntrl(TCC_select_num_bits = TCC_bits,
                              number_of_TBEs = options.num_compute_units * 2560)
index 8d1223025ddbc294ed44698af744cadde2f665c6..94dcbefe33cac717cdecd121bb75f34bbdcf6036 100644 (file)
@@ -429,7 +429,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
         mainCluster = Cluster(intBW=crossbar_bw)
     else:
         mainCluster = Cluster(intBW=8) # 16 GB/s
-    for i in xrange(options.num_dirs):
+    for i in range(options.num_dirs):
 
         dir_cntrl = DirCntrl(noTCCdir = True, TCC_select_num_bits = TCC_bits)
         dir_cntrl.create(options, ruby_system, system)
@@ -467,7 +467,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
         cpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
     else:
         cpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
-    for i in xrange((options.num_cpus + 1) / 2):
+    for i in range((options.num_cpus + 1) // 2):
 
         cp_cntrl = CPCntrl()
         cp_cntrl.create(options, ruby_system, system)
@@ -504,7 +504,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
       gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
     else:
       gpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
-    for i in xrange(options.num_compute_units):
+    for i in range(options.num_compute_units):
 
         tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
                              issue_latency = 1,
@@ -543,7 +543,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
 
         gpuCluster.add(tcp_cntrl)
 
-    for i in xrange(options.num_sqc):
+    for i in range(options.num_sqc):
 
         sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
         sqc_cntrl.create(options, ruby_system, system)
@@ -569,7 +569,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
         # SQC also in GPU cluster
         gpuCluster.add(sqc_cntrl)
 
-    for i in xrange(options.num_cp):
+    for i in range(options.num_cp):
 
         tcp_ID = options.num_compute_units + i
         sqc_ID = options.num_sqc + i
@@ -623,7 +623,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
         # SQC also in GPU cluster
         gpuCluster.add(sqc_cntrl)
 
-    for i in xrange(options.num_tccs):
+    for i in range(options.num_tccs):
 
         tcc_cntrl = TCCCntrl(l2_response_latency = options.TCC_latency)
         tcc_cntrl.create(options, ruby_system, system)
index 960cbbdd64eb0e4a2603b68b52b5f0b83556a123..5c713ce2d4a936a0da1dc2ba1b16c295f047f21b 100644 (file)
@@ -407,7 +407,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
     # Clusters
     crossbar_bw = 16 * options.num_compute_units #Assuming a 2GHz clock
     mainCluster = Cluster(intBW = crossbar_bw)
-    for i in xrange(options.num_dirs):
+    for i in range(options.num_dirs):
 
         dir_cntrl = DirCntrl(noTCCdir=True,TCC_select_num_bits = TCC_bits)
         dir_cntrl.create(options, ruby_system, system)
@@ -440,7 +440,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
         mainCluster.add(dir_cntrl)
 
     cpuCluster = Cluster(extBW = crossbar_bw, intBW=crossbar_bw)
-    for i in xrange((options.num_cpus + 1) / 2):
+    for i in range((options.num_cpus + 1) // 2):
 
         cp_cntrl = CPCntrl()
         cp_cntrl.create(options, ruby_system, system)
@@ -473,7 +473,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
         cpuCluster.add(cp_cntrl)
 
     gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
-    for i in xrange(options.num_compute_units):
+    for i in range(options.num_compute_units):
 
         tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
                              issue_latency = 1,
@@ -510,7 +510,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
 
         gpuCluster.add(tcp_cntrl)
 
-    for i in xrange(options.num_sqc):
+    for i in range(options.num_sqc):
 
         sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
         sqc_cntrl.create(options, ruby_system, system)
@@ -539,7 +539,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
     # Because of wire buffers, num_tccs must equal num_tccdirs
     numa_bit = 6
 
-    for i in xrange(options.num_tccs):
+    for i in range(options.num_tccs):
 
         tcc_cntrl = TCCCntrl()
         tcc_cntrl.create(options, ruby_system, system)
index 90e8b773a4b8ce07c3d24ddc25c6fd7e5bbe99d3..8b317fbafb1a32ca9c03876ce2a174543fad0e3d 100644 (file)
@@ -469,7 +469,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
     # For an odd number of CPUs, still create the right number of controllers
     crossbar_bw = 16 * options.num_compute_units #Assuming a 2GHz clock
     cpuCluster = Cluster(extBW = (crossbar_bw), intBW=crossbar_bw)
-    for i in xrange((options.num_cpus + 1) / 2):
+    for i in range((options.num_cpus + 1) // 2):
 
         cp_cntrl = CPCntrl()
         cp_cntrl.create(options, ruby_system, system)
@@ -535,7 +535,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
         cpuCluster.add(rb_cntrl)
 
     gpuCluster = Cluster(extBW = (crossbar_bw), intBW = crossbar_bw)
-    for i in xrange(options.num_compute_units):
+    for i in range(options.num_compute_units):
 
         tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
                              issue_latency = 1,
@@ -571,7 +571,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
 
         gpuCluster.add(tcp_cntrl)
 
-    for i in xrange(options.num_sqc):
+    for i in range(options.num_sqc):
 
         sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
         sqc_cntrl.create(options, ruby_system, system)
@@ -599,7 +599,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
 
     numa_bit = 6
 
-    for i in xrange(options.num_tccs):
+    for i in range(options.num_tccs):
 
         tcc_cntrl = TCCCntrl()
         tcc_cntrl.create(options, ruby_system, system)
index a70780bf7da4a62063597bb7cc4c233e6f0a5aac..c38bdbae0985db52e3645baddbce74f60fa2da0d 100644 (file)
@@ -66,7 +66,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
     # controller constructors are called before the network constructor
     #
 
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         #
         # First create the Ruby objects associated with this cpu
         # Only one cache exists for this protocol, so by default use the L1D
index f38b7cfa61c91817554cc7cda542a86be77785a5..95ac342a726bc90519a1633cef539ade91a97f9b 100644 (file)
@@ -83,8 +83,8 @@ def create_system(options, full_system, system, dma_ports, bootmem,
     # Must create the individual controllers before the network to ensure the
     # controller constructors are called before the network constructor
     #
-    for i in xrange(options.num_clusters):
-        for j in xrange(num_cpus_per_cluster):
+    for i in range(options.num_clusters):
+        for j in range(num_cpus_per_cluster):
             #
             # First create the Ruby objects associated with this cpu
             #
@@ -164,7 +164,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
             l1_cntrl.responseFromL2.slave = ruby_system.network.master
 
 
-        for j in xrange(num_l2caches_per_cluster):
+        for j in range(num_l2caches_per_cluster):
             l2_cache = L2Cache(size = options.l2_size,
                                assoc = options.l2_assoc,
                                start_index_bit = l2_index_start)
index 52976e6bb3e64dac287403d365efbccf4dbaeb45..27ef9c8dae147fdadcd26ee85a997bbe4df629cc 100644 (file)
@@ -67,7 +67,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
     l2_bits = int(math.log(options.num_l2caches, 2))
     block_size_bits = int(math.log(options.cacheline_size, 2))
 
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         #
         # First create the Ruby objects associated with this cpu
         #
@@ -135,7 +135,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
 
     l2_index_start = block_size_bits + l2_bits
 
-    for i in xrange(options.num_l2caches):
+    for i in range(options.num_l2caches):
         #
         # First create the Ruby objects associated with this cpu
         #
index 222d084a83cd7f5ec10e98ac824739b28f078818..e3395bdd2f7e382e78bc8e79ccaf23f258bc4dcf 100644 (file)
@@ -64,7 +64,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
     #
     block_size_bits = int(math.log(options.cacheline_size, 2))
 
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         #
         # First create the Ruby objects associated with this cpu
         # Only one cache exists for this protocol, so by default use the L1D
index ad1654393040fca52b3d16cb17f21e882059116f..aeec3783822eeeea57502ced1590cc9c74e8dfc4 100644 (file)
@@ -248,7 +248,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
         block_size_bits = int(math.log(options.cacheline_size, 2))
         numa_bit = block_size_bits + dir_bits - 1
 
-    for i in xrange(options.num_dirs):
+    for i in range(options.num_dirs):
         dir_ranges = []
         for r in system.mem_ranges:
             addr_range = m5.objects.AddrRange(r.start, size = r.size(),
@@ -294,7 +294,7 @@ def create_system(options, full_system, system, dma_devices, bootmem,
 
     # For an odd number of CPUs, still create the right number of controllers
     cpuCluster = Cluster(extBW = 512, intBW = 512)  # 1 TB/s
-    for i in xrange((options.num_cpus + 1) / 2):
+    for i in range((options.num_cpus + 1) // 2):
 
         cp_cntrl = CPCntrl()
         cp_cntrl.create(options, ruby_system, system)
index 3fef48b3be535fe1a2afa75364456fb02ad80fc0..40cb7ce67612f5f2700a56ecdcddec8c80384e5d 100644 (file)
@@ -67,7 +67,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
     l2_bits = int(math.log(options.num_l2caches, 2))
     block_size_bits = int(math.log(options.cacheline_size, 2))
 
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         #
         # First create the Ruby objects associated with this cpu
         #
@@ -126,7 +126,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
 
     l2_index_start = block_size_bits + l2_bits
 
-    for i in xrange(options.num_l2caches):
+    for i in range(options.num_l2caches):
         #
         # First create the Ruby objects associated with this cpu
         #
index 94a518b2afffe83f01a3c223f906c40e500dc6e5..817d6f96ce554f08be2b8e666c92bab23aa4790f 100644 (file)
@@ -80,7 +80,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
     l2_bits = int(math.log(options.num_l2caches, 2))
     block_size_bits = int(math.log(options.cacheline_size, 2))
 
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         #
         # First create the Ruby objects associated with this cpu
         #
@@ -149,7 +149,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
 
     l2_index_start = block_size_bits + l2_bits
 
-    for i in xrange(options.num_l2caches):
+    for i in range(options.num_l2caches):
         #
         # First create the Ruby objects associated with this cpu
         #
index 7c31ca201c580cf16a32ce230ad47911c2aa7514..763088682c0222769da0f3c99ba040813b567336 100644 (file)
@@ -74,7 +74,7 @@ def create_system(options, full_system, system, dma_ports, bootmem,
     #
     block_size_bits = int(math.log(options.cacheline_size, 2))
 
-    for i in xrange(options.num_cpus):
+    for i in range(options.num_cpus):
         #
         # First create the Ruby objects associated with this cpu
         #
index 2ddf608fb36379b033b8d92309e7315ea12176b9..03e836eb04b21c255c888a070056770cb2ffe799 100644 (file)
@@ -214,7 +214,7 @@ def create_system(options, full_system, system, piobus = None, dma_ports = [],
 
 def create_directories(options, bootmem, ruby_system, system):
     dir_cntrl_nodes = []
-    for i in xrange(options.num_dirs):
+    for i in range(options.num_dirs):
         dir_cntrl = Directory_Controller()
         dir_cntrl.version = i
         dir_cntrl.directory = RubyDirectoryMemory()
index f819bd19e45de682a576e4047a45f867226c89cc..753fb0f70419d49c0bcef210814d8e923f72393b 100644 (file)
@@ -167,41 +167,41 @@ all_cpus = []
 all_l1s = []
 all_l1buses = []
 if options.timing:
-    clusters = [ Cluster() for i in xrange(options.numclusters)]
-    for j in xrange(options.numclusters):
+    clusters = [ Cluster() for i in range(options.numclusters)]
+    for j in range(options.numclusters):
         clusters[j].id = j
     for cluster in clusters:
         cluster.clusterbus = L2XBar(clock=busFrequency)
         all_l1buses += [cluster.clusterbus]
         cluster.cpus = [TimingSimpleCPU(cpu_id = i + cluster.id,
                                         clock=options.frequency)
-                        for i in xrange(cpusPerCluster)]
+                        for i in range(cpusPerCluster)]
         all_cpus += cluster.cpus
         cluster.l1 = L1(size=options.l1size, assoc = 4)
         all_l1s += [cluster.l1]
 elif options.detailed:
-    clusters = [ Cluster() for i in xrange(options.numclusters)]
-    for j in xrange(options.numclusters):
+    clusters = [ Cluster() for i in range(options.numclusters)]
+    for j in range(options.numclusters):
         clusters[j].id = j
     for cluster in clusters:
         cluster.clusterbus = L2XBar(clock=busFrequency)
         all_l1buses += [cluster.clusterbus]
         cluster.cpus = [DerivO3CPU(cpu_id = i + cluster.id,
                                    clock=options.frequency)
-                        for i in xrange(cpusPerCluster)]
+                        for i in range(cpusPerCluster)]
         all_cpus += cluster.cpus
         cluster.l1 = L1(size=options.l1size, assoc = 4)
         all_l1s += [cluster.l1]
 else:
-    clusters = [ Cluster() for i in xrange(options.numclusters)]
-    for j in xrange(options.numclusters):
+    clusters = [ Cluster() for i in range(options.numclusters)]
+    for j in range(options.numclusters):
         clusters[j].id = j
     for cluster in clusters:
         cluster.clusterbus = L2XBar(clock=busFrequency)
         all_l1buses += [cluster.clusterbus]
         cluster.cpus = [AtomicSimpleCPU(cpu_id = i + cluster.id,
                                         clock=options.frequency)
-                        for i in xrange(cpusPerCluster)]
+                        for i in range(cpusPerCluster)]
         all_cpus += cluster.cpus
         cluster.l1 = L1(size=options.l1size, assoc = 4)
         all_l1s += [cluster.l1]
index b17eb5400de3d37b939971dc2cdec0f34f956485..f97616a517d53f6d064a2d229e39f13c934df6ed 100644 (file)
@@ -182,15 +182,15 @@ busFrequency = Frequency(options.frequency)
 if options.timing:
     cpus = [TimingSimpleCPU(cpu_id = i,
                             clock=options.frequency)
-            for i in xrange(options.numcpus)]
+            for i in range(options.numcpus)]
 elif options.detailed:
     cpus = [DerivO3CPU(cpu_id = i,
                        clock=options.frequency)
-            for i in xrange(options.numcpus)]
+            for i in range(options.numcpus)]
 else:
     cpus = [AtomicSimpleCPU(cpu_id = i,
                             clock=options.frequency)
-            for i in xrange(options.numcpus)]
+            for i in range(options.numcpus)]
 
 # ----------------------
 # Create a system, and add system wide objects
index 46f3c6fdd1b7021e432c0b9f1df638ec17e754a2..2381624919b332cafb134cad98200609a81a7dfe 100644 (file)
@@ -126,8 +126,8 @@ class MeshDirCorners_XY(SimpleTopology):
         int_links = []
 
         # East output to West input links (weight = 1)
-        for row in xrange(num_rows):
-            for col in xrange(num_columns):
+        for row in range(num_rows):
+            for col in range(num_columns):
                 if (col + 1 < num_columns):
                     east_out = col + (row * num_columns)
                     west_in = (col + 1) + (row * num_columns)
@@ -141,8 +141,8 @@ class MeshDirCorners_XY(SimpleTopology):
                     link_count += 1
 
         # West output to East input links (weight = 1)
-        for row in xrange(num_rows):
-            for col in xrange(num_columns):
+        for row in range(num_rows):
+            for col in range(num_columns):
                 if (col + 1 < num_columns):
                     east_in = col + (row * num_columns)
                     west_out = (col + 1) + (row * num_columns)
@@ -156,8 +156,8 @@ class MeshDirCorners_XY(SimpleTopology):
                     link_count += 1
 
         # North output to South input links (weight = 2)
-        for col in xrange(num_columns):
-            for row in xrange(num_rows):
+        for col in range(num_columns):
+            for row in range(num_rows):
                 if (row + 1 < num_rows):
                     north_out = col + (row * num_columns)
                     south_in = col + ((row + 1) * num_columns)
@@ -171,8 +171,8 @@ class MeshDirCorners_XY(SimpleTopology):
                     link_count += 1
 
         # South output to North input links (weight = 2)
-        for col in xrange(num_columns):
-            for row in xrange(num_rows):
+        for col in range(num_columns):
+            for row in range(num_rows):
                 if (row + 1 < num_rows):
                     north_in = col + (row * num_columns)
                     south_out = col + ((row + 1) * num_columns)
index 652ac16d8efccf5ba4cafbf00a55f3637936c1f2..200d346d9ef05d865ccaeff12b172f4404af27ba 100644 (file)
@@ -78,7 +78,7 @@ class Mesh_XY(SimpleTopology):
         # distributed across the network.
         network_nodes = []
         remainder_nodes = []
-        for node_index in xrange(len(nodes)):
+        for node_index in range(len(nodes)):
             if node_index < (len(nodes) - remainder):
                 network_nodes.append(nodes[node_index])
             else:
@@ -110,8 +110,8 @@ class Mesh_XY(SimpleTopology):
         int_links = []
 
         # East output to West input links (weight = 1)
-        for row in xrange(num_rows):
-            for col in xrange(num_columns):
+        for row in range(num_rows):
+            for col in range(num_columns):
                 if (col + 1 < num_columns):
                     east_out = col + (row * num_columns)
                     west_in = (col + 1) + (row * num_columns)
@@ -125,8 +125,8 @@ class Mesh_XY(SimpleTopology):
                     link_count += 1
 
         # West output to East input links (weight = 1)
-        for row in xrange(num_rows):
-            for col in xrange(num_columns):
+        for row in range(num_rows):
+            for col in range(num_columns):
                 if (col + 1 < num_columns):
                     east_in = col + (row * num_columns)
                     west_out = (col + 1) + (row * num_columns)
@@ -140,8 +140,8 @@ class Mesh_XY(SimpleTopology):
                     link_count += 1
 
         # North output to South input links (weight = 2)
-        for col in xrange(num_columns):
-            for row in xrange(num_rows):
+        for col in range(num_columns):
+            for row in range(num_rows):
                 if (row + 1 < num_rows):
                     north_out = col + (row * num_columns)
                     south_in = col + ((row + 1) * num_columns)
@@ -155,8 +155,8 @@ class Mesh_XY(SimpleTopology):
                     link_count += 1
 
         # South output to North input links (weight = 2)
-        for col in xrange(num_columns):
-            for row in xrange(num_rows):
+        for col in range(num_columns):
+            for row in range(num_rows):
                 if (row + 1 < num_rows):
                     north_in = col + (row * num_columns)
                     south_out = col + ((row + 1) * num_columns)
index 6139f674270ea832af66999400a5bb0b950665fe..f3278200d733d84bcb2a9c1061b3ab96c52b1fb2 100644 (file)
@@ -82,7 +82,7 @@ class Mesh_westfirst(SimpleTopology):
         # distributed across the network.
         network_nodes = []
         remainder_nodes = []
-        for node_index in xrange(len(nodes)):
+        for node_index in range(len(nodes)):
             if node_index < (len(nodes) - remainder):
                 network_nodes.append(nodes[node_index])
             else:
@@ -114,8 +114,8 @@ class Mesh_westfirst(SimpleTopology):
         int_links = []
 
         # East output to West input links (weight = 2)
-        for row in xrange(num_rows):
-            for col in xrange(num_columns):
+        for row in range(num_rows):
+            for col in range(num_columns):
                 if (col + 1 < num_columns):
                     east_out = col + (row * num_columns)
                     west_in = (col + 1) + (row * num_columns)
@@ -127,8 +127,8 @@ class Mesh_westfirst(SimpleTopology):
                     link_count += 1
 
         # West output to East input links (weight = 1)
-        for row in xrange(num_rows):
-            for col in xrange(num_columns):
+        for row in range(num_rows):
+            for col in range(num_columns):
                 if (col + 1 < num_columns):
                     east_in = col + (row * num_columns)
                     west_out = (col + 1) + (row * num_columns)
@@ -141,8 +141,8 @@ class Mesh_westfirst(SimpleTopology):
 
 
         # North output to South input links (weight = 2)
-        for col in xrange(num_columns):
-            for row in xrange(num_rows):
+        for col in range(num_columns):
+            for row in range(num_rows):
                 if (row + 1 < num_rows):
                     north_out = col + (row * num_columns)
                     south_in = col + ((row + 1) * num_columns)
@@ -154,8 +154,8 @@ class Mesh_westfirst(SimpleTopology):
                     link_count += 1
 
         # South output to North input links (weight = 2)
-        for col in xrange(num_columns):
-            for row in xrange(num_rows):
+        for col in range(num_columns):
+            for row in range(num_rows):
                 if (row + 1 < num_rows):
                     north_in = col + (row * num_columns)
                     south_out = col + ((row + 1) * num_columns)
index 6cbf5ad853a8b1df292a857b8e473bac093fbcdd..81d61d7d11232aa23a1863a1604995ed28b32ab2 100644 (file)
@@ -63,8 +63,8 @@ class Pt2Pt(SimpleTopology):
 
         link_count = len(nodes)
         int_links = []
-        for i in xrange(len(nodes)):
-            for j in xrange(len(nodes)):
+        for i in range(len(nodes)):
+            for j in range(len(nodes)):
                 if (i != j):
                     link_count += 1
                     int_links.append(IntLink(link_id=link_count,