from __future__ import print_function
from __future__ import absolute_import
+import six
+
import m5
from m5.objects import *
from m5.util import *
from common.Benchmarks import *
from common import ObjectList
+if six.PY3:
+ long = int
+
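# A quick illustration of what the alias above buys (throwaway value, not
# part of the configuration): Python 3 folded the old `long` type into
# `int`, so any remaining `long(...)` call would raise NameError without it.
big_value = long(1 << 40)    # on Python 3 this is simply int(1 << 40)
assert big_value == 1099511627776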
# Populate to reflect supported os types per target ISA
os_types = { 'mips' : [ 'linux' ],
'riscv' : [ 'linux' ], # TODO that's a lie
# We assume below that there's at least 1MB of memory. We'll require 2MB
# just to avoid corner cases.
- phys_mem_size = sum(map(lambda r: r.size(), self.mem_ranges))
+ phys_mem_size = sum([r.size() for r in self.mem_ranges])
assert(phys_mem_size >= 0x200000)
assert(len(self.mem_ranges) <= 2)
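# Worked example with hypothetical ranges: a 1GB range plus a 512MB range
# sum to 0x60000000 bytes, comfortably above the 0x200000 (2MB) floor
# enforced by the assert above.
example_ranges = [AddrRange('1GB'), AddrRange('512MB')]
assert sum([r.size() for r in example_ranges]) >= 0x200000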
file_append((procdir, 'cpuinfo'), one_cpu)
file_append((procdir, 'stat'), 'cpu 0 0 0 0 0 0 0\n')
- for i in xrange(len(cpus)):
+ for i in range(len(cpus)):
file_append((procdir, 'stat'), 'cpu%d 0 0 0 0 0 0 0\n' % i)
# Set up /sys
for i in range(numx*(opt.mem_chunk-1))]
# Buffer iterator
- it = iter(range(len(system.hmc_dev.buffers)))
+ it = iter(list(range(len(system.hmc_dev.buffers))))
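# Note on the iterator changes in this hunk: Python 3 renamed the iterator
# method to __next__, so `it.next()` no longer exists; the builtin next(it)
# used below works under both versions. Toy check with throwaway values:
toy_it = iter(range(3))
assert next(toy_it) == 0 and next(toy_it) == 1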
# necessary to add system_port to one of the xbars
system.system_port = system.hmc_dev.xbar[3].slave
# connect xbar to all other xbars except itself
if i != j:
# get the next buffer index
- index = it.next()
+ index = next(it)
# Change the default values for ranges of bridge
system.hmc_dev.buffers[index].ranges = system.mem_ranges[
print("Available {} classes:".format(self.base_cls))
doc_wrapper = TextWrapper(initial_indent="\t\t",
subsequent_indent="\t\t")
- for name, cls in self._sub_classes.items():
+ for name, cls in list(self._sub_classes.items()):
print("\t{}".format(name))
# Try to extract the class documentation from the class help
if self._aliases:
print("\Aliases:")
- for alias, target in self._aliases.items():
+ for alias, target in list(self._aliases.items()):
print("\t{} => {}".format(alias, target))
def get_names(self):
def _add_objects(self):
""" Add all enum values to the ObjectList """
self._sub_classes = {}
- for (key, value) in self.base_cls.__members__.items():
+ for (key, value) in list(self.base_cls.__members__.items()):
# All Enums have a value Num_NAME at the end which we
# do not want to include
if not key.startswith("Num_"):
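# Sketch of the filtering above with a hypothetical __members__ mapping:
# only the real enum values survive, the trailing Num_* sentinel does not.
fake_members = {'Read': 0, 'Write': 1, 'Num_MemCmd': 2}
kept = {k: v for k, v in fake_members.items() if not k.startswith("Num_")}
assert sorted(kept) == ['Read', 'Write']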
help="Specify the physical memory size (single memory)")
parser.add_option("--enable-dram-powerdown", action="store_true",
help="Enable low-power states in DRAMCtrl")
- parser.add_option("--mem-channels-intlv", type="int",
+ parser.add_option("--mem-channels-intlv", type="int", default=0,
help="Memory channels interleave")
from __future__ import print_function
from __future__ import absolute_import
+import six
import sys
from os import getcwd
from os.path import join as joinpath
from m5.objects import *
from m5.util import *
+if six.PY3:
+ long = int
+
addToPath('../common')
def getCPUClass(cpu_type):
paths = [ '/dist/m5/system', '/n/poolfs/z/dist/m5/system' ]
# expand '~' and '~user' in paths
- paths = map(os.path.expanduser, paths)
+ paths = list(map(os.path.expanduser, paths))
# filter out non-existent directories
- paths = filter(os.path.isdir, paths)
+ paths = list(filter(os.path.isdir, paths))
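# The list() wrappers matter under Python 3, where map() and filter()
# return lazy iterators rather than lists: an empty iterator object is
# still truthy, so the `if not paths:` test below would never fire without
# the conversion. Throwaway check with a path assumed not to exist:
import os
assert list(filter(os.path.isdir, ['/no/such/directory'])) == []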
if not paths:
raise IOError(
from __future__ import absolute_import
import gzip
+import six
import optparse
import os
addToPath('../../util')
import protolib
+if six.PY3:
+ long = int
+
# This script is useful for observing the memory latency at various
# levels of a cache hierarchy, for various cache and memory
# configurations, in essence replicating the lmbench lat_mem_rd thrash
help = "Percentage of read commands")
parser.add_option("--mode", type="choice", default="DRAM",
- choices=dram_generators.keys(),
+ choices=list(dram_generators.keys()),
help = "DRAM: Random traffic; \
DRAM_ROTATE: Traffic rotating across banks and ranks")
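# The keys() view has to be materialised because optparse insists that
# `choices` is literally a tuple or list and raises OptionError ("choices
# must be a list of strings") for a Python 3 dict_keys view. Throwaway
# sketch of the accepted form:
import optparse
_toy = optparse.OptionParser()
_toy.add_option("--toy-mode", type="choice", default="DRAM",
                choices=list({'DRAM': 0, 'DRAM_ROTATE': 1}.keys()),
                help="illustrative only")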
help="Disk to instantiate")
parser.add_argument("--readfile", type=str, default="",
help = "File to return with the m5 readfile command")
- parser.add_argument("--cpu", type=str, choices=cpu_types.keys(),
+ parser.add_argument("--cpu", type=str, choices=list(cpu_types.keys()),
default="atomic",
help="CPU model to use")
parser.add_argument("--cpu-freq", type=str, default="4GHz")
from __future__ import print_function
from __future__ import absolute_import
+import six
+
import m5
from m5.objects import *
m5.util.addToPath('../../')
from common.Caches import *
from common import ObjectList
+if six.PY3:
+ long = int
+
have_kvm = "ArmV8KvmCPU" in ObjectList.cpu_list.get_names()
have_fastmodel = "FastModelCortexA76" in ObjectList.cpu_list.get_names()
help="Disks to instantiate")
parser.add_argument("--bootscript", type=str, default=default_rcs,
help="Linux bootscript")
- parser.add_argument("--cpu-type", type=str, choices=cpu_types.keys(),
+ parser.add_argument("--cpu-type", type=str, choices=list(cpu_types.keys()),
default="timing",
help="CPU simulation mode. Default: %(default)s")
parser.add_argument("--kernel-init", type=str, default="/sbin/init",
help="Disk to instantiate")
parser.add_argument("--script", type=str, default="",
help = "Linux bootscript")
- parser.add_argument("--cpu", type=str, choices=cpu_types.keys(),
+ parser.add_argument("--cpu", type=str, choices=list(cpu_types.keys()),
default="atomic",
help="CPU model to use")
parser.add_argument("--cpu-freq", type=str, default="4GHz")
parser.add_argument("commands_to_run", metavar="command(s)", nargs='*',
help="Command(s) to run")
- parser.add_argument("--cpu", type=str, choices=cpu_types.keys(),
+ parser.add_argument("--cpu", type=str, choices=list(cpu_types.keys()),
default="atomic",
help="CPU model to use")
parser.add_argument("--cpu-freq", type=str, default="4GHz")
from __future__ import absolute_import
import argparse
-import ConfigParser
+from six.moves import configparser
import inspect
import json
import re
long = int
sim_object_classes_by_name = {
- cls.__name__: cls for cls in m5.objects.__dict__.values()
+ cls.__name__: cls for cls in list(m5.objects.__dict__.values())
if inspect.isclass(cls) and issubclass(cls, m5.objects.SimObject) }
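# Sketch of how a registry like this is consulted later on: a 'type'
# string read from the config is mapped back to its SimObject class.
# Root is used here only because it is always available in m5.objects.
assert sim_object_classes_by_name['Root'] is m5.objects.Root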
# Add some parsing functions to Param classes to handle reading in .ini
'EthernetAddr': simple_parser()
}
-for name, parser in param_parsers.items():
+for name, parser in list(param_parsers.items()):
setattr(m5.params.__dict__[name], 'parse_ini', classmethod(parser))
class PortConnection(object):
parsed_params = {}
- for param_name, param in object_class._params.items():
+ for param_name, param in list(object_class._params.items()):
if issubclass(param.ptype, m5.params.ParamValue):
if isinstance(param, m5.params.VectorParamDesc):
param_values = self.config.get_param_vector(object_name,
if object_name == 'Null':
return NULL
- for param_name, param in obj.__class__._params.items():
+ for param_name, param in list(obj.__class__._params.items()):
if issubclass(param.ptype, m5.objects.SimObject):
if isinstance(param, m5.params.VectorParamDesc):
param_values = self.config.get_param_vector(object_name,
return NULL
parsed_ports = []
- for port_name, port in obj.__class__._ports.items():
+ for port_name, port in list(obj.__class__._ports.items()):
# Assume that unnamed ports are unconnected
peers = self.config.get_port_peers(object_name, port_name)
- for index, peer in zip(range(0, len(peers)), peers):
+ for index, peer in zip(list(range(0, len(peers))), peers):
parsed_ports.append((
PortConnection(object_name, port.name, index),
PortConnection.from_string(peer)))
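# The zip-over-range idiom above is just a spelled-out enumerate();
# throwaway check with made-up peer names:
_toy_peers = ['system.xbar.slave[0]', 'system.xbar.slave[1]']
assert list(zip(range(len(_toy_peers)), _toy_peers)) == \
    list(enumerate(_toy_peers))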
# Now fill in SimObject-valued parameters in the knowledge that
# this won't be interpreted as becoming the parent of objects
# which are already in the root hierarchy
- for name, obj in self.objects_by_name.items():
+ for name, obj in list(self.objects_by_name.items()):
self.fill_in_simobj_parameters(name, obj)
# Gather a list of all port-to-port connections
connections = []
- for name, obj in self.objects_by_name.items():
+ for name, obj in list(self.objects_by_name.items()):
connections += self.gather_port_connections(name, obj)
# Find an acceptable order to bind those port connections and
class ConfigIniFile(ConfigFile):
def __init__(self):
- self.parser = ConfigParser.ConfigParser()
+ self.parser = configparser.ConfigParser()
def load(self, config_file):
self.parser.read(config_file)
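# Minimal usage sketch (hypothetical file name): six.moves.configparser
# resolves to the ConfigParser module on Python 2 and to configparser on
# Python 3, so the same code path serves both interpreters.
_cp = configparser.ConfigParser()
_cp.read('example-gem5-config.ini')   # returns the list of files parsed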
for elem in node:
self.find_all_objects(elem)
elif isinstance(node, dict):
- for elem in node.values():
+ for elem in list(node.values()):
self.find_all_objects(elem)
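# Standalone analogue of the walk above (toy data, not real config output):
# lists and dicts are descended into recursively, everything else is a leaf.
def _toy_walk(node, visit):
    if isinstance(node, list):
        for elem in node:
            _toy_walk(elem, visit)
    elif isinstance(node, dict):
        visit(node)
        for elem in list(node.values()):
            _toy_walk(elem, visit)

_seen = []
_toy_walk({'cpu': [{'path': 'system.cpu'}]}, _seen.append)
assert len(_seen) == 2   # the outer dict and the nested object dict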
def load(self, config_file):
obj = self.object_dicts[object_name]
children = []
- for name, node in obj.items():
+ for name, node in list(obj.items()):
if self.is_sim_object(node):
children.append((name, node['path']))
elif isinstance(node, list) and node != [] and all([
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath, convert
-from CntrlBase import *
+from .CntrlBase import *
addToPath('../')
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+import six
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath
-from Ruby import create_topology
-from Ruby import send_evicts
+from .Ruby import create_topology
+from .Ruby import send_evicts
addToPath('../')
from topologies.Cluster import Cluster
from topologies.Crossbar import Crossbar
+if six.PY3:
+ long = int
+
class CntrlBase:
_seqs = 0
@classmethod
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+import six
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath
-from Ruby import create_topology
-from Ruby import send_evicts
+from .Ruby import create_topology
+from .Ruby import send_evicts
addToPath('../')
from topologies.Cluster import Cluster
from topologies.Crossbar import Crossbar
+if six.PY3:
+ long = int
+
class CntrlBase:
_seqs = 0
@classmethod
# Register CPUs and caches for each CorePair and directory (SE mode only)
if not full_system:
- for i in xrange((options.num_cpus + 1) // 2):
+ for i in range((options.num_cpus + 1) // 2):
FileSystemConfig.register_cpu(physical_package_id = 0,
core_siblings = \
- xrange(options.num_cpus),
+ range(options.num_cpus),
core_id = i*2,
thread_siblings = [])
FileSystemConfig.register_cpu(physical_package_id = 0,
core_siblings = \
- xrange(options.num_cpus),
+ range(options.num_cpus),
core_id = i*2+1,
thread_siblings = [])
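# Worked example of the pairing arithmetic above, assuming a hypothetical
# options.num_cpus of 4: range((4 + 1) // 2) yields core-id pairs (0, 1)
# and (2, 3), each listing all four cores as core_siblings.
assert [(i * 2, i * 2 + 1)
        for i in range((4 + 1) // 2)] == [(0, 1), (2, 3)]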
line_size = options.cacheline_size,
assoc = options.l3_assoc,
cpus = [n for n in
- xrange(options.num_cpus)])
+ range(options.num_cpus)])
gpuCluster = None
if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+import six
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath
-from Ruby import create_topology
-from Ruby import send_evicts
+from .Ruby import create_topology
+from .Ruby import send_evicts
addToPath('../')
from topologies.Cluster import Cluster
from topologies.Crossbar import Crossbar
+if six.PY3:
+ long = int
+
class CntrlBase:
_seqs = 0
@classmethod
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+import six
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath
-from Ruby import send_evicts
+from .Ruby import send_evicts
addToPath('../')
from topologies.Cluster import Cluster
+if six.PY3:
+ long = int
+
class CntrlBase:
_seqs = 0
@classmethod
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath
-from Ruby import create_topology, create_directories
+from .Ruby import create_topology, create_directories
#
# Declare caches used by the protocol
import m5
from m5.objects import *
from m5.defines import buildEnv
-from Ruby import create_topology, create_directories
-from Ruby import send_evicts
+from .Ruby import create_topology, create_directories
+from .Ruby import send_evicts
from common import FileSystemConfig
#
all_cntrls = all_cntrls + [io_controller]
# Register configuration with filesystem
else:
- for i in xrange(options.num_clusters):
- for j in xrange(num_cpus_per_cluster):
+ for i in range(options.num_clusters):
+ for j in range(num_cpus_per_cluster):
FileSystemConfig.register_cpu(physical_package_id = 0,
- core_siblings = xrange(options.num_cpus),
+ core_siblings = range(options.num_cpus),
core_id = i*num_cpus_per_cluster+j,
thread_siblings = [])
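# Worked example of the core_id computation above with hypothetical sizes
# (2 clusters, 2 CPUs per cluster): (i, j) maps to core ids 0..3 in order.
assert [i * 2 + j for i in range(2) for j in range(2)] == [0, 1, 2, 3]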
num_l2caches_per_cluster)+'B',
line_size = options.cacheline_size,
assoc = options.l2_assoc,
- cpus = [n for n in xrange(i*num_cpus_per_cluster, \
+ cpus = [n for n in range(i*num_cpus_per_cluster, \
(i+1)*num_cpus_per_cluster)])
ruby_system.network.number_of_virtual_networks = 3
import m5
from m5.objects import *
from m5.defines import buildEnv
-from Ruby import create_topology, create_directories
-from Ruby import send_evicts
+from .Ruby import create_topology, create_directories
+from .Ruby import send_evicts
#
# Declare caches used by the protocol
import m5
from m5.objects import *
from m5.defines import buildEnv
-from Ruby import create_topology, create_directories
-from Ruby import send_evicts
+from .Ruby import create_topology, create_directories
+from .Ruby import send_evicts
#
# Declare caches used by the protocol
l1_cntrl.responseToCache = MessageBuffer(ordered = True)
l1_cntrl.responseToCache.slave = ruby_system.network.master
- phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
+ phys_mem_size = sum([r.size() for r in system.mem_ranges])
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size // options.num_dirs
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath
-from Ruby import create_topology
-from Ruby import send_evicts
+from .Ruby import create_topology
+from .Ruby import send_evicts
from common import FileSystemConfig
addToPath('../')
# Register CPUs and caches for each CorePair and directory (SE mode only)
if not full_system:
- for i in xrange((options.num_cpus + 1) // 2):
+ for i in range((options.num_cpus + 1) // 2):
FileSystemConfig.register_cpu(physical_package_id = 0,
core_siblings =
- xrange(options.num_cpus),
+ range(options.num_cpus),
core_id = i*2,
thread_siblings = [])
FileSystemConfig.register_cpu(physical_package_id = 0,
core_siblings =
- xrange(options.num_cpus),
+ range(options.num_cpus),
core_id = i*2+1,
thread_siblings = [])
line_size = options.cacheline_size,
assoc = options.l3_assoc,
cpus = [n for n in
- xrange(options.num_cpus)])
+ range(options.num_cpus)])
# Assuming no DMA devices
assert(len(dma_devices) == 0)
import m5
from m5.objects import *
from m5.defines import buildEnv
-from Ruby import create_topology, create_directories
-from Ruby import send_evicts
+from .Ruby import create_topology, create_directories
+from .Ruby import send_evicts
#
# Declare caches used by the protocol
import m5
from m5.objects import *
from m5.defines import buildEnv
-from Ruby import create_topology, create_directories
-from Ruby import send_evicts
+from .Ruby import create_topology, create_directories
+from .Ruby import send_evicts
#
# Declare caches used by the protocol
import m5
from m5.objects import *
from m5.defines import buildEnv
-from Ruby import create_topology, create_directories
-from Ruby import send_evicts
+from .Ruby import create_topology, create_directories
+from .Ruby import send_evicts
from common import FileSystemConfig
#
all_cntrls = all_cntrls + [io_controller]
# Register configuration with filesystem
else:
- for i in xrange(options.num_cpus):
+ for i in range(options.num_cpus):
FileSystemConfig.register_cpu(physical_package_id = 0,
core_siblings = [],
core_id = i,
# NUMA Node for each quadrant
# With odd columns or rows, the nodes will be unequal
numa_nodes = [ [], [], [], []]
- for i in xrange(num_routers):
+ for i in range(num_routers):
if i % num_columns < num_columns / 2 and \
i < num_routers / 2:
numa_nodes[0].append(i)
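# Worked example of the quadrant test above, assuming a hypothetical 4x4
# mesh (num_routers == 16, num_columns == 4): routers 0, 1, 4 and 5 land
# in numa_nodes[0]; the other three quadrants use the analogous conditions
# elided from this excerpt.
assert [i for i in range(16)
        if i % 4 < 4 / 2 and i < 16 / 2] == [0, 1, 4, 5]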
# Register nodes with filesystem
def registerTopology(self, options):
- for i in xrange(options.num_cpus):
+ for i in range(options.num_cpus):
FileSystemConfig.register_node([i],
MemorySize(options.mem_size) / options.num_cpus, i)