X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=configs%2Fexample%2Fse.py;h=4adfe7bb8da5955a96613bc13309c7716a950f10;hb=41beacce088e8f682a0e8ac48f22a3fa4805a43b;hp=1edd99e9b6bea6bc0edf9b399116c26f953af1a0;hpb=d7f71bf424f2ccb87366b4f464e657a185abe414;p=gem5.git
diff --git a/configs/example/se.py b/configs/example/se.py
index 1edd99e9b..4adfe7bb8 100644
--- a/configs/example/se.py
+++ b/configs/example/se.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2012 ARM Limited
+# Copyright (c) 2012-2013 ARM Limited
 # All rights reserved.
 #
 # The license below extends only to copyright in the software and shall
@@ -42,50 +42,91 @@
 #
 # "m5 test.py"
 
-import os
 import optparse
 import sys
-from os.path import join as joinpath
+import os
 
 import m5
 from m5.defines import buildEnv
 from m5.objects import *
 from m5.util import addToPath, fatal
 
-addToPath('../common')
-addToPath('../ruby')
+addToPath('../')
 
-import Ruby
+from ruby import Ruby
 
-import Simulation
-import CacheConfig
-from Caches import *
-from cpu2000 import *
+from common import Options
+from common import Simulation
+from common import CacheConfig
+from common import CpuConfig
+from common import MemConfig
+from common.Caches import *
+from common.cpu2000 import *
 
-# Get paths we might need. It's expected this file is in m5/configs/example.
-config_path = os.path.dirname(os.path.abspath(__file__))
-config_root = os.path.dirname(config_path)
-m5_root = os.path.dirname(config_root)
+# Check if KVM support has been enabled, we might need to do VM
+# configuration if that's the case.
+have_kvm_support = 'BaseKvmCPU' in globals()
+def is_kvm_cpu(cpu_class):
+    return have_kvm_support and cpu_class != None and \
+        issubclass(cpu_class, BaseKvmCPU)
 
-parser = optparse.OptionParser()
+def get_processes(options):
+    """Interprets provided options and returns a list of processes"""
+
+    multiprocesses = []
+    inputs = []
+    outputs = []
+    errouts = []
+    pargs = []
+
+    workloads = options.cmd.split(';')
+    if options.input != "":
+        inputs = options.input.split(';')
+    if options.output != "":
+        outputs = options.output.split(';')
+    if options.errout != "":
+        errouts = options.errout.split(';')
+    if options.options != "":
+        pargs = options.options.split(';')
+
+    idx = 0
+    for wrkld in workloads:
+        process = LiveProcess()
+        process.executable = wrkld
+        process.cwd = os.getcwd()
+
+        if options.env:
+            with open(options.env, 'r') as f:
+                process.env = [line.rstrip() for line in f]
+
+        if len(pargs) > idx:
+            process.cmd = [wrkld] + pargs[idx].split()
+        else:
+            process.cmd = [wrkld]
+
+        if len(inputs) > idx:
+            process.input = inputs[idx]
+        if len(outputs) > idx:
+            process.output = outputs[idx]
+        if len(errouts) > idx:
+            process.errout = errouts[idx]
+
+        multiprocesses.append(process)
+        idx += 1
+
+    if options.smt:
+        assert(options.cpu_type == "detailed")
+        return multiprocesses, idx
+    else:
+        return multiprocesses, 1
 
-# Benchmark options
-parser.add_option("-c", "--cmd",
-    default=joinpath(m5_root, "tests/test-progs/hello/bin/%s/linux/hello" % \
-        buildEnv['TARGET_ISA']),
-    help="The binary to run in syscall emulation mode.")
-parser.add_option("-o", "--options", default="",
-    help='The options to pass to the binary, use " " around the entire string')
-parser.add_option("-i", "--input", default="", help="Read stdin from a file.")
-parser.add_option("--output", default="", help="Redirect stdout to a file.")
-parser.add_option("--errout", default="", help="Redirect stderr to a file.")
 
-execfile(os.path.join(config_root, "common", "Options.py"))
+parser = optparse.OptionParser()
+Options.addCommonOptions(parser)
+Options.addSEOptions(parser)
 
-if buildEnv['PROTOCOL'] != 'None':
-    parser.add_option("--ruby", action="store_true")
-    if '--ruby' in sys.argv:
-        Ruby.define_options(parser)
+if '--ruby' in sys.argv:
+    Ruby.define_options(parser)
 
 (options, args) = parser.parse_args()
 
@@ -94,7 +135,7 @@ if args:
     sys.exit(1)
 
 multiprocesses = []
-apps = []
+numThreads = 1
 
 if options.bench:
     apps = options.bench.split("-")
@@ -105,98 +146,142 @@ if options.bench:
     for app in apps:
         try:
             if buildEnv['TARGET_ISA'] == 'alpha':
-                exec("workload = %s('alpha', 'tru64', 'ref')" % app)
+                exec("workload = %s('alpha', 'tru64', '%s')" % (
+                        app, options.spec_input))
+            elif buildEnv['TARGET_ISA'] == 'arm':
+                exec("workload = %s('arm_%s', 'linux', '%s')" % (
+                        app, options.arm_iset, options.spec_input))
             else:
-                exec("workload = %s(buildEnv['TARGET_ISA'], 'linux', 'ref')" % app)
+                exec("workload = %s(buildEnv['TARGET_ISA'], 'linux', '%s')" % (
+                        app, options.spec_input))
             multiprocesses.append(workload.makeLiveProcess())
         except:
-            print >>sys.stderr, "Unable to find workload for %s: %s" % (buildEnv['TARGET_ISA'], app)
+            print >>sys.stderr, "Unable to find workload for %s: %s" % (
+                    buildEnv['TARGET_ISA'], app)
             sys.exit(1)
+elif options.cmd:
+    multiprocesses, numThreads = get_processes(options)
 else:
-    process = LiveProcess()
-    process.executable = options.cmd
-    process.cmd = [options.cmd] + options.options.split()
-    multiprocesses.append(process)
+    print >> sys.stderr, "No workload specified. Exiting!\n"
+    sys.exit(1)
 
-if options.input != "":
-    process.input = options.input
-if options.output != "":
-    process.output = options.output
-if options.errout != "":
-    process.errout = options.errout
+(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
+CPUClass.numThreads = numThreads
 
+# Check -- do not allow SMT with multiple CPUs
+if options.smt and options.num_cpus > 1:
+    fatal("You cannot use SMT with multiple CPUs!")
 
-# By default, set workload to path of user-specified binary
-workloads = options.cmd
-numThreads = 1
+np = options.num_cpus
+system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
+                mem_mode = test_mem_mode,
+                mem_ranges = [AddrRange(options.mem_size)],
+                cache_line_size = options.cacheline_size)
+
+if numThreads > 1:
+    system.multi_thread = True
+
+# Create a top-level voltage domain
+system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
+
+# Create a source clock for the system and set the clock period
+system.clk_domain = SrcClockDomain(clock = options.sys_clock,
+                                   voltage_domain = system.voltage_domain)
+
+# Create a CPU voltage domain
+system.cpu_voltage_domain = VoltageDomain()
+
+# Create a separate clock domain for the CPUs
+system.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
+                                       voltage_domain =
+                                       system.cpu_voltage_domain)
+
+# If elastic tracing is enabled, then configure the cpu and attach the elastic
+# trace probe
+if options.elastic_trace_en:
+    CpuConfig.config_etrace(CPUClass, system.cpu, options)
+
+# All cpus belong to a common cpu_clk_domain, therefore running at a common
+# frequency.
+for cpu in system.cpu: + cpu.clk_domain = system.cpu_clk_domain + +if is_kvm_cpu(CPUClass) or is_kvm_cpu(FutureClass): + if buildEnv['TARGET_ISA'] == 'x86': + system.kvm_vm = KvmVM() + for process in multiprocesses: + process.useArchPT = True + process.kvmInSE = True + else: + fatal("KvmCPU can only be used in SE mode with x86") + +# Sanity check +if options.fastmem: + if CPUClass != AtomicSimpleCPU: + fatal("Fastmem can only be used with atomic CPU!") + if (options.caches or options.l2cache): + fatal("You cannot use fastmem in combination with caches!") + +if options.simpoint_profile: + if not options.fastmem: + # Atomic CPU checked with fastmem option already + fatal("SimPoint generation should be done with atomic cpu and fastmem") + if np > 1: + fatal("SimPoint generation not supported with more than one CPUs") -if options.cpu_type == "detailed" or options.cpu_type == "inorder": - #check for SMT workload - workloads = options.cmd.split(';') - if len(workloads) > 1: - process = [] - smt_idx = 0 - inputs = [] - outputs = [] - errouts = [] - - if options.input != "": - inputs = options.input.split(';') - if options.output != "": - outputs = options.output.split(';') - if options.errout != "": - errouts = options.errout.split(';') - - for wrkld in workloads: - smt_process = LiveProcess() - smt_process.executable = wrkld - smt_process.cmd = wrkld + " " + options.options - if inputs and inputs[smt_idx]: - smt_process.input = inputs[smt_idx] - if outputs and outputs[smt_idx]: - smt_process.output = outputs[smt_idx] - if errouts and errouts[smt_idx]: - smt_process.errout = errouts[smt_idx] - process += [smt_process, ] - smt_idx += 1 - numThreads = len(workloads) +for i in xrange(np): + if options.smt: + system.cpu[i].workload = multiprocesses + elif len(multiprocesses) == 1: + system.cpu[i].workload = multiprocesses[0] + else: + system.cpu[i].workload = multiprocesses[i] -if options.ruby: - if not (options.cpu_type == "detailed" or options.cpu_type == "timing"): - print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!" - sys.exit(1) + if options.fastmem: + system.cpu[i].fastmem = True -(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options) -CPUClass.clock = '2GHz' -CPUClass.numThreads = numThreads; + if options.simpoint_profile: + system.cpu[i].addSimPointProbe(options.simpoint_interval) -np = options.num_cpus + if options.checker: + system.cpu[i].addCheckerCpu() -system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)], - physmem = PhysicalMemory(range=AddrRange("512MB")), - membus = Bus(), mem_mode = test_mem_mode) + system.cpu[i].createThreads() if options.ruby: - options.use_map = True - Ruby.create_system(options, system) - assert(options.num_cpus == len(system.ruby._cpu_ruby_ports)) - system.system_port = system.ruby._sys_port_proxy.port + if options.cpu_type == "atomic" or options.cpu_type == "AtomicSimpleCPU": + print >> sys.stderr, "Ruby does not work with atomic cpu!!" 
+        sys.exit(1)
+
+    Ruby.create_system(options, False, system)
+    assert(options.num_cpus == len(system.ruby._cpu_ports))
+
+    system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
+                                        voltage_domain = system.voltage_domain)
+    for i in xrange(np):
+        ruby_port = system.ruby._cpu_ports[i]
+
+        # Create the interrupt controller and connect its ports to Ruby
+        # Note that the interrupt controller is always present but only
+        # in x86 does it have message ports that need to be connected
+        system.cpu[i].createInterruptController()
+
+        # Connect the cpu's cache ports to Ruby
+        system.cpu[i].icache_port = ruby_port.slave
+        system.cpu[i].dcache_port = ruby_port.slave
+        if buildEnv['TARGET_ISA'] == 'x86':
+            system.cpu[i].interrupts[0].pio = ruby_port.master
+            system.cpu[i].interrupts[0].int_master = ruby_port.slave
+            system.cpu[i].interrupts[0].int_slave = ruby_port.master
+            system.cpu[i].itb.walker.port = ruby_port.slave
+            system.cpu[i].dtb.walker.port = ruby_port.slave
 else:
-    system.system_port = system.membus.port
-    system.physmem.port = system.membus.port
+    MemClass = Simulation.setMemClass(options)
+    system.membus = SystemXBar()
+    system.system_port = system.membus.slave
     CacheConfig.config_cache(options, system)
-
-for i in xrange(np):
-    system.cpu[i].workload = multiprocesses[i]
-
-    if options.ruby:
-        system.cpu[i].icache_port = system.ruby._cpu_ruby_ports[i].port
-        system.cpu[i].dcache_port = system.ruby._cpu_ruby_ports[i].port
-
-    if options.fastmem:
-        system.cpu[0].physmem_port = system.physmem.port
+    MemConfig.config_mem(options, system)
 
 root = Root(full_system = False, system = system)
-
 Simulation.run(options, root, system, FutureClass)
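
Usage sketch (not part of the patch above): after this change se.py no longer hard-codes a default binary, so a workload must be given with -c/--cmd, and get_processes() splits a ';'-separated --cmd into one LiveProcess per CPU. The gem5 build path (build/X86/gem5.opt) and the location of the bundled hello test binary are assumptions for a default x86 checkout; --caches, -n/--num-cpus and -c/--cmd come from the common Options module the script now imports.

    # single workload, default CPU model, classic caches
    build/X86/gem5.opt configs/example/se.py --caches \
        --cmd=tests/test-progs/hello/bin/x86/linux/hello

    # two single-threaded processes on two CPUs; --cmd is split on ';'
    build/X86/gem5.opt configs/example/se.py -n 2 --caches \
        --cmd="tests/test-progs/hello/bin/x86/linux/hello;tests/test-progs/hello/bin/x86/linux/hello"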