SparcLiveProcess::SparcLiveProcess(LiveProcessParams * params,
- ObjectFile *objFile)
- : LiveProcess(params, objFile)
+ ObjectFile *objFile, Addr _StackBias)
+ : LiveProcess(params, objFile), StackBias(_StackBias)
{
// XXX all the below need to be updated for SPARC - Ali
}
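A note on the new _StackBias constructor argument: under the 64-bit SPARC V9 ABI the stack and frame pointers are biased by 2047, so the real top of the current frame is %sp plus that bias, while the 32-bit ABI uses no bias. Hoisting the constant into the shared base class lets the common argsInit() subtract it in one place when it publishes the stack pointer (see stack_min - StackBias below). A minimal standalone sketch of the relationship, assuming the usual V9 bias of 2047; the helper names here are illustrative and not part of M5:

    #include <cassert>
    #include <cstdint>

    // Hypothetical helper: the value written to the guest's %sp register.
    uint64_t biasedSp(uint64_t stackMin, uint64_t stackBias) {
        return stackMin - stackBias;   // matches setIntReg(StackPointerReg, ...)
    }

    int main() {
        const uint64_t v9Bias = 2047;  // SPARC V9 ABI stack bias
        const uint64_t v8Bias = 0;     // 32-bit ABI: no bias
        uint64_t stack_min = 0x7ffff000;
        // Adding the bias back recovers the real top-of-frame address.
        assert(biasedSp(stack_min, v9Bias) + v9Bias == stack_min);
        assert(biasedSp(stack_min, v8Bias) == stack_min);
        return 0;
    }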
void
-Sparc32LiveProcess::startup()
+SparcLiveProcess::startup()
{
- if (checkpointRestored)
- return;
-
- argsInit(32 / 8, VMPageSize);
+ Process::startup();
//From the SPARC ABI
- //The process runs in user mode with 32 bit addresses
- threadContexts[0]->setMiscReg(MISCREG_PSTATE, 0x0a);
-
//Setup default FP state
threadContexts[0]->setMiscRegNoEffect(MISCREG_FSR, 0);
threadContexts[0]->setMiscRegNoEffect(MISCREG_TICK, 0);
- //
+
/*
* Register window management registers
*/
}
void
-Sparc64LiveProcess::startup()
+Sparc32LiveProcess::startup()
{
if (checkpointRestored)
return;
- argsInit(sizeof(IntReg), VMPageSize);
-
- //From the SPARC ABI
+ SparcLiveProcess::startup();
- //The process runs in user mode
- threadContexts[0]->setMiscReg(MISCREG_PSTATE, 0x02);
+ //The process runs in user mode with 32 bit addresses
+ threadContexts[0]->setMiscReg(MISCREG_PSTATE, 0x0a);
- //Setup default FP state
- threadContexts[0]->setMiscRegNoEffect(MISCREG_FSR, 0);
+ argsInit(32 / 8, VMPageSize);
+}
- threadContexts[0]->setMiscRegNoEffect(MISCREG_TICK, 0);
+void
+Sparc64LiveProcess::startup()
+{
+ if (checkpointRestored)
+ return;
- /*
- * Register window management registers
- */
+ SparcLiveProcess::startup();
- //No windows contain info from other programs
- //threadContexts[0]->setMiscRegNoEffect(MISCREG_OTHERWIN, 0);
- threadContexts[0]->setIntReg(NumIntArchRegs + 6, 0);
- //There are no windows to pop
- //threadContexts[0]->setMiscRegNoEffect(MISCREG_CANRESTORE, 0);
- threadContexts[0]->setIntReg(NumIntArchRegs + 4, 0);
- //All windows are available to save into
- //threadContexts[0]->setMiscRegNoEffect(MISCREG_CANSAVE, NWindows - 2);
- threadContexts[0]->setIntReg(NumIntArchRegs + 3, NWindows - 2);
- //All windows are "clean"
- //threadContexts[0]->setMiscRegNoEffect(MISCREG_CLEANWIN, NWindows);
- threadContexts[0]->setIntReg(NumIntArchRegs + 5, NWindows);
- //Start with register window 0
- threadContexts[0]->setMiscRegNoEffect(MISCREG_CWP, 0);
- //Always use spill and fill traps 0
- //threadContexts[0]->setMiscRegNoEffect(MISCREG_WSTATE, 0);
- threadContexts[0]->setIntReg(NumIntArchRegs + 7, 0);
- //Set the trap level to 0
- threadContexts[0]->setMiscRegNoEffect(MISCREG_TL, 0);
- //Set the ASI register to something fixed
- threadContexts[0]->setMiscRegNoEffect(MISCREG_ASI, ASI_PRIMARY);
+ //The process runs in user mode
+ threadContexts[0]->setMiscReg(MISCREG_PSTATE, 0x02);
- /*
- * T1 specific registers
- */
- //Turn on the icache, dcache, dtb translation, and itb translation.
- threadContexts[0]->setMiscRegNoEffect(MISCREG_MMU_LSU_CTRL, 15);
+ argsInit(sizeof(IntReg), VMPageSize);
}
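The two PSTATE values written above come straight from the SPARC V9 definition of that register: bit 1 is IE (interrupt enable) and bit 3 is AM (32-bit address masking), so 32-bit processes get 0x0a = IE | AM while 64-bit processes get 0x02 = IE alone. A small sketch of that decomposition; the enum assumes the V9 bit layout and is illustrative rather than a quote of M5's own register definitions:

    #include <cassert>
    #include <cstdint>

    // Illustrative PSTATE field masks per the SPARC V9 architecture manual.
    enum PstateBits : uint32_t {
        PSTATE_AG   = 0x01,  // alternate globals
        PSTATE_IE   = 0x02,  // interrupt enable
        PSTATE_PRIV = 0x04,  // privileged mode
        PSTATE_AM   = 0x08,  // mask addresses to 32 bits
        PSTATE_PEF  = 0x10,  // floating point enable
    };

    int main() {
        assert((PSTATE_IE | PSTATE_AM) == 0x0a);  // 32-bit user process
        assert(PSTATE_IE == 0x02);                // 64-bit user process
        return 0;
    }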
-M5_32_auxv_t::M5_32_auxv_t(int32_t type, int32_t val)
+template<class IntType>
+void
+SparcLiveProcess::argsInit(int pageSize)
{
- a_type = TheISA::htog(type);
- a_val = TheISA::htog(val);
-}
+ int intSize = sizeof(IntType);
-M5_64_auxv_t::M5_64_auxv_t(int64_t type, int64_t val)
-{
- a_type = TheISA::htog(type);
- a_val = TheISA::htog(val);
-}
+ typedef M5_auxv_t<IntType> auxv_t;
-void
-Sparc64LiveProcess::argsInit(int intSize, int pageSize)
-{
- typedef M5_64_auxv_t auxv_t;
- Process::startup();
+ std::vector<auxv_t> auxv;
string filename;
    if(argv.size() < 1)
        filename = "";
    else
filename = argv[0];
- Addr alignmentMask = ~(intSize - 1);
+ //Even for a 32 bit process, the ABI says we still need to
+ //maintain double word alignment of the stack pointer.
+ Addr alignmentMask = ~(sizeof(uint64_t) - 1);
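The alignment mask just above works because eight is a power of two: ~(8 - 1) clears the low three address bits, so ANDing any address with it rounds that address down to the next double-word boundary, which is what the stack-layout math below relies on. A tiny self-contained illustration (the example addresses are arbitrary):

    #include <cassert>
    #include <cstdint>

    int main() {
        uint64_t alignmentMask = ~(sizeof(uint64_t) - 1);      // ...fffffff8
        assert((UINT64_C(0x7ffffff5) & alignmentMask) == UINT64_C(0x7ffffff0));
        assert((UINT64_C(0x7ffffff8) & alignmentMask) == UINT64_C(0x7ffffff8));
        return 0;
    }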
// load object file into target memory
objFile->loadSections(initVirtMem);
M5_HWCAP_SPARC_V9 |
M5_HWCAP_SPARC_ULTRA3;
-
    //Set up the auxiliary vectors. These will already have endian conversion.
    //Auxiliary vectors are loaded only for elf formatted executables.
ElfObject * elfObject = dynamic_cast<ElfObject *>(objFile);
//Figure out how big the initial stack needs to be
- // The unaccounted for 0 at the top of the stack
- int mysterious_size = intSize;
-
- //This is the name of the file which is present on the initial stack
- //It's purpose is to let the user space linker examine the original file.
- int file_name_size = filename.size() + 1;
-
- int env_data_size = 0;
- for (int i = 0; i < envp.size(); ++i) {
- env_data_size += envp[i].size() + 1;
- }
- int arg_data_size = 0;
- for (int i = 0; i < argv.size(); ++i) {
- arg_data_size += argv[i].size() + 1;
- }
-
- //The info_block needs to be padded so it's size is a multiple of the
- //alignment mask. Also, it appears that there needs to be at least some
- //padding, so if the size is already a multiple, we need to increase it
- //anyway.
- int info_block_size =
- (file_name_size +
- env_data_size +
- arg_data_size +
- intSize) & alignmentMask;
-
- int info_block_padding =
- info_block_size -
- file_name_size -
- env_data_size -
- arg_data_size;
-
- //Each auxilliary vector is two 8 byte words
- int aux_array_size = intSize * 2 * (auxv.size() + 1);
-
- int envp_array_size = intSize * (envp.size() + 1);
- int argv_array_size = intSize * (argv.size() + 1);
-
- int argc_size = intSize;
- int window_save_size = intSize * 16;
-
- int space_needed =
- mysterious_size +
- info_block_size +
- aux_array_size +
- envp_array_size +
- argv_array_size +
- argc_size +
- window_save_size;
-
- stack_min = stack_base - space_needed;
- stack_min &= alignmentMask;
- stack_size = stack_base - stack_min;
-
- // map memory
- pTable->allocate(roundDown(stack_min, pageSize),
- roundUp(stack_size, pageSize));
-
- // map out initial stack contents
- Addr mysterious_base = stack_base - mysterious_size;
- Addr file_name_base = mysterious_base - file_name_size;
- Addr env_data_base = file_name_base - env_data_size;
- Addr arg_data_base = env_data_base - arg_data_size;
- Addr auxv_array_base = arg_data_base - aux_array_size - info_block_padding;
- Addr envp_array_base = auxv_array_base - envp_array_size;
- Addr argv_array_base = envp_array_base - argv_array_size;
- Addr argc_base = argv_array_base - argc_size;
-#ifndef NDEBUG
- // only used in DPRINTF
- Addr window_save_base = argc_base - window_save_size;
-#endif
-
- DPRINTF(Sparc, "The addresses of items on the initial stack:\n");
- DPRINTF(Sparc, "0x%x - file name\n", file_name_base);
- DPRINTF(Sparc, "0x%x - env data\n", env_data_base);
- DPRINTF(Sparc, "0x%x - arg data\n", arg_data_base);
- DPRINTF(Sparc, "0x%x - auxv array\n", auxv_array_base);
- DPRINTF(Sparc, "0x%x - envp array\n", envp_array_base);
- DPRINTF(Sparc, "0x%x - argv array\n", argv_array_base);
- DPRINTF(Sparc, "0x%x - argc \n", argc_base);
- DPRINTF(Sparc, "0x%x - window save\n", window_save_base);
- DPRINTF(Sparc, "0x%x - stack min\n", stack_min);
-
- // write contents to stack
-
- // figure out argc
- uint64_t argc = argv.size();
- uint64_t guestArgc = TheISA::htog(argc);
-
- //Write out the mysterious 0
- uint64_t mysterious_zero = 0;
- initVirtMem->writeBlob(mysterious_base,
- (uint8_t*)&mysterious_zero, mysterious_size);
-
- //Write the file name
- initVirtMem->writeString(file_name_base, filename.c_str());
-
- //Copy the aux stuff
- for(int x = 0; x < auxv.size(); x++)
- {
- initVirtMem->writeBlob(auxv_array_base + x * 2 * intSize,
- (uint8_t*)&(auxv[x].a_type), intSize);
- initVirtMem->writeBlob(auxv_array_base + (x * 2 + 1) * intSize,
- (uint8_t*)&(auxv[x].a_val), intSize);
- }
- //Write out the terminating zeroed auxilliary vector
- const uint64_t zero = 0;
- initVirtMem->writeBlob(auxv_array_base + 2 * intSize * auxv.size(),
- (uint8_t*)&zero, 2 * intSize);
-
- copyStringArray(envp, envp_array_base, env_data_base, initVirtMem);
- copyStringArray(argv, argv_array_base, arg_data_base, initVirtMem);
-
- initVirtMem->writeBlob(argc_base, (uint8_t*)&guestArgc, intSize);
-
- //Stuff the trap handlers into the processes address space.
- //Since the stack grows down and is the highest area in the processes
- //address space, we can put stuff above it and stay out of the way.
- int fillSize = sizeof(MachInst) * numFillInsts;
- int spillSize = sizeof(MachInst) * numSpillInsts;
- fillStart = stack_base;
- spillStart = fillStart + fillSize;
- initVirtMem->writeBlob(fillStart, (uint8_t*)fillHandler64, fillSize);
- initVirtMem->writeBlob(spillStart, (uint8_t*)spillHandler64, spillSize);
-
- //Set up the thread context to start running the process
- assert(NumArgumentRegs >= 2);
- threadContexts[0]->setIntReg(ArgumentReg[0], argc);
- threadContexts[0]->setIntReg(ArgumentReg[1], argv_array_base);
- threadContexts[0]->setIntReg(StackPointerReg, stack_min - StackBias);
-
- // %g1 is a pointer to a function that should be run at exit. Since we
- // don't have anything like that, it should be set to 0.
- threadContexts[0]->setIntReg(1, 0);
-
- Addr prog_entry = objFile->entryPoint();
- threadContexts[0]->setPC(prog_entry);
- threadContexts[0]->setNextPC(prog_entry + sizeof(MachInst));
- threadContexts[0]->setNextNPC(prog_entry + (2 * sizeof(MachInst)));
-
- //Align the "stack_min" to a page boundary.
- stack_min = roundDown(stack_min, pageSize);
-
-// num_processes++;
-}
-
-void
-Sparc32LiveProcess::argsInit(int intSize, int pageSize)
-{
- typedef M5_32_auxv_t auxv_t;
- Process::startup();
-
- string filename;
- if(argv.size() < 1)
- filename = "";
- else
- filename = argv[0];
-
- //Even though this is a 32 bit process, the ABI says we still need to
- //maintain double word alignment of the stack pointer.
- Addr alignmentMask = ~(8 - 1);
-
- // load object file into target memory
- objFile->loadSections(initVirtMem);
-
- //These are the auxilliary vector types
- enum auxTypes
- {
- SPARC_AT_HWCAP = 16,
- SPARC_AT_PAGESZ = 6,
- SPARC_AT_CLKTCK = 17,
- SPARC_AT_PHDR = 3,
- SPARC_AT_PHENT = 4,
- SPARC_AT_PHNUM = 5,
- SPARC_AT_BASE = 7,
- SPARC_AT_FLAGS = 8,
- SPARC_AT_ENTRY = 9,
- SPARC_AT_UID = 11,
- SPARC_AT_EUID = 12,
- SPARC_AT_GID = 13,
- SPARC_AT_EGID = 14,
- SPARC_AT_SECURE = 23
- };
-
- enum hardwareCaps
- {
- M5_HWCAP_SPARC_FLUSH = 1,
- M5_HWCAP_SPARC_STBAR = 2,
- M5_HWCAP_SPARC_SWAP = 4,
- M5_HWCAP_SPARC_MULDIV = 8,
- M5_HWCAP_SPARC_V9 = 16,
- //This one should technically only be set
- //if there is a cheetah or cheetah_plus tlb,
- //but we'll use it all the time
- M5_HWCAP_SPARC_ULTRA3 = 32
- };
-
- const int64_t hwcap =
- M5_HWCAP_SPARC_FLUSH |
- M5_HWCAP_SPARC_STBAR |
- M5_HWCAP_SPARC_SWAP |
- M5_HWCAP_SPARC_MULDIV |
- M5_HWCAP_SPARC_V9 |
- M5_HWCAP_SPARC_ULTRA3;
-
-
- //Setup the auxilliary vectors. These will already have endian conversion.
- //Auxilliary vectors are loaded only for elf formatted executables.
- ElfObject * elfObject = dynamic_cast<ElfObject *>(objFile);
- if(elfObject)
- {
- //Bits which describe the system hardware capabilities
- auxv.push_back(auxv_t(SPARC_AT_HWCAP, hwcap));
- //The system page size
- auxv.push_back(auxv_t(SPARC_AT_PAGESZ, SparcISA::VMPageSize));
- //Defined to be 100 in the kernel source.
- //Frequency at which times() increments
- auxv.push_back(auxv_t(SPARC_AT_CLKTCK, 100));
- // For statically linked executables, this is the virtual address of the
- // program header tables if they appear in the executable image
- auxv.push_back(auxv_t(SPARC_AT_PHDR, elfObject->programHeaderTable()));
- // This is the size of a program header entry from the elf file.
- auxv.push_back(auxv_t(SPARC_AT_PHENT, elfObject->programHeaderSize()));
- // This is the number of program headers from the original elf file.
- auxv.push_back(auxv_t(SPARC_AT_PHNUM, elfObject->programHeaderCount()));
- //This is the address of the elf "interpreter", It should be set
- //to 0 for regular executables. It should be something else
- //(not sure what) for dynamic libraries.
- auxv.push_back(auxv_t(SPARC_AT_BASE, 0));
- //This is hardwired to 0 in the elf loading code in the kernel
- auxv.push_back(auxv_t(SPARC_AT_FLAGS, 0));
- //The entry point to the program
- auxv.push_back(auxv_t(SPARC_AT_ENTRY, objFile->entryPoint()));
- //Different user and group IDs
- auxv.push_back(auxv_t(SPARC_AT_UID, uid()));
- auxv.push_back(auxv_t(SPARC_AT_EUID, euid()));
- auxv.push_back(auxv_t(SPARC_AT_GID, gid()));
- auxv.push_back(auxv_t(SPARC_AT_EGID, egid()));
- //Whether to enable "secure mode" in the executable
- auxv.push_back(auxv_t(SPARC_AT_SECURE, 0));
- }
-
- //Figure out how big the initial stack needs to be
-
// The unaccounted for 8 byte 0 at the top of the stack
int mysterious_size = 8;
env_data_size +
arg_data_size + intSize);
- //Each auxilliary vector is two 4 byte words
+    //Each auxiliary vector is two words
int aux_array_size = intSize * 2 * (auxv.size() + 1);
int envp_array_size = intSize * (envp.size() + 1);
stack_min &= alignmentMask;
stack_size = stack_base - stack_min;
- // map memory
+ // Allocate space for the stack
pTable->allocate(roundDown(stack_min, pageSize),
roundUp(stack_size, pageSize));
// map out initial stack contents
- uint32_t window_save_base = stack_min;
- uint32_t argc_base = window_save_base + window_save_size;
- uint32_t argv_array_base = argc_base + argc_size;
- uint32_t envp_array_base = argv_array_base + argv_array_size;
- uint32_t auxv_array_base = envp_array_base + envp_array_size;
+ IntType window_save_base = stack_min;
+ IntType argc_base = window_save_base + window_save_size;
+ IntType argv_array_base = argc_base + argc_size;
+ IntType envp_array_base = argv_array_base + argv_array_size;
+ IntType auxv_array_base = envp_array_base + envp_array_size;
//The info block is pushed up against the top of the stack, while
    //the rest of the initial stack frame is aligned to an 8 byte boundary.
- uint32_t arg_data_base = stack_base - info_block_size + intSize;
- uint32_t env_data_base = arg_data_base + arg_data_size;
- uint32_t file_name_base = env_data_base + env_data_size;
- uint32_t mysterious_base = file_name_base + file_name_size;
+ IntType arg_data_base = stack_base - info_block_size + intSize;
+ IntType env_data_base = arg_data_base + arg_data_size;
+ IntType file_name_base = env_data_base + env_data_size;
+ IntType mysterious_base = file_name_base + file_name_size;
DPRINTF(Sparc, "The addresses of items on the initial stack:\n");
- DPRINTF(Sparc, "0x%x - file name\n", file_name_base);
- DPRINTF(Sparc, "0x%x - env data\n", env_data_base);
- DPRINTF(Sparc, "0x%x - arg data\n", arg_data_base);
- DPRINTF(Sparc, "0x%x - auxv array\n", auxv_array_base);
- DPRINTF(Sparc, "0x%x - envp array\n", envp_array_base);
- DPRINTF(Sparc, "0x%x - argv array\n", argv_array_base);
- DPRINTF(Sparc, "0x%x - argc \n", argc_base);
- DPRINTF(Sparc, "0x%x - window save\n", window_save_base);
- DPRINTF(Sparc, "0x%x - stack min\n", stack_min);
+ DPRINTF(Sparc, "%#x - file name\n", file_name_base);
+ DPRINTF(Sparc, "%#x - env data\n", env_data_base);
+ DPRINTF(Sparc, "%#x - arg data\n", arg_data_base);
+ DPRINTF(Sparc, "%#x - auxv array\n", auxv_array_base);
+ DPRINTF(Sparc, "%#x - envp array\n", envp_array_base);
+ DPRINTF(Sparc, "%#x - argv array\n", argv_array_base);
+ DPRINTF(Sparc, "%#x - argc \n", argc_base);
+ DPRINTF(Sparc, "%#x - window save\n", window_save_base);
+ DPRINTF(Sparc, "%#x - stack min\n", stack_min);
// write contents to stack
// figure out argc
- uint32_t argc = argv.size();
- uint32_t guestArgc = TheISA::htog(argc);
+ IntType argc = argv.size();
+ IntType guestArgc = TheISA::htog(argc);
//Write out the mysterious 0
uint64_t mysterious_zero = 0;
initVirtMem->writeBlob(auxv_array_base + (x * 2 + 1) * intSize,
(uint8_t*)&(auxv[x].a_val), intSize);
}
+
    //Write out the terminating zeroed auxiliary vector
- const uint64_t zero = 0;
+ const IntType zero = 0;
initVirtMem->writeBlob(auxv_array_base + 2 * intSize * auxv.size(),
(uint8_t*)&zero, 2 * intSize);
initVirtMem->writeBlob(argc_base, (uint8_t*)&guestArgc, intSize);
- //Stuff the trap handlers into the processes address space.
- //Since the stack grows down and is the highest area in the processes
- //address space, we can put stuff above it and stay out of the way.
- int fillSize = sizeof(MachInst) * numFillInsts;
- int spillSize = sizeof(MachInst) * numSpillInsts;
+    //Reserve space for the trap handlers in the process's address space.
+    //Since the stack grows down and there is reserved address space above
+    //it, we can put stuff above it and stay out of the way.
fillStart = stack_base;
- spillStart = fillStart + fillSize;
- initVirtMem->writeBlob(fillStart, (uint8_t*)fillHandler32, fillSize);
- initVirtMem->writeBlob(spillStart, (uint8_t*)spillHandler32, spillSize);
+ spillStart = fillStart + sizeof(MachInst) * numFillInsts;
//Set up the thread context to start running the process
//assert(NumArgumentRegs >= 2);
//threadContexts[0]->setIntReg(ArgumentReg[0], argc);
//threadContexts[0]->setIntReg(ArgumentReg[1], argv_array_base);
- threadContexts[0]->setIntReg(StackPointerReg, stack_min);
+ threadContexts[0]->setIntReg(StackPointerReg, stack_min - StackBias);
// %g1 is a pointer to a function that should be run at exit. Since we
// don't have anything like that, it should be set to 0.
threadContexts[0]->setIntReg(1, 0);
- uint32_t prog_entry = objFile->entryPoint();
+ Addr prog_entry = objFile->entryPoint();
threadContexts[0]->setPC(prog_entry);
threadContexts[0]->setNextPC(prog_entry + sizeof(MachInst));
threadContexts[0]->setNextNPC(prog_entry + (2 * sizeof(MachInst)));
// num_processes++;
}
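For orientation, this is the initial stack image the templated argsInit() above lays out, growing downward from stack_base; the 16-word save area at the bottom is where a register window spill for the initial frame would land, and %sp is finally published as stack_min minus the stack bias. This only summarizes the address arithmetic above, it adds no new behaviour:

    //  stack_base ->  8-byte "mysterious" zero word
    //                 file name string (NUL terminated)
    //                 environment strings
    //                 argument strings
    //                 padding up to an 8-byte boundary
    //                 auxv entries, then a zeroed terminator pair
    //                 envp pointer array, NULL terminated
    //                 argv pointer array, NULL terminated
    //                 argc
    //  stack_min ->   16-word register window save area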
+void
+Sparc64LiveProcess::argsInit(int intSize, int pageSize)
+{
+ SparcLiveProcess::argsInit<uint64_t>(pageSize);
+
+    // Stuff the trap handlers into the process's address space
+ initVirtMem->writeBlob(fillStart,
+ (uint8_t*)fillHandler64, sizeof(MachInst) * numFillInsts);
+ initVirtMem->writeBlob(spillStart,
+ (uint8_t*)spillHandler64, sizeof(MachInst) * numSpillInsts);
+}
+
+void
+Sparc32LiveProcess::argsInit(int intSize, int pageSize)
+{
+ SparcLiveProcess::argsInit<uint32_t>(pageSize);
+
+    // Stuff the trap handlers into the process's address space
+ initVirtMem->writeBlob(fillStart,
+ (uint8_t*)fillHandler32, sizeof(MachInst) * numFillInsts);
+ initVirtMem->writeBlob(spillStart,
+ (uint8_t*)spillHandler32, sizeof(MachInst) * numSpillInsts);
+}
+
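The two wrappers above are where the width-specific behaviour comes back in: the shared template only records fillStart and spillStart, and each subclass then copies its own encoding of the fill/spill trap handlers into that reserved region. A stripped-down sketch of the same shape; the class and member names are illustrative stand-ins, not the real M5 interfaces:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct GuestMem {                                  // stand-in for initVirtMem
        std::vector<uint8_t> bytes = std::vector<uint8_t>(1 << 20);
        void writeBlob(uint64_t addr, const uint8_t *src, size_t len) {
            std::memcpy(&bytes[addr], src, len);
        }
    };

    struct BaseProc {
        GuestMem mem;
        uint64_t fillStart = 0, spillStart = 0;
        template <class IntType>
        void argsInit(uint64_t stackBase, size_t numFillInsts) {
            // IntType would drive the pointer/word sizes in the real layout code.
            fillStart = stackBase;
            spillStart = fillStart + sizeof(uint32_t) * numFillInsts;
        }
    };

    struct Proc64 : BaseProc {
        void argsInit(uint64_t stackBase) {
            BaseProc::argsInit<uint64_t>(stackBase, 2);
            static const uint32_t fill64[2] = { 0x01000000, 0x01000000 };  // nops
            mem.writeBlob(fillStart, (const uint8_t *)fill64, sizeof(fill64));
        }
    };

    int main() { Proc64 p; p.argsInit(0x10000); return 0; }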
void Sparc32LiveProcess::flushWindows(ThreadContext *tc)
{
IntReg Cansave = tc->readIntReg(NumIntArchRegs + 3);
#include <string>
#include <vector>
+#include "sim/byteswap.hh"
#include "sim/process.hh"
class ObjectFile;
{
protected:
+ const Addr StackBias;
+
//The locations of the fill and spill handlers
Addr fillStart, spillStart;
- SparcLiveProcess(LiveProcessParams * params, ObjectFile *objFile);
+ SparcLiveProcess(LiveProcessParams * params,
+ ObjectFile *objFile, Addr _StackBias);
+
+ void startup();
+
+ template<class IntType>
+ void argsInit(int pageSize);
public:
virtual void flushWindows(ThreadContext *tc) = 0;
};
-struct M5_32_auxv_t
+template<class IntType>
+struct M5_auxv_t
{
- int32_t a_type;
+ IntType a_type;
union {
- int32_t a_val;
- int32_t a_ptr;
- int32_t a_fcn;
+ IntType a_val;
+ IntType a_ptr;
+ IntType a_fcn;
};
- M5_32_auxv_t()
+ M5_auxv_t()
{}
- M5_32_auxv_t(int32_t type, int32_t val);
+ M5_auxv_t(IntType type, IntType val)
+ {
+ a_type = SparcISA::htog(type);
+ a_val = SparcISA::htog(val);
+ }
};
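Folding M5_32_auxv_t and M5_64_auxv_t into one template removes the duplicated constructors; the only per-width difference was the integer type, and htog() still converts each field into the guest's byte order at construction time. A standalone sketch of the same idea, using a hand-rolled byte swap in place of SparcISA::htog (which is not available outside the simulator) and assuming a little-endian host building for big-endian SPARC; the names below are illustrative and the struct drops the real union for brevity:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Stand-in for htog on a little-endian host targeting big-endian SPARC.
    template <class T>
    T htogStandIn(T v) {
        T out = 0;
        for (size_t i = 0; i < sizeof(T); ++i)
            out = (out << 8) | ((v >> (8 * i)) & 0xff);   // reverse byte order
        return out;
    }

    // Simplified counterpart of M5_auxv_t<IntType>.
    template <class IntType>
    struct AuxvEntry {
        IntType a_type;
        IntType a_val;
        AuxvEntry(IntType type, IntType val)
            : a_type(htogStandIn(type)), a_val(htogStandIn(val)) {}
    };

    int main() {
        AuxvEntry<uint32_t> e32(6 /* AT_PAGESZ */, 8192);
        AuxvEntry<uint64_t> e64(6 /* AT_PAGESZ */, 8192);
        assert(e32.a_type == htogStandIn<uint32_t>(6));
        assert(e64.a_val == htogStandIn<uint64_t>(8192));
        assert(sizeof(e32) == 8 && sizeof(e64) == 16);
        return 0;
    }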
class Sparc32LiveProcess : public SparcLiveProcess
{
protected:
- std::vector<M5_32_auxv_t> auxv;
-
Sparc32LiveProcess(LiveProcessParams * params, ObjectFile *objFile) :
- SparcLiveProcess(params, objFile)
+ SparcLiveProcess(params, objFile, 0)
{
// Set up stack. On SPARC Linux, stack goes from the top of memory
// downward, less the hole for the kernel address space.
void flushWindows(ThreadContext *tc);
};
-struct M5_64_auxv_t
-{
- int64_t a_type;
- union {
- int64_t a_val;
- int64_t a_ptr;
- int64_t a_fcn;
- };
-
- M5_64_auxv_t()
- {}
-
- M5_64_auxv_t(int64_t type, int64_t val);
-};
-
class Sparc64LiveProcess : public SparcLiveProcess
{
protected:
- static const Addr StackBias = 2047;
-
- std::vector<M5_64_auxv_t> auxv;
-
Sparc64LiveProcess(LiveProcessParams * params, ObjectFile *objFile) :
- SparcLiveProcess(params, objFile)
+ SparcLiveProcess(params, objFile, 2047)
{
// Set up stack. On SPARC Linux, stack goes from the top of memory
// downward, less the hole for the kernel address space.
executable=/dist/m5/cpu2000/binaries/sparc/linux/gzip
gid=100
input=cin
+max_stack_size=67108864
output=cout
pid=100
ppid=99
global.BPredUnit.condPredicted 256528366 # Number of conditional branches predicted
global.BPredUnit.lookups 256528366 # Number of BP lookups
global.BPredUnit.usedRAS 0 # Number of times the RAS was used to get a target.
-host_inst_rate 101903 # Simulator instruction rate (inst/s)
-host_mem_usage 202864 # Number of bytes of host memory used
-host_seconds 13793.57 # Real time elapsed on the host
-host_tick_rate 79564409 # Simulator tick rate (ticks/s)
+host_inst_rate 135731 # Simulator instruction rate (inst/s)
+host_mem_usage 184868 # Number of bytes of host memory used
+host_seconds 10355.84 # Real time elapsed on the host
+host_tick_rate 105976601 # Simulator tick rate (ticks/s)
memdepunit.memDep.conflictingLoads 458856790 # Number of conflicting loads.
memdepunit.memDep.conflictingStores 141228058 # Number of conflicting stores.
memdepunit.memDep.insertedLoads 745627925 # Number of loads inserted to the mem dependence unit.
+0: system.remote_gdb.listener: listening for remote gdb on port 7001
warn: Entering event queue @ 0. Starting simulation...
-warn: Ignoring request to flush register windows.
All Rights Reserved
-M5 compiled Aug 27 2007 13:10:11
-M5 started Mon Aug 27 13:40:27 2007
+M5 compiled Nov 28 2007 15:13:45
+M5 started Wed Nov 28 15:13:46 2007
M5 executing on nacho
command line: build/SPARC_SE/m5.fast -d build/SPARC_SE/tests/fast/long/00.gzip/sparc/linux/o3-timing tests/run.py long/00.gzip/sparc/linux/o3-timing
Global frequency set at 1000000000000 ticks per second
executable=/dist/m5/cpu2000/binaries/sparc/linux/gzip
gid=100
input=cin
+max_stack_size=67108864
output=cout
pid=100
ppid=99
---------- Begin Simulation Statistics ----------
-host_inst_rate 1870525 # Simulator instruction rate (inst/s)
-host_mem_usage 176480 # Number of bytes of host memory used
-host_seconds 796.31 # Real time elapsed on the host
-host_tick_rate 935265227 # Simulator tick rate (ticks/s)
+host_inst_rate 3593860 # Simulator instruction rate (inst/s)
+host_mem_usage 176592 # Number of bytes of host memory used
+host_seconds 414.46 # Real time elapsed on the host
+host_tick_rate 1796934585 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 1489514761 # Number of instructions simulated
sim_seconds 0.744760 # Number of seconds simulated
+0: system.remote_gdb.listener: listening for remote gdb on port 7000
warn: Entering event queue @ 0. Starting simulation...
-warn: Ignoring request to flush register windows.
All Rights Reserved
-M5 compiled Aug 14 2007 22:48:17
-M5 started Tue Aug 14 22:51:35 2007
+M5 compiled Nov 28 2007 15:13:45
+M5 started Wed Nov 28 15:13:46 2007
M5 executing on nacho
command line: build/SPARC_SE/m5.fast -d build/SPARC_SE/tests/fast/long/00.gzip/sparc/linux/simple-atomic tests/run.py long/00.gzip/sparc/linux/simple-atomic
Global frequency set at 1000000000000 ticks per second
executable=/dist/m5/cpu2000/binaries/sparc/linux/gzip
gid=100
input=cin
+max_stack_size=67108864
output=cout
pid=100
ppid=99
---------- Begin Simulation Statistics ----------
-host_inst_rate 1001521 # Simulator instruction rate (inst/s)
-host_mem_usage 201940 # Number of bytes of host memory used
-host_seconds 1487.25 # Real time elapsed on the host
-host_tick_rate 1392419330 # Simulator tick rate (ticks/s)
+host_inst_rate 2062336 # Simulator instruction rate (inst/s)
+host_mem_usage 183952 # Number of bytes of host memory used
+host_seconds 722.25 # Real time elapsed on the host
+host_tick_rate 2867275090 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 1489514761 # Number of instructions simulated
sim_seconds 2.070880 # Number of seconds simulated
+0: system.remote_gdb.listener: listening for remote gdb on port 7002
warn: Entering event queue @ 0. Starting simulation...
-warn: Ignoring request to flush register windows.
All Rights Reserved
-M5 compiled Aug 14 2007 22:48:17
-M5 started Tue Aug 14 23:04:52 2007
+M5 compiled Nov 28 2007 15:13:45
+M5 started Wed Nov 28 15:13:46 2007
M5 executing on nacho
command line: build/SPARC_SE/m5.fast -d build/SPARC_SE/tests/fast/long/00.gzip/sparc/linux/simple-timing tests/run.py long/00.gzip/sparc/linux/simple-timing
Global frequency set at 1000000000000 ticks per second
executable=/dist/m5/cpu2000/binaries/sparc/linux/mcf
gid=100
input=/dist/m5/cpu2000/data/mcf/smred/input/mcf.in
+max_stack_size=67108864
output=cout
pid=100
ppid=99
type=PhysicalMemory
file=
latency=1
-range=0:134217727
+range=0:268435455
zero=false
port=system.membus.port[0]
---------- Begin Simulation Statistics ----------
-host_inst_rate 1759086 # Simulator instruction rate (inst/s)
-host_mem_usage 176892 # Number of bytes of host memory used
-host_seconds 138.61 # Real time elapsed on the host
-host_tick_rate 881692154 # Simulator tick rate (ticks/s)
+host_inst_rate 3488380 # Simulator instruction rate (inst/s)
+host_mem_usage 308772 # Number of bytes of host memory used
+host_seconds 69.90 # Real time elapsed on the host
+host_tick_rate 1748449689 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 243829010 # Number of instructions simulated
sim_seconds 0.122213 # Number of seconds simulated
+0: system.remote_gdb.listener: listening for remote gdb on port 7005
warn: Entering event queue @ 0. Starting simulation...
-warn: Ignoring request to flush register windows.
All Rights Reserved
-M5 compiled Aug 14 2007 22:48:17
-M5 started Tue Aug 14 23:27:01 2007
+M5 compiled Nov 28 2007 15:13:45
+M5 started Wed Nov 28 15:13:46 2007
M5 executing on nacho
command line: build/SPARC_SE/m5.fast -d build/SPARC_SE/tests/fast/long/10.mcf/sparc/linux/simple-atomic tests/run.py long/10.mcf/sparc/linux/simple-atomic
Global frequency set at 1000000000000 ticks per second
executable=/dist/m5/cpu2000/binaries/sparc/linux/mcf
gid=100
input=/dist/m5/cpu2000/data/mcf/smred/input/mcf.in
+max_stack_size=67108864
output=cout
pid=100
ppid=99
type=PhysicalMemory
file=
latency=1
-range=0:134217727
+range=0:268435455
zero=false
port=system.membus.port[0]
---------- Begin Simulation Statistics ----------
-host_inst_rate 981553 # Simulator instruction rate (inst/s)
-host_mem_usage 203224 # Number of bytes of host memory used
-host_seconds 248.41 # Real time elapsed on the host
-host_tick_rate 1462749007 # Simulator tick rate (ticks/s)
+host_inst_rate 1898653 # Simulator instruction rate (inst/s)
+host_mem_usage 316136 # Number of bytes of host memory used
+host_seconds 128.42 # Real time elapsed on the host
+host_tick_rate 2829445602 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 243829010 # Number of instructions simulated
sim_seconds 0.363364 # Number of seconds simulated
+0: system.remote_gdb.listener: listening for remote gdb on port 7006
warn: Entering event queue @ 0. Starting simulation...
-warn: Ignoring request to flush register windows.
All Rights Reserved
-M5 compiled Aug 14 2007 22:48:17
-M5 started Tue Aug 14 23:29:20 2007
+M5 compiled Nov 28 2007 15:13:45
+M5 started Wed Nov 28 15:13:46 2007
M5 executing on nacho
command line: build/SPARC_SE/m5.fast -d build/SPARC_SE/tests/fast/long/10.mcf/sparc/linux/simple-timing tests/run.py long/10.mcf/sparc/linux/simple-timing
Global frequency set at 1000000000000 ticks per second
executable=/dist/m5/cpu2000/binaries/sparc/linux/vortex
gid=100
input=cin
+max_stack_size=67108864
output=cout
pid=100
ppid=99
---------- Begin Simulation Statistics ----------
-host_inst_rate 1682182 # Simulator instruction rate (inst/s)
-host_mem_usage 185356 # Number of bytes of host memory used
-host_seconds 80.93 # Real time elapsed on the host
-host_tick_rate 842064489 # Simulator tick rate (ticks/s)
+host_inst_rate 3238071 # Simulator instruction rate (inst/s)
+host_mem_usage 185492 # Number of bytes of host memory used
+host_seconds 42.04 # Real time elapsed on the host
+host_tick_rate 1620906753 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 136141055 # Number of instructions simulated
sim_seconds 0.068150 # Number of seconds simulated
+0: system.remote_gdb.listener: listening for remote gdb on port 7004
warn: Entering event queue @ 0. Starting simulation...
-warn: Ignoring request to flush register windows.
warn: ignoring syscall time(4026527856, 4026528256, ...)
warn: ignoring syscall time(4026527408, 1375098, ...)
warn: ignoring syscall time(4026527320, 1, ...)
All Rights Reserved
-M5 compiled Aug 14 2007 22:48:17
-M5 started Tue Aug 14 23:33:10 2007
+M5 compiled Nov 28 2007 15:13:45
+M5 started Wed Nov 28 15:13:46 2007
M5 executing on nacho
command line: build/SPARC_SE/m5.fast -d build/SPARC_SE/tests/fast/long/50.vortex/sparc/linux/simple-atomic tests/run.py long/50.vortex/sparc/linux/simple-atomic
Global frequency set at 1000000000000 ticks per second
executable=/dist/m5/cpu2000/binaries/sparc/linux/vortex
gid=100
input=cin
+max_stack_size=67108864
output=cout
pid=100
ppid=99
---------- Begin Simulation Statistics ----------
-host_inst_rate 941673 # Simulator instruction rate (inst/s)
-host_mem_usage 210848 # Number of bytes of host memory used
-host_seconds 144.57 # Real time elapsed on the host
-host_tick_rate 1385565564 # Simulator tick rate (ticks/s)
+host_inst_rate 1846845 # Simulator instruction rate (inst/s)
+host_mem_usage 192856 # Number of bytes of host memory used
+host_seconds 73.72 # Real time elapsed on the host
+host_tick_rate 2717419538 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 136141055 # Number of instructions simulated
sim_seconds 0.200317 # Number of seconds simulated
+0: system.remote_gdb.listener: listening for remote gdb on port 7003
warn: Entering event queue @ 0. Starting simulation...
-warn: Ignoring request to flush register windows.
warn: ignoring syscall time(4026527856, 4026528256, ...)
warn: ignoring syscall time(4026527408, 1375098, ...)
warn: ignoring syscall time(4026527320, 1, ...)
All Rights Reserved
-M5 compiled Aug 14 2007 22:48:17
-M5 started Tue Aug 14 23:34:32 2007
+M5 compiled Nov 28 2007 15:13:45
+M5 started Wed Nov 28 15:13:46 2007
M5 executing on nacho
command line: build/SPARC_SE/m5.fast -d build/SPARC_SE/tests/fast/long/50.vortex/sparc/linux/simple-timing tests/run.py long/50.vortex/sparc/linux/simple-timing
Global frequency set at 1000000000000 ticks per second
executable=/dist/m5/cpu2000/binaries/sparc/linux/twolf
gid=100
input=cin
+max_stack_size=67108864
output=cout
pid=100
ppid=99
---------- Begin Simulation Statistics ----------
-host_inst_rate 1618953 # Simulator instruction rate (inst/s)
-host_mem_usage 181044 # Number of bytes of host memory used
-host_seconds 119.48 # Real time elapsed on the host
-host_tick_rate 809478979 # Simulator tick rate (ticks/s)
+host_inst_rate 2409312 # Simulator instruction rate (inst/s)
+host_mem_usage 181120 # Number of bytes of host memory used
+host_seconds 80.29 # Real time elapsed on the host
+host_tick_rate 1204659062 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 193435005 # Number of instructions simulated
sim_seconds 0.096718 # Number of seconds simulated
+0: system.remote_gdb.listener: listening for remote gdb on port 7007
warn: Entering event queue @ 0. Starting simulation...
-warn: Ignoring request to flush register windows.
warn: Increasing stack size by one page.
All Rights Reserved
-M5 compiled Aug 14 2007 22:48:17
-M5 started Tue Aug 14 23:36:54 2007
+M5 compiled Nov 28 2007 15:13:45
+M5 started Wed Nov 28 15:13:46 2007
M5 executing on nacho
command line: build/SPARC_SE/m5.fast -d build/SPARC_SE/tests/fast/long/70.twolf/sparc/linux/simple-atomic tests/run.py long/70.twolf/sparc/linux/simple-atomic
-Couldn't unlink build/SPARC_SE/tests/fast/long/70.twolf/sparc/linux/simple-atomic/smred.sav
-Couldn't unlink build/SPARC_SE/tests/fast/long/70.twolf/sparc/linux/simple-atomic/smred.sv2
Global frequency set at 1000000000000 ticks per second
Exiting @ tick 96718067000 because target called exit()
executable=/dist/m5/cpu2000/binaries/sparc/linux/twolf
gid=100
input=cin
+max_stack_size=67108864
output=cout
pid=100
ppid=99
---------- Begin Simulation Statistics ----------
-host_inst_rate 958305 # Simulator instruction rate (inst/s)
-host_mem_usage 206472 # Number of bytes of host memory used
-host_seconds 201.85 # Real time elapsed on the host
-host_tick_rate 1339588721 # Simulator tick rate (ticks/s)
+host_inst_rate 1633041 # Simulator instruction rate (inst/s)
+host_mem_usage 188484 # Number of bytes of host memory used
+host_seconds 118.45 # Real time elapsed on the host
+host_tick_rate 2282781107 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 193435005 # Number of instructions simulated
sim_seconds 0.270398 # Number of seconds simulated
+0: system.remote_gdb.listener: listening for remote gdb on port 7008
warn: Entering event queue @ 0. Starting simulation...
-warn: Ignoring request to flush register windows.
warn: Increasing stack size by one page.
All Rights Reserved
-M5 compiled Aug 14 2007 22:48:17
-M5 started Tue Aug 14 23:38:54 2007
+M5 compiled Nov 28 2007 15:13:45
+M5 started Wed Nov 28 15:13:46 2007
M5 executing on nacho
command line: build/SPARC_SE/m5.fast -d build/SPARC_SE/tests/fast/long/70.twolf/sparc/linux/simple-timing tests/run.py long/70.twolf/sparc/linux/simple-timing
-Couldn't unlink build/SPARC_SE/tests/fast/long/70.twolf/sparc/linux/simple-timing/smred.sav
-Couldn't unlink build/SPARC_SE/tests/fast/long/70.twolf/sparc/linux/simple-timing/smred.sv2
Global frequency set at 1000000000000 ticks per second
Exiting @ tick 270397899000 because target called exit()
executable=/dist/m5/regression/test-progs/hello/bin/sparc/linux/hello
gid=100
input=cin
+max_stack_size=67108864
output=cout
pid=100
ppid=99
---------- Begin Simulation Statistics ----------
-host_inst_rate 5187 # Simulator instruction rate (inst/s)
-host_mem_usage 173740 # Number of bytes of host memory used
-host_seconds 0.93 # Real time elapsed on the host
-host_tick_rate 2625893 # Simulator tick rate (ticks/s)
+host_inst_rate 1203 # Simulator instruction rate (inst/s)
+host_mem_usage 173832 # Number of bytes of host memory used
+host_seconds 4.02 # Real time elapsed on the host
+host_tick_rate 609327 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 4833 # Number of instructions simulated
sim_seconds 0.000002 # Number of seconds simulated
+0: system.remote_gdb.listener: listening for remote gdb on port 7003
warn: Entering event queue @ 0. Starting simulation...
All Rights Reserved
-M5 compiled Aug 14 2007 22:08:21
-M5 started Tue Aug 14 22:08:22 2007
+M5 compiled Nov 28 2007 15:13:45
+M5 started Wed Nov 28 15:13:46 2007
M5 executing on nacho
command line: build/SPARC_SE/m5.fast -d build/SPARC_SE/tests/fast/quick/00.hello/sparc/linux/simple-atomic tests/run.py quick/00.hello/sparc/linux/simple-atomic
Global frequency set at 1000000000000 ticks per second
executable=/dist/m5/regression/test-progs/hello/bin/sparc/linux/hello
gid=100
input=cin
+max_stack_size=67108864
output=cout
pid=100
ppid=99
---------- Begin Simulation Statistics ----------
-host_inst_rate 290934 # Simulator instruction rate (inst/s)
-host_mem_usage 197920 # Number of bytes of host memory used
-host_seconds 0.02 # Real time elapsed on the host
-host_tick_rate 939694341 # Simulator tick rate (ticks/s)
+host_inst_rate 1173 # Simulator instruction rate (inst/s)
+host_mem_usage 181108 # Number of bytes of host memory used
+host_seconds 4.12 # Real time elapsed on the host
+host_tick_rate 3847579 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 4833 # Number of instructions simulated
sim_seconds 0.000016 # Number of seconds simulated
-sim_ticks 15925000 # Number of ticks simulated
+sim_ticks 15853000 # Number of ticks simulated
system.cpu.dcache.ReadReq_accesses 608 # number of ReadReq accesses(hits+misses)
system.cpu.dcache.ReadReq_avg_miss_latency 24777.777778 # average ReadReq miss latency
system.cpu.dcache.ReadReq_avg_mshr_miss_latency 22777.777778 # average ReadReq mshr miss latency
system.cpu.dcache.WriteReq_accesses 661 # number of WriteReq accesses(hits+misses)
system.cpu.dcache.WriteReq_avg_miss_latency 25000 # average WriteReq miss latency
system.cpu.dcache.WriteReq_avg_mshr_miss_latency 23000 # average WriteReq mshr miss latency
-system.cpu.dcache.WriteReq_hits 562 # number of WriteReq hits
-system.cpu.dcache.WriteReq_miss_latency 2475000 # number of WriteReq miss cycles
-system.cpu.dcache.WriteReq_miss_rate 0.149773 # miss rate for WriteReq accesses
-system.cpu.dcache.WriteReq_misses 99 # number of WriteReq misses
-system.cpu.dcache.WriteReq_mshr_miss_latency 2277000 # number of WriteReq MSHR miss cycles
-system.cpu.dcache.WriteReq_mshr_miss_rate 0.149773 # mshr miss rate for WriteReq accesses
-system.cpu.dcache.WriteReq_mshr_misses 99 # number of WriteReq MSHR misses
+system.cpu.dcache.WriteReq_hits 565 # number of WriteReq hits
+system.cpu.dcache.WriteReq_miss_latency 2400000 # number of WriteReq miss cycles
+system.cpu.dcache.WriteReq_miss_rate 0.145234 # miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_misses 96 # number of WriteReq misses
+system.cpu.dcache.WriteReq_mshr_miss_latency 2208000 # number of WriteReq MSHR miss cycles
+system.cpu.dcache.WriteReq_mshr_miss_rate 0.145234 # mshr miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_mshr_misses 96 # number of WriteReq MSHR misses
system.cpu.dcache.avg_blocked_cycles_no_mshrs <err: div-0> # average number of cycles each access was blocked
system.cpu.dcache.avg_blocked_cycles_no_targets <err: div-0> # average number of cycles each access was blocked
-system.cpu.dcache.avg_refs 8.195652 # Average number of references to valid blocks.
+system.cpu.dcache.avg_refs 8.400000 # Average number of references to valid blocks.
system.cpu.dcache.blocked_no_mshrs 0 # number of cycles access was blocked
system.cpu.dcache.blocked_no_targets 0 # number of cycles access was blocked
system.cpu.dcache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked
system.cpu.dcache.blocked_cycles_no_targets 0 # number of cycles access was blocked
system.cpu.dcache.cache_copies 0 # number of cache copies performed
system.cpu.dcache.demand_accesses 1269 # number of demand (read+write) accesses
-system.cpu.dcache.demand_avg_miss_latency 24921.568627 # average overall miss latency
-system.cpu.dcache.demand_avg_mshr_miss_latency 22921.568627 # average overall mshr miss latency
-system.cpu.dcache.demand_hits 1116 # number of demand (read+write) hits
-system.cpu.dcache.demand_miss_latency 3813000 # number of demand (read+write) miss cycles
-system.cpu.dcache.demand_miss_rate 0.120567 # miss rate for demand accesses
-system.cpu.dcache.demand_misses 153 # number of demand (read+write) misses
+system.cpu.dcache.demand_avg_miss_latency 24920 # average overall miss latency
+system.cpu.dcache.demand_avg_mshr_miss_latency 22920 # average overall mshr miss latency
+system.cpu.dcache.demand_hits 1119 # number of demand (read+write) hits
+system.cpu.dcache.demand_miss_latency 3738000 # number of demand (read+write) miss cycles
+system.cpu.dcache.demand_miss_rate 0.118203 # miss rate for demand accesses
+system.cpu.dcache.demand_misses 150 # number of demand (read+write) misses
system.cpu.dcache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits
-system.cpu.dcache.demand_mshr_miss_latency 3507000 # number of demand (read+write) MSHR miss cycles
-system.cpu.dcache.demand_mshr_miss_rate 0.120567 # mshr miss rate for demand accesses
-system.cpu.dcache.demand_mshr_misses 153 # number of demand (read+write) MSHR misses
+system.cpu.dcache.demand_mshr_miss_latency 3438000 # number of demand (read+write) MSHR miss cycles
+system.cpu.dcache.demand_mshr_miss_rate 0.118203 # mshr miss rate for demand accesses
+system.cpu.dcache.demand_mshr_misses 150 # number of demand (read+write) MSHR misses
system.cpu.dcache.fast_writes 0 # number of fast writes performed
system.cpu.dcache.mshr_cap_events 0 # number of times MSHR cap was activated
system.cpu.dcache.no_allocate_misses 0 # Number of misses that were no-allocate
system.cpu.dcache.overall_accesses 1269 # number of overall (read+write) accesses
-system.cpu.dcache.overall_avg_miss_latency 24921.568627 # average overall miss latency
-system.cpu.dcache.overall_avg_mshr_miss_latency 22921.568627 # average overall mshr miss latency
+system.cpu.dcache.overall_avg_miss_latency 24920 # average overall miss latency
+system.cpu.dcache.overall_avg_mshr_miss_latency 22920 # average overall mshr miss latency
system.cpu.dcache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
-system.cpu.dcache.overall_hits 1116 # number of overall hits
-system.cpu.dcache.overall_miss_latency 3813000 # number of overall miss cycles
-system.cpu.dcache.overall_miss_rate 0.120567 # miss rate for overall accesses
-system.cpu.dcache.overall_misses 153 # number of overall misses
+system.cpu.dcache.overall_hits 1119 # number of overall hits
+system.cpu.dcache.overall_miss_latency 3738000 # number of overall miss cycles
+system.cpu.dcache.overall_miss_rate 0.118203 # miss rate for overall accesses
+system.cpu.dcache.overall_misses 150 # number of overall misses
system.cpu.dcache.overall_mshr_hits 0 # number of overall MSHR hits
-system.cpu.dcache.overall_mshr_miss_latency 3507000 # number of overall MSHR miss cycles
-system.cpu.dcache.overall_mshr_miss_rate 0.120567 # mshr miss rate for overall accesses
-system.cpu.dcache.overall_mshr_misses 153 # number of overall MSHR misses
+system.cpu.dcache.overall_mshr_miss_latency 3438000 # number of overall MSHR miss cycles
+system.cpu.dcache.overall_mshr_miss_rate 0.118203 # mshr miss rate for overall accesses
+system.cpu.dcache.overall_mshr_misses 150 # number of overall MSHR misses
system.cpu.dcache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
system.cpu.dcache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
system.cpu.dcache.prefetcher.num_hwpf_already_in_cache 0 # number of hwpf that were already in the cache
system.cpu.dcache.prefetcher.num_hwpf_span_page 0 # number of hwpf spanning a virtual page
system.cpu.dcache.prefetcher.num_hwpf_squashed_from_miss 0 # number of hwpf that got squashed due to a miss aborting calculation time
system.cpu.dcache.replacements 0 # number of replacements
-system.cpu.dcache.sampled_refs 138 # Sample count of references to valid blocks.
+system.cpu.dcache.sampled_refs 135 # Sample count of references to valid blocks.
system.cpu.dcache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
-system.cpu.dcache.tagsinuse 83.440192 # Cycle average of tags in use
-system.cpu.dcache.total_refs 1131 # Total number of references to valid blocks.
+system.cpu.dcache.tagsinuse 81.746424 # Cycle average of tags in use
+system.cpu.dcache.total_refs 1134 # Total number of references to valid blocks.
system.cpu.dcache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.dcache.writebacks 0 # number of writebacks
system.cpu.icache.ReadReq_accesses 4877 # number of ReadReq accesses(hits+misses)
system.cpu.icache.replacements 0 # number of replacements
system.cpu.icache.sampled_refs 256 # Sample count of references to valid blocks.
system.cpu.icache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
-system.cpu.icache.tagsinuse 114.921642 # Cycle average of tags in use
+system.cpu.icache.tagsinuse 114.989412 # Cycle average of tags in use
system.cpu.icache.total_refs 4621 # Total number of references to valid blocks.
system.cpu.icache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.icache.writebacks 0 # number of writebacks
system.cpu.idle_fraction 0 # Percentage of idle cycles
-system.cpu.l2cache.ReadExReq_accesses 84 # number of ReadExReq accesses(hits+misses)
+system.cpu.l2cache.ReadExReq_accesses 81 # number of ReadExReq accesses(hits+misses)
system.cpu.l2cache.ReadExReq_avg_miss_latency 22000 # average ReadExReq miss latency
system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency 11000 # average ReadExReq mshr miss latency
-system.cpu.l2cache.ReadExReq_miss_latency 1848000 # number of ReadExReq miss cycles
+system.cpu.l2cache.ReadExReq_miss_latency 1782000 # number of ReadExReq miss cycles
system.cpu.l2cache.ReadExReq_miss_rate 1 # miss rate for ReadExReq accesses
-system.cpu.l2cache.ReadExReq_misses 84 # number of ReadExReq misses
-system.cpu.l2cache.ReadExReq_mshr_miss_latency 924000 # number of ReadExReq MSHR miss cycles
+system.cpu.l2cache.ReadExReq_misses 81 # number of ReadExReq misses
+system.cpu.l2cache.ReadExReq_mshr_miss_latency 891000 # number of ReadExReq MSHR miss cycles
system.cpu.l2cache.ReadExReq_mshr_miss_rate 1 # mshr miss rate for ReadExReq accesses
-system.cpu.l2cache.ReadExReq_mshr_misses 84 # number of ReadExReq MSHR misses
+system.cpu.l2cache.ReadExReq_mshr_misses 81 # number of ReadExReq MSHR misses
system.cpu.l2cache.ReadReq_accesses 310 # number of ReadReq accesses(hits+misses)
system.cpu.l2cache.ReadReq_avg_miss_latency 22000 # average ReadReq miss latency
system.cpu.l2cache.ReadReq_avg_mshr_miss_latency 11000 # average ReadReq mshr miss latency
system.cpu.l2cache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.cache_copies 0 # number of cache copies performed
-system.cpu.l2cache.demand_accesses 394 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_accesses 391 # number of demand (read+write) accesses
system.cpu.l2cache.demand_avg_miss_latency 22000 # average overall miss latency
system.cpu.l2cache.demand_avg_mshr_miss_latency 11000 # average overall mshr miss latency
system.cpu.l2cache.demand_hits 3 # number of demand (read+write) hits
-system.cpu.l2cache.demand_miss_latency 8602000 # number of demand (read+write) miss cycles
-system.cpu.l2cache.demand_miss_rate 0.992386 # miss rate for demand accesses
-system.cpu.l2cache.demand_misses 391 # number of demand (read+write) misses
+system.cpu.l2cache.demand_miss_latency 8536000 # number of demand (read+write) miss cycles
+system.cpu.l2cache.demand_miss_rate 0.992327 # miss rate for demand accesses
+system.cpu.l2cache.demand_misses 388 # number of demand (read+write) misses
system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits
-system.cpu.l2cache.demand_mshr_miss_latency 4301000 # number of demand (read+write) MSHR miss cycles
-system.cpu.l2cache.demand_mshr_miss_rate 0.992386 # mshr miss rate for demand accesses
-system.cpu.l2cache.demand_mshr_misses 391 # number of demand (read+write) MSHR misses
+system.cpu.l2cache.demand_mshr_miss_latency 4268000 # number of demand (read+write) MSHR miss cycles
+system.cpu.l2cache.demand_mshr_miss_rate 0.992327 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_misses 388 # number of demand (read+write) MSHR misses
system.cpu.l2cache.fast_writes 0 # number of fast writes performed
system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated
system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate
-system.cpu.l2cache.overall_accesses 394 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_accesses 391 # number of overall (read+write) accesses
system.cpu.l2cache.overall_avg_miss_latency 22000 # average overall miss latency
system.cpu.l2cache.overall_avg_mshr_miss_latency 11000 # average overall mshr miss latency
system.cpu.l2cache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.l2cache.overall_hits 3 # number of overall hits
-system.cpu.l2cache.overall_miss_latency 8602000 # number of overall miss cycles
-system.cpu.l2cache.overall_miss_rate 0.992386 # miss rate for overall accesses
-system.cpu.l2cache.overall_misses 391 # number of overall misses
+system.cpu.l2cache.overall_miss_latency 8536000 # number of overall miss cycles
+system.cpu.l2cache.overall_miss_rate 0.992327 # miss rate for overall accesses
+system.cpu.l2cache.overall_misses 388 # number of overall misses
system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits
-system.cpu.l2cache.overall_mshr_miss_latency 4301000 # number of overall MSHR miss cycles
-system.cpu.l2cache.overall_mshr_miss_rate 0.992386 # mshr miss rate for overall accesses
-system.cpu.l2cache.overall_mshr_misses 391 # number of overall MSHR misses
+system.cpu.l2cache.overall_mshr_miss_latency 4268000 # number of overall MSHR miss cycles
+system.cpu.l2cache.overall_mshr_miss_rate 0.992327 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_misses 388 # number of overall MSHR misses
system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
system.cpu.l2cache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
system.cpu.l2cache.prefetcher.num_hwpf_already_in_cache 0 # number of hwpf that were already in the cache
system.cpu.l2cache.replacements 0 # number of replacements
system.cpu.l2cache.sampled_refs 292 # Sample count of references to valid blocks.
system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instrutions
-system.cpu.l2cache.tagsinuse 133.706132 # Cycle average of tags in use
+system.cpu.l2cache.tagsinuse 133.763146 # Cycle average of tags in use
system.cpu.l2cache.total_refs 3 # Total number of references to valid blocks.
system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.l2cache.writebacks 0 # number of writebacks
system.cpu.not_idle_fraction 1 # Percentage of non-idle cycles
-system.cpu.numCycles 31850 # number of cpu cycles simulated
+system.cpu.numCycles 31706 # number of cpu cycles simulated
system.cpu.num_insts 4833 # Number of instructions executed
system.cpu.num_refs 1282 # Number of memory references
system.cpu.workload.PROG:num_syscalls 11 # Number of system calls
+0: system.remote_gdb.listener: listening for remote gdb on port 7000
warn: Entering event queue @ 0. Starting simulation...
All Rights Reserved
-M5 compiled Aug 14 2007 22:08:21
-M5 started Tue Aug 14 22:08:24 2007
+M5 compiled Nov 28 2007 15:13:45
+M5 started Wed Nov 28 15:13:46 2007
M5 executing on nacho
command line: build/SPARC_SE/m5.fast -d build/SPARC_SE/tests/fast/quick/00.hello/sparc/linux/simple-timing tests/run.py quick/00.hello/sparc/linux/simple-timing
Global frequency set at 1000000000000 ticks per second
-Exiting @ tick 15925000 because target called exit()
+Exiting @ tick 15853000 because target called exit()
executable=/dist/m5/regression/test-progs/insttest/bin/sparc/linux/insttest
gid=100
input=cin
+max_stack_size=67108864
output=cout
pid=100
ppid=99
---------- Begin Simulation Statistics ----------
global.BPredUnit.BTBCorrect 0 # Number of correct BTB predictions (this stat may not work properly.
-global.BPredUnit.BTBHits 2711 # Number of BTB hits
-global.BPredUnit.BTBLookups 6964 # Number of BTB lookups
+global.BPredUnit.BTBHits 2712 # Number of BTB hits
+global.BPredUnit.BTBLookups 6973 # Number of BTB lookups
global.BPredUnit.RASInCorrect 0 # Number of incorrect RAS predictions.
-global.BPredUnit.condIncorrect 2012 # Number of conditional branches incorrect
-global.BPredUnit.condPredicted 7659 # Number of conditional branches predicted
-global.BPredUnit.lookups 7659 # Number of BP lookups
+global.BPredUnit.condIncorrect 2013 # Number of conditional branches incorrect
+global.BPredUnit.condPredicted 7668 # Number of conditional branches predicted
+global.BPredUnit.lookups 7668 # Number of BP lookups
global.BPredUnit.usedRAS 0 # Number of times the RAS was used to get a target.
-host_inst_rate 64485 # Simulator instruction rate (inst/s)
-host_mem_usage 198296 # Number of bytes of host memory used
-host_seconds 0.16 # Real time elapsed on the host
-host_tick_rate 92733729 # Simulator tick rate (ticks/s)
+host_inst_rate 2391 # Simulator instruction rate (inst/s)
+host_mem_usage 181652 # Number of bytes of host memory used
+host_seconds 4.36 # Real time elapsed on the host
+host_tick_rate 3443166 # Simulator tick rate (ticks/s)
memdepunit.memDep.conflictingLoads 15 # Number of conflicting loads.
memdepunit.memDep.conflictingStores 0 # Number of conflicting stores.
-memdepunit.memDep.insertedLoads 3077 # Number of loads inserted to the mem dependence unit.
-memdepunit.memDep.insertedStores 2956 # Number of stores inserted to the mem dependence unit.
+memdepunit.memDep.insertedLoads 3078 # Number of loads inserted to the mem dependence unit.
+memdepunit.memDep.insertedStores 2957 # Number of stores inserted to the mem dependence unit.
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 10411 # Number of instructions simulated
sim_seconds 0.000015 # Number of seconds simulated
-sim_ticks 14990500 # Number of ticks simulated
+sim_ticks 14995500 # Number of ticks simulated
system.cpu.commit.COM:branches 2152 # Number of branches committed
system.cpu.commit.COM:bw_lim_events 87 # number cycles where commit BW limit reached
system.cpu.commit.COM:bw_limited 0 # number of insts not committed due to BW limits
system.cpu.commit.COM:committed_per_cycle.start_dist # Number of insts commited each cycle
-system.cpu.commit.COM:committed_per_cycle.samples 26989
+system.cpu.commit.COM:committed_per_cycle.samples 26996
system.cpu.commit.COM:committed_per_cycle.min_value 0
- 0 21416 7935.08%
- 1 3114 1153.80%
- 2 1160 429.80%
- 3 589 218.24%
- 4 306 113.38%
+ 0 21423 7935.62%
+ 1 3114 1153.50%
+ 2 1160 429.69%
+ 3 589 218.18%
+ 4 306 113.35%
5 84 31.12%
- 6 196 72.62%
+ 6 196 72.60%
7 37 13.71%
- 8 87 32.24%
+ 8 87 32.23%
system.cpu.commit.COM:committed_per_cycle.max_value 8
system.cpu.commit.COM:committed_per_cycle.end_dist
system.cpu.commit.COM:membars 0 # Number of memory barriers committed
system.cpu.commit.COM:refs 2760 # Number of memory references committed
system.cpu.commit.COM:swp_count 0 # Number of s/w prefetches committed
-system.cpu.commit.branchMispredicts 2012 # The number of times a branch was mispredicted
+system.cpu.commit.branchMispredicts 2013 # The number of times a branch was mispredicted
system.cpu.commit.commitCommittedInsts 10976 # The number of committed instructions
system.cpu.commit.commitNonSpecStalls 329 # The number of times commit has been forced to stall to communicate backwards
-system.cpu.commit.commitSquashedInsts 13198 # The number of squashed insts skipped by commit
+system.cpu.commit.commitSquashedInsts 13215 # The number of squashed insts skipped by commit
system.cpu.committedInsts 10411 # Number of Instructions Simulated
system.cpu.committedInsts_total 10411 # Number of Instructions Simulated
-system.cpu.cpi 2.879839 # CPI: Cycles Per Instruction
-system.cpu.cpi_total 2.879839 # CPI: Total CPI of All Threads
-system.cpu.dcache.ReadReq_accesses 2274 # number of ReadReq accesses(hits+misses)
-system.cpu.dcache.ReadReq_avg_miss_latency 9734.848485 # average ReadReq miss latency
-system.cpu.dcache.ReadReq_avg_mshr_miss_latency 5560.606061 # average ReadReq mshr miss latency
-system.cpu.dcache.ReadReq_hits 2208 # number of ReadReq hits
-system.cpu.dcache.ReadReq_miss_latency 642500 # number of ReadReq miss cycles
-system.cpu.dcache.ReadReq_miss_rate 0.029024 # miss rate for ReadReq accesses
+system.cpu.cpi 2.880799 # CPI: Cycles Per Instruction
+system.cpu.cpi_total 2.880799 # CPI: Total CPI of All Threads
+system.cpu.dcache.ReadReq_accesses 2269 # number of ReadReq accesses(hits+misses)
+system.cpu.dcache.ReadReq_avg_miss_latency 9992.424242 # average ReadReq miss latency
+system.cpu.dcache.ReadReq_avg_mshr_miss_latency 5515.151515 # average ReadReq mshr miss latency
+system.cpu.dcache.ReadReq_hits 2203 # number of ReadReq hits
+system.cpu.dcache.ReadReq_miss_latency 659500 # number of ReadReq miss cycles
+system.cpu.dcache.ReadReq_miss_rate 0.029088 # miss rate for ReadReq accesses
system.cpu.dcache.ReadReq_misses 66 # number of ReadReq misses
-system.cpu.dcache.ReadReq_mshr_hits 25 # number of ReadReq MSHR hits
-system.cpu.dcache.ReadReq_mshr_miss_latency 367000 # number of ReadReq MSHR miss cycles
-system.cpu.dcache.ReadReq_mshr_miss_rate 0.029024 # mshr miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_mshr_hits 29 # number of ReadReq MSHR hits
+system.cpu.dcache.ReadReq_mshr_miss_latency 364000 # number of ReadReq MSHR miss cycles
+system.cpu.dcache.ReadReq_mshr_miss_rate 0.029088 # mshr miss rate for ReadReq accesses
system.cpu.dcache.ReadReq_mshr_misses 66 # number of ReadReq MSHR misses
system.cpu.dcache.SwapReq_accesses 6 # number of SwapReq accesses(hits+misses)
system.cpu.dcache.SwapReq_hits 6 # number of SwapReq hits
system.cpu.dcache.WriteReq_accesses 1171 # number of WriteReq accesses(hits+misses)
-system.cpu.dcache.WriteReq_avg_miss_latency 16414.285714 # average WriteReq miss latency
-system.cpu.dcache.WriteReq_avg_mshr_miss_latency 5623.809524 # average WriteReq mshr miss latency
-system.cpu.dcache.WriteReq_hits 1066 # number of WriteReq hits
-system.cpu.dcache.WriteReq_miss_latency 1723500 # number of WriteReq miss cycles
-system.cpu.dcache.WriteReq_miss_rate 0.089667 # miss rate for WriteReq accesses
-system.cpu.dcache.WriteReq_misses 105 # number of WriteReq misses
+system.cpu.dcache.WriteReq_avg_miss_latency 16051.886792 # average WriteReq miss latency
+system.cpu.dcache.WriteReq_avg_mshr_miss_latency 5589.622642 # average WriteReq mshr miss latency
+system.cpu.dcache.WriteReq_hits 1065 # number of WriteReq hits
+system.cpu.dcache.WriteReq_miss_latency 1701500 # number of WriteReq miss cycles
+system.cpu.dcache.WriteReq_miss_rate 0.090521 # miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_misses 106 # number of WriteReq misses
system.cpu.dcache.WriteReq_mshr_hits 121 # number of WriteReq MSHR hits
-system.cpu.dcache.WriteReq_mshr_miss_latency 590500 # number of WriteReq MSHR miss cycles
-system.cpu.dcache.WriteReq_mshr_miss_rate 0.089667 # mshr miss rate for WriteReq accesses
-system.cpu.dcache.WriteReq_mshr_misses 105 # number of WriteReq MSHR misses
+system.cpu.dcache.WriteReq_mshr_miss_latency 592500 # number of WriteReq MSHR miss cycles
+system.cpu.dcache.WriteReq_mshr_miss_rate 0.090521 # mshr miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_mshr_misses 106 # number of WriteReq MSHR misses
system.cpu.dcache.avg_blocked_cycles_no_mshrs <err: div-0> # average number of cycles each access was blocked
system.cpu.dcache.avg_blocked_cycles_no_targets <err: div-0> # average number of cycles each access was blocked
-system.cpu.dcache.avg_refs 21.703947 # Average number of references to valid blocks.
+system.cpu.dcache.avg_refs 21.376623 # Average number of references to valid blocks.
system.cpu.dcache.blocked_no_mshrs 0 # number of cycles access was blocked
system.cpu.dcache.blocked_no_targets 0 # number of cycles access was blocked
system.cpu.dcache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked
system.cpu.dcache.blocked_cycles_no_targets 0 # number of cycles access was blocked
system.cpu.dcache.cache_copies 0 # number of cache copies performed
-system.cpu.dcache.demand_accesses 3445 # number of demand (read+write) accesses
-system.cpu.dcache.demand_avg_miss_latency 13836.257310 # average overall miss latency
-system.cpu.dcache.demand_avg_mshr_miss_latency 5599.415205 # average overall mshr miss latency
-system.cpu.dcache.demand_hits 3274 # number of demand (read+write) hits
-system.cpu.dcache.demand_miss_latency 2366000 # number of demand (read+write) miss cycles
-system.cpu.dcache.demand_miss_rate 0.049637 # miss rate for demand accesses
-system.cpu.dcache.demand_misses 171 # number of demand (read+write) misses
-system.cpu.dcache.demand_mshr_hits 146 # number of demand (read+write) MSHR hits
-system.cpu.dcache.demand_mshr_miss_latency 957500 # number of demand (read+write) MSHR miss cycles
-system.cpu.dcache.demand_mshr_miss_rate 0.049637 # mshr miss rate for demand accesses
-system.cpu.dcache.demand_mshr_misses 171 # number of demand (read+write) MSHR misses
+system.cpu.dcache.demand_accesses 3440 # number of demand (read+write) accesses
+system.cpu.dcache.demand_avg_miss_latency 13726.744186 # average overall miss latency
+system.cpu.dcache.demand_avg_mshr_miss_latency 5561.046512 # average overall mshr miss latency
+system.cpu.dcache.demand_hits 3268 # number of demand (read+write) hits
+system.cpu.dcache.demand_miss_latency 2361000 # number of demand (read+write) miss cycles
+system.cpu.dcache.demand_miss_rate 0.050000 # miss rate for demand accesses
+system.cpu.dcache.demand_misses 172 # number of demand (read+write) misses
+system.cpu.dcache.demand_mshr_hits 150 # number of demand (read+write) MSHR hits
+system.cpu.dcache.demand_mshr_miss_latency 956500 # number of demand (read+write) MSHR miss cycles
+system.cpu.dcache.demand_mshr_miss_rate 0.050000 # mshr miss rate for demand accesses
+system.cpu.dcache.demand_mshr_misses 172 # number of demand (read+write) MSHR misses
system.cpu.dcache.fast_writes 0 # number of fast writes performed
system.cpu.dcache.mshr_cap_events 0 # number of times MSHR cap was activated
system.cpu.dcache.no_allocate_misses 0 # Number of misses that were no-allocate
-system.cpu.dcache.overall_accesses 3445 # number of overall (read+write) accesses
-system.cpu.dcache.overall_avg_miss_latency 13836.257310 # average overall miss latency
-system.cpu.dcache.overall_avg_mshr_miss_latency 5599.415205 # average overall mshr miss latency
+system.cpu.dcache.overall_accesses 3440 # number of overall (read+write) accesses
+system.cpu.dcache.overall_avg_miss_latency 13726.744186 # average overall miss latency
+system.cpu.dcache.overall_avg_mshr_miss_latency 5561.046512 # average overall mshr miss latency
system.cpu.dcache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
-system.cpu.dcache.overall_hits 3274 # number of overall hits
-system.cpu.dcache.overall_miss_latency 2366000 # number of overall miss cycles
-system.cpu.dcache.overall_miss_rate 0.049637 # miss rate for overall accesses
-system.cpu.dcache.overall_misses 171 # number of overall misses
-system.cpu.dcache.overall_mshr_hits 146 # number of overall MSHR hits
-system.cpu.dcache.overall_mshr_miss_latency 957500 # number of overall MSHR miss cycles
-system.cpu.dcache.overall_mshr_miss_rate 0.049637 # mshr miss rate for overall accesses
-system.cpu.dcache.overall_mshr_misses 171 # number of overall MSHR misses
+system.cpu.dcache.overall_hits 3268 # number of overall hits
+system.cpu.dcache.overall_miss_latency 2361000 # number of overall miss cycles
+system.cpu.dcache.overall_miss_rate 0.050000 # miss rate for overall accesses
+system.cpu.dcache.overall_misses 172 # number of overall misses
+system.cpu.dcache.overall_mshr_hits 150 # number of overall MSHR hits
+system.cpu.dcache.overall_mshr_miss_latency 956500 # number of overall MSHR miss cycles
+system.cpu.dcache.overall_mshr_miss_rate 0.050000 # mshr miss rate for overall accesses
+system.cpu.dcache.overall_mshr_misses 172 # number of overall MSHR misses
system.cpu.dcache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
system.cpu.dcache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
system.cpu.dcache.prefetcher.num_hwpf_already_in_cache 0 # number of hwpf that were already in the cache
system.cpu.dcache.prefetcher.num_hwpf_span_page 0 # number of hwpf spanning a virtual page
system.cpu.dcache.prefetcher.num_hwpf_squashed_from_miss 0 # number of hwpf that got squashed due to a miss aborting calculation time
system.cpu.dcache.replacements 0 # number of replacements
-system.cpu.dcache.sampled_refs 152 # Sample count of references to valid blocks.
+system.cpu.dcache.sampled_refs 154 # Sample count of references to valid blocks.
system.cpu.dcache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instructions
-system.cpu.dcache.tagsinuse 111.288485 # Cycle average of tags in use
-system.cpu.dcache.total_refs 3299 # Total number of references to valid blocks.
+system.cpu.dcache.tagsinuse 112.808512 # Cycle average of tags in use
+system.cpu.dcache.total_refs 3292 # Total number of references to valid blocks.
system.cpu.dcache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.dcache.writebacks 0 # number of writebacks
-system.cpu.decode.DECODE:BlockedCycles 3945 # Number of cycles decode is blocked
-system.cpu.decode.DECODE:DecodedInsts 38084 # Number of instructions handled by decode
-system.cpu.decode.DECODE:IdleCycles 12820 # Number of cycles decode is idle
-system.cpu.decode.DECODE:RunCycles 10159 # Number of cycles decode is running
-system.cpu.decode.DECODE:SquashCycles 2909 # Number of cycles decode is squashing
+system.cpu.decode.DECODE:BlockedCycles 3940 # Number of cycles decode is blocked
+system.cpu.decode.DECODE:DecodedInsts 38117 # Number of instructions handled by decode
+system.cpu.decode.DECODE:IdleCycles 12825 # Number of cycles decode is idle
+system.cpu.decode.DECODE:RunCycles 10166 # Number of cycles decode is running
+system.cpu.decode.DECODE:SquashCycles 2912 # Number of cycles decode is squashing
system.cpu.decode.DECODE:UnblockCycles 65 # Number of cycles decode is unblocking
-system.cpu.fetch.Branches 7659 # Number of branches that fetch encountered
-system.cpu.fetch.CacheLines 4927 # Number of cache lines fetched
-system.cpu.fetch.Cycles 16219 # Number of cycles fetch has run and was not squashing or blocked
-system.cpu.fetch.IcacheSquashes 589 # Number of outstanding Icache misses that were squashed
-system.cpu.fetch.Insts 42202 # Number of instructions fetch has processed
-system.cpu.fetch.SquashCycles 2099 # Number of cycles fetch has spent squashing
-system.cpu.fetch.branchRate 0.255453 # Number of branch fetches per cycle
-system.cpu.fetch.icacheStallCycles 4927 # Number of cycles fetch is stalled on an Icache miss
-system.cpu.fetch.predictedBranches 2711 # Number of branches that fetch has predicted taken
-system.cpu.fetch.rate 1.407578 # Number of inst fetches per cycle
+system.cpu.fetch.Branches 7668 # Number of branches that fetch encountered
+system.cpu.fetch.CacheLines 4931 # Number of cache lines fetched
+system.cpu.fetch.Cycles 16230 # Number of cycles fetch has run and was not squashing or blocked
+system.cpu.fetch.IcacheSquashes 591 # Number of outstanding Icache misses that were squashed
+system.cpu.fetch.Insts 42235 # Number of instructions fetch has processed
+system.cpu.fetch.SquashCycles 2100 # Number of cycles fetch has spent squashing
+system.cpu.fetch.branchRate 0.255668 # Number of branch fetches per cycle
+system.cpu.fetch.icacheStallCycles 4931 # Number of cycles fetch is stalled on an Icache miss
+system.cpu.fetch.predictedBranches 2712 # Number of branches that fetch has predicted taken
+system.cpu.fetch.rate 1.408209 # Number of inst fetches per cycle
system.cpu.fetch.rateDist.start_dist # Number of instructions fetched each cycle (Total)
-system.cpu.fetch.rateDist.samples 29898
+system.cpu.fetch.rateDist.samples 29908
system.cpu.fetch.rateDist.min_value 0
- 0 18628 6230.52%
- 1 4885 1633.89%
- 2 619 207.04%
- 3 712 238.14%
- 4 788 263.56%
- 5 640 214.06%
- 6 611 204.36%
- 7 195 65.22%
- 8 2820 943.21%
+ 0 18631 6229.44%
+ 1 4887 1634.01%
+ 2 620 207.30%
+ 3 712 238.06%
+ 4 788 263.47%
+ 5 640 213.99%
+ 6 612 204.63%
+ 7 196 65.53%
+ 8 2822 943.56%
system.cpu.fetch.rateDist.max_value 8
system.cpu.fetch.rateDist.end_dist
-system.cpu.icache.ReadReq_accesses 4907 # number of ReadReq accesses(hits+misses)
+system.cpu.icache.ReadReq_accesses 4912 # number of ReadReq accesses(hits+misses)
system.cpu.icache.ReadReq_avg_miss_latency 7495.945946 # average ReadReq miss latency
-system.cpu.icache.ReadReq_avg_mshr_miss_latency 5325.675676 # average ReadReq mshr miss latency
-system.cpu.icache.ReadReq_hits 4537 # number of ReadReq hits
+system.cpu.icache.ReadReq_avg_mshr_miss_latency 5332.432432 # average ReadReq mshr miss latency
+system.cpu.icache.ReadReq_hits 4542 # number of ReadReq hits
system.cpu.icache.ReadReq_miss_latency 2773500 # number of ReadReq miss cycles
-system.cpu.icache.ReadReq_miss_rate 0.075402 # miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_miss_rate 0.075326 # miss rate for ReadReq accesses
system.cpu.icache.ReadReq_misses 370 # number of ReadReq misses
-system.cpu.icache.ReadReq_mshr_hits 20 # number of ReadReq MSHR hits
-system.cpu.icache.ReadReq_mshr_miss_latency 1970500 # number of ReadReq MSHR miss cycles
-system.cpu.icache.ReadReq_mshr_miss_rate 0.075402 # mshr miss rate for ReadReq accesses
+system.cpu.icache.ReadReq_mshr_hits 19 # number of ReadReq MSHR hits
+system.cpu.icache.ReadReq_mshr_miss_latency 1973000 # number of ReadReq MSHR miss cycles
+system.cpu.icache.ReadReq_mshr_miss_rate 0.075326 # mshr miss rate for ReadReq accesses
system.cpu.icache.ReadReq_mshr_misses 370 # number of ReadReq MSHR misses
system.cpu.icache.avg_blocked_cycles_no_mshrs <err: div-0> # average number of cycles each access was blocked
system.cpu.icache.avg_blocked_cycles_no_targets <err: div-0> # average number of cycles each access was blocked
-system.cpu.icache.avg_refs 12.262162 # Average number of references to valid blocks.
+system.cpu.icache.avg_refs 12.275676 # Average number of references to valid blocks.
system.cpu.icache.blocked_no_mshrs 0 # number of cycles access was blocked
system.cpu.icache.blocked_no_targets 0 # number of cycles access was blocked
system.cpu.icache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked
system.cpu.icache.blocked_cycles_no_targets 0 # number of cycles access was blocked
system.cpu.icache.cache_copies 0 # number of cache copies performed
-system.cpu.icache.demand_accesses 4907 # number of demand (read+write) accesses
+system.cpu.icache.demand_accesses 4912 # number of demand (read+write) accesses
system.cpu.icache.demand_avg_miss_latency 7495.945946 # average overall miss latency
-system.cpu.icache.demand_avg_mshr_miss_latency 5325.675676 # average overall mshr miss latency
-system.cpu.icache.demand_hits 4537 # number of demand (read+write) hits
+system.cpu.icache.demand_avg_mshr_miss_latency 5332.432432 # average overall mshr miss latency
+system.cpu.icache.demand_hits 4542 # number of demand (read+write) hits
system.cpu.icache.demand_miss_latency 2773500 # number of demand (read+write) miss cycles
-system.cpu.icache.demand_miss_rate 0.075402 # miss rate for demand accesses
+system.cpu.icache.demand_miss_rate 0.075326 # miss rate for demand accesses
system.cpu.icache.demand_misses 370 # number of demand (read+write) misses
-system.cpu.icache.demand_mshr_hits 20 # number of demand (read+write) MSHR hits
-system.cpu.icache.demand_mshr_miss_latency 1970500 # number of demand (read+write) MSHR miss cycles
-system.cpu.icache.demand_mshr_miss_rate 0.075402 # mshr miss rate for demand accesses
+system.cpu.icache.demand_mshr_hits 19 # number of demand (read+write) MSHR hits
+system.cpu.icache.demand_mshr_miss_latency 1973000 # number of demand (read+write) MSHR miss cycles
+system.cpu.icache.demand_mshr_miss_rate 0.075326 # mshr miss rate for demand accesses
system.cpu.icache.demand_mshr_misses 370 # number of demand (read+write) MSHR misses
system.cpu.icache.fast_writes 0 # number of fast writes performed
system.cpu.icache.mshr_cap_events 0 # number of times MSHR cap was activated
system.cpu.icache.no_allocate_misses 0 # Number of misses that were no-allocate
-system.cpu.icache.overall_accesses 4907 # number of overall (read+write) accesses
+system.cpu.icache.overall_accesses 4912 # number of overall (read+write) accesses
system.cpu.icache.overall_avg_miss_latency 7495.945946 # average overall miss latency
-system.cpu.icache.overall_avg_mshr_miss_latency 5325.675676 # average overall mshr miss latency
+system.cpu.icache.overall_avg_mshr_miss_latency 5332.432432 # average overall mshr miss latency
system.cpu.icache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
-system.cpu.icache.overall_hits 4537 # number of overall hits
+system.cpu.icache.overall_hits 4542 # number of overall hits
system.cpu.icache.overall_miss_latency 2773500 # number of overall miss cycles
-system.cpu.icache.overall_miss_rate 0.075402 # miss rate for overall accesses
+system.cpu.icache.overall_miss_rate 0.075326 # miss rate for overall accesses
system.cpu.icache.overall_misses 370 # number of overall misses
-system.cpu.icache.overall_mshr_hits 20 # number of overall MSHR hits
-system.cpu.icache.overall_mshr_miss_latency 1970500 # number of overall MSHR miss cycles
-system.cpu.icache.overall_mshr_miss_rate 0.075402 # mshr miss rate for overall accesses
+system.cpu.icache.overall_mshr_hits 19 # number of overall MSHR hits
+system.cpu.icache.overall_mshr_miss_latency 1973000 # number of overall MSHR miss cycles
+system.cpu.icache.overall_mshr_miss_rate 0.075326 # mshr miss rate for overall accesses
system.cpu.icache.overall_mshr_misses 370 # number of overall MSHR misses
system.cpu.icache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
system.cpu.icache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
system.cpu.icache.replacements 1 # number of replacements
system.cpu.icache.sampled_refs 370 # Sample count of references to valid blocks.
system.cpu.icache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instructions
-system.cpu.icache.tagsinuse 233.477311 # Cycle average of tags in use
-system.cpu.icache.total_refs 4537 # Total number of references to valid blocks.
+system.cpu.icache.tagsinuse 233.392727 # Cycle average of tags in use
+system.cpu.icache.total_refs 4542 # Total number of references to valid blocks.
system.cpu.icache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.icache.writebacks 0 # number of writebacks
system.cpu.idleCycles 84 # Total number of cycles that the CPU has spent unscheduled due to idling
-system.cpu.iew.EXEC:branches 3086 # Number of branches executed
+system.cpu.iew.EXEC:branches 3087 # Number of branches executed
system.cpu.iew.EXEC:nop 1794 # number of nop insts executed
-system.cpu.iew.EXEC:rate 0.575379 # Inst execution rate
-system.cpu.iew.EXEC:refs 4543 # number of memory reference insts executed
+system.cpu.iew.EXEC:rate 0.575487 # Inst execution rate
+system.cpu.iew.EXEC:refs 4542 # number of memory reference insts executed
system.cpu.iew.EXEC:stores 2116 # Number of stores executed
system.cpu.iew.EXEC:swp 0 # number of swp insts executed
-system.cpu.iew.WB:consumers 9189 # num instructions consuming a value
-system.cpu.iew.WB:count 16618 # cumulative count of insts written-back
-system.cpu.iew.WB:fanout 0.827620 # average fanout of values written-back
+system.cpu.iew.WB:consumers 9197 # num instructions consuming a value
+system.cpu.iew.WB:count 16627 # cumulative count of insts written-back
+system.cpu.iew.WB:fanout 0.827444 # average fanout of values written-back
system.cpu.iew.WB:penalized 0 # number of instructions required to write to 'other' IQ
system.cpu.iew.WB:penalized_rate 0 # fraction of instructions written-back that wrote to 'other' IQ
-system.cpu.iew.WB:producers 7605 # num instructions producing a value
-system.cpu.iew.WB:rate 0.554266 # insts written-back per cycle
-system.cpu.iew.WB:sent 16830 # cumulative count of insts sent to commit
-system.cpu.iew.branchMispredicts 2216 # Number of branch mispredicts detected at execute
+system.cpu.iew.WB:producers 7610 # num instructions producing a value
+system.cpu.iew.WB:rate 0.554381 # insts written-back per cycle
+system.cpu.iew.WB:sent 16839 # cumulative count of insts sent to commit
+system.cpu.iew.branchMispredicts 2217 # Number of branch mispredicts detected at execute
system.cpu.iew.iewBlockCycles 0 # Number of cycles IEW is blocking
-system.cpu.iew.iewDispLoadInsts 3077 # Number of dispatched load instructions
+system.cpu.iew.iewDispLoadInsts 3078 # Number of dispatched load instructions
system.cpu.iew.iewDispNonSpecInsts 612 # Number of dispatched non-speculative instructions
-system.cpu.iew.iewDispSquashedInsts 2973 # Number of squashed instructions skipped by dispatch
-system.cpu.iew.iewDispStoreInsts 2956 # Number of dispatched store instructions
-system.cpu.iew.iewDispatchedInsts 24330 # Number of instructions dispatched to IQ
-system.cpu.iew.iewExecLoadInsts 2427 # Number of load instructions executed
-system.cpu.iew.iewExecSquashedInsts 2838 # Number of squashed instructions skipped in execute
-system.cpu.iew.iewExecutedInsts 17251 # Number of executed instructions
+system.cpu.iew.iewDispSquashedInsts 2981 # Number of squashed instructions skipped by dispatch
+system.cpu.iew.iewDispStoreInsts 2957 # Number of dispatched store instructions
+system.cpu.iew.iewDispatchedInsts 24347 # Number of instructions dispatched to IQ
+system.cpu.iew.iewExecLoadInsts 2426 # Number of load instructions executed
+system.cpu.iew.iewExecSquashedInsts 2842 # Number of squashed instructions skipped in execute
+system.cpu.iew.iewExecutedInsts 17260 # Number of executed instructions
system.cpu.iew.iewIQFullEvents 0 # Number of times the IQ has become full, causing a stall
system.cpu.iew.iewIdleCycles 0 # Number of cycles IEW is idle
system.cpu.iew.iewLSQFullEvents 0 # Number of times the LSQ has become full, causing a stall
-system.cpu.iew.iewSquashCycles 2909 # Number of cycles IEW is squashing
+system.cpu.iew.iewSquashCycles 2912 # Number of cycles IEW is squashing
system.cpu.iew.iewUnblockCycles 0 # Number of cycles IEW is unblocking
system.cpu.iew.lsq.thread.0.blockedLoads 0 # Number of blocked loads due to partial load-store forwarding
system.cpu.iew.lsq.thread.0.cacheBlocked 0 # Number of times an access to memory failed due to the cache being blocked
system.cpu.iew.lsq.thread.0.invAddrSwpfs 0 # Number of software prefetches ignored due to an invalid address
system.cpu.iew.lsq.thread.0.memOrderViolation 57 # Number of memory ordering violations
system.cpu.iew.lsq.thread.0.rescheduledLoads 1 # Number of loads that were rescheduled
-system.cpu.iew.lsq.thread.0.squashedLoads 1615 # Number of loads squashed
-system.cpu.iew.lsq.thread.0.squashedStores 1658 # Number of stores squashed
+system.cpu.iew.lsq.thread.0.squashedLoads 1616 # Number of loads squashed
+system.cpu.iew.lsq.thread.0.squashedStores 1659 # Number of stores squashed
system.cpu.iew.memOrderViolationEvents 57 # Number of memory order violations
system.cpu.iew.predictedNotTakenIncorrect 695 # Number of branches that were predicted not taken incorrectly
-system.cpu.iew.predictedTakenIncorrect 1521 # Number of branches that were predicted taken incorrectly
-system.cpu.ipc 0.347242 # IPC: Instructions Per Cycle
-system.cpu.ipc_total 0.347242 # IPC: Total IPC of All Threads
-system.cpu.iq.ISSUE:FU_type_0 20089 # Type of FU issued
+system.cpu.iew.predictedTakenIncorrect 1522 # Number of branches that were predicted taken incorrectly
+system.cpu.ipc 0.347126 # IPC: Instructions Per Cycle
+system.cpu.ipc_total 0.347126 # IPC: Total IPC of All Threads
+system.cpu.iq.ISSUE:FU_type_0 20102 # Type of FU issued
system.cpu.iq.ISSUE:FU_type_0.start_dist
No_OpClass 0 0.00% # Type of FU issued
- IntAlu 14535 72.35% # Type of FU issued
+ IntAlu 14548 72.37% # Type of FU issued
IntMult 0 0.00% # Type of FU issued
IntDiv 0 0.00% # Type of FU issued
FloatAdd 0 0.00% # Type of FU issued
FloatMult 0 0.00% # Type of FU issued
FloatDiv 0 0.00% # Type of FU issued
FloatSqrt 0 0.00% # Type of FU issued
- MemRead 2907 14.47% # Type of FU issued
- MemWrite 2647 13.18% # Type of FU issued
+ MemRead 2907 14.46% # Type of FU issued
+ MemWrite 2647 13.17% # Type of FU issued
IprAccess 0 0.00% # Type of FU issued
InstPrefetch 0 0.00% # Type of FU issued
system.cpu.iq.ISSUE:FU_type_0.end_dist
system.cpu.iq.ISSUE:fu_busy_cnt 188 # FU busy when requested
-system.cpu.iq.ISSUE:fu_busy_rate 0.009358 # FU busy rate (busy events/executed inst)
+system.cpu.iq.ISSUE:fu_busy_rate 0.009352 # FU busy rate (busy events/executed inst)
system.cpu.iq.ISSUE:fu_full.start_dist
No_OpClass 0 0.00% # attempts to use FU when none available
IntAlu 50 26.60% # attempts to use FU when none available
InstPrefetch 0 0.00% # attempts to use FU when none available
system.cpu.iq.ISSUE:fu_full.end_dist
system.cpu.iq.ISSUE:issued_per_cycle.start_dist # Number of insts issued each cycle
-system.cpu.iq.ISSUE:issued_per_cycle.samples 29898
+system.cpu.iq.ISSUE:issued_per_cycle.samples 29908
system.cpu.iq.ISSUE:issued_per_cycle.min_value 0
- 0 21040 7037.26%
- 1 3621 1211.12%
- 2 2127 711.42%
- 3 1561 522.11%
- 4 748 250.18%
- 5 407 136.13%
- 6 293 98.00%
- 7 62 20.74%
+ 0 21042 7035.58%
+ 1 3623 1211.38%
+ 2 2132 712.85%
+ 3 1562 522.27%
+ 4 750 250.77%
+ 5 405 135.42%
+ 6 293 97.97%
+ 7 62 20.73%
8 39 13.04%
system.cpu.iq.ISSUE:issued_per_cycle.max_value 8
system.cpu.iq.ISSUE:issued_per_cycle.end_dist
-system.cpu.iq.ISSUE:rate 0.670035 # Inst issue rate
-system.cpu.iq.iqInstsAdded 21924 # Number of instructions added to the IQ (excludes non-spec)
-system.cpu.iq.iqInstsIssued 20089 # Number of instructions issued
+system.cpu.iq.ISSUE:rate 0.670245 # Inst issue rate
+system.cpu.iq.iqInstsAdded 21941 # Number of instructions added to the IQ (excludes non-spec)
+system.cpu.iq.iqInstsIssued 20102 # Number of instructions issued
system.cpu.iq.iqNonSpecInstsAdded 612 # Number of non-speculative instructions added to the IQ
-system.cpu.iq.iqSquashedInstsExamined 10307 # Number of squashed instructions iterated over during squash; mainly for profiling
+system.cpu.iq.iqSquashedInstsExamined 10302 # Number of squashed instructions iterated over during squash; mainly for profiling
system.cpu.iq.iqSquashedInstsIssued 110 # Number of squashed instructions issued
system.cpu.iq.iqSquashedNonSpecRemoved 283 # Number of squashed non-spec instructions that were removed
-system.cpu.iq.iqSquashedOperandsExamined 8241 # Number of squashed operands that are examined and possibly removed from graph
-system.cpu.l2cache.ReadExReq_accesses 86 # number of ReadExReq accesses(hits+misses)
-system.cpu.l2cache.ReadExReq_avg_miss_latency 4424.418605 # average ReadExReq miss latency
-system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency 2424.418605 # average ReadExReq mshr miss latency
-system.cpu.l2cache.ReadExReq_miss_latency 380500 # number of ReadExReq miss cycles
+system.cpu.iq.iqSquashedOperandsExamined 8248 # Number of squashed operands that are examined and possibly removed from graph
+system.cpu.l2cache.ReadExReq_accesses 88 # number of ReadExReq accesses(hits+misses)
+system.cpu.l2cache.ReadExReq_avg_miss_latency 4431.818182 # average ReadExReq miss latency
+system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency 2431.818182 # average ReadExReq mshr miss latency
+system.cpu.l2cache.ReadExReq_miss_latency 390000 # number of ReadExReq miss cycles
system.cpu.l2cache.ReadExReq_miss_rate 1 # miss rate for ReadExReq accesses
-system.cpu.l2cache.ReadExReq_misses 86 # number of ReadExReq misses
-system.cpu.l2cache.ReadExReq_mshr_miss_latency 208500 # number of ReadExReq MSHR miss cycles
+system.cpu.l2cache.ReadExReq_misses 88 # number of ReadExReq misses
+system.cpu.l2cache.ReadExReq_mshr_miss_latency 214000 # number of ReadExReq MSHR miss cycles
system.cpu.l2cache.ReadExReq_mshr_miss_rate 1 # mshr miss rate for ReadExReq accesses
-system.cpu.l2cache.ReadExReq_mshr_misses 86 # number of ReadExReq MSHR misses
+system.cpu.l2cache.ReadExReq_mshr_misses 88 # number of ReadExReq MSHR misses
system.cpu.l2cache.ReadReq_accesses 436 # number of ReadReq accesses(hits+misses)
-system.cpu.l2cache.ReadReq_avg_miss_latency 4287.037037 # average ReadReq miss latency
-system.cpu.l2cache.ReadReq_avg_mshr_miss_latency 2287.037037 # average ReadReq mshr miss latency
+system.cpu.l2cache.ReadReq_avg_miss_latency 4283.564815 # average ReadReq miss latency
+system.cpu.l2cache.ReadReq_avg_mshr_miss_latency 2283.564815 # average ReadReq mshr miss latency
system.cpu.l2cache.ReadReq_hits 4 # number of ReadReq hits
-system.cpu.l2cache.ReadReq_miss_latency 1852000 # number of ReadReq miss cycles
+system.cpu.l2cache.ReadReq_miss_latency 1850500 # number of ReadReq miss cycles
system.cpu.l2cache.ReadReq_miss_rate 0.990826 # miss rate for ReadReq accesses
system.cpu.l2cache.ReadReq_misses 432 # number of ReadReq misses
-system.cpu.l2cache.ReadReq_mshr_miss_latency 988000 # number of ReadReq MSHR miss cycles
+system.cpu.l2cache.ReadReq_mshr_miss_latency 986500 # number of ReadReq MSHR miss cycles
system.cpu.l2cache.ReadReq_mshr_miss_rate 0.990826 # mshr miss rate for ReadReq accesses
system.cpu.l2cache.ReadReq_mshr_misses 432 # number of ReadReq MSHR misses
-system.cpu.l2cache.UpgradeReq_accesses 19 # number of UpgradeReq accesses(hits+misses)
-system.cpu.l2cache.UpgradeReq_avg_miss_latency 4421.052632 # average UpgradeReq miss latency
-system.cpu.l2cache.UpgradeReq_avg_mshr_miss_latency 2421.052632 # average UpgradeReq mshr miss latency
-system.cpu.l2cache.UpgradeReq_miss_latency 84000 # number of UpgradeReq miss cycles
+system.cpu.l2cache.UpgradeReq_accesses 18 # number of UpgradeReq accesses(hits+misses)
+system.cpu.l2cache.UpgradeReq_avg_miss_latency 4416.666667 # average UpgradeReq miss latency
+system.cpu.l2cache.UpgradeReq_avg_mshr_miss_latency 2416.666667 # average UpgradeReq mshr miss latency
+system.cpu.l2cache.UpgradeReq_miss_latency 79500 # number of UpgradeReq miss cycles
system.cpu.l2cache.UpgradeReq_miss_rate 1 # miss rate for UpgradeReq accesses
-system.cpu.l2cache.UpgradeReq_misses 19 # number of UpgradeReq misses
-system.cpu.l2cache.UpgradeReq_mshr_miss_latency 46000 # number of UpgradeReq MSHR miss cycles
+system.cpu.l2cache.UpgradeReq_misses 18 # number of UpgradeReq misses
+system.cpu.l2cache.UpgradeReq_mshr_miss_latency 43500 # number of UpgradeReq MSHR miss cycles
system.cpu.l2cache.UpgradeReq_mshr_miss_rate 1 # mshr miss rate for UpgradeReq accesses
-system.cpu.l2cache.UpgradeReq_mshr_misses 19 # number of UpgradeReq MSHR misses
+system.cpu.l2cache.UpgradeReq_mshr_misses 18 # number of UpgradeReq MSHR misses
system.cpu.l2cache.avg_blocked_cycles_no_mshrs <err: div-0> # average number of cycles each access was blocked
system.cpu.l2cache.avg_blocked_cycles_no_targets <err: div-0> # average number of cycles each access was blocked
-system.cpu.l2cache.avg_refs 0.009685 # Average number of references to valid blocks.
+system.cpu.l2cache.avg_refs 0.009662 # Average number of references to valid blocks.
system.cpu.l2cache.blocked_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.cache_copies 0 # number of cache copies performed
-system.cpu.l2cache.demand_accesses 522 # number of demand (read+write) accesses
-system.cpu.l2cache.demand_avg_miss_latency 4309.845560 # average overall miss latency
-system.cpu.l2cache.demand_avg_mshr_miss_latency 2309.845560 # average overall mshr miss latency
+system.cpu.l2cache.demand_accesses 524 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_avg_miss_latency 4308.653846 # average overall miss latency
+system.cpu.l2cache.demand_avg_mshr_miss_latency 2308.653846 # average overall mshr miss latency
system.cpu.l2cache.demand_hits 4 # number of demand (read+write) hits
-system.cpu.l2cache.demand_miss_latency 2232500 # number of demand (read+write) miss cycles
-system.cpu.l2cache.demand_miss_rate 0.992337 # miss rate for demand accesses
-system.cpu.l2cache.demand_misses 518 # number of demand (read+write) misses
+system.cpu.l2cache.demand_miss_latency 2240500 # number of demand (read+write) miss cycles
+system.cpu.l2cache.demand_miss_rate 0.992366 # miss rate for demand accesses
+system.cpu.l2cache.demand_misses 520 # number of demand (read+write) misses
system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits
-system.cpu.l2cache.demand_mshr_miss_latency 1196500 # number of demand (read+write) MSHR miss cycles
-system.cpu.l2cache.demand_mshr_miss_rate 0.992337 # mshr miss rate for demand accesses
-system.cpu.l2cache.demand_mshr_misses 518 # number of demand (read+write) MSHR misses
+system.cpu.l2cache.demand_mshr_miss_latency 1200500 # number of demand (read+write) MSHR miss cycles
+system.cpu.l2cache.demand_mshr_miss_rate 0.992366 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_misses 520 # number of demand (read+write) MSHR misses
system.cpu.l2cache.fast_writes 0 # number of fast writes performed
system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated
system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate
-system.cpu.l2cache.overall_accesses 522 # number of overall (read+write) accesses
-system.cpu.l2cache.overall_avg_miss_latency 4309.845560 # average overall miss latency
-system.cpu.l2cache.overall_avg_mshr_miss_latency 2309.845560 # average overall mshr miss latency
+system.cpu.l2cache.overall_accesses 524 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_avg_miss_latency 4308.653846 # average overall miss latency
+system.cpu.l2cache.overall_avg_mshr_miss_latency 2308.653846 # average overall mshr miss latency
system.cpu.l2cache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.l2cache.overall_hits 4 # number of overall hits
-system.cpu.l2cache.overall_miss_latency 2232500 # number of overall miss cycles
-system.cpu.l2cache.overall_miss_rate 0.992337 # miss rate for overall accesses
-system.cpu.l2cache.overall_misses 518 # number of overall misses
+system.cpu.l2cache.overall_miss_latency 2240500 # number of overall miss cycles
+system.cpu.l2cache.overall_miss_rate 0.992366 # miss rate for overall accesses
+system.cpu.l2cache.overall_misses 520 # number of overall misses
system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits
-system.cpu.l2cache.overall_mshr_miss_latency 1196500 # number of overall MSHR miss cycles
-system.cpu.l2cache.overall_mshr_miss_rate 0.992337 # mshr miss rate for overall accesses
-system.cpu.l2cache.overall_mshr_misses 518 # number of overall MSHR misses
+system.cpu.l2cache.overall_mshr_miss_latency 1200500 # number of overall MSHR miss cycles
+system.cpu.l2cache.overall_mshr_miss_rate 0.992366 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_misses 520 # number of overall MSHR misses
system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
system.cpu.l2cache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
system.cpu.l2cache.prefetcher.num_hwpf_already_in_cache 0 # number of hwpf that were already in the cache
system.cpu.l2cache.prefetcher.num_hwpf_span_page 0 # number of hwpf spanning a virtual page
system.cpu.l2cache.prefetcher.num_hwpf_squashed_from_miss 0 # number of hwpf that got squashed due to a miss aborting calculation time
system.cpu.l2cache.replacements 0 # number of replacements
-system.cpu.l2cache.sampled_refs 413 # Sample count of references to valid blocks.
+system.cpu.l2cache.sampled_refs 414 # Sample count of references to valid blocks.
system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instructions
-system.cpu.l2cache.tagsinuse 259.708792 # Cycle average of tags in use
+system.cpu.l2cache.tagsinuse 260.564179 # Cycle average of tags in use
system.cpu.l2cache.total_refs 4 # Total number of references to valid blocks.
system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.l2cache.writebacks 0 # number of writebacks
-system.cpu.numCycles 29982 # number of cpu cycles simulated
+system.cpu.numCycles 29992 # number of cpu cycles simulated
system.cpu.rename.RENAME:CommittedMaps 9868 # Number of HB maps that are committed
-system.cpu.rename.RENAME:IdleCycles 14192 # Number of cycles rename is idle
-system.cpu.rename.RENAME:RenameLookups 51924 # Number of register rename lookups that rename has made
-system.cpu.rename.RENAME:RenamedInsts 30001 # Number of instructions processed by rename
-system.cpu.rename.RENAME:RenamedOperands 24487 # Number of destination operands rename has renamed
-system.cpu.rename.RENAME:RunCycles 8874 # Number of cycles rename is running
-system.cpu.rename.RENAME:SquashCycles 2909 # Number of cycles rename is squashing
+system.cpu.rename.RENAME:IdleCycles 14199 # Number of cycles rename is idle
+system.cpu.rename.RENAME:RenameLookups 51943 # Number of register rename lookups that rename has made
+system.cpu.rename.RENAME:RenamedInsts 30018 # Number of instructions processed by rename
+system.cpu.rename.RENAME:RenamedOperands 24503 # Number of destination operands rename has renamed
+system.cpu.rename.RENAME:RunCycles 8879 # Number of cycles rename is running
+system.cpu.rename.RENAME:SquashCycles 2912 # Number of cycles rename is squashing
system.cpu.rename.RENAME:UnblockCycles 230 # Number of cycles rename is unblocking
-system.cpu.rename.RENAME:UndoneMaps 14619 # Number of HB maps that are undone due to squashing
-system.cpu.rename.RENAME:serializeStallCycles 3693 # count of cycles rename stalled for serializing inst
+system.cpu.rename.RENAME:UndoneMaps 14635 # Number of HB maps that are undone due to squashing
+system.cpu.rename.RENAME:serializeStallCycles 3688 # count of cycles rename stalled for serializing inst
system.cpu.rename.RENAME:serializingInsts 648 # count of serializing insts renamed
system.cpu.rename.RENAME:skidInsts 4472 # count of insts added to the skid buffer
system.cpu.rename.RENAME:tempSerializingInsts 685 # count of temporary serializing insts renamed
+0: system.remote_gdb.listener: listening for remote gdb on port 7004
warn: Entering event queue @ 0. Starting simulation...
All Rights Reserved
-M5 compiled Aug 19 2007 19:19:06
-M5 started Sun Aug 19 19:19:36 2007
+M5 compiled Nov 28 2007 15:13:45
+M5 started Wed Nov 28 15:13:46 2007
M5 executing on nacho
-command line: build/SPARC_SE/m5.debug -d build/SPARC_SE/tests/debug/quick/02.insttest/sparc/linux/o3-timing tests/run.py quick/02.insttest/sparc/linux/o3-timing
+command line: build/SPARC_SE/m5.fast -d build/SPARC_SE/tests/fast/quick/02.insttest/sparc/linux/o3-timing tests/run.py quick/02.insttest/sparc/linux/o3-timing
Global frequency set at 1000000000000 ticks per second
-Exiting @ tick 14990500 because target called exit()
+Exiting @ tick 14995500 because target called exit()
executable=/dist/m5/regression/test-progs/insttest/bin/sparc/linux/insttest
gid=100
input=cin
+max_stack_size=67108864
output=cout
pid=100
ppid=99
---------- Begin Simulation Statistics ----------
-host_inst_rate 464357 # Simulator instruction rate (inst/s)
-host_mem_usage 173536 # Number of bytes of host memory used
-host_seconds 0.02 # Real time elapsed on the host
-host_tick_rate 229778722 # Simulator tick rate (ticks/s)
+host_inst_rate 2681 # Simulator instruction rate (inst/s)
+host_mem_usage 173616 # Number of bytes of host memory used
+host_seconds 4.09 # Real time elapsed on the host
+host_tick_rate 1346729 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 10976 # Number of instructions simulated
sim_seconds 0.000006 # Number of seconds simulated
+0: system.remote_gdb.listener: listening for remote gdb on port 7001
warn: Entering event queue @ 0. Starting simulation...
All Rights Reserved
-M5 compiled Aug 14 2007 22:08:21
-M5 started Tue Aug 14 22:08:25 2007
+M5 compiled Nov 28 2007 15:13:45
+M5 started Wed Nov 28 15:13:46 2007
M5 executing on nacho
command line: build/SPARC_SE/m5.fast -d build/SPARC_SE/tests/fast/quick/02.insttest/sparc/linux/simple-atomic tests/run.py quick/02.insttest/sparc/linux/simple-atomic
Global frequency set at 1000000000000 ticks per second
executable=/dist/m5/regression/test-progs/insttest/bin/sparc/linux/insttest
gid=100
input=cin
+max_stack_size=67108864
output=cout
pid=100
ppid=99
---------- Begin Simulation Statistics ----------
-host_inst_rate 472716 # Simulator instruction rate (inst/s)
-host_mem_usage 197656 # Number of bytes of host memory used
-host_seconds 0.02 # Real time elapsed on the host
-host_tick_rate 1037354119 # Simulator tick rate (ticks/s)
+host_inst_rate 2502 # Simulator instruction rate (inst/s)
+host_mem_usage 180992 # Number of bytes of host memory used
+host_seconds 4.39 # Real time elapsed on the host
+host_tick_rate 5561973 # Simulator tick rate (ticks/s)
sim_freq 1000000000000 # Frequency of simulated ticks
sim_insts 10976 # Number of instructions simulated
sim_seconds 0.000024 # Number of seconds simulated
-sim_ticks 24355000 # Number of ticks simulated
+sim_ticks 24403000 # Number of ticks simulated
system.cpu.dcache.ReadReq_accesses 1462 # number of ReadReq accesses(hits+misses)
system.cpu.dcache.ReadReq_avg_miss_latency 25000 # average ReadReq miss latency
system.cpu.dcache.ReadReq_avg_mshr_miss_latency 23000 # average ReadReq mshr miss latency
-system.cpu.dcache.ReadReq_hits 1408 # number of ReadReq hits
-system.cpu.dcache.ReadReq_miss_latency 1350000 # number of ReadReq miss cycles
-system.cpu.dcache.ReadReq_miss_rate 0.036936 # miss rate for ReadReq accesses
-system.cpu.dcache.ReadReq_misses 54 # number of ReadReq misses
-system.cpu.dcache.ReadReq_mshr_miss_latency 1242000 # number of ReadReq MSHR miss cycles
-system.cpu.dcache.ReadReq_mshr_miss_rate 0.036936 # mshr miss rate for ReadReq accesses
-system.cpu.dcache.ReadReq_mshr_misses 54 # number of ReadReq MSHR misses
+system.cpu.dcache.ReadReq_hits 1407 # number of ReadReq hits
+system.cpu.dcache.ReadReq_miss_latency 1375000 # number of ReadReq miss cycles
+system.cpu.dcache.ReadReq_miss_rate 0.037620 # miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_misses 55 # number of ReadReq misses
+system.cpu.dcache.ReadReq_mshr_miss_latency 1265000 # number of ReadReq MSHR miss cycles
+system.cpu.dcache.ReadReq_mshr_miss_rate 0.037620 # mshr miss rate for ReadReq accesses
+system.cpu.dcache.ReadReq_mshr_misses 55 # number of ReadReq MSHR misses
system.cpu.dcache.SwapReq_accesses 6 # number of SwapReq accesses(hits+misses)
system.cpu.dcache.SwapReq_hits 6 # number of SwapReq hits
system.cpu.dcache.WriteReq_accesses 1292 # number of WriteReq accesses(hits+misses)
system.cpu.dcache.WriteReq_avg_miss_latency 25000 # average WriteReq miss latency
system.cpu.dcache.WriteReq_avg_mshr_miss_latency 23000 # average WriteReq mshr miss latency
-system.cpu.dcache.WriteReq_hits 1187 # number of WriteReq hits
-system.cpu.dcache.WriteReq_miss_latency 2625000 # number of WriteReq miss cycles
-system.cpu.dcache.WriteReq_miss_rate 0.081269 # miss rate for WriteReq accesses
-system.cpu.dcache.WriteReq_misses 105 # number of WriteReq misses
-system.cpu.dcache.WriteReq_mshr_miss_latency 2415000 # number of WriteReq MSHR miss cycles
-system.cpu.dcache.WriteReq_mshr_miss_rate 0.081269 # mshr miss rate for WriteReq accesses
-system.cpu.dcache.WriteReq_mshr_misses 105 # number of WriteReq MSHR misses
+system.cpu.dcache.WriteReq_hits 1186 # number of WriteReq hits
+system.cpu.dcache.WriteReq_miss_latency 2650000 # number of WriteReq miss cycles
+system.cpu.dcache.WriteReq_miss_rate 0.082043 # miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_misses 106 # number of WriteReq misses
+system.cpu.dcache.WriteReq_mshr_miss_latency 2438000 # number of WriteReq MSHR miss cycles
+system.cpu.dcache.WriteReq_mshr_miss_rate 0.082043 # mshr miss rate for WriteReq accesses
+system.cpu.dcache.WriteReq_mshr_misses 106 # number of WriteReq MSHR misses
system.cpu.dcache.avg_blocked_cycles_no_mshrs <err: div-0> # average number of cycles each access was blocked
system.cpu.dcache.avg_blocked_cycles_no_targets <err: div-0> # average number of cycles each access was blocked
-system.cpu.dcache.avg_refs 18.436620 # Average number of references to valid blocks.
+system.cpu.dcache.avg_refs 18.166667 # Average number of references to valid blocks.
system.cpu.dcache.blocked_no_mshrs 0 # number of cycles access was blocked
system.cpu.dcache.blocked_no_targets 0 # number of cycles access was blocked
system.cpu.dcache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked
system.cpu.dcache.demand_accesses 2754 # number of demand (read+write) accesses
system.cpu.dcache.demand_avg_miss_latency 25000 # average overall miss latency
system.cpu.dcache.demand_avg_mshr_miss_latency 23000 # average overall mshr miss latency
-system.cpu.dcache.demand_hits 2595 # number of demand (read+write) hits
-system.cpu.dcache.demand_miss_latency 3975000 # number of demand (read+write) miss cycles
-system.cpu.dcache.demand_miss_rate 0.057734 # miss rate for demand accesses
-system.cpu.dcache.demand_misses 159 # number of demand (read+write) misses
+system.cpu.dcache.demand_hits 2593 # number of demand (read+write) hits
+system.cpu.dcache.demand_miss_latency 4025000 # number of demand (read+write) miss cycles
+system.cpu.dcache.demand_miss_rate 0.058460 # miss rate for demand accesses
+system.cpu.dcache.demand_misses 161 # number of demand (read+write) misses
system.cpu.dcache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits
-system.cpu.dcache.demand_mshr_miss_latency 3657000 # number of demand (read+write) MSHR miss cycles
-system.cpu.dcache.demand_mshr_miss_rate 0.057734 # mshr miss rate for demand accesses
-system.cpu.dcache.demand_mshr_misses 159 # number of demand (read+write) MSHR misses
+system.cpu.dcache.demand_mshr_miss_latency 3703000 # number of demand (read+write) MSHR miss cycles
+system.cpu.dcache.demand_mshr_miss_rate 0.058460 # mshr miss rate for demand accesses
+system.cpu.dcache.demand_mshr_misses 161 # number of demand (read+write) MSHR misses
system.cpu.dcache.fast_writes 0 # number of fast writes performed
system.cpu.dcache.mshr_cap_events 0 # number of times MSHR cap was activated
system.cpu.dcache.no_allocate_misses 0 # Number of misses that were no-allocate
system.cpu.dcache.overall_avg_miss_latency 25000 # average overall miss latency
system.cpu.dcache.overall_avg_mshr_miss_latency 23000 # average overall mshr miss latency
system.cpu.dcache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
-system.cpu.dcache.overall_hits 2595 # number of overall hits
-system.cpu.dcache.overall_miss_latency 3975000 # number of overall miss cycles
-system.cpu.dcache.overall_miss_rate 0.057734 # miss rate for overall accesses
-system.cpu.dcache.overall_misses 159 # number of overall misses
+system.cpu.dcache.overall_hits 2593 # number of overall hits
+system.cpu.dcache.overall_miss_latency 4025000 # number of overall miss cycles
+system.cpu.dcache.overall_miss_rate 0.058460 # miss rate for overall accesses
+system.cpu.dcache.overall_misses 161 # number of overall misses
system.cpu.dcache.overall_mshr_hits 0 # number of overall MSHR hits
-system.cpu.dcache.overall_mshr_miss_latency 3657000 # number of overall MSHR miss cycles
-system.cpu.dcache.overall_mshr_miss_rate 0.057734 # mshr miss rate for overall accesses
-system.cpu.dcache.overall_mshr_misses 159 # number of overall MSHR misses
+system.cpu.dcache.overall_mshr_miss_latency 3703000 # number of overall MSHR miss cycles
+system.cpu.dcache.overall_mshr_miss_rate 0.058460 # mshr miss rate for overall accesses
+system.cpu.dcache.overall_mshr_misses 161 # number of overall MSHR misses
system.cpu.dcache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
system.cpu.dcache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
system.cpu.dcache.prefetcher.num_hwpf_already_in_cache 0 # number of hwpf that were already in the cache
system.cpu.dcache.prefetcher.num_hwpf_span_page 0 # number of hwpf spanning a virtual page
system.cpu.dcache.prefetcher.num_hwpf_squashed_from_miss 0 # number of hwpf that got squashed due to a miss aborting calculation time
system.cpu.dcache.replacements 0 # number of replacements
-system.cpu.dcache.sampled_refs 142 # Sample count of references to valid blocks.
+system.cpu.dcache.sampled_refs 144 # Sample count of references to valid blocks.
system.cpu.dcache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instructions
-system.cpu.dcache.tagsinuse 100.373888 # Cycle average of tags in use
-system.cpu.dcache.total_refs 2618 # Total number of references to valid blocks.
+system.cpu.dcache.tagsinuse 101.761875 # Cycle average of tags in use
+system.cpu.dcache.total_refs 2616 # Total number of references to valid blocks.
system.cpu.dcache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.dcache.writebacks 0 # number of writebacks
system.cpu.icache.ReadReq_accesses 11012 # number of ReadReq accesses(hits+misses)
system.cpu.icache.replacements 0 # number of replacements
system.cpu.icache.sampled_refs 283 # Sample count of references to valid blocks.
system.cpu.icache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instructions
-system.cpu.icache.tagsinuse 155.977710 # Cycle average of tags in use
+system.cpu.icache.tagsinuse 155.854818 # Cycle average of tags in use
system.cpu.icache.total_refs 10729 # Total number of references to valid blocks.
system.cpu.icache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.icache.writebacks 0 # number of writebacks
system.cpu.idle_fraction 0 # Percentage of idle cycles
-system.cpu.l2cache.ReadExReq_accesses 88 # number of ReadExReq accesses(hits+misses)
+system.cpu.l2cache.ReadExReq_accesses 89 # number of ReadExReq accesses(hits+misses)
system.cpu.l2cache.ReadExReq_avg_miss_latency 22000 # average ReadExReq miss latency
system.cpu.l2cache.ReadExReq_avg_mshr_miss_latency 11000 # average ReadExReq mshr miss latency
-system.cpu.l2cache.ReadExReq_miss_latency 1936000 # number of ReadExReq miss cycles
+system.cpu.l2cache.ReadExReq_miss_latency 1958000 # number of ReadExReq miss cycles
system.cpu.l2cache.ReadExReq_miss_rate 1 # miss rate for ReadExReq accesses
-system.cpu.l2cache.ReadExReq_misses 88 # number of ReadExReq misses
-system.cpu.l2cache.ReadExReq_mshr_miss_latency 968000 # number of ReadExReq MSHR miss cycles
+system.cpu.l2cache.ReadExReq_misses 89 # number of ReadExReq misses
+system.cpu.l2cache.ReadExReq_mshr_miss_latency 979000 # number of ReadExReq MSHR miss cycles
system.cpu.l2cache.ReadExReq_mshr_miss_rate 1 # mshr miss rate for ReadExReq accesses
-system.cpu.l2cache.ReadExReq_mshr_misses 88 # number of ReadExReq MSHR misses
-system.cpu.l2cache.ReadReq_accesses 337 # number of ReadReq accesses(hits+misses)
+system.cpu.l2cache.ReadExReq_mshr_misses 89 # number of ReadExReq MSHR misses
+system.cpu.l2cache.ReadReq_accesses 338 # number of ReadReq accesses(hits+misses)
system.cpu.l2cache.ReadReq_avg_miss_latency 22000 # average ReadReq miss latency
system.cpu.l2cache.ReadReq_avg_mshr_miss_latency 11000 # average ReadReq mshr miss latency
system.cpu.l2cache.ReadReq_hits 2 # number of ReadReq hits
-system.cpu.l2cache.ReadReq_miss_latency 7370000 # number of ReadReq miss cycles
-system.cpu.l2cache.ReadReq_miss_rate 0.994065 # miss rate for ReadReq accesses
-system.cpu.l2cache.ReadReq_misses 335 # number of ReadReq misses
-system.cpu.l2cache.ReadReq_mshr_miss_latency 3685000 # number of ReadReq MSHR miss cycles
-system.cpu.l2cache.ReadReq_mshr_miss_rate 0.994065 # mshr miss rate for ReadReq accesses
-system.cpu.l2cache.ReadReq_mshr_misses 335 # number of ReadReq MSHR misses
+system.cpu.l2cache.ReadReq_miss_latency 7392000 # number of ReadReq miss cycles
+system.cpu.l2cache.ReadReq_miss_rate 0.994083 # miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_misses 336 # number of ReadReq misses
+system.cpu.l2cache.ReadReq_mshr_miss_latency 3696000 # number of ReadReq MSHR miss cycles
+system.cpu.l2cache.ReadReq_mshr_miss_rate 0.994083 # mshr miss rate for ReadReq accesses
+system.cpu.l2cache.ReadReq_mshr_misses 336 # number of ReadReq MSHR misses
system.cpu.l2cache.UpgradeReq_accesses 17 # number of UpgradeReq accesses(hits+misses)
system.cpu.l2cache.UpgradeReq_avg_miss_latency 22000 # average UpgradeReq miss latency
system.cpu.l2cache.UpgradeReq_avg_mshr_miss_latency 11000 # average UpgradeReq mshr miss latency
system.cpu.l2cache.UpgradeReq_mshr_misses 17 # number of UpgradeReq MSHR misses
system.cpu.l2cache.avg_blocked_cycles_no_mshrs <err: div-0> # average number of cycles each access was blocked
system.cpu.l2cache.avg_blocked_cycles_no_targets <err: div-0> # average number of cycles each access was blocked
-system.cpu.l2cache.avg_refs 0.006289 # Average number of references to valid blocks.
+system.cpu.l2cache.avg_refs 0.006270 # Average number of references to valid blocks.
system.cpu.l2cache.blocked_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_mshrs 0 # number of cycles access was blocked
system.cpu.l2cache.blocked_cycles_no_targets 0 # number of cycles access was blocked
system.cpu.l2cache.cache_copies 0 # number of cache copies performed
-system.cpu.l2cache.demand_accesses 425 # number of demand (read+write) accesses
+system.cpu.l2cache.demand_accesses 427 # number of demand (read+write) accesses
system.cpu.l2cache.demand_avg_miss_latency 22000 # average overall miss latency
system.cpu.l2cache.demand_avg_mshr_miss_latency 11000 # average overall mshr miss latency
system.cpu.l2cache.demand_hits 2 # number of demand (read+write) hits
-system.cpu.l2cache.demand_miss_latency 9306000 # number of demand (read+write) miss cycles
-system.cpu.l2cache.demand_miss_rate 0.995294 # miss rate for demand accesses
-system.cpu.l2cache.demand_misses 423 # number of demand (read+write) misses
+system.cpu.l2cache.demand_miss_latency 9350000 # number of demand (read+write) miss cycles
+system.cpu.l2cache.demand_miss_rate 0.995316 # miss rate for demand accesses
+system.cpu.l2cache.demand_misses 425 # number of demand (read+write) misses
system.cpu.l2cache.demand_mshr_hits 0 # number of demand (read+write) MSHR hits
-system.cpu.l2cache.demand_mshr_miss_latency 4653000 # number of demand (read+write) MSHR miss cycles
-system.cpu.l2cache.demand_mshr_miss_rate 0.995294 # mshr miss rate for demand accesses
-system.cpu.l2cache.demand_mshr_misses 423 # number of demand (read+write) MSHR misses
+system.cpu.l2cache.demand_mshr_miss_latency 4675000 # number of demand (read+write) MSHR miss cycles
+system.cpu.l2cache.demand_mshr_miss_rate 0.995316 # mshr miss rate for demand accesses
+system.cpu.l2cache.demand_mshr_misses 425 # number of demand (read+write) MSHR misses
system.cpu.l2cache.fast_writes 0 # number of fast writes performed
system.cpu.l2cache.mshr_cap_events 0 # number of times MSHR cap was activated
system.cpu.l2cache.no_allocate_misses 0 # Number of misses that were no-allocate
-system.cpu.l2cache.overall_accesses 425 # number of overall (read+write) accesses
+system.cpu.l2cache.overall_accesses 427 # number of overall (read+write) accesses
system.cpu.l2cache.overall_avg_miss_latency 22000 # average overall miss latency
system.cpu.l2cache.overall_avg_mshr_miss_latency 11000 # average overall mshr miss latency
system.cpu.l2cache.overall_avg_mshr_uncacheable_latency <err: div-0> # average overall mshr uncacheable latency
system.cpu.l2cache.overall_hits 2 # number of overall hits
-system.cpu.l2cache.overall_miss_latency 9306000 # number of overall miss cycles
-system.cpu.l2cache.overall_miss_rate 0.995294 # miss rate for overall accesses
-system.cpu.l2cache.overall_misses 423 # number of overall misses
+system.cpu.l2cache.overall_miss_latency 9350000 # number of overall miss cycles
+system.cpu.l2cache.overall_miss_rate 0.995316 # miss rate for overall accesses
+system.cpu.l2cache.overall_misses 425 # number of overall misses
system.cpu.l2cache.overall_mshr_hits 0 # number of overall MSHR hits
-system.cpu.l2cache.overall_mshr_miss_latency 4653000 # number of overall MSHR miss cycles
-system.cpu.l2cache.overall_mshr_miss_rate 0.995294 # mshr miss rate for overall accesses
-system.cpu.l2cache.overall_mshr_misses 423 # number of overall MSHR misses
+system.cpu.l2cache.overall_mshr_miss_latency 4675000 # number of overall MSHR miss cycles
+system.cpu.l2cache.overall_mshr_miss_rate 0.995316 # mshr miss rate for overall accesses
+system.cpu.l2cache.overall_mshr_misses 425 # number of overall MSHR misses
system.cpu.l2cache.overall_mshr_uncacheable_latency 0 # number of overall MSHR uncacheable cycles
system.cpu.l2cache.overall_mshr_uncacheable_misses 0 # number of overall MSHR uncacheable misses
system.cpu.l2cache.prefetcher.num_hwpf_already_in_cache 0 # number of hwpf that were already in the cache
system.cpu.l2cache.prefetcher.num_hwpf_span_page 0 # number of hwpf spanning a virtual page
system.cpu.l2cache.prefetcher.num_hwpf_squashed_from_miss 0 # number of hwpf that got squashed due to a miss aborting calculation time
system.cpu.l2cache.replacements 0 # number of replacements
-system.cpu.l2cache.sampled_refs 318 # Sample count of references to valid blocks.
+system.cpu.l2cache.sampled_refs 319 # Sample count of references to valid blocks.
system.cpu.l2cache.soft_prefetch_mshr_full 0 # number of mshr full events for SW prefetching instructions
-system.cpu.l2cache.tagsinuse 178.108320 # Cycle average of tags in use
+system.cpu.l2cache.tagsinuse 178.928867 # Cycle average of tags in use
system.cpu.l2cache.total_refs 2 # Total number of references to valid blocks.
system.cpu.l2cache.warmup_cycle 0 # Cycle when the warmup percentage was hit.
system.cpu.l2cache.writebacks 0 # number of writebacks
system.cpu.not_idle_fraction 1 # Percentage of non-idle cycles
-system.cpu.numCycles 48710 # number of cpu cycles simulated
+system.cpu.numCycles 48806 # number of cpu cycles simulated
system.cpu.num_insts 10976 # Number of instructions executed
system.cpu.num_refs 2770 # Number of memory references
system.cpu.workload.PROG:num_syscalls 8 # Number of system calls
+0: system.remote_gdb.listener: listening for remote gdb on port 7002
warn: Entering event queue @ 0. Starting simulation...
All Rights Reserved
-M5 compiled Aug 14 2007 22:08:21
-M5 started Tue Aug 14 22:08:25 2007
+M5 compiled Nov 28 2007 15:13:45
+M5 started Wed Nov 28 15:13:46 2007
M5 executing on nacho
command line: build/SPARC_SE/m5.fast -d build/SPARC_SE/tests/fast/quick/02.insttest/sparc/linux/simple-timing tests/run.py quick/02.insttest/sparc/linux/simple-timing
Global frequency set at 1000000000000 ticks per second
-Exiting @ tick 24355000 because target called exit()
+Exiting @ tick 24403000 because target called exit()