From: Kevin Lim Date: Sun, 4 Jun 2006 20:07:54 +0000 (-0400) Subject: Merge ktlim@zamp:/z/ktlim2/clean/m5-o3 X-Git-Tag: m5_2.0_beta1~36^2~108 X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=984c2a4ff677803ff7687a178f1dceb1f0204c30;p=gem5.git Merge ktlim@zamp:/z/ktlim2/clean/m5-o3 into zamp.eecs.umich.edu:/z/ktlim2/clean/newmem-merge src/cpu/checker/o3_cpu_builder.cc: src/cpu/o3/alpha_cpu.hh: src/cpu/o3/alpha_cpu_impl.hh: src/cpu/o3/alpha_dyn_inst_impl.hh: src/cpu/o3/bpred_unit.cc: src/cpu/o3/commit.hh: src/cpu/o3/fetch_impl.hh: src/cpu/o3/lsq_unit.hh: src/cpu/o3/lsq_unit_impl.hh: src/cpu/o3/thread_state.hh: Hand merge. --HG-- rename : cpu/activity.cc => src/cpu/activity.cc rename : cpu/activity.hh => src/cpu/activity.hh rename : cpu/base_dyn_inst.cc => src/cpu/base_dyn_inst.cc rename : cpu/checker/cpu.hh => src/cpu/checker/cpu.hh rename : cpu/checker/cpu_builder.cc => src/cpu/checker/cpu_builder.cc rename : cpu/checker/exec_context.hh => src/cpu/checker/exec_context.hh rename : cpu/checker/o3_cpu_builder.cc => src/cpu/checker/o3_cpu_builder.cc rename : cpu/o3/2bit_local_pred.cc => src/cpu/o3/2bit_local_pred.cc rename : cpu/o3/2bit_local_pred.hh => src/cpu/o3/2bit_local_pred.hh rename : cpu/o3/alpha_cpu.hh => src/cpu/o3/alpha_cpu.hh rename : cpu/o3/alpha_cpu_builder.cc => src/cpu/o3/alpha_cpu_builder.cc rename : cpu/o3/alpha_cpu_impl.hh => src/cpu/o3/alpha_cpu_impl.hh rename : cpu/o3/alpha_dyn_inst.hh => src/cpu/o3/alpha_dyn_inst.hh rename : cpu/o3/alpha_dyn_inst_impl.hh => src/cpu/o3/alpha_dyn_inst_impl.hh rename : cpu/o3/alpha_params.hh => src/cpu/o3/alpha_params.hh rename : cpu/o3/bpred_unit.cc => src/cpu/o3/bpred_unit.cc rename : cpu/o3/bpred_unit.hh => src/cpu/o3/bpred_unit.hh rename : cpu/o3/bpred_unit_impl.hh => src/cpu/o3/bpred_unit_impl.hh rename : cpu/o3/comm.hh => src/cpu/o3/comm.hh rename : cpu/o3/commit.hh => src/cpu/o3/commit.hh rename : cpu/o3/commit_impl.hh => src/cpu/o3/commit_impl.hh rename : cpu/o3/cpu.hh => src/cpu/o3/cpu.hh rename : cpu/o3/cpu_policy.hh => src/cpu/o3/cpu_policy.hh rename : cpu/o3/decode.hh => src/cpu/o3/decode.hh rename : cpu/o3/decode_impl.hh => src/cpu/o3/decode_impl.hh rename : cpu/o3/dep_graph.hh => src/cpu/o3/dep_graph.hh rename : cpu/o3/fetch.hh => src/cpu/o3/fetch.hh rename : cpu/o3/fetch_impl.hh => src/cpu/o3/fetch_impl.hh rename : cpu/o3/fu_pool.cc => src/cpu/o3/fu_pool.cc rename : cpu/o3/fu_pool.hh => src/cpu/o3/fu_pool.hh rename : cpu/o3/iew.hh => src/cpu/o3/iew.hh rename : cpu/o3/iew_impl.hh => src/cpu/o3/iew_impl.hh rename : cpu/o3/inst_queue.hh => src/cpu/o3/inst_queue.hh rename : cpu/o3/inst_queue_impl.hh => src/cpu/o3/inst_queue_impl.hh rename : cpu/o3/lsq.hh => src/cpu/o3/lsq.hh rename : cpu/o3/lsq_unit.hh => src/cpu/o3/lsq_unit.hh rename : cpu/o3/lsq_unit_impl.hh => src/cpu/o3/lsq_unit_impl.hh rename : cpu/o3/mem_dep_unit.hh => src/cpu/o3/mem_dep_unit.hh rename : cpu/o3/mem_dep_unit_impl.hh => src/cpu/o3/mem_dep_unit_impl.hh rename : cpu/o3/rename.hh => src/cpu/o3/rename.hh rename : cpu/o3/rename_impl.hh => src/cpu/o3/rename_impl.hh rename : cpu/o3/rename_map.hh => src/cpu/o3/rename_map.hh rename : cpu/o3/rob.hh => src/cpu/o3/rob.hh rename : cpu/o3/store_set.cc => src/cpu/o3/store_set.cc rename : cpu/o3/store_set.hh => src/cpu/o3/store_set.hh rename : cpu/o3/thread_state.hh => src/cpu/o3/thread_state.hh rename : cpu/o3/tournament_pred.cc => src/cpu/o3/tournament_pred.cc rename : cpu/o3/tournament_pred.hh => src/cpu/o3/tournament_pred.hh rename : cpu/ozone/cpu_builder.cc => src/cpu/ozone/cpu_builder.cc rename : 
cpu/ozone/ozone_impl.hh => src/cpu/ozone/ozone_impl.hh rename : cpu/ozone/simple_impl.hh => src/cpu/ozone/simple_impl.hh rename : cpu/ozone/simple_params.hh => src/cpu/ozone/simple_params.hh rename : python/m5/objects/AlphaFullCPU.py => src/python/m5/objects/AlphaFullCPU.py rename : python/m5/objects/OzoneCPU.py => src/python/m5/objects/OzoneCPU.py extra : convert_revision : b7be30474dd03dd3970e737a9d0489aeb2ead84f --- 984c2a4ff677803ff7687a178f1dceb1f0204c30 diff --cc src/cpu/activity.cc index 6dcb6e341,000000000..b0b16446c mode 100644,000000..100644 --- a/src/cpu/activity.cc +++ b/src/cpu/activity.cc @@@ -1,122 -1,0 +1,155 @@@ ++/* ++ * Copyright (c) 2006 The Regents of The University of Michigan ++ * All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are ++ * met: redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer; ++ * redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution; ++ * neither the name of the copyright holders nor the names of its ++ * contributors may be used to endorse or promote products derived from ++ * this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +#include "base/timebuf.hh" +#include "cpu/activity.hh" + +ActivityRecorder::ActivityRecorder(int num_stages, int longest_latency, + int activity) + : activityBuffer(longest_latency, 0), longestLatency(longest_latency), + activityCount(activity), numStages(num_stages) +{ + stageActive = new bool[numStages]; + memset(stageActive, 0, numStages); +} + +void +ActivityRecorder::activity() +{ ++ // If we've already recorded activity for this cycle, we don't ++ // want to increment the count any more. + if (activityBuffer[0]) { + return; + } + + activityBuffer[0] = true; + + ++activityCount; + + DPRINTF(Activity, "Activity: %i\n", activityCount); +} + +void +ActivityRecorder::advance() +{ ++ // If there's a 1 in the slot that is about to be erased once the ++ // time buffer advances, then decrement the activityCount. + if (activityBuffer[-longestLatency]) { + --activityCount; + + assert(activityCount >= 0); + + DPRINTF(Activity, "Activity: %i\n", activityCount); + + if (activityCount == 0) { + DPRINTF(Activity, "No activity left!\n"); + } + } + + activityBuffer.advance(); +} + +void +ActivityRecorder::activateStage(const int idx) +{ ++ // Increment the activity count if this stage wasn't already active. 
+ if (!stageActive[idx]) { + ++activityCount; + + stageActive[idx] = true; + + DPRINTF(Activity, "Activity: %i\n", activityCount); + } else { + DPRINTF(Activity, "Stage %i already active.\n", idx); + } + +// assert(activityCount < longestLatency + numStages + 1); +} + +void +ActivityRecorder::deactivateStage(const int idx) +{ ++ // Decrement the activity count if this stage was active. + if (stageActive[idx]) { + --activityCount; + + stageActive[idx] = false; + + DPRINTF(Activity, "Activity: %i\n", activityCount); + } else { + DPRINTF(Activity, "Stage %i already inactive.\n", idx); + } + + assert(activityCount >= 0); +} + +void +ActivityRecorder::reset() +{ + activityCount = 0; + memset(stageActive, 0, numStages); + for (int i = 0; i < longestLatency + 1; ++i) + activityBuffer.advance(); +} + +void +ActivityRecorder::dump() +{ + for (int i = 0; i <= longestLatency; ++i) { + cprintf("[Idx:%i %i] ", i, activityBuffer[-i]); + } + + cprintf("\n"); + + for (int i = 0; i < numStages; ++i) { + cprintf("[Stage:%i %i]\n", i, stageActive[i]); + } + + cprintf("\n"); + + cprintf("Activity count: %i\n", activityCount); +} + +void +ActivityRecorder::validate() +{ + int count = 0; + for (int i = 0; i <= longestLatency; ++i) { + if (activityBuffer[-i]) { + count++; + } + } + + for (int i = 0; i < numStages; ++i) { + if (stageActive[i]) { + count++; + } + } + + assert(count == activityCount); +} diff --cc src/cpu/activity.hh index 2d53dc4bb,000000000..2c0df5efb mode 100644,000000..100644 --- a/src/cpu/activity.hh +++ b/src/cpu/activity.hh @@@ -1,67 -1,0 +1,124 @@@ ++/* ++ * Copyright (c) 2006 The Regents of The University of Michigan ++ * All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are ++ * met: redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer; ++ * redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution; ++ * neither the name of the copyright holders nor the names of its ++ * contributors may be used to endorse or promote products derived from ++ * this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +#ifndef __CPU_ACTIVITY_HH__ +#define __CPU_ACTIVITY_HH__ + +#include "base/timebuf.hh" +#include "base/trace.hh" + ++/** ++ * ActivityRecorder helper class that informs the CPU if it can switch ++ * over to being idle or not. 
++ * over to being idle or not. It works by having a time buffer as
++ * long as any time buffer in the CPU, and the CPU and all of its
++ * stages inform the ActivityRecorder when they write to any time
++ * buffer. The ActivityRecorder marks a 1 in the "0" slot of the time
++ * buffer any time a stage writes to a time buffer, and it advances
++ * its time buffer at the same time as all other stages. The
++ * ActivityRecorder also records if a stage has activity to do next
++ * cycle. The recorder keeps a count of these two. Thus any time the
++ * count is non-zero, there is either communication still in flight,
++ * or activity that still must be done, meaning that the CPU cannot
++ * idle. If the count is zero, then the CPU can safely idle as it has
++ * no more outstanding work to do.
++ */
+class ActivityRecorder {
+  public:
+    ActivityRecorder(int num_stages, int longest_latency, int count);
+
+    /** Records that there is activity this cycle. */
+    void activity();
-    /** Advances the activity buffer, decrementing the activityCount if active
-     * communication just left the time buffer, and descheduling the CPU if
-     * there is no activity.
++
++    /** Advances the activity buffer, decrementing the activityCount
++     * if active communication just left the time buffer, and
++     * determining if there is no activity.
+     */
+    void advance();
++
+    /** Marks a stage as active. */
+    void activateStage(const int idx);
++
+    /** Deactivates a stage. */
+    void deactivateStage(const int idx);
+
++    /** Returns how many things are active within the recorder. */
+    int getActivityCount() { return activityCount; }
+
++    /** Sets the count to a starting value. Can be used to disable
++     * the idling option.
++     */
+    void setActivityCount(int count)
+    { activityCount = count; }
+
++    /** Returns if the CPU should be active. */
+    bool active() { return activityCount; }
+
++    /** Clears the time buffer and the activity count. */
+    void reset();
+
++    /** Debug function to dump the contents of the time buffer. */
+    void dump();
+
++    /** Debug function to ensure that the activity count matches the
++     * contents of the time buffer.
++     */
+    void validate();
+
+  private:
+    /** Time buffer that tracks if any cycle has active communication
+     * in it. It should be as long as the longest communication
+     * latency in the system. Each time any time buffer is written,
+     * the activity buffer should also be written to. The
+     * activityBuffer is advanced along with all the other time
+     * buffers, so it should have a 1 somewhere in it only if there
+     * is active communication in a time buffer.
+     */
+    TimeBuffer<bool> activityBuffer;
+
++    /** Longest latency time buffer in the CPU. */
+    int longestLatency;
+
+    /** Tracks how many stages and cycles of time buffer have
+     * activity. Stages increment this count when they switch to
+     * active, and decrement it when they switch to
+     * inactive. Whenever a cycle that previously had no information
+     * is written in the time buffer, this is incremented. When a
+     * cycle that had information exits the time buffer due to age,
+     * this count is decremented. When the count is 0, there is no
+     * activity in the CPU, and it can be descheduled.
+     */
+    int activityCount;
+
++    /** Number of stages that can be marked as active or inactive. */
+    int numStages;
+
+    /** Records which stages are active/inactive. */
+    bool *stageActive;
+};
+
+#endif // __CPU_ACTIVITY_HH__
diff --cc src/cpu/base_dyn_inst.cc
index 7eb974d36,000000000..66e425d5c
mode 100644,000000..100644
--- a/src/cpu/base_dyn_inst.cc
+++ b/src/cpu/base_dyn_inst.cc
@@@ -1,460 -1,0 +1,462 @@@
+/*
+ * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
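A minimal usage sketch of the ActivityRecorder interface from activity.hh above (ToyCPU, its stage hooks, and deschedule() are hypothetical illustrations, not part of this commit):

#include "cpu/activity.hh"

// Hypothetical CPU tick loop driving an ActivityRecorder.
class ToyCPU
{
    static const int NumStages = 5;   // e.g., fetch through commit
    ActivityRecorder activityRec;     // tracks outstanding work

  public:
    ToyCPU(int longest_latency)
        // A nonzero starting count keeps the CPU awake initially.
        : activityRec(NumStages, longest_latency, NumStages) { }

    void tick()
    {
        for (int i = 0; i < NumStages; ++i) {
            if (stageDidWork(i)) {
                activityRec.activity();       // wrote to a time buffer
                activityRec.activateStage(i); // has work next cycle too
            } else {
                activityRec.deactivateStage(i);
            }
        }

        // Age the communication window by one cycle.
        activityRec.advance();

        if (!activityRec.active()) {
            // No in-flight communication and no active stage:
            // safe to deschedule the CPU until something wakes it.
            deschedule();
        }
    }

  private:
    bool stageDidWork(int idx) { return false; }  // assumed hook (stub)
    void deschedule() { }                         // assumed hook (stub)
};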
+ *
+ * Authors: Kevin Lim
+ */
+
+#include <iostream>
+#include <set>
+#include <string>
+#include <sstream>
+
+#include "base/cprintf.hh"
+#include "base/trace.hh"
+
+#include "arch/faults.hh"
+#include "cpu/exetrace.hh"
+#include "mem/request.hh"
+
+#include "cpu/base_dyn_inst.hh"
+#include "cpu/o3/alpha_impl.hh"
+#include "cpu/o3/alpha_cpu.hh"
+//#include "cpu/ozone/simple_impl.hh"
+//#include "cpu/ozone/ozone_impl.hh"
+
+using namespace std;
+using namespace TheISA;
+
+#define NOHASH
+#ifndef NOHASH
+
+#include "base/hashmap.hh"
+
+unsigned int MyHashFunc(const BaseDynInst *addr)
+{
+    unsigned a = (unsigned)addr;
+    unsigned hash = (((a >> 14) ^ ((a >> 2) & 0xffff))) & 0x7FFFFFFF;
+
+    return hash;
+}
+
+typedef m5::hash_map<const BaseDynInst *, const BaseDynInst *, MyHashFunc>
+my_hash_t;
+
+my_hash_t thishash;
+#endif
+
+template <class Impl>
+BaseDynInst<Impl>::BaseDynInst(ExtMachInst machInst, Addr inst_PC,
+                               Addr pred_PC, InstSeqNum seq_num,
+                               FullCPU *cpu)
+    : staticInst(machInst), traceData(NULL), cpu(cpu)/*, xc(cpu->xcBase())*/
+{
+    seqNum = seq_num;
+
+    PC = inst_PC;
+    nextPC = PC + sizeof(MachInst);
+    predPC = pred_PC;
+
+    initVars();
+}
+
+template <class Impl>
+BaseDynInst<Impl>::BaseDynInst(StaticInstPtr &_staticInst)
+    : staticInst(_staticInst), traceData(NULL)
+{
+    seqNum = 0;
+    initVars();
+}
+
+template <class Impl>
+void
+BaseDynInst<Impl>::initVars()
+{
+    req = NULL;
+    effAddr = 0;
+    physEffAddr = 0;
+    storeSize = 0;
+
+    readyRegs = 0;
+
+    completed = false;
+    resultReady = false;
+    canIssue = false;
+    issued = false;
+    executed = false;
+    canCommit = false;
+    committed = false;
+    squashed = false;
+    squashedInIQ = false;
+    squashedInLSQ = false;
+    squashedInROB = false;
+    eaCalcDone = false;
+    memOpDone = false;
+    lqIdx = -1;
+    sqIdx = -1;
+    reachedCommit = false;
+
+    blockingInst = false;
+    recoverInst = false;
+
+    iqEntry = false;
+    robEntry = false;
+
+    serializeBefore = false;
+    serializeAfter = false;
+    serializeHandled = false;
+
+    // Eventually make this a parameter.
+    threadNumber = 0;
+
+    // Also make this a parameter, or perhaps get it from xc or cpu.
+    asid = 0;
+
+    // Initialize the fault to be unimplemented opcode.
+//    fault = new UnimplementedOpcodeFault;
+    fault = NoFault;
+
+    ++instcount;
+
+    if (instcount > 1500) {
+        cpu->dumpInsts();
+#ifdef DEBUG
+        dumpSNList();
+#endif
+        assert(instcount <= 1500);
+    }
+
+    DPRINTF(DynInst, "DynInst: [sn:%lli] Instruction created. Instcount=%i\n",
+            seqNum, instcount);
+
+#ifdef DEBUG
+    cpu->snList.insert(seqNum);
+#endif
+}
+
+template <class Impl>
+BaseDynInst<Impl>::~BaseDynInst()
+{
+    if (req) {
+        req = NULL;
+    }
+
+    if (traceData) {
+        delete traceData;
+    }
+
++    fault = NoFault;
++
+    --instcount;
+
+    DPRINTF(DynInst, "DynInst: [sn:%lli] Instruction destroyed. Instcount=%i\n",
+            seqNum, instcount);
+#ifdef DEBUG
+    cpu->snList.erase(seqNum);
+#endif
+}
+
+#ifdef DEBUG
+template <class Impl>
+void
+BaseDynInst<Impl>::dumpSNList()
+{
+    std::set<InstSeqNum>::iterator sn_it = cpu->snList.begin();
+
+    int count = 0;
+    while (sn_it != cpu->snList.end()) {
+        cprintf("%i: [sn:%lli] not destroyed\n", count, (*sn_it));
+        count++;
+        sn_it++;
+    }
+}
+#endif
+
+template <class Impl>
+void
+BaseDynInst<Impl>::prefetch(Addr addr, unsigned flags)
+{
+    // This is the "functional" implementation of prefetch. Not much
+    // happens here since prefetches don't affect the architectural
+    // state.
+/*
+    // Generate a MemReq so we can translate the effective address.
+    MemReqPtr req = new MemReq(addr, thread->getXCProxy(), 1, flags);
+    req->asid = asid;
+
+    // Prefetches never cause faults.
+    fault = NoFault;
+
+    // note this is a local, not BaseDynInst::fault
+    Fault trans_fault = cpu->translateDataReadReq(req);
+
+    if (trans_fault == NoFault && !(req->flags & UNCACHEABLE)) {
+        // It's a valid address to cacheable space. Record key MemReq
+        // parameters so we can generate another one just like it for
+        // the timing access without calling translate() again (which
+        // might mess up the TLB).
+        effAddr = req->vaddr;
+        physEffAddr = req->paddr;
+        memReqFlags = req->flags;
+    } else {
+        // Bogus address (invalid or uncacheable space). Mark it by
+        // setting the eff_addr to InvalidAddr.
+        effAddr = physEffAddr = MemReq::inval_addr;
+    }
+
+    if (traceData) {
+        traceData->setAddr(addr);
+    }
+*/
+}
+
+template <class Impl>
+void
+BaseDynInst<Impl>::writeHint(Addr addr, int size, unsigned flags)
+{
+    // Need to create a MemReq here so we can do a translation. This
+    // will cause a TLB miss trap if necessary... not sure whether
+    // that's the best thing to do or not. We don't really need the
+    // MemReq otherwise, since wh64 has no functional effect.
+/*
+    MemReqPtr req = new MemReq(addr, thread->getXCProxy(), size, flags);
+    req->asid = asid;
+
+    fault = cpu->translateDataWriteReq(req);
+
+    if (fault == NoFault && !(req->flags & UNCACHEABLE)) {
+        // Record key MemReq parameters so we can generate another one
+        // just like it for the timing access without calling translate()
+        // again (which might mess up the TLB).
+        effAddr = req->vaddr;
+        physEffAddr = req->paddr;
+        memReqFlags = req->flags;
+    } else {
+        // ignore faults & accesses to uncacheable space... treat as no-op
+        effAddr = physEffAddr = MemReq::inval_addr;
+    }
+
+    storeSize = size;
+    storeData = 0;
+*/
+}
+
+/**
+ * @todo Need to find a way to get the cache block size here.
+ */
+template <class Impl>
+Fault
+BaseDynInst<Impl>::copySrcTranslate(Addr src)
+{
+/*
+    MemReqPtr req = new MemReq(src, thread->getXCProxy(), 64);
+    req->asid = asid;
+
+    // translate to physical address
+    Fault fault = cpu->translateDataReadReq(req);
+
+    if (fault == NoFault) {
+        thread->copySrcAddr = src;
+        thread->copySrcPhysAddr = req->paddr;
+    } else {
+        thread->copySrcAddr = 0;
+        thread->copySrcPhysAddr = 0;
+    }
+    return fault;
+*/
+    return NoFault;
+}
+
+/**
+ * @todo Need to find a way to get the cache block size here.
+ */
+template <class Impl>
+Fault
+BaseDynInst<Impl>::copy(Addr dest)
+{
+/*
+    uint8_t data[64];
+    FunctionalMemory *mem = thread->mem;
-    assert(thread->copySrcPhysAddr || thread->misspeculating());
++    assert(thread->copySrcPhysAddr);
+    MemReqPtr req = new MemReq(dest, thread->getXCProxy(), 64);
+    req->asid = asid;
+
+    // translate to physical address
+    Fault fault = cpu->translateDataWriteReq(req);
+
+    if (fault == NoFault) {
+        Addr dest_addr = req->paddr;
+        // Need to read straight from memory since we have more than 8 bytes.
+ req->paddr = thread->copySrcPhysAddr; + mem->read(req, data); + req->paddr = dest_addr; + mem->write(req, data); + } + return fault; +*/ + return NoFault; +} + +template +void +BaseDynInst::dump() +{ + cprintf("T%d : %#08d `", threadNumber, PC); + cout << staticInst->disassemble(PC); + cprintf("'\n"); +} + +template +void +BaseDynInst::dump(std::string &outstring) +{ + std::ostringstream s; + s << "T" << threadNumber << " : 0x" << PC << " " + << staticInst->disassemble(PC); + + outstring = s.str(); +} + +#if 0 +template +Fault +BaseDynInst::mem_access(mem_cmd cmd, Addr addr, void *p, int nbytes) +{ + Fault fault; + + // check alignments, even speculative this test should always pass + if ((nbytes & nbytes - 1) != 0 || (addr & nbytes - 1) != 0) { + for (int i = 0; i < nbytes; i++) + ((char *) p)[i] = 0; + + // I added the following because according to the comment above, + // we should never get here. The comment lies +#if 0 + panic("unaligned access. Cycle = %n", curTick); +#endif + return NoFault; + } + + MemReqPtr req = new MemReq(addr, thread, nbytes); + switch(cmd) { + case Read: + fault = spec_mem->read(req, (uint8_t *)p); + break; + + case Write: + fault = spec_mem->write(req, (uint8_t *)p); + if (fault != NoFault) + break; + + specMemWrite = true; + storeSize = nbytes; + switch(nbytes) { + case sizeof(uint8_t): + *(uint8_t)&storeData = (uint8_t *)p; + break; + case sizeof(uint16_t): + *(uint16_t)&storeData = (uint16_t *)p; + break; + case sizeof(uint32_t): + *(uint32_t)&storeData = (uint32_t *)p; + break; + case sizeof(uint64_t): + *(uint64_t)&storeData = (uint64_t *)p; + break; + } + break; + + default: + fault = genMachineCheckFault(); + break; + } + + trace_mem(fault, cmd, addr, p, nbytes); + + return fault; +} + +#endif + +template +void +BaseDynInst::markSrcRegReady() +{ + if (++readyRegs == numSrcRegs()) { + canIssue = true; + } +} + +template +void +BaseDynInst::markSrcRegReady(RegIndex src_idx) +{ + ++readyRegs; + + _readySrcRegIdx[src_idx] = true; + + if (readyRegs == numSrcRegs()) { + canIssue = true; + } +} + +template +bool +BaseDynInst::eaSrcsReady() +{ + // For now I am assuming that src registers 1..n-1 are the ones that the + // EA calc depends on. (i.e. src reg 0 is the source of the data to be + // stored) + + for (int i = 1; i < numSrcRegs(); ++i) { + if (!_readySrcRegIdx[i]) + return false; + } + + return true; +} + +// Forward declaration +template class BaseDynInst; + +template <> +int +BaseDynInst::instcount = 0; +/* +// Forward declaration +template class BaseDynInst; + +template <> +int +BaseDynInst::instcount = 0; + +// Forward declaration +template class BaseDynInst; + +template <> +int +BaseDynInst::instcount = 0; +*/ diff --cc src/cpu/checker/cpu.hh index 7e63febb6,000000000..2f9689028 mode 100644,000000..100644 --- a/src/cpu/checker/cpu.hh +++ b/src/cpu/checker/cpu.hh @@@ -1,354 -1,0 +1,375 @@@ +/* + * Copyright (c) 2006 The Regents of The University of Michigan + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CPU_CHECKER_CPU_HH__ +#define __CPU_CHECKER_CPU_HH__ + +#include +#include +#include + +#include "arch/types.hh" +#include "base/statistics.hh" +#include "config/full_system.hh" +#include "cpu/base.hh" +#include "cpu/base_dyn_inst.hh" +#include "cpu/cpu_exec_context.hh" +#include "cpu/pc_event.hh" +#include "cpu/static_inst.hh" +#include "sim/eventq.hh" + +// forward declarations +#if FULL_SYSTEM +class Processor; +class AlphaITB; +class AlphaDTB; +class PhysicalMemory; + +class RemoteGDB; +class GDBListener; + +#else + +class Process; + +#endif // FULL_SYSTEM +template +class BaseDynInst; +class ExecContext; +class MemInterface; +class Checkpoint; +class Request; +class Sampler; + ++/** ++ * CheckerCPU class. Dynamically verifies instructions as they are ++ * completed by making sure that the instruction and its results match ++ * the independent execution of the benchmark inside the checker. The ++ * checker verifies instructions in order, regardless of the order in ++ * which instructions complete. There are certain results that can ++ * not be verified, specifically the result of a store conditional or ++ * the values of uncached accesses. In these cases, and with ++ * instructions marked as "IsUnverifiable", the checker assumes that ++ * the value from the main CPU's execution is correct and simply ++ * copies that value. It provides a CheckerExecContext (see ++ * checker/exec_context.hh) that provides hooks for updating the ++ * Checker's state through any ExecContext accesses. This allows the ++ * checker to be able to correctly verify instructions, even with ++ * external accesses to the ExecContext that change state. 
++ */
+class CheckerCPU : public BaseCPU
+{
+  protected:
+    typedef TheISA::MachInst MachInst;
+    typedef TheISA::FloatReg FloatReg;
+    typedef TheISA::FloatRegBits FloatRegBits;
+    typedef TheISA::MiscReg MiscReg;
+  public:
-    // main simulation loop (one cycle)
+    virtual void init();
+
+    struct Params : public BaseCPU::Params
+    {
+#if FULL_SYSTEM
+        AlphaITB *itb;
+        AlphaDTB *dtb;
+        FunctionalMemory *mem;
+#else
+        Process *process;
+#endif
+        bool exitOnError;
+    };
+
+  public:
+    CheckerCPU(Params *p);
+    virtual ~CheckerCPU();
+
+    void setMemory(MemObject *mem);
+
+    MemObject *memPtr;
+
+#if FULL_SYSTEM
+    void setSystem(System *system);
+
+    System *systemPtr;
+#endif
+  public:
+    // execution context
+    CPUExecContext *cpuXC;
+
+    ExecContext *xcProxy;
+
+    AlphaITB *itb;
+    AlphaDTB *dtb;
+
+#if FULL_SYSTEM
+    Addr dbg_vtophys(Addr addr);
+#endif
+
+    union Result {
+        uint64_t integer;
+        float fp;
+        double dbl;
+    };
+
+    Result result;
+
+    // current instruction
+    MachInst machInst;
+
+    // Refcounted pointer to the one memory request.
+    Request *memReq;
+
+    StaticInstPtr curStaticInst;
+
+    // number of simulated instructions
+    Counter numInst;
+    Counter startNumInst;
+
+    std::queue<int> miscRegIdxs;
+
+    virtual Counter totalInstructions() const
+    {
+        return numInst - startNumInst;
+    }
+
+    // number of simulated loads
+    Counter numLoad;
+    Counter startNumLoad;
+
+    virtual void serialize(std::ostream &os);
+    virtual void unserialize(Checkpoint *cp, const std::string &section);
+
+    template <class T>
+    Fault read(Addr addr, T &data, unsigned flags);
+
+    template <class T>
+    Fault write(T data, Addr addr, unsigned flags, uint64_t *res);
+
+    // These functions are only used in CPU models that split
+    // effective address computation from the actual memory access.
+    void setEA(Addr EA) { panic("SimpleCPU::setEA() not implemented\n"); }
+    Addr getEA() { panic("SimpleCPU::getEA() not implemented\n"); }
+
+    void prefetch(Addr addr, unsigned flags)
+    {
+        // need to do this...
+    }
+
+    void writeHint(Addr addr, int size, unsigned flags)
+    {
+        // need to do this...
+    }
+
+    Fault copySrcTranslate(Addr src);
+
+    Fault copy(Addr dest);
+
+    // The register accessor methods provide the index of the
+    // instruction's operand (e.g., 0 or 1), not the architectural
+    // register index, to simplify the implementation of register
+    // renaming. We find the architectural register index by indexing
+    // into the instruction's own operand index table. Note that a
+    // raw pointer to the StaticInst is provided instead of a
+    // ref-counted StaticInstPtr to reduce overhead. This is fine as
+    // long as these methods don't copy the pointer into any long-term
+    // storage (which is pretty hard to imagine they would have reason
+    // to do).
+ + uint64_t readIntReg(const StaticInst *si, int idx) + { + return cpuXC->readIntReg(si->srcRegIdx(idx)); + } + + FloatReg readFloatReg(const StaticInst *si, int idx, int width) + { + int reg_idx = si->srcRegIdx(idx) - TheISA::FP_Base_DepTag; + return cpuXC->readFloatReg(reg_idx, width); + } + + FloatReg readFloatReg(const StaticInst *si, int idx) + { + int reg_idx = si->srcRegIdx(idx) - TheISA::FP_Base_DepTag; + return cpuXC->readFloatReg(reg_idx); + } + + FloatRegBits readFloatRegBits(const StaticInst *si, int idx, int width) + { + int reg_idx = si->srcRegIdx(idx) - TheISA::FP_Base_DepTag; + return cpuXC->readFloatRegBits(reg_idx, width); + } + + FloatRegBits readFloatRegBits(const StaticInst *si, int idx) + { + int reg_idx = si->srcRegIdx(idx) - TheISA::FP_Base_DepTag; + return cpuXC->readFloatRegBits(reg_idx); + } + + void setIntReg(const StaticInst *si, int idx, uint64_t val) + { + cpuXC->setIntReg(si->destRegIdx(idx), val); + result.integer = val; + } + + void setFloatReg(const StaticInst *si, int idx, FloatReg val, int width) + { + int reg_idx = si->destRegIdx(idx) - TheISA::FP_Base_DepTag; + cpuXC->setFloatReg(reg_idx, val, width); + switch(width) { + case 32: + result.fp = val; + break; + case 64: + result.dbl = val; + break; + }; + } + + void setFloatReg(const StaticInst *si, int idx, FloatReg val) + { + int reg_idx = si->destRegIdx(idx) - TheISA::FP_Base_DepTag; + cpuXC->setFloatReg(reg_idx, val); + result.fp = val; + } + + void setFloatRegBits(const StaticInst *si, int idx, FloatRegBits val, + int width) + { + int reg_idx = si->destRegIdx(idx) - TheISA::FP_Base_DepTag; + cpuXC->setFloatRegBits(reg_idx, val, width); + result.integer = val; + } + + void setFloatRegBits(const StaticInst *si, int idx, FloatRegBits val) + { + int reg_idx = si->destRegIdx(idx) - TheISA::FP_Base_DepTag; + cpuXC->setFloatRegBits(reg_idx, val); + result.integer = val; + } + + uint64_t readPC() { return cpuXC->readPC(); } + + uint64_t readNextPC() { return cpuXC->readNextPC(); } + + void setNextPC(uint64_t val) { + cpuXC->setNextPC(val); + } + + MiscReg readMiscReg(int misc_reg) + { + return cpuXC->readMiscReg(misc_reg); + } + + MiscReg readMiscRegWithEffect(int misc_reg, Fault &fault) + { + return cpuXC->readMiscRegWithEffect(misc_reg, fault); + } + + Fault setMiscReg(int misc_reg, const MiscReg &val) + { + result.integer = val; + miscRegIdxs.push(misc_reg); + return cpuXC->setMiscReg(misc_reg, val); + } + + Fault setMiscRegWithEffect(int misc_reg, const MiscReg &val) + { + miscRegIdxs.push(misc_reg); + return cpuXC->setMiscRegWithEffect(misc_reg, val); + } + + void recordPCChange(uint64_t val) { changedPC = true; } + void recordNextPCChange(uint64_t val) { changedNextPC = true; } + + bool translateInstReq(Request *req); + void translateDataWriteReq(Request *req); + void translateDataReadReq(Request *req); + +#if FULL_SYSTEM + Fault hwrei() { return cpuXC->hwrei(); } + int readIntrFlag() { return cpuXC->readIntrFlag(); } + void setIntrFlag(int val) { cpuXC->setIntrFlag(val); } + bool inPalMode() { return cpuXC->inPalMode(); } + void ev5_trap(Fault fault) { fault->invoke(xcProxy); } + bool simPalCheck(int palFunc) { return cpuXC->simPalCheck(palFunc); } +#else + // Assume that the normal CPU's call to syscall was successful. + // The checker's state would have already been updated by the syscall. 
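To make the in-order verification scheme described in the CheckerCPU comment above concrete, here is a minimal self-contained sketch (ToyInst, ToyChecker, and their fields are simplified stand-ins for the real DynInst and Checker classes, and the execute() stub stands in for the checker's independent re-execution):

#include <cstdint>
#include <cstdio>
#include <map>

struct ToyInst {
    uint64_t seqNum;        // program-order sequence number
    uint64_t pc;
    uint64_t result;        // result produced by the main CPU
    bool unverifiable;      // e.g., store conditional or uncached access
};

class ToyChecker {
    std::map<uint64_t, ToyInst> pending;  // completed, not yet verified
    uint64_t youngestSN = 0;              // last verified sequence number

  public:
    // Called whenever the main CPU completes an instruction, in any order.
    void completed(const ToyInst &inst) { pending[inst.seqNum] = inst; }

    // Verify strictly in program order, regardless of completion order.
    void verify()
    {
        auto it = pending.begin();
        while (it != pending.end() && it->first == youngestSN + 1) {
            const ToyInst &inst = it->second;
            uint64_t golden = execute(inst);  // checker's own execution

            if (inst.unverifiable) {
                // Cannot verify; adopt the main CPU's value instead.
                golden = inst.result;
            }
            if (golden != inst.result)
                std::printf("Checker mismatch at PC %#llx\n",
                            (unsigned long long)inst.pc);

            youngestSN = inst.seqNum;
            it = pending.erase(it);
        }
    }

  private:
    uint64_t execute(const ToyInst &inst) { return inst.result; }  // stub
};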
+ void syscall(uint64_t callnum) { } +#endif + + void handleError() + { + if (exitOnError) + panic("Checker found error!"); + } + bool checkFlags(Request *req); + + ExecContext *xcBase() { return xcProxy; } + CPUExecContext *cpuXCBase() { return cpuXC; } + + Result unverifiedResult; + Request *unverifiedReq; + + bool changedPC; + bool willChangePC; + uint64_t newPC; + bool changedNextPC; + bool exitOnError; + + InstSeqNum youngestSN; +}; + ++/** ++ * Templated Checker class. This Checker class is templated on the ++ * DynInstPtr of the instruction type that will be verified. Proper ++ * template instantiations of the Checker must be placed at the bottom ++ * of checker/cpu.cc. ++ */ +template +class Checker : public CheckerCPU +{ + public: + Checker(Params *p) + : CheckerCPU(p) + { } + + void switchOut(Sampler *s); + void takeOverFrom(BaseCPU *oldCPU); + + void tick(DynInstPtr &inst); + + void validateInst(DynInstPtr &inst); + void validateExecution(DynInstPtr &inst); + void validateState(); + + std::list instList; + typedef typename std::list::iterator InstListIt; + void dumpInsts(); +}; + +#endif // __CPU_CHECKER_CPU_HH__ diff --cc src/cpu/checker/cpu_builder.cc index 397ccab14,000000000..d80daef97 mode 100644,000000..100644 --- a/src/cpu/checker/cpu_builder.cc +++ b/src/cpu/checker/cpu_builder.cc @@@ -1,126 -1,0 +1,156 @@@ ++/* ++ * Copyright (c) 2006 The Regents of The University of Michigan ++ * All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are ++ * met: redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer; ++ * redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution; ++ * neither the name of the copyright holders nor the names of its ++ * contributors may be used to endorse or promote products derived from ++ * this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +#include + +#include "cpu/checker/cpu.hh" +#include "cpu/inst_seq.hh" +#include "cpu/ozone/dyn_inst.hh" +#include "cpu/ozone/ozone_impl.hh" +#include "mem/base_mem.hh" +#include "sim/builder.hh" +#include "sim/process.hh" +#include "sim/sim_object.hh" + ++/** ++ * Specific non-templated derived class used for SimObject configuration. 
++ */ +class OzoneChecker : public Checker > > +{ + public: + OzoneChecker(Params *p) + : Checker > >(p) + { } +}; + +//////////////////////////////////////////////////////////////////////// +// +// CheckerCPU Simulation Object +// +BEGIN_DECLARE_SIM_OBJECT_PARAMS(OzoneChecker) + + Param max_insts_any_thread; + Param max_insts_all_threads; + Param max_loads_any_thread; + Param max_loads_all_threads; + +#if FULL_SYSTEM + SimObjectParam itb; + SimObjectParam dtb; + SimObjectParam mem; + SimObjectParam system; + Param cpu_id; + Param profile; +#else + SimObjectParam workload; +#endif // FULL_SYSTEM + Param clock; + SimObjectParam icache; + SimObjectParam dcache; + + Param defer_registration; + Param exitOnError; + Param function_trace; + Param function_trace_start; + +END_DECLARE_SIM_OBJECT_PARAMS(OzoneChecker) + +BEGIN_INIT_SIM_OBJECT_PARAMS(OzoneChecker) + + INIT_PARAM(max_insts_any_thread, + "terminate when any thread reaches this inst count"), + INIT_PARAM(max_insts_all_threads, + "terminate when all threads have reached this inst count"), + INIT_PARAM(max_loads_any_thread, + "terminate when any thread reaches this load count"), + INIT_PARAM(max_loads_all_threads, + "terminate when all threads have reached this load count"), + +#if FULL_SYSTEM + INIT_PARAM(itb, "Instruction TLB"), + INIT_PARAM(dtb, "Data TLB"), + INIT_PARAM(mem, "memory"), + INIT_PARAM(system, "system object"), + INIT_PARAM(cpu_id, "processor ID"), + INIT_PARAM(profile, ""), +#else + INIT_PARAM(workload, "processes to run"), +#endif // FULL_SYSTEM + + INIT_PARAM(clock, "clock speed"), + INIT_PARAM(icache, "L1 instruction cache object"), + INIT_PARAM(dcache, "L1 data cache object"), + + INIT_PARAM(defer_registration, "defer system registration (for sampling)"), + INIT_PARAM(exitOnError, "exit on error"), + INIT_PARAM(function_trace, "Enable function trace"), + INIT_PARAM(function_trace_start, "Cycle to start function trace") + +END_INIT_SIM_OBJECT_PARAMS(OzoneChecker) + + +CREATE_SIM_OBJECT(OzoneChecker) +{ + OzoneChecker::Params *params = new OzoneChecker::Params(); + params->name = getInstanceName(); + params->numberOfThreads = 1; + params->max_insts_any_thread = 0; + params->max_insts_all_threads = 0; + params->max_loads_any_thread = 0; + params->max_loads_all_threads = 0; + params->exitOnError = exitOnError; + params->deferRegistration = defer_registration; + params->functionTrace = function_trace; + params->functionTraceStart = function_trace_start; + params->clock = clock; + // Hack to touch all parameters. Consider not deriving Checker + // from BaseCPU..it's not really a CPU in the end. + Counter temp; + temp = max_insts_any_thread; + temp = max_insts_all_threads; + temp = max_loads_any_thread; + temp = max_loads_all_threads; + BaseMem *cache = icache; + cache = dcache; + +#if FULL_SYSTEM + params->itb = itb; + params->dtb = dtb; + params->mem = mem; + params->system = system; + params->cpu_id = cpu_id; + params->profile = profile; +#else + params->process = workload; +#endif + + OzoneChecker *cpu = new OzoneChecker(params); + return cpu; +} + +REGISTER_SIM_OBJECT("OzoneChecker", OzoneChecker) diff --cc src/cpu/checker/exec_context.hh index 7d30e736a,000000000..054739a6a mode 100644,000000..100644 --- a/src/cpu/checker/exec_context.hh +++ b/src/cpu/checker/exec_context.hh @@@ -1,283 -1,0 +1,290 @@@ +/* + * Copyright (c) 2006 The Regents of The University of Michigan + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CPU_CHECKER_EXEC_CONTEXT_HH__ +#define __CPU_CHECKER_EXEC_CONTEXT_HH__ + +#include "cpu/checker/cpu.hh" +#include "cpu/cpu_exec_context.hh" +#include "cpu/exec_context.hh" + +class EndQuiesceEvent; +namespace Kernel { + class Statistics; +}; + ++/** ++ * Derived ExecContext class for use with the Checker. The template ++ * parameter is the ExecContext class used by the specific CPU being ++ * verified. This CheckerExecContext is then used by the main CPU in ++ * place of its usual ExecContext class. It handles updating the ++ * checker's state any time state is updated through the ExecContext. ++ */ +template +class CheckerExecContext : public ExecContext +{ + public: + CheckerExecContext(XC *actual_xc, + CheckerCPU *checker_cpu) + : actualXC(actual_xc), checkerXC(checker_cpu->cpuXC), + checkerCPU(checker_cpu) + { } + + private: + XC *actualXC; + CPUExecContext *checkerXC; + CheckerCPU *checkerCPU; + + public: + + BaseCPU *getCpuPtr() { return actualXC->getCpuPtr(); } + + void setCpuId(int id) + { + actualXC->setCpuId(id); + checkerXC->setCpuId(id); + } + + int readCpuId() { return actualXC->readCpuId(); } + + TranslatingPort *getMemPort() { return actualXC->getMemPort(); } + +#if FULL_SYSTEM + System *getSystemPtr() { return actualXC->getSystemPtr(); } + + PhysicalMemory *getPhysMemPtr() { return actualXC->getPhysMemPtr(); } + + AlphaITB *getITBPtr() { return actualXC->getITBPtr(); } + + AlphaDTB *getDTBPtr() { return actualXC->getDTBPtr(); } + + Kernel::Statistics *getKernelStats() { return actualXC->getKernelStats(); } +#else + Process *getProcessPtr() { return actualXC->getProcessPtr(); } +#endif + + Status status() const { return actualXC->status(); } + + void setStatus(Status new_status) + { + actualXC->setStatus(new_status); + checkerXC->setStatus(new_status); + } + + /// Set the status to Active. Optional delay indicates number of + /// cycles to wait before beginning execution. + void activate(int delay = 1) { actualXC->activate(delay); } + + /// Set the status to Suspended. 
+ void suspend() { actualXC->suspend(); } + + /// Set the status to Unallocated. + void deallocate() { actualXC->deallocate(); } + + /// Set the status to Halted. + void halt() { actualXC->halt(); } + +#if FULL_SYSTEM + void dumpFuncProfile() { actualXC->dumpFuncProfile(); } +#endif + + void takeOverFrom(ExecContext *oldContext) + { + actualXC->takeOverFrom(oldContext); + checkerXC->takeOverFrom(oldContext); + } + + void regStats(const std::string &name) { actualXC->regStats(name); } + + void serialize(std::ostream &os) { actualXC->serialize(os); } + void unserialize(Checkpoint *cp, const std::string §ion) + { actualXC->unserialize(cp, section); } + +#if FULL_SYSTEM + EndQuiesceEvent *getQuiesceEvent() { return actualXC->getQuiesceEvent(); } + + Tick readLastActivate() { return actualXC->readLastActivate(); } + Tick readLastSuspend() { return actualXC->readLastSuspend(); } + + void profileClear() { return actualXC->profileClear(); } + void profileSample() { return actualXC->profileSample(); } +#endif + + int getThreadNum() { return actualXC->getThreadNum(); } + + // @todo: Do I need this? + MachInst getInst() { return actualXC->getInst(); } + + // @todo: Do I need this? + void copyArchRegs(ExecContext *xc) + { + actualXC->copyArchRegs(xc); + checkerXC->copyArchRegs(xc); + } + + void clearArchRegs() + { + actualXC->clearArchRegs(); + checkerXC->clearArchRegs(); + } + + // + // New accessors for new decoder. + // + uint64_t readIntReg(int reg_idx) + { return actualXC->readIntReg(reg_idx); } + + FloatReg readFloatReg(int reg_idx, int width) + { return actualXC->readFloatReg(reg_idx, width); } + + FloatReg readFloatReg(int reg_idx) + { return actualXC->readFloatReg(reg_idx); } + + FloatRegBits readFloatRegBits(int reg_idx, int width) + { return actualXC->readFloatRegBits(reg_idx, width); } + + FloatRegBits readFloatRegBits(int reg_idx) + { return actualXC->readFloatRegBits(reg_idx); } + + void setIntReg(int reg_idx, uint64_t val) + { + actualXC->setIntReg(reg_idx, val); + checkerXC->setIntReg(reg_idx, val); + } + + void setFloatReg(int reg_idx, FloatReg val, int width) + { + actualXC->setFloatReg(reg_idx, val, width); + checkerXC->setFloatReg(reg_idx, val, width); + } + + void setFloatReg(int reg_idx, FloatReg val) + { + actualXC->setFloatReg(reg_idx, val); + checkerXC->setFloatReg(reg_idx, val); + } + + void setFloatRegBits(int reg_idx, FloatRegBits val, int width) + { + actualXC->setFloatRegBits(reg_idx, val, width); + checkerXC->setFloatRegBits(reg_idx, val, width); + } + + void setFloatRegBits(int reg_idx, FloatRegBits val) + { + actualXC->setFloatRegBits(reg_idx, val); + checkerXC->setFloatRegBits(reg_idx, val); + } + + uint64_t readPC() { return actualXC->readPC(); } + + void setPC(uint64_t val) + { + actualXC->setPC(val); + checkerXC->setPC(val); + checkerCPU->recordPCChange(val); + } + + uint64_t readNextPC() { return actualXC->readNextPC(); } + + void setNextPC(uint64_t val) + { + actualXC->setNextPC(val); + checkerXC->setNextPC(val); + checkerCPU->recordNextPCChange(val); + } + + uint64_t readNextNPC() { return actualXC->readNextNPC(); } + + void setNextNPC(uint64_t val) + { + actualXC->setNextNPC(val); + checkerXC->setNextNPC(val); + checkerCPU->recordNextPCChange(val); + } + + MiscReg readMiscReg(int misc_reg) + { return actualXC->readMiscReg(misc_reg); } + + MiscReg readMiscRegWithEffect(int misc_reg, Fault &fault) + { return actualXC->readMiscRegWithEffect(misc_reg, fault); } + + Fault setMiscReg(int misc_reg, const MiscReg &val) + { + checkerXC->setMiscReg(misc_reg, val); + 
return actualXC->setMiscReg(misc_reg, val); + } + + Fault setMiscRegWithEffect(int misc_reg, const MiscReg &val) + { + checkerXC->setMiscRegWithEffect(misc_reg, val); + return actualXC->setMiscRegWithEffect(misc_reg, val); + } + + unsigned readStCondFailures() + { return actualXC->readStCondFailures(); } + + void setStCondFailures(unsigned sc_failures) + { + checkerXC->setStCondFailures(sc_failures); + actualXC->setStCondFailures(sc_failures); + } +#if FULL_SYSTEM + bool inPalMode() { return actualXC->inPalMode(); } +#endif + + // @todo: Fix this! + bool misspeculating() { return actualXC->misspeculating(); } + +#if !FULL_SYSTEM + IntReg getSyscallArg(int i) { return actualXC->getSyscallArg(i); } + + // used to shift args for indirect syscall + void setSyscallArg(int i, IntReg val) + { + checkerXC->setSyscallArg(i, val); + actualXC->setSyscallArg(i, val); + } + + void setSyscallReturn(SyscallReturn return_value) + { + checkerXC->setSyscallReturn(return_value); + actualXC->setSyscallReturn(return_value); + } + + Counter readFuncExeInst() { return actualXC->readFuncExeInst(); } +#endif + void changeRegFileContext(RegFile::ContextParam param, + RegFile::ContextVal val) + { + actualXC->changeRegFileContext(param, val); + checkerXC->changeRegFileContext(param, val); + } +}; + +#endif // __CPU_CHECKER_EXEC_CONTEXT_HH__ diff --cc src/cpu/checker/o3_cpu_builder.cc index c7883b42b,000000000..31e945f73 mode 100644,000000..100644 --- a/src/cpu/checker/o3_cpu_builder.cc +++ b/src/cpu/checker/o3_cpu_builder.cc @@@ -1,121 -1,0 +1,151 @@@ ++/* ++ * Copyright (c) 2006 The Regents of The University of Michigan ++ * All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are ++ * met: redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer; ++ * redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution; ++ * neither the name of the copyright holders nor the names of its ++ * contributors may be used to endorse or promote products derived from ++ * this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +#include + +#include "cpu/checker/cpu.hh" +#include "cpu/inst_seq.hh" +#include "cpu/o3/alpha_dyn_inst.hh" +#include "cpu/o3/alpha_impl.hh" +#include "sim/builder.hh" +#include "sim/process.hh" +#include "sim/sim_object.hh" + +class MemObject; + ++/** ++ * Specific non-templated derived class used for SimObject configuration. 
++ */ +class O3Checker : public Checker > > +{ + public: + O3Checker(Params *p) + : Checker > >(p) + { } +}; + +//////////////////////////////////////////////////////////////////////// +// +// CheckerCPU Simulation Object +// +BEGIN_DECLARE_SIM_OBJECT_PARAMS(O3Checker) + + Param max_insts_any_thread; + Param max_insts_all_threads; + Param max_loads_any_thread; + Param max_loads_all_threads; + +#if FULL_SYSTEM + SimObjectParam itb; + SimObjectParam dtb; + SimObjectParam mem; + SimObjectParam system; + Param cpu_id; + Param profile; +#else + SimObjectParam workload; +#endif // FULL_SYSTEM + Param clock; + + Param defer_registration; + Param exitOnError; + Param function_trace; + Param function_trace_start; + +END_DECLARE_SIM_OBJECT_PARAMS(O3Checker) + +BEGIN_INIT_SIM_OBJECT_PARAMS(O3Checker) + + INIT_PARAM(max_insts_any_thread, + "terminate when any thread reaches this inst count"), + INIT_PARAM(max_insts_all_threads, + "terminate when all threads have reached this inst count"), + INIT_PARAM(max_loads_any_thread, + "terminate when any thread reaches this load count"), + INIT_PARAM(max_loads_all_threads, + "terminate when all threads have reached this load count"), + +#if FULL_SYSTEM + INIT_PARAM(itb, "Instruction TLB"), + INIT_PARAM(dtb, "Data TLB"), + INIT_PARAM(mem, "memory"), + INIT_PARAM(system, "system object"), + INIT_PARAM(cpu_id, "processor ID"), + INIT_PARAM(profile, ""), +#else + INIT_PARAM(workload, "processes to run"), +#endif // FULL_SYSTEM + + INIT_PARAM(clock, "clock speed"), + + INIT_PARAM(defer_registration, "defer system registration (for sampling)"), + INIT_PARAM(exitOnError, "exit on error"), + INIT_PARAM(function_trace, "Enable function trace"), + INIT_PARAM(function_trace_start, "Cycle to start function trace") + +END_INIT_SIM_OBJECT_PARAMS(O3Checker) + + +CREATE_SIM_OBJECT(O3Checker) +{ + O3Checker::Params *params = new O3Checker::Params(); + params->name = getInstanceName(); + params->numberOfThreads = 1; + params->max_insts_any_thread = 0; + params->max_insts_all_threads = 0; + params->max_loads_any_thread = 0; + params->max_loads_all_threads = 0; + params->exitOnError = exitOnError; + params->deferRegistration = defer_registration; + params->functionTrace = function_trace; + params->functionTraceStart = function_trace_start; + params->clock = clock; + // Hack to touch all parameters. Consider not deriving Checker + // from BaseCPU..it's not really a CPU in the end. + Counter temp; + temp = max_insts_any_thread; + temp = max_insts_all_threads; + temp = max_loads_any_thread; + temp = max_loads_all_threads; + +#if FULL_SYSTEM + params->itb = itb; + params->dtb = dtb; + params->mem = mem; + params->system = system; + params->cpu_id = cpu_id; + params->profile = profile; +#else + params->process = workload; +#endif + + O3Checker *cpu = new O3Checker(params); + return cpu; +} + +REGISTER_SIM_OBJECT("O3Checker", O3Checker) diff --cc src/cpu/o3/2bit_local_pred.cc index 3d6a78bed,000000000..77a45ea26 mode 100644,000000..100644 --- a/src/cpu/o3/2bit_local_pred.cc +++ b/src/cpu/o3/2bit_local_pred.cc @@@ -1,145 -1,0 +1,146 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#include "base/intmath.hh" +#include "base/misc.hh" +#include "base/trace.hh" +#include "cpu/o3/2bit_local_pred.hh" + - DefaultBP::DefaultBP(unsigned _localPredictorSize, - unsigned _localCtrBits, - unsigned _instShiftAmt) ++LocalBP::LocalBP(unsigned _localPredictorSize, ++ unsigned _localCtrBits, ++ unsigned _instShiftAmt) + : localPredictorSize(_localPredictorSize), + localCtrBits(_localCtrBits), + instShiftAmt(_instShiftAmt) +{ + if (!isPowerOf2(localPredictorSize)) { + fatal("Invalid local predictor size!\n"); + } + + localPredictorSets = localPredictorSize / localCtrBits; + + if (!isPowerOf2(localPredictorSets)) { + fatal("Invalid number of local predictor sets! Check localCtrBits.\n"); + } + + // Setup the index mask. + indexMask = localPredictorSets - 1; + + DPRINTF(Fetch, "Branch predictor: index mask: %#x\n", indexMask); + + // Setup the array of counters for the local predictor. 
+ localCtrs.resize(localPredictorSets); + + for (int i = 0; i < localPredictorSets; ++i) + localCtrs[i].setBits(_localCtrBits); + + DPRINTF(Fetch, "Branch predictor: local predictor size: %i\n", + localPredictorSize); + + DPRINTF(Fetch, "Branch predictor: local counter bits: %i\n", localCtrBits); + + DPRINTF(Fetch, "Branch predictor: instruction shift amount: %i\n", + instShiftAmt); +} + +void - DefaultBP::reset() ++LocalBP::reset() +{ + for (int i = 0; i < localPredictorSets; ++i) { + localCtrs[i].reset(); + } +} + +bool - DefaultBP::lookup(Addr &branch_addr) ++LocalBP::lookup(Addr &branch_addr, void * &bp_history) +{ + bool taken; - uint8_t local_prediction; ++ uint8_t counter_val; + unsigned local_predictor_idx = getLocalIndex(branch_addr); + + DPRINTF(Fetch, "Branch predictor: Looking up index %#x\n", + local_predictor_idx); + - local_prediction = localCtrs[local_predictor_idx].read(); ++ counter_val = localCtrs[local_predictor_idx].read(); + + DPRINTF(Fetch, "Branch predictor: prediction is %i.\n", - (int)local_prediction); ++ (int)counter_val); + - taken = getPrediction(local_prediction); ++ taken = getPrediction(counter_val); + +#if 0 + // Speculative update. + if (taken) { + DPRINTF(Fetch, "Branch predictor: Branch updated as taken.\n"); + localCtrs[local_predictor_idx].increment(); + } else { + DPRINTF(Fetch, "Branch predictor: Branch updated as not taken.\n"); + localCtrs[local_predictor_idx].decrement(); + } +#endif + + return taken; +} + +void - DefaultBP::update(Addr &branch_addr, bool taken) ++LocalBP::update(Addr &branch_addr, bool taken, void *bp_history) +{ ++ assert(bp_history == NULL); + unsigned local_predictor_idx; + + // Update the local predictor. + local_predictor_idx = getLocalIndex(branch_addr); + + DPRINTF(Fetch, "Branch predictor: Looking up index %#x\n", + local_predictor_idx); + + if (taken) { + DPRINTF(Fetch, "Branch predictor: Branch updated as taken.\n"); + localCtrs[local_predictor_idx].increment(); + } else { + DPRINTF(Fetch, "Branch predictor: Branch updated as not taken.\n"); + localCtrs[local_predictor_idx].decrement(); + } +} + +inline +bool - DefaultBP::getPrediction(uint8_t &count) ++LocalBP::getPrediction(uint8_t &count) +{ + // Get the MSB of the count + return (count >> (localCtrBits - 1)); +} + +inline +unsigned - DefaultBP::getLocalIndex(Addr &branch_addr) ++LocalBP::getLocalIndex(Addr &branch_addr) +{ + return (branch_addr >> instShiftAmt) & indexMask; +} diff --cc src/cpu/o3/2bit_local_pred.hh index 6e02a49be,000000000..0a2a71d3e mode 100644,000000..100644 --- a/src/cpu/o3/2bit_local_pred.hh +++ b/src/cpu/o3/2bit_local_pred.hh @@@ -1,101 -1,0 +1,111 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_2BIT_LOCAL_PRED_HH__ +#define __CPU_O3_2BIT_LOCAL_PRED_HH__ + +// For Addr type. +#include "arch/isa_traits.hh" +#include "cpu/o3/sat_counter.hh" + +#include + - class DefaultBP ++/** ++ * Implements a local predictor that uses the PC to index into a table of ++ * counters. Note that any time a pointer to the bp_history is given, it ++ * should be NULL using this predictor because it does not have any branch ++ * predictor state that needs to be recorded or updated; the update can be ++ * determined solely by the branch being taken or not taken. ++ */ ++class LocalBP +{ + public: + /** + * Default branch predictor constructor. + * @param localPredictorSize Size of the local predictor. + * @param localCtrBits Number of bits per counter. + * @param instShiftAmt Offset amount for instructions to ignore alignment. + */ - DefaultBP(unsigned localPredictorSize, unsigned localCtrBits, - unsigned instShiftAmt); ++ LocalBP(unsigned localPredictorSize, unsigned localCtrBits, ++ unsigned instShiftAmt); + + /** + * Looks up the given address in the branch predictor and returns + * a true/false value as to whether it is taken. + * @param branch_addr The address of the branch to look up. ++ * @param bp_history Pointer to any bp history state. + * @return Whether or not the branch is taken. + */ - bool lookup(Addr &branch_addr); ++ bool lookup(Addr &branch_addr, void * &bp_history); + + /** + * Updates the branch predictor with the actual result of a branch. + * @param branch_addr The address of the branch to update. + * @param taken Whether or not the branch was taken. + */ - void update(Addr &branch_addr, bool taken); ++ void update(Addr &branch_addr, bool taken, void *bp_history); ++ ++ void squash(void *bp_history) ++ { assert(bp_history == NULL); } + + void reset(); + + private: - + /** + * Returns the taken/not taken prediction given the value of the + * counter. + * @param count The value of the counter. + * @return The prediction based on the counter value. + */ + inline bool getPrediction(uint8_t &count); + + /** Calculates the local index based on the PC. */ + inline unsigned getLocalIndex(Addr &PC); + + /** Array of counters that make up the local predictor. */ + std::vector localCtrs; + + /** Size of the local predictor. */ + unsigned localPredictorSize; + + /** Number of sets. */ + unsigned localPredictorSets; + + /** Number of bits of the local predictor's counters. */ + unsigned localCtrBits; + + /** Number of bits to shift the PC when calculating index. */ + unsigned instShiftAmt; + + /** Mask to get index bits. 
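+ * Since localPredictorSets is checked to be a power of two, the
+ * mask is simply localPredictorSets - 1.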
*/ + unsigned indexMask; +}; + +#endif // __CPU_O3_2BIT_LOCAL_PRED_HH__ diff --cc src/cpu/o3/alpha_cpu.hh index 4d889866a,000000000..2e5c856a8 mode 100644,000000..100644 --- a/src/cpu/o3/alpha_cpu.hh +++ b/src/cpu/o3/alpha_cpu.hh @@@ -1,444 -1,0 +1,525 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_ALPHA_FULL_CPU_HH__ +#define __CPU_O3_ALPHA_FULL_CPU_HH__ + +#include "arch/isa_traits.hh" +#include "cpu/exec_context.hh" +#include "cpu/o3/cpu.hh" +#include "sim/byteswap.hh" + +class EndQuiesceEvent; +namespace Kernel { + class Statistics; +}; + +class TranslatingPort; + ++/** ++ * AlphaFullCPU class. Derives from the FullO3CPU class, and ++ * implements all ISA and implementation specific functions of the ++ * CPU. This is the CPU class that is used for the SimObjects, and is ++ * what is given to the DynInsts. Most of its state exists in the ++ * FullO3CPU; the state is has is mainly for ISA specific ++ * functionality. ++ */ +template +class AlphaFullCPU : public FullO3CPU +{ + protected: + typedef TheISA::IntReg IntReg; + typedef TheISA::FloatReg FloatReg; + typedef TheISA::FloatRegBits FloatRegBits; + typedef TheISA::MiscReg MiscReg; + typedef TheISA::RegFile RegFile; + typedef TheISA::MiscRegFile MiscRegFile; + + public: + typedef O3ThreadState ImplState; + typedef O3ThreadState Thread; + typedef typename Impl::Params Params; + + /** Constructs an AlphaFullCPU with the given parameters. */ + AlphaFullCPU(Params *params); + ++ /** ++ * Derived ExecContext class for use with the AlphaFullCPU. It ++ * provides the interface for any external objects to access a ++ * single thread's state and some general CPU state. Any time ++ * external objects try to update state through this interface, ++ * the CPU will create an event to squash all in-flight ++ * instructions in order to ensure state is maintained correctly. ++ */ + class AlphaXC : public ExecContext + { + public: ++ /** Pointer to the CPU. 
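++ * All of the accessors below forward through this pointer, as
++ * the thread's state actually lives inside the CPU.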
*/ + AlphaFullCPU *cpu; + ++ /** Pointer to the thread state that this XC corrseponds to. */ + O3ThreadState *thread; + ++ /** Returns a pointer to this CPU. */ + virtual BaseCPU *getCpuPtr() { return cpu; } + ++ /** Sets this CPU's ID. */ + virtual void setCpuId(int id) { cpu->cpu_id = id; } + ++ /** Reads this CPU's ID. */ + virtual int readCpuId() { return cpu->cpu_id; } + + virtual TranslatingPort *getMemPort() { return /*thread->port*/ NULL; } + +#if FULL_SYSTEM ++ /** Returns a pointer to the system. */ + virtual System *getSystemPtr() { return cpu->system; } + ++ /** Returns a pointer to physical memory. */ + virtual PhysicalMemory *getPhysMemPtr() { return cpu->physmem; } + ++ /** Returns a pointer to the ITB. */ + virtual AlphaITB *getITBPtr() { return cpu->itb; } + - virtual AlphaDTB * getDTBPtr() { return cpu->dtb; } ++ /** Returns a pointer to the DTB. */ ++ virtual AlphaDTB *getDTBPtr() { return cpu->dtb; } + ++ /** Returns a pointer to this thread's kernel statistics. */ + virtual Kernel::Statistics *getKernelStats() + { return thread->kernelStats; } +#else ++ /** Returns a pointer to this thread's process. */ + virtual Process *getProcessPtr() { return thread->process; } +#endif - ++ /** Returns this thread's status. */ + virtual Status status() const { return thread->status(); } + ++ /** Sets this thread's status. */ + virtual void setStatus(Status new_status) + { thread->setStatus(new_status); } + - /// Set the status to Active. Optional delay indicates number of - /// cycles to wait before beginning execution. ++ /** Set the status to Active. Optional delay indicates number of ++ * cycles to wait before beginning execution. */ + virtual void activate(int delay = 1); + - /// Set the status to Suspended. ++ /** Set the status to Suspended. */ + virtual void suspend(); + - /// Set the status to Unallocated. ++ /** Set the status to Unallocated. */ + virtual void deallocate(); + - /// Set the status to Halted. ++ /** Set the status to Halted. */ + virtual void halt(); + +#if FULL_SYSTEM ++ /** Dumps the function profiling information. ++ * @todo: Implement. ++ */ + virtual void dumpFuncProfile(); +#endif - ++ /** Takes over execution of a thread from another CPU. */ + virtual void takeOverFrom(ExecContext *old_context); + ++ /** Registers statistics associated with this XC. */ + virtual void regStats(const std::string &name); + ++ /** Serializes state. */ + virtual void serialize(std::ostream &os); ++ /** Unserializes state. */ + virtual void unserialize(Checkpoint *cp, const std::string §ion); + +#if FULL_SYSTEM ++ /** Returns pointer to the quiesce event. */ + virtual EndQuiesceEvent *getQuiesceEvent(); + ++ /** Reads the last tick that this thread was activated on. */ + virtual Tick readLastActivate(); ++ /** Reads the last tick that this thread was suspended on. */ + virtual Tick readLastSuspend(); + ++ /** Clears the function profiling information. */ + virtual void profileClear(); ++ /** Samples the function profiling information. */ + virtual void profileSample(); +#endif - ++ /** Returns this thread's ID number. */ + virtual int getThreadNum() { return thread->tid; } + ++ /** Returns the instruction this thread is currently committing. ++ * Only used when an instruction faults. ++ */ + virtual TheISA::MachInst getInst(); + ++ /** Copies the architectural registers from another XC into this XC. */ + virtual void copyArchRegs(ExecContext *xc); + ++ /** Resets all architectural registers to 0. */ + virtual void clearArchRegs(); + ++ /** Reads an integer register. 
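++ * Reads return the committed architectural state via the CPU
++ * (readArchIntReg), not a physical register directly.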
*/ + virtual uint64_t readIntReg(int reg_idx); + + virtual FloatReg readFloatReg(int reg_idx, int width); + + virtual FloatReg readFloatReg(int reg_idx); + + virtual FloatRegBits readFloatRegBits(int reg_idx, int width); + + virtual FloatRegBits readFloatRegBits(int reg_idx); + ++ /** Sets an integer register to a value. */ + virtual void setIntReg(int reg_idx, uint64_t val); + + virtual void setFloatReg(int reg_idx, FloatReg val, int width); + + virtual void setFloatReg(int reg_idx, FloatReg val); + + virtual void setFloatRegBits(int reg_idx, FloatRegBits val, int width); + + virtual void setFloatRegBits(int reg_idx, FloatRegBits val); + ++ /** Reads this thread's PC. */ + virtual uint64_t readPC() + { return cpu->readPC(thread->tid); } + ++ /** Sets this thread's PC. */ + virtual void setPC(uint64_t val); + ++ /** Reads this thread's next PC. */ + virtual uint64_t readNextPC() + { return cpu->readNextPC(thread->tid); } + ++ /** Sets this thread's next PC. */ + virtual void setNextPC(uint64_t val); + + virtual uint64_t readNextNPC() + { + panic("Alpha has no NextNPC!"); + return 0; + } + + virtual void setNextNPC(uint64_t val) + { panic("Alpha has no NextNPC!"); } + ++ /** Reads a miscellaneous register. */ + virtual MiscReg readMiscReg(int misc_reg) + { return cpu->readMiscReg(misc_reg, thread->tid); } + ++ /** Reads a misc. register, including any side-effects the ++ * read might have as defined by the architecture. */ + virtual MiscReg readMiscRegWithEffect(int misc_reg, Fault &fault) + { return cpu->readMiscRegWithEffect(misc_reg, fault, thread->tid); } + ++ /** Sets a misc. register. */ + virtual Fault setMiscReg(int misc_reg, const MiscReg &val); + ++ /** Sets a misc. register, including any side-effects the ++ * write might have as defined by the architecture. */ + virtual Fault setMiscRegWithEffect(int misc_reg, const MiscReg &val); + ++ /** Returns the number of consecutive store conditional failures. */ + // @todo: Figure out where these store cond failures should go. + virtual unsigned readStCondFailures() + { return thread->storeCondFailures; } + ++ /** Sets the number of consecutive store conditional failures. */ + virtual void setStCondFailures(unsigned sc_failures) + { thread->storeCondFailures = sc_failures; } + +#if FULL_SYSTEM ++ /** Returns if the thread is currently in PAL mode, based on ++ * the PC's value. */ + virtual bool inPalMode() + { return TheISA::PcPAL(cpu->readPC(thread->tid)); } +#endif - + // Only really makes sense for old CPU model. Lots of code + // outside the CPU still checks this function, so it will + // always return false to keep everything working. ++ /** Checks if the thread is misspeculating. Because it is ++ * very difficult to determine if the thread is ++ * misspeculating, this is set as false. */ + virtual bool misspeculating() { return false; } + +#if !FULL_SYSTEM ++ /** Gets a syscall argument by index. */ + virtual IntReg getSyscallArg(int i); + ++ /** Sets a syscall argument. */ + virtual void setSyscallArg(int i, IntReg val); + ++ /** Sets the syscall return value. */ + virtual void setSyscallReturn(SyscallReturn return_value); + ++ /** Executes a syscall in SE mode. */ + virtual void syscall(int64_t callnum) + { return cpu->syscall(callnum, thread->tid); } + ++ /** Reads the funcExeInst counter. 
*/ + virtual Counter readFuncExeInst() { return thread->funcExeInst; } +#endif + virtual void changeRegFileContext(TheISA::RegFile::ContextParam param, + TheISA::RegFile::ContextVal val) + { panic("Not supported on Alpha!"); } + }; + +#if FULL_SYSTEM + /** ITB pointer. */ + AlphaITB *itb; + /** DTB pointer. */ + AlphaDTB *dtb; +#endif + + /** Registers statistics. */ + void regStats(); + +#if FULL_SYSTEM + /** Translates instruction requestion. */ + Fault translateInstReq(RequestPtr &req) + { + return itb->translate(req); + } + + /** Translates data read request. */ + Fault translateDataReadReq(RequestPtr &req) + { + return dtb->translate(req, false); + } + + /** Translates data write request. */ + Fault translateDataWriteReq(RequestPtr &req) + { + return dtb->translate(req, true); + } + +#else + /** Translates instruction requestion in syscall emulation mode. */ + Fault translateInstReq(RequestPtr &req) + { + int tid = req->getThreadNum(); + return this->thread[tid]->process->pTable->translate(req); + } + + /** Translates data read request in syscall emulation mode. */ + Fault translateDataReadReq(RequestPtr &req) + { + int tid = req->getThreadNum(); + return this->thread[tid]->process->pTable->translate(req); + } + + /** Translates data write request in syscall emulation mode. */ + Fault translateDataWriteReq(RequestPtr &req) + { + int tid = req->getThreadNum(); + return this->thread[tid]->process->pTable->translate(req); + } + +#endif ++ /** Reads a miscellaneous register. */ + MiscReg readMiscReg(int misc_reg, unsigned tid); + ++ /** Reads a misc. register, including any side effects the read ++ * might have as defined by the architecture. ++ */ + MiscReg readMiscRegWithEffect(int misc_reg, Fault &fault, unsigned tid); + ++ /** Sets a miscellaneous register. */ + Fault setMiscReg(int misc_reg, const MiscReg &val, unsigned tid); + ++ /** Sets a misc. register, including any side effects the write ++ * might have as defined by the architecture. ++ */ + Fault setMiscRegWithEffect(int misc_reg, const MiscReg &val, unsigned tid); + ++ /** Initiates a squash of all in-flight instructions for a given ++ * thread. The source of the squash is an external update of ++ * state through the XC. ++ */ + void squashFromXC(unsigned tid); + +#if FULL_SYSTEM ++ /** Posts an interrupt. */ + void post_interrupt(int int_num, int index); - ++ /** Reads the interrupt flag. */ + int readIntrFlag(); + /** Sets the interrupt flags. */ + void setIntrFlag(int val); + /** HW return from error interrupt. */ + Fault hwrei(unsigned tid); + /** Returns if a specific PC is a PAL mode PC. */ + bool inPalMode(uint64_t PC) + { return AlphaISA::PcPAL(PC); } + + /** Traps to handle given fault. */ + void trap(Fault fault, unsigned tid); + bool simPalCheck(int palFunc, unsigned tid); + + /** Processes any interrupts. */ + void processInterrupts(); + + /** Halts the CPU. */ + void halt() { panic("Halt not implemented!\n"); } +#endif + + +#if !FULL_SYSTEM + /** Executes a syscall. + * @todo: Determine if this needs to be virtual. + */ - void syscall(int64_t callnum, int thread_num); ++ void syscall(int64_t callnum, int tid); + /** Gets a syscall argument. */ + IntReg getSyscallArg(int i, int tid); + + /** Used to shift args for indirect syscall. */ + void setSyscallArg(int i, IntReg val, int tid); + + /** Sets the return value of a syscall. */ + void setSyscallReturn(SyscallReturn return_value, int tid); +#endif + + /** Read from memory function. 
*/ + template + Fault read(RequestPtr &req, T &data) + { +#if 0 +#if FULL_SYSTEM && THE_ISA == ALPHA_ISA + if (req->flags & LOCKED) { + req->xc->setMiscReg(TheISA::Lock_Addr_DepTag, req->paddr); + req->xc->setMiscReg(TheISA::Lock_Flag_DepTag, true); + } +#endif +#endif + Fault error; + +#if FULL_SYSTEM + // @todo: Fix this LL/SC hack. + if (req->flags & LOCKED) { + lockAddr = req->paddr; + lockFlag = true; + } +#endif + + error = this->mem->read(req, data); + data = gtoh(data); + return error; + } + + /** CPU read function, forwards read to LSQ. */ + template + Fault read(RequestPtr &req, T &data, int load_idx) + { + return this->iew.ldstQueue.read(req, data, load_idx); + } + + /** Write to memory function. */ + template + Fault write(RequestPtr &req, T &data) + { +#if 0 +#if FULL_SYSTEM && THE_ISA == ALPHA_ISA + ExecContext *xc; + + // If this is a store conditional, act appropriately + if (req->flags & LOCKED) { + xc = req->xc; + + if (req->flags & UNCACHEABLE) { + // Don't update result register (see stq_c in isa_desc) + req->result = 2; + xc->setStCondFailures(0);//Needed? [RGD] + } else { + bool lock_flag = xc->readMiscReg(TheISA::Lock_Flag_DepTag); + Addr lock_addr = xc->readMiscReg(TheISA::Lock_Addr_DepTag); + req->result = lock_flag; + if (!lock_flag || + ((lock_addr & ~0xf) != (req->paddr & ~0xf))) { + xc->setMiscReg(TheISA::Lock_Flag_DepTag, false); + xc->setStCondFailures(xc->readStCondFailures() + 1); + if (((xc->readStCondFailures()) % 100000) == 0) { + std::cerr << "Warning: " + << xc->readStCondFailures() + << " consecutive store conditional failures " + << "on cpu " << req->xc->readCpuId() + << std::endl; + } + return NoFault; + } + else xc->setStCondFailures(0); + } + } + + // Need to clear any locked flags on other proccessors for + // this address. Only do this for succsful Store Conditionals + // and all other stores (WH64?). Unsuccessful Store + // Conditionals would have returned above, and wouldn't fall + // through. + for (int i = 0; i < this->system->execContexts.size(); i++){ + xc = this->system->execContexts[i]; + if ((xc->readMiscReg(TheISA::Lock_Addr_DepTag) & ~0xf) == + (req->paddr & ~0xf)) { + xc->setMiscReg(TheISA::Lock_Flag_DepTag, false); + } + } + +#endif +#endif + +#if FULL_SYSTEM + // @todo: Fix this LL/SC hack. + if (req->flags & LOCKED) { + if (req->flags & UNCACHEABLE) { + req->result = 2; + } else { + if (this->lockFlag) { + req->result = 1; + } else { + req->result = 0; + return NoFault; + } + } + } +#endif + + return this->mem->write(req, (T)htog(data)); + } + + /** CPU write function, forwards write to LSQ. */ + template + Fault write(RequestPtr &req, T &data, int store_idx) + { + return this->iew.ldstQueue.write(req, data, store_idx); + } + + Addr lockAddr; + ++ /** Temporary fix for the lock flag, works in the UP case. */ + bool lockFlag; +}; + +#endif // __CPU_O3_ALPHA_FULL_CPU_HH__ diff --cc src/cpu/o3/alpha_cpu_builder.cc index c91e2c7c9,000000000..1592261de mode 100644,000000..100644 --- a/src/cpu/o3/alpha_cpu_builder.cc +++ b/src/cpu/o3/alpha_cpu_builder.cc @@@ -1,410 -1,0 +1,413 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#include + +#include "cpu/base.hh" +#include "cpu/o3/alpha_cpu.hh" +#include "cpu/o3/alpha_impl.hh" +#include "cpu/o3/alpha_params.hh" +#include "cpu/o3/fu_pool.hh" +#include "sim/builder.hh" + +class DerivAlphaFullCPU : public AlphaFullCPU +{ + public: + DerivAlphaFullCPU(AlphaSimpleParams *p) + : AlphaFullCPU(p) + { } +}; + +BEGIN_DECLARE_SIM_OBJECT_PARAMS(DerivAlphaFullCPU) + + Param clock; + Param numThreads; +Param activity; + +#if FULL_SYSTEM +SimObjectParam system; +Param cpu_id; +SimObjectParam itb; +SimObjectParam dtb; +#else +SimObjectVectorParam workload; +//SimObjectParam page_table; +#endif // FULL_SYSTEM + +SimObjectParam mem; + +SimObjectParam checker; + +Param max_insts_any_thread; +Param max_insts_all_threads; +Param max_loads_any_thread; +Param max_loads_all_threads; + +Param cachePorts; + +Param decodeToFetchDelay; +Param renameToFetchDelay; +Param iewToFetchDelay; +Param commitToFetchDelay; +Param fetchWidth; + +Param renameToDecodeDelay; +Param iewToDecodeDelay; +Param commitToDecodeDelay; +Param fetchToDecodeDelay; +Param decodeWidth; + +Param iewToRenameDelay; +Param commitToRenameDelay; +Param decodeToRenameDelay; +Param renameWidth; + +Param commitToIEWDelay; +Param renameToIEWDelay; +Param issueToExecuteDelay; +Param issueWidth; +Param executeWidth; +Param executeIntWidth; +Param executeFloatWidth; +Param executeBranchWidth; +Param executeMemoryWidth; +SimObjectParam fuPool; + +Param iewToCommitDelay; +Param renameToROBDelay; +Param commitWidth; +Param squashWidth; +Param trapLatency; +Param fetchTrapLatency; + ++Param predType; +Param localPredictorSize; +Param localCtrBits; +Param localHistoryTableSize; +Param localHistoryBits; +Param globalPredictorSize; +Param globalCtrBits; +Param globalHistoryBits; +Param choicePredictorSize; +Param choiceCtrBits; + +Param BTBEntries; +Param BTBTagSize; + +Param RASSize; + +Param LQEntries; +Param SQEntries; +Param LFSTSize; +Param SSITSize; + +Param numPhysIntRegs; +Param numPhysFloatRegs; +Param numIQEntries; 
+Param numROBEntries; + +Param smtNumFetchingThreads; +Param smtFetchPolicy; +Param smtLSQPolicy; +Param smtLSQThreshold; +Param smtIQPolicy; +Param smtIQThreshold; +Param smtROBPolicy; +Param smtROBThreshold; +Param smtCommitPolicy; + +Param instShiftAmt; + +Param defer_registration; + +Param function_trace; +Param function_trace_start; + +END_DECLARE_SIM_OBJECT_PARAMS(DerivAlphaFullCPU) + +BEGIN_INIT_SIM_OBJECT_PARAMS(DerivAlphaFullCPU) + + INIT_PARAM(clock, "clock speed"), + INIT_PARAM(numThreads, "number of HW thread contexts"), + INIT_PARAM_DFLT(activity, "Initial activity count", 0), + +#if FULL_SYSTEM + INIT_PARAM(system, "System object"), + INIT_PARAM(cpu_id, "processor ID"), + INIT_PARAM(itb, "Instruction translation buffer"), + INIT_PARAM(dtb, "Data translation buffer"), +#else + INIT_PARAM(workload, "Processes to run"), +// INIT_PARAM(page_table, "Page table"), +#endif // FULL_SYSTEM + + INIT_PARAM(mem, "Memory"), + + INIT_PARAM_DFLT(checker, "Checker CPU", NULL), + + INIT_PARAM_DFLT(max_insts_any_thread, + "Terminate when any thread reaches this inst count", + 0), + INIT_PARAM_DFLT(max_insts_all_threads, + "Terminate when all threads have reached" + "this inst count", + 0), + INIT_PARAM_DFLT(max_loads_any_thread, + "Terminate when any thread reaches this load count", + 0), + INIT_PARAM_DFLT(max_loads_all_threads, + "Terminate when all threads have reached this load" + "count", + 0), + + INIT_PARAM_DFLT(cachePorts, "Cache Ports", 200), + + INIT_PARAM(decodeToFetchDelay, "Decode to fetch delay"), + INIT_PARAM(renameToFetchDelay, "Rename to fetch delay"), + INIT_PARAM(iewToFetchDelay, "Issue/Execute/Writeback to fetch" + "delay"), + INIT_PARAM(commitToFetchDelay, "Commit to fetch delay"), + INIT_PARAM(fetchWidth, "Fetch width"), + INIT_PARAM(renameToDecodeDelay, "Rename to decode delay"), + INIT_PARAM(iewToDecodeDelay, "Issue/Execute/Writeback to decode" + "delay"), + INIT_PARAM(commitToDecodeDelay, "Commit to decode delay"), + INIT_PARAM(fetchToDecodeDelay, "Fetch to decode delay"), + INIT_PARAM(decodeWidth, "Decode width"), + + INIT_PARAM(iewToRenameDelay, "Issue/Execute/Writeback to rename" + "delay"), + INIT_PARAM(commitToRenameDelay, "Commit to rename delay"), + INIT_PARAM(decodeToRenameDelay, "Decode to rename delay"), + INIT_PARAM(renameWidth, "Rename width"), + + INIT_PARAM(commitToIEWDelay, "Commit to " + "Issue/Execute/Writeback delay"), + INIT_PARAM(renameToIEWDelay, "Rename to " + "Issue/Execute/Writeback delay"), + INIT_PARAM(issueToExecuteDelay, "Issue to execute delay (internal" + "to the IEW stage)"), + INIT_PARAM(issueWidth, "Issue width"), + INIT_PARAM(executeWidth, "Execute width"), + INIT_PARAM(executeIntWidth, "Integer execute width"), + INIT_PARAM(executeFloatWidth, "Floating point execute width"), + INIT_PARAM(executeBranchWidth, "Branch execute width"), + INIT_PARAM(executeMemoryWidth, "Memory execute width"), + INIT_PARAM_DFLT(fuPool, "Functional unit pool", NULL), + + INIT_PARAM(iewToCommitDelay, "Issue/Execute/Writeback to commit " + "delay"), + INIT_PARAM(renameToROBDelay, "Rename to reorder buffer delay"), + INIT_PARAM(commitWidth, "Commit width"), + INIT_PARAM(squashWidth, "Squash width"), + INIT_PARAM_DFLT(trapLatency, "Number of cycles before the trap is handled", 6), + INIT_PARAM_DFLT(fetchTrapLatency, "Number of cycles before the fetch trap is handled", 12), + ++ INIT_PARAM(predType, "Type of branch predictor ('local', 'tournament')"), + INIT_PARAM(localPredictorSize, "Size of local predictor"), + INIT_PARAM(localCtrBits, "Bits per counter"), + 
INIT_PARAM(localHistoryTableSize, "Size of local history table"), + INIT_PARAM(localHistoryBits, "Bits for the local history"), + INIT_PARAM(globalPredictorSize, "Size of global predictor"), + INIT_PARAM(globalCtrBits, "Bits per counter"), + INIT_PARAM(globalHistoryBits, "Bits of history"), + INIT_PARAM(choicePredictorSize, "Size of choice predictor"), + INIT_PARAM(choiceCtrBits, "Bits of choice counters"), + + INIT_PARAM(BTBEntries, "Number of BTB entries"), + INIT_PARAM(BTBTagSize, "Size of the BTB tags, in bits"), + + INIT_PARAM(RASSize, "RAS size"), + + INIT_PARAM(LQEntries, "Number of load queue entries"), + INIT_PARAM(SQEntries, "Number of store queue entries"), + INIT_PARAM(LFSTSize, "Last fetched store table size"), + INIT_PARAM(SSITSize, "Store set ID table size"), + + INIT_PARAM(numPhysIntRegs, "Number of physical integer registers"), + INIT_PARAM(numPhysFloatRegs, "Number of physical floating point " + "registers"), + INIT_PARAM(numIQEntries, "Number of instruction queue entries"), + INIT_PARAM(numROBEntries, "Number of reorder buffer entries"), + + INIT_PARAM_DFLT(smtNumFetchingThreads, "SMT Number of Fetching Threads", 1), + INIT_PARAM_DFLT(smtFetchPolicy, "SMT Fetch Policy", "SingleThread"), + INIT_PARAM_DFLT(smtLSQPolicy, "SMT LSQ Sharing Policy", "Partitioned"), + INIT_PARAM_DFLT(smtLSQThreshold,"SMT LSQ Threshold", 100), + INIT_PARAM_DFLT(smtIQPolicy, "SMT IQ Policy", "Partitioned"), + INIT_PARAM_DFLT(smtIQThreshold, "SMT IQ Threshold", 100), + INIT_PARAM_DFLT(smtROBPolicy, "SMT ROB Sharing Policy", "Partitioned"), + INIT_PARAM_DFLT(smtROBThreshold,"SMT ROB Threshold", 100), + INIT_PARAM_DFLT(smtCommitPolicy,"SMT Commit Fetch Policy", "RoundRobin"), + + INIT_PARAM(instShiftAmt, "Number of bits to shift instructions by"), + INIT_PARAM(defer_registration, "defer system registration (for sampling)"), + + INIT_PARAM(function_trace, "Enable function trace"), + INIT_PARAM(function_trace_start, "Cycle to start function trace") + +END_INIT_SIM_OBJECT_PARAMS(DerivAlphaFullCPU) + +CREATE_SIM_OBJECT(DerivAlphaFullCPU) +{ + DerivAlphaFullCPU *cpu; + +#if FULL_SYSTEM + // Full-system only supports a single thread for the moment. + int actual_num_threads = 1; +#else + // In non-full-system mode, we infer the number of threads from + // the workload if it's not explicitly specified. + int actual_num_threads = + numThreads.isValid() ? 
numThreads : workload.size(); + + if (workload.size() == 0) { + fatal("Must specify at least one workload!"); + } + +#endif + + AlphaSimpleParams *params = new AlphaSimpleParams; + + params->clock = clock; + + params->name = getInstanceName(); + params->numberOfThreads = actual_num_threads; + params->activity = activity; + +#if FULL_SYSTEM + params->system = system; + params->cpu_id = cpu_id; + params->itb = itb; + params->dtb = dtb; +#else + params->workload = workload; +// params->pTable = page_table; +#endif // FULL_SYSTEM + + params->mem = mem; + + params->checker = checker; + + params->max_insts_any_thread = max_insts_any_thread; + params->max_insts_all_threads = max_insts_all_threads; + params->max_loads_any_thread = max_loads_any_thread; + params->max_loads_all_threads = max_loads_all_threads; + + // + // Caches + // + params->cachePorts = cachePorts; + + params->decodeToFetchDelay = decodeToFetchDelay; + params->renameToFetchDelay = renameToFetchDelay; + params->iewToFetchDelay = iewToFetchDelay; + params->commitToFetchDelay = commitToFetchDelay; + params->fetchWidth = fetchWidth; + + params->renameToDecodeDelay = renameToDecodeDelay; + params->iewToDecodeDelay = iewToDecodeDelay; + params->commitToDecodeDelay = commitToDecodeDelay; + params->fetchToDecodeDelay = fetchToDecodeDelay; + params->decodeWidth = decodeWidth; + + params->iewToRenameDelay = iewToRenameDelay; + params->commitToRenameDelay = commitToRenameDelay; + params->decodeToRenameDelay = decodeToRenameDelay; + params->renameWidth = renameWidth; + + params->commitToIEWDelay = commitToIEWDelay; + params->renameToIEWDelay = renameToIEWDelay; + params->issueToExecuteDelay = issueToExecuteDelay; + params->issueWidth = issueWidth; + params->executeWidth = executeWidth; + params->executeIntWidth = executeIntWidth; + params->executeFloatWidth = executeFloatWidth; + params->executeBranchWidth = executeBranchWidth; + params->executeMemoryWidth = executeMemoryWidth; + params->fuPool = fuPool; + + params->iewToCommitDelay = iewToCommitDelay; + params->renameToROBDelay = renameToROBDelay; + params->commitWidth = commitWidth; + params->squashWidth = squashWidth; + params->trapLatency = trapLatency; + params->fetchTrapLatency = fetchTrapLatency; + ++ params->predType = predType; + params->localPredictorSize = localPredictorSize; + params->localCtrBits = localCtrBits; + params->localHistoryTableSize = localHistoryTableSize; + params->localHistoryBits = localHistoryBits; + params->globalPredictorSize = globalPredictorSize; + params->globalCtrBits = globalCtrBits; + params->globalHistoryBits = globalHistoryBits; + params->choicePredictorSize = choicePredictorSize; + params->choiceCtrBits = choiceCtrBits; + + params->BTBEntries = BTBEntries; + params->BTBTagSize = BTBTagSize; + + params->RASSize = RASSize; + + params->LQEntries = LQEntries; + params->SQEntries = SQEntries; + + params->SSITSize = SSITSize; + params->LFSTSize = LFSTSize; + + params->numPhysIntRegs = numPhysIntRegs; + params->numPhysFloatRegs = numPhysFloatRegs; + params->numIQEntries = numIQEntries; + params->numROBEntries = numROBEntries; + + params->smtNumFetchingThreads = smtNumFetchingThreads; + params->smtFetchPolicy = smtFetchPolicy; + params->smtIQPolicy = smtIQPolicy; + params->smtLSQPolicy = smtLSQPolicy; + params->smtLSQThreshold = smtLSQThreshold; + params->smtROBPolicy = smtROBPolicy; + params->smtROBThreshold = smtROBThreshold; + params->smtCommitPolicy = smtCommitPolicy; + + params->instShiftAmt = 2; + + params->deferRegistration = defer_registration; + + 
params->functionTrace = function_trace; + params->functionTraceStart = function_trace_start; + + cpu = new DerivAlphaFullCPU(params); + + return cpu; +} + +REGISTER_SIM_OBJECT("DerivAlphaFullCPU", DerivAlphaFullCPU) + diff --cc src/cpu/o3/alpha_cpu_impl.hh index 6893f8c64,000000000..ad4401f7e mode 100644,000000..100644 --- a/src/cpu/o3/alpha_cpu_impl.hh +++ b/src/cpu/o3/alpha_cpu_impl.hh @@@ -1,809 -1,0 +1,815 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#include "arch/alpha/faults.hh" +#include "base/cprintf.hh" +#include "base/statistics.hh" +#include "base/timebuf.hh" +#include "cpu/checker/exec_context.hh" +#include "sim/sim_events.hh" +#include "sim/stats.hh" + +#include "cpu/o3/alpha_cpu.hh" +#include "cpu/o3/alpha_params.hh" +#include "cpu/o3/comm.hh" +#include "cpu/o3/thread_state.hh" + +#if FULL_SYSTEM +#include "arch/alpha/osfpal.hh" +#include "arch/isa_traits.hh" +#include "cpu/quiesce_event.hh" +#include "kern/kernel_stats.hh" +#endif + +using namespace TheISA; + +template +AlphaFullCPU::AlphaFullCPU(Params *params) +#if FULL_SYSTEM + : FullO3CPU(params), itb(params->itb), dtb(params->dtb) +#else + : FullO3CPU(params) +#endif +{ + DPRINTF(FullCPU, "AlphaFullCPU: Creating AlphaFullCPU object.\n"); + ++ // Setup any thread state. + this->thread.resize(this->numThreads); + + for (int i = 0; i < this->numThreads; ++i) { +#if FULL_SYSTEM ++ // SMT is not supported in FS mode yet. 
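++ // Only a single thread is created here, and it begins life
++ // Suspended; hence the assert below.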
+ assert(this->numThreads == 1); + this->thread[i] = new Thread(this, 0, params->mem); + this->thread[i]->setStatus(ExecContext::Suspended); +#else + if (i < params->workload.size()) { + DPRINTF(FullCPU, "FullCPU: Workload[%i] process is %#x", + i, this->thread[i]); + this->thread[i] = new Thread(this, i, params->workload[i], i); + + this->thread[i]->setStatus(ExecContext::Suspended); + //usedTids[i] = true; + //threadMap[i] = i; + } else { + //Allocate Empty execution context so M5 can use later + //when scheduling threads to CPU + Process* dummy_proc = NULL; + + this->thread[i] = new Thread(this, i, dummy_proc, i); + //usedTids[i] = false; + } +#endif // !FULL_SYSTEM + - this->thread[i]->numInst = 0; - + ExecContext *xc_proxy; + - AlphaXC *alpha_xc_proxy = new AlphaXC; ++ // Setup the XC that will serve as the interface to the threads/CPU. ++ AlphaXC *alpha_xc = new AlphaXC; + ++ // If we're using a checker, then the XC should be the ++ // CheckerExecContext. + if (params->checker) { - xc_proxy = new CheckerExecContext(alpha_xc_proxy, this->checker); ++ xc_proxy = new CheckerExecContext( ++ alpha_xc, this->checker); + } else { - xc_proxy = alpha_xc_proxy; ++ xc_proxy = alpha_xc; + } + - alpha_xc_proxy->cpu = this; - alpha_xc_proxy->thread = this->thread[i]; ++ alpha_xc->cpu = this; ++ alpha_xc->thread = this->thread[i]; + +#if FULL_SYSTEM ++ // Setup quiesce event. + this->thread[i]->quiesceEvent = + new EndQuiesceEvent(xc_proxy); + this->thread[i]->lastActivate = 0; + this->thread[i]->lastSuspend = 0; +#endif ++ // Give the thread the XC. + this->thread[i]->xcProxy = xc_proxy; + ++ // Add the XC to the CPU's list of XC's. + this->execContexts.push_back(xc_proxy); + } + + + for (int i=0; i < this->numThreads; i++) { + this->thread[i]->funcExeInst = 0; + } + + // Sets CPU pointers. These must be set at this level because the CPU + // pointers are defined to be the highest level of CPU class. + this->fetch.setCPU(this); + this->decode.setCPU(this); + this->rename.setCPU(this); + this->iew.setCPU(this); + this->commit.setCPU(this); + + this->rob.setCPU(this); + this->regFile.setCPU(this); + + lockAddr = 0; + lockFlag = false; +} + +template +void +AlphaFullCPU::regStats() +{ + // Register stats for everything that has stats. + this->fullCPURegStats(); + this->fetch.regStats(); + this->decode.regStats(); + this->rename.regStats(); + this->iew.regStats(); + this->commit.regStats(); +} + +#if FULL_SYSTEM +template +void +AlphaFullCPU::AlphaXC::dumpFuncProfile() +{ + // Currently not supported +} +#endif + +template +void +AlphaFullCPU::AlphaXC::takeOverFrom(ExecContext *old_context) +{ + // some things should already be set up + assert(getMemPort() == old_context->getMemPort()); +#if FULL_SYSTEM + assert(getSystemPtr() == old_context->getSystemPtr()); +#else + assert(getProcessPtr() == old_context->getProcessPtr()); +#endif + + // copy over functional state + setStatus(old_context->status()); + copyArchRegs(old_context); + setCpuId(old_context->readCpuId()); ++ +#if !FULL_SYSTEM + thread->funcExeInst = old_context->readFuncExeInst(); +#else + EndQuiesceEvent *other_quiesce = old_context->getQuiesceEvent(); + if (other_quiesce) { + // Point the quiesce event's XC at this XC so that it wakes up + // the proper CPU. + other_quiesce->xc = this; + } + if (thread->quiesceEvent) { + thread->quiesceEvent->xc = this; + } + + // Transfer kernel stats from one CPU to the other. 
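+ // Only the pointer is handed over; the stats object itself
+ // lives on. The LL/SC lock flag is conservatively cleared
+ // below, since a reservation should not survive the switch.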
+ thread->kernelStats = old_context->getKernelStats(); +// storeCondFailures = 0; + cpu->lockFlag = false; +#endif + + old_context->setStatus(ExecContext::Unallocated); + + thread->inSyscall = false; + thread->trapPending = false; +} + +template +void +AlphaFullCPU::AlphaXC::activate(int delay) +{ + DPRINTF(FullCPU, "Calling activate on AlphaXC\n"); + + if (thread->status() == ExecContext::Active) + return; + +#if FULL_SYSTEM + thread->lastActivate = curTick; +#endif + + if (thread->status() == ExecContext::Unallocated) { + cpu->activateWhenReady(thread->tid); + return; + } + + thread->setStatus(ExecContext::Active); + + // status() == Suspended + cpu->activateContext(thread->tid, delay); +} + +template +void +AlphaFullCPU::AlphaXC::suspend() +{ + DPRINTF(FullCPU, "Calling suspend on AlphaXC\n"); + + if (thread->status() == ExecContext::Suspended) + return; + +#if FULL_SYSTEM + thread->lastActivate = curTick; + thread->lastSuspend = curTick; +#endif +/* +#if FULL_SYSTEM + // Don't change the status from active if there are pending interrupts + if (cpu->check_interrupts()) { + assert(status() == ExecContext::Active); + return; + } +#endif +*/ + thread->setStatus(ExecContext::Suspended); + cpu->suspendContext(thread->tid); +} + +template +void +AlphaFullCPU::AlphaXC::deallocate() +{ + DPRINTF(FullCPU, "Calling deallocate on AlphaXC\n"); + + if (thread->status() == ExecContext::Unallocated) + return; + + thread->setStatus(ExecContext::Unallocated); + cpu->deallocateContext(thread->tid); +} + +template +void +AlphaFullCPU::AlphaXC::halt() +{ + DPRINTF(FullCPU, "Calling halt on AlphaXC\n"); + + if (thread->status() == ExecContext::Halted) + return; + + thread->setStatus(ExecContext::Halted); + cpu->haltContext(thread->tid); +} + +template +void +AlphaFullCPU::AlphaXC::regStats(const std::string &name) +{ +#if FULL_SYSTEM + thread->kernelStats = new Kernel::Statistics(cpu->system); + thread->kernelStats->regStats(name + ".kern"); +#endif +} + +template +void +AlphaFullCPU::AlphaXC::serialize(std::ostream &os) +{ +#if FULL_SYSTEM + if (thread->kernelStats) + thread->kernelStats->serialize(os); +#endif + +} + +template +void +AlphaFullCPU::AlphaXC::unserialize(Checkpoint *cp, const std::string §ion) +{ +#if FULL_SYSTEM + if (thread->kernelStats) + thread->kernelStats->unserialize(cp, section); +#endif + +} + +#if FULL_SYSTEM +template +EndQuiesceEvent * +AlphaFullCPU::AlphaXC::getQuiesceEvent() +{ + return thread->quiesceEvent; +} + +template +Tick +AlphaFullCPU::AlphaXC::readLastActivate() +{ + return thread->lastActivate; +} + +template +Tick +AlphaFullCPU::AlphaXC::readLastSuspend() +{ + return thread->lastSuspend; +} + +template +void +AlphaFullCPU::AlphaXC::profileClear() +{} + +template +void +AlphaFullCPU::AlphaXC::profileSample() +{} +#endif + +template +TheISA::MachInst +AlphaFullCPU::AlphaXC:: getInst() +{ + return thread->inst; +} + +template +void +AlphaFullCPU::AlphaXC::copyArchRegs(ExecContext *xc) +{ + // This function will mess things up unless the ROB is empty and + // there are no instructions in the pipeline. + unsigned tid = thread->tid; + PhysRegIndex renamed_reg; + + // First loop through the integer registers. 
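+ // Each architectural index is mapped through the rename map to
+ // locate the physical register holding its committed value.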
+ for (int i = 0; i < AlphaISA::NumIntRegs; ++i) { + renamed_reg = cpu->renameMap[tid].lookup(i); + + DPRINTF(FullCPU, "FullCPU: Copying over register %i, had data %lli, " + "now has data %lli.\n", + renamed_reg, cpu->readIntReg(renamed_reg), + xc->readIntReg(i)); + + cpu->setIntReg(renamed_reg, xc->readIntReg(i)); + } + + // Then loop through the floating point registers. + for (int i = 0; i < AlphaISA::NumFloatRegs; ++i) { + renamed_reg = cpu->renameMap[tid].lookup(i + AlphaISA::FP_Base_DepTag); + cpu->setFloatRegBits(renamed_reg, + xc->readFloatRegBits(i)); + } + + // Copy the misc regs. + copyMiscRegs(xc, this); + + // Then finally set the PC and the next PC. + cpu->setPC(xc->readPC(), tid); + cpu->setNextPC(xc->readNextPC(), tid); +#if !FULL_SYSTEM + this->thread->funcExeInst = xc->readFuncExeInst(); +#endif +} + +template +void +AlphaFullCPU::AlphaXC::clearArchRegs() +{} + +template +uint64_t +AlphaFullCPU::AlphaXC::readIntReg(int reg_idx) +{ - DPRINTF(Fault, "Reading int register through the XC!\n"); + return cpu->readArchIntReg(reg_idx, thread->tid); +} + +template +FloatReg +AlphaFullCPU::AlphaXC::readFloatReg(int reg_idx, int width) +{ - DPRINTF(Fault, "Reading float register through the XC!\n"); + switch(width) { + case 32: + return cpu->readArchFloatRegSingle(reg_idx, thread->tid); + case 64: + return cpu->readArchFloatRegDouble(reg_idx, thread->tid); + default: + panic("Unsupported width!"); + return 0; + } +} + +template +FloatReg +AlphaFullCPU::AlphaXC::readFloatReg(int reg_idx) +{ - DPRINTF(Fault, "Reading float register through the XC!\n"); + return cpu->readArchFloatRegSingle(reg_idx, thread->tid); +} + +template +FloatRegBits +AlphaFullCPU::AlphaXC::readFloatRegBits(int reg_idx, int width) +{ + DPRINTF(Fault, "Reading floatint register through the XC!\n"); + return cpu->readArchFloatRegInt(reg_idx, thread->tid); +} + +template +FloatRegBits +AlphaFullCPU::AlphaXC::readFloatRegBits(int reg_idx) +{ - DPRINTF(Fault, "Reading floatint register through the XC!\n"); + return cpu->readArchFloatRegInt(reg_idx, thread->tid); +} + +template +void +AlphaFullCPU::AlphaXC::setIntReg(int reg_idx, uint64_t val) +{ - DPRINTF(Fault, "Setting int register through the XC!\n"); + cpu->setArchIntReg(reg_idx, val, thread->tid); + ++ // Squash if we're not already in a state update mode. + if (!thread->trapPending && !thread->inSyscall) { + cpu->squashFromXC(thread->tid); + } +} + +template +void +AlphaFullCPU::AlphaXC::setFloatReg(int reg_idx, FloatReg val, int width) +{ - DPRINTF(Fault, "Setting float register through the XC!\n"); + switch(width) { + case 32: + cpu->setArchFloatRegSingle(reg_idx, val, thread->tid); + break; + case 64: + cpu->setArchFloatRegDouble(reg_idx, val, thread->tid); + break; + } + ++ // Squash if we're not already in a state update mode. + if (!thread->trapPending && !thread->inSyscall) { + cpu->squashFromXC(thread->tid); + } +} + +template +void +AlphaFullCPU::AlphaXC::setFloatReg(int reg_idx, FloatReg val) +{ - DPRINTF(Fault, "Setting float register through the XC!\n"); + cpu->setArchFloatRegSingle(reg_idx, val, thread->tid); + + if (!thread->trapPending && !thread->inSyscall) { + cpu->squashFromXC(thread->tid); + } +} + +template +void +AlphaFullCPU::AlphaXC::setFloatRegBits(int reg_idx, FloatRegBits val, + int width) +{ + DPRINTF(Fault, "Setting floatint register through the XC!\n"); + cpu->setArchFloatRegInt(reg_idx, val, thread->tid); + ++ // Squash if we're not already in a state update mode. 
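++ // An external write through the XC invalidates in-flight
++ // speculative state. If a trap or syscall is already pending,
++ // a squash is guaranteed anyway, so no extra event is needed.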
+ if (!thread->trapPending && !thread->inSyscall) { + cpu->squashFromXC(thread->tid); + } +} + +template +void +AlphaFullCPU::AlphaXC::setFloatRegBits(int reg_idx, FloatRegBits val) +{ - DPRINTF(Fault, "Setting floatint register through the XC!\n"); + cpu->setArchFloatRegInt(reg_idx, val, thread->tid); + ++ // Squash if we're not already in a state update mode. + if (!thread->trapPending && !thread->inSyscall) { + cpu->squashFromXC(thread->tid); + } +} + +template +void +AlphaFullCPU::AlphaXC::setPC(uint64_t val) +{ + cpu->setPC(val, thread->tid); + ++ // Squash if we're not already in a state update mode. + if (!thread->trapPending && !thread->inSyscall) { + cpu->squashFromXC(thread->tid); + } +} + +template +void +AlphaFullCPU::AlphaXC::setNextPC(uint64_t val) +{ + cpu->setNextPC(val, thread->tid); + ++ // Squash if we're not already in a state update mode. + if (!thread->trapPending && !thread->inSyscall) { + cpu->squashFromXC(thread->tid); + } +} + +template +Fault +AlphaFullCPU::AlphaXC::setMiscReg(int misc_reg, const MiscReg &val) +{ - DPRINTF(Fault, "Setting misc register through the XC!\n"); - + Fault ret_fault = cpu->setMiscReg(misc_reg, val, thread->tid); + ++ // Squash if we're not already in a state update mode. + if (!thread->trapPending && !thread->inSyscall) { + cpu->squashFromXC(thread->tid); + } + + return ret_fault; +} + +template +Fault - AlphaFullCPU::AlphaXC::setMiscRegWithEffect(int misc_reg, const MiscReg &val) ++AlphaFullCPU::AlphaXC::setMiscRegWithEffect(int misc_reg, ++ const MiscReg &val) +{ - DPRINTF(Fault, "Setting misc register through the XC!\n"); - + Fault ret_fault = cpu->setMiscRegWithEffect(misc_reg, val, thread->tid); + ++ // Squash if we're not already in a state update mode. + if (!thread->trapPending && !thread->inSyscall) { + cpu->squashFromXC(thread->tid); + } + + return ret_fault; +} + +#if !FULL_SYSTEM + +template +TheISA::IntReg +AlphaFullCPU::AlphaXC::getSyscallArg(int i) +{ + return cpu->getSyscallArg(i, thread->tid); +} + +template +void +AlphaFullCPU::AlphaXC::setSyscallArg(int i, IntReg val) +{ + cpu->setSyscallArg(i, val, thread->tid); +} + +template +void +AlphaFullCPU::AlphaXC::setSyscallReturn(SyscallReturn return_value) +{ + cpu->setSyscallReturn(return_value, thread->tid); +} + +#endif // FULL_SYSTEM + +template +MiscReg +AlphaFullCPU::readMiscReg(int misc_reg, unsigned tid) +{ + return this->regFile.readMiscReg(misc_reg, tid); +} + +template +MiscReg +AlphaFullCPU::readMiscRegWithEffect(int misc_reg, Fault &fault, + unsigned tid) +{ + return this->regFile.readMiscRegWithEffect(misc_reg, fault, tid); +} + +template +Fault +AlphaFullCPU::setMiscReg(int misc_reg, const MiscReg &val, unsigned tid) +{ + return this->regFile.setMiscReg(misc_reg, val, tid); +} + +template +Fault +AlphaFullCPU::setMiscRegWithEffect(int misc_reg, const MiscReg &val, + unsigned tid) +{ + return this->regFile.setMiscRegWithEffect(misc_reg, val, tid); +} + +template +void +AlphaFullCPU::squashFromXC(unsigned tid) +{ + this->thread[tid]->inSyscall = true; + this->commit.generateXCEvent(tid); +} + +#if FULL_SYSTEM + +template +void +AlphaFullCPU::post_interrupt(int int_num, int index) +{ + BaseCPU::post_interrupt(int_num, index); + + if (this->thread[0]->status() == ExecContext::Suspended) { + DPRINTF(IPI,"Suspended Processor awoke\n"); - // xcProxies[0]->activate(); + this->execContexts[0]->activate(); + } +} + +template +int +AlphaFullCPU::readIntrFlag() +{ + return this->regFile.readIntrFlag(); +} + +template +void +AlphaFullCPU::setIntrFlag(int val) +{ + 
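+ // The interrupt flag lives in the register file; just forward.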
this->regFile.setIntrFlag(val); +} + +template +Fault +AlphaFullCPU::hwrei(unsigned tid) +{ + // Need to clear the lock flag upon returning from an interrupt. + this->lockFlag = false; + + this->thread[tid]->kernelStats->hwrei(); + + this->checkInterrupts = true; + + // FIXME: XXX check for interrupts? XXX + return NoFault; +} + +template +bool +AlphaFullCPU::simPalCheck(int palFunc, unsigned tid) +{ + if (this->thread[tid]->kernelStats) + this->thread[tid]->kernelStats->callpal(palFunc, + this->execContexts[tid]); + + switch (palFunc) { + case PAL::halt: + halt(); + if (--System::numSystemsRunning == 0) + new SimExitEvent("all cpus halted"); + break; + + case PAL::bpt: + case PAL::bugchk: + if (this->system->breakpoint()) + return false; + break; + } + + return true; +} + +template +void +AlphaFullCPU::trap(Fault fault, unsigned tid) +{ ++ // Pass the thread's XC into the invoke method. + fault->invoke(this->execContexts[tid]); +} + +template +void +AlphaFullCPU::processInterrupts() +{ + // Check for interrupts here. For now can copy the code that + // exists within isa_fullsys_traits.hh. Also assume that thread 0 + // is the one that handles the interrupts. + // @todo: Possibly consolidate the interrupt checking code. + // @todo: Allow other threads to handle interrupts. + + // Check if there are any outstanding interrupts + //Handle the interrupts + int ipl = 0; + int summary = 0; + + this->checkInterrupts = false; + + if (this->readMiscReg(IPR_ASTRR, 0)) + panic("asynchronous traps not implemented\n"); + + if (this->readMiscReg(IPR_SIRR, 0)) { + for (int i = INTLEVEL_SOFTWARE_MIN; + i < INTLEVEL_SOFTWARE_MAX; i++) { + if (this->readMiscReg(IPR_SIRR, 0) & (ULL(1) << i)) { + // See table 4-19 of the 21164 hardware reference + ipl = (i - INTLEVEL_SOFTWARE_MIN) + 1; + summary |= (ULL(1) << i); + } + } + } + + uint64_t interrupts = this->intr_status(); + + if (interrupts) { + for (int i = INTLEVEL_EXTERNAL_MIN; + i < INTLEVEL_EXTERNAL_MAX; i++) { + if (interrupts & (ULL(1) << i)) { + // See table 4-19 of the 21164 hardware reference + ipl = i; + summary |= (ULL(1) << i); + } + } + } + + if (ipl && ipl > this->readMiscReg(IPR_IPLR, 0)) { + this->setMiscReg(IPR_ISR, summary, 0); + this->setMiscReg(IPR_INTID, ipl, 0); ++ // Checker needs to know these two registers were updated. + if (this->checker) { + this->checker->cpuXCBase()->setMiscReg(IPR_ISR, summary); + this->checker->cpuXCBase()->setMiscReg(IPR_INTID, ipl); + } + this->trap(Fault(new InterruptFault), 0); + DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n", + this->readMiscReg(IPR_IPLR, 0), ipl, summary); + } +} + +#endif // FULL_SYSTEM + +#if !FULL_SYSTEM + +template +void +AlphaFullCPU::syscall(int64_t callnum, int tid) +{ + DPRINTF(FullCPU, "AlphaFullCPU: [tid:%i] Executing syscall().\n\n", tid); + + DPRINTF(Activity,"Activity: syscall() called.\n"); + + // Temporarily increase this by one to account for the syscall + // instruction. + ++(this->thread[tid]->funcExeInst); + + // Execute the actual syscall. + this->thread[tid]->syscall(callnum); + + // Decrease funcExeInst by one as the normal commit will handle + // incrementing it. 
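+ // The net effect on funcExeInst is zero; commit's increment
+ // ends up counting the syscall instruction exactly once.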
+ --(this->thread[tid]->funcExeInst); +} + +template +TheISA::IntReg +AlphaFullCPU::getSyscallArg(int i, int tid) +{ + return this->readArchIntReg(AlphaISA::ArgumentReg0 + i, tid); +} + +template +void +AlphaFullCPU::setSyscallArg(int i, IntReg val, int tid) +{ + this->setArchIntReg(AlphaISA::ArgumentReg0 + i, val, tid); +} + +template +void +AlphaFullCPU::setSyscallReturn(SyscallReturn return_value, int tid) +{ + // check for error condition. Alpha syscall convention is to + // indicate success/failure in reg a3 (r19) and put the + // return value itself in the standard return value reg (v0). + if (return_value.successful()) { + // no error + this->setArchIntReg(SyscallSuccessReg, 0, tid); + this->setArchIntReg(ReturnValueReg, return_value.value(), tid); + } else { + // got an error, return details + this->setArchIntReg(SyscallSuccessReg, (IntReg) -1, tid); + this->setArchIntReg(ReturnValueReg, -return_value.value(), tid); + } +} +#endif diff --cc src/cpu/o3/alpha_dyn_inst.hh index af2858802,000000000..143ffe7e4 mode 100644,000000..100644 --- a/src/cpu/o3/alpha_dyn_inst.hh +++ b/src/cpu/o3/alpha_dyn_inst.hh @@@ -1,287 -1,0 +1,295 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_ALPHA_DYN_INST_HH__ +#define __CPU_O3_ALPHA_DYN_INST_HH__ + +#include "arch/isa_traits.hh" +#include "cpu/base_dyn_inst.hh" +#include "cpu/inst_seq.hh" +#include "cpu/o3/alpha_cpu.hh" +#include "cpu/o3/alpha_impl.hh" + +class Packet; + +/** + * Mostly implementation & ISA specific AlphaDynInst. As with most + * other classes in the new CPU model, it is templated on the Impl to + * allow for passing in of all types, such as the CPU type and the ISA + * type. The AlphaDynInst serves as the primary interface to the CPU + * for instructions that are executing. + */ +template +class AlphaDynInst : public BaseDynInst +{ + public: + /** Typedef for the CPU. 
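+ * Impl::FullCPU is expected to resolve to AlphaFullCPU<Impl>,
+ * giving the DynInst a typed handle back to its CPU.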
*/ + typedef typename Impl::FullCPU FullCPU; + + /** Binary machine instruction type. */ + typedef TheISA::MachInst MachInst; + /** Extended machine instruction type. */ + typedef TheISA::ExtMachInst ExtMachInst; + /** Logical register index type. */ + typedef TheISA::RegIndex RegIndex; + /** Integer register index type. */ + typedef TheISA::IntReg IntReg; + typedef TheISA::FloatReg FloatReg; + typedef TheISA::FloatRegBits FloatRegBits; + /** Misc register index type. */ + typedef TheISA::MiscReg MiscReg; + + enum { + MaxInstSrcRegs = TheISA::MaxInstSrcRegs, //< Max source regs + MaxInstDestRegs = TheISA::MaxInstDestRegs, //< Max dest regs + }; + + public: + /** BaseDynInst constructor given a binary instruction. */ + AlphaDynInst(ExtMachInst inst, Addr PC, Addr Pred_PC, InstSeqNum seq_num, + FullCPU *cpu); + + /** BaseDynInst constructor given a static inst pointer. */ + AlphaDynInst(StaticInstPtr &_staticInst); + + /** Executes the instruction. */ + Fault execute(); + + /** Initiates the access. Only valid for memory operations. */ + Fault initiateAcc(); + + /** Completes the access. Only valid for memory operations. */ + Fault completeAcc(Packet *pkt); + + private: + /** Initializes variables. */ + void initVars(); + + public: ++ /** Reads a miscellaneous register. */ + MiscReg readMiscReg(int misc_reg) + { + return this->cpu->readMiscReg(misc_reg, this->threadNumber); + } + ++ /** Reads a misc. register, including any side-effects the read ++ * might have as defined by the architecture. ++ */ + MiscReg readMiscRegWithEffect(int misc_reg, Fault &fault) + { + return this->cpu->readMiscRegWithEffect(misc_reg, fault, + this->threadNumber); + } + ++ /** Sets a misc. register. */ + Fault setMiscReg(int misc_reg, const MiscReg &val) + { + this->instResult.integer = val; + return this->cpu->setMiscReg(misc_reg, val, this->threadNumber); + } + ++ /** Sets a misc. register, including any side-effects the write ++ * might have as defined by the architecture. ++ */ + Fault setMiscRegWithEffect(int misc_reg, const MiscReg &val) + { + return this->cpu->setMiscRegWithEffect(misc_reg, val, + this->threadNumber); + } + +#if FULL_SYSTEM + /** Calls hardware return from error interrupt. */ + Fault hwrei(); + /** Reads interrupt flag. */ + int readIntrFlag(); + /** Sets interrupt flag. */ + void setIntrFlag(int val); + /** Checks if system is in PAL mode. */ + bool inPalMode(); + /** Traps to handle specified fault. */ + void trap(Fault fault); + bool simPalCheck(int palFunc); +#else + /** Calls a syscall. */ + void syscall(int64_t callnum); +#endif + + private: + /** Physical register index of the destination registers of this + * instruction. + */ + PhysRegIndex _destRegIdx[MaxInstDestRegs]; + + /** Physical register index of the source registers of this + * instruction. + */ + PhysRegIndex _srcRegIdx[MaxInstSrcRegs]; + + /** Physical register index of the previous producers of the + * architected destinations. + */ + PhysRegIndex _prevDestRegIdx[MaxInstDestRegs]; + + public: + + // The register accessor methods provide the index of the + // instruction's operand (e.g., 0 or 1), not the architectural + // register index, to simplify the implementation of register + // renaming. We find the architectural register index by indexing + // into the instruction's own operand index table. Note that a + // raw pointer to the StaticInst is provided instead of a + // ref-counted StaticInstPtr to reduce overhead.
This is fine as + // long as these methods don't copy the pointer into any long-term + // storage (which is pretty hard to imagine they would have reason + // to do). + + uint64_t readIntReg(const StaticInst *si, int idx) + { + return this->cpu->readIntReg(_srcRegIdx[idx]); + } + + FloatReg readFloatReg(const StaticInst *si, int idx, int width) + { + return this->cpu->readFloatReg(_srcRegIdx[idx], width); + } + + FloatReg readFloatReg(const StaticInst *si, int idx) + { + return this->cpu->readFloatReg(_srcRegIdx[idx]); + } + + FloatRegBits readFloatRegBits(const StaticInst *si, int idx, int width) + { + return this->cpu->readFloatRegBits(_srcRegIdx[idx], width); + } + + FloatRegBits readFloatRegBits(const StaticInst *si, int idx) + { + return this->cpu->readFloatRegBits(_srcRegIdx[idx]); + } + + /** @todo: Make results into arrays so they can handle multiple dest + * registers. + */ + void setIntReg(const StaticInst *si, int idx, uint64_t val) + { + this->cpu->setIntReg(_destRegIdx[idx], val); + BaseDynInst::setIntReg(si, idx, val); + } + + void setFloatReg(const StaticInst *si, int idx, FloatReg val, int width) + { + this->cpu->setFloatReg(_destRegIdx[idx], val, width); + BaseDynInst::setFloatRegSingle(si, idx, val); + } + + void setFloatReg(const StaticInst *si, int idx, FloatReg val) + { + this->cpu->setFloatReg(_destRegIdx[idx], val); + BaseDynInst::setFloatRegDouble(si, idx, val); + } + + void setFloatRegBits(const StaticInst *si, int idx, + FloatRegBits val, int width) + { + this->cpu->setFloatRegBits(_destRegIdx[idx], val, width); + this->instResult.integer = val; + } + + void setFloatRegBits(const StaticInst *si, int idx, FloatRegBits val) + { + this->cpu->setFloatRegBits(_destRegIdx[idx], val); + BaseDynInst::setFloatRegInt(si, idx, val); + } + + /** Returns the physical register index of the i'th destination + * register. + */ + PhysRegIndex renamedDestRegIdx(int idx) const + { + return _destRegIdx[idx]; + } + + /** Returns the physical register index of the i'th source register. */ + PhysRegIndex renamedSrcRegIdx(int idx) const + { + return _srcRegIdx[idx]; + } + + /** Returns the physical register index of the previous physical register + * that remapped to the same logical register index. + */ + PhysRegIndex prevDestRegIdx(int idx) const + { + return _prevDestRegIdx[idx]; + } + + /** Renames a destination register to a physical register. Also records + * the previous physical register that the logical register mapped to. + */ + void renameDestReg(int idx, + PhysRegIndex renamed_dest, + PhysRegIndex previous_rename) + { + _destRegIdx[idx] = renamed_dest; + _prevDestRegIdx[idx] = previous_rename; + } + + /** Renames a source logical register to the physical register which + * has/will produce that logical register's result. + * @todo: add in whether or not the source register is ready. + */ + void renameSrcReg(int idx, PhysRegIndex renamed_src) + { + _srcRegIdx[idx] = renamed_src; + } + + public: + /** Calculates EA part of a memory instruction. Currently unused, + * though it may be useful in the future if we want to split + * memory operations into EA calculation and memory access parts. + */ + Fault calcEA() + { + return this->staticInst->eaCompInst()->execute(this, this->traceData); + } + + /** Does the memory access part of a memory instruction. Currently unused, + * though it may be useful in the future if we want to split + * memory operations into EA calculation and memory access parts. 
+ */ + Fault memAccess() + { + return this->staticInst->memAccInst()->execute(this, this->traceData); + } +}; + +#endif // __CPU_O3_ALPHA_DYN_INST_HH__ + diff --cc src/cpu/o3/alpha_dyn_inst_impl.hh index 06755eb76,000000000..3a0727b45 mode 100644,000000..100644 --- a/src/cpu/o3/alpha_dyn_inst_impl.hh +++ b/src/cpu/o3/alpha_dyn_inst_impl.hh @@@ -1,176 -1,0 +1,180 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#include "cpu/o3/alpha_dyn_inst.hh" + +template +AlphaDynInst::AlphaDynInst(ExtMachInst inst, Addr PC, Addr Pred_PC, + InstSeqNum seq_num, FullCPU *cpu) + : BaseDynInst(inst, PC, Pred_PC, seq_num, cpu) +{ + initVars(); +} + +template +AlphaDynInst::AlphaDynInst(StaticInstPtr &_staticInst) + : BaseDynInst(_staticInst) +{ + initVars(); +} + +template +void +AlphaDynInst::initVars() +{ + // Make sure to have the renamed register entries set to the same + // as the normal register entries. It will allow the IQ to work + // without any modifications. + for (int i = 0; i < this->staticInst->numDestRegs(); i++) { + _destRegIdx[i] = this->staticInst->destRegIdx(i); + } + + for (int i = 0; i < this->staticInst->numSrcRegs(); i++) { + _srcRegIdx[i] = this->staticInst->srcRegIdx(i); + this->_readySrcRegIdx[i] = 0; + } +} + +template +Fault +AlphaDynInst::execute() +{ - // @todo: Pretty convoluted way to avoid squashing from happening when using - // the XC during an instruction's execution (specifically for instructions - // that have sideeffects that use the XC). Fix this. ++ // @todo: Pretty convoluted way to avoid squashing from happening ++ // when using the XC during an instruction's execution ++ // (specifically for instructions that have side-effects that use ++ // the XC). Fix this. 
+ bool in_syscall = this->thread->inSyscall; + this->thread->inSyscall = true; + + this->fault = this->staticInst->execute(this, this->traceData); + + this->thread->inSyscall = in_syscall; + + return this->fault; +} + +template +Fault +AlphaDynInst::initiateAcc() +{ - // @todo: Pretty convoluted way to avoid squashing from happening when using - // the XC during an instruction's execution (specifically for instructions - // that have sideeffects that use the XC). Fix this. ++ // @todo: Pretty convoluted way to avoid squashing from happening ++ // when using the XC during an instruction's execution ++ // (specifically for instructions that have side-effects that use ++ // the XC). Fix this. + bool in_syscall = this->thread->inSyscall; + this->thread->inSyscall = true; + + this->fault = this->staticInst->initiateAcc(this, this->traceData); + + this->thread->inSyscall = in_syscall; + + return this->fault; +} + +template +Fault +AlphaDynInst::completeAcc(Packet *pkt) +{ + if (this->isLoad()) { + this->fault = this->staticInst->completeAcc(pkt, this, + this->traceData); + } else if (this->isStore()) { + this->fault = this->staticInst->completeAcc(pkt, this, + this->traceData); + } else { + panic("Unknown type!"); + } + + return this->fault; +} + +#if FULL_SYSTEM +template +Fault +AlphaDynInst::hwrei() +{ ++ // Can only do a hwrei when in pal mode. + if (!this->cpu->inPalMode(this->readPC())) + return new AlphaISA::UnimplementedOpcodeFault; + ++ // Set the next PC based on the value of the EXC_ADDR IPR. + this->setNextPC(this->cpu->readMiscReg(AlphaISA::IPR_EXC_ADDR, + this->threadNumber)); + + // Tell CPU to clear any state it needs to if a hwrei is taken. + this->cpu->hwrei(this->threadNumber); + + // FIXME: XXX check for interrupts? XXX + return NoFault; +} + +template +int +AlphaDynInst::readIntrFlag() +{ + return this->cpu->readIntrFlag(); +} + +template +void +AlphaDynInst::setIntrFlag(int val) +{ + this->cpu->setIntrFlag(val); +} + +template +bool +AlphaDynInst::inPalMode() +{ + return this->cpu->inPalMode(this->PC); +} + +template +void +AlphaDynInst::trap(Fault fault) +{ + this->cpu->trap(fault, this->threadNumber); +} + +template +bool +AlphaDynInst::simPalCheck(int palFunc) +{ + return this->cpu->simPalCheck(palFunc, this->threadNumber); +} +#else +template +void +AlphaDynInst::syscall(int64_t callnum) +{ + this->cpu->syscall(callnum, this->threadNumber); +} +#endif + diff --cc src/cpu/o3/alpha_params.hh index 8c6779495,000000000..e48abd9ed mode 100644,000000..100644 --- a/src/cpu/o3/alpha_params.hh +++ b/src/cpu/o3/alpha_params.hh @@@ -1,186 -1,0 +1,187 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_ALPHA_PARAMS_HH__ +#define __CPU_O3_ALPHA_PARAMS_HH__ + +#include "cpu/o3/cpu.hh" + +//Forward declarations +class AlphaDTB; +class AlphaITB; +class FUPool; +class MemObject; +class Process; +class System; + +/** + * This file defines the parameters that will be used for the AlphaFullCPU. + * This must be defined externally so that the Impl can have a params class + * defined that it can pass to all of the individual stages. + */ + +class AlphaSimpleParams : public BaseFullCPU::Params +{ + public: + +#if FULL_SYSTEM + AlphaITB *itb; AlphaDTB *dtb; +#else + std::vector workload; + Process *process; +#endif // FULL_SYSTEM + + //Page Table +// PageTable *pTable; + + MemObject *mem; + + BaseCPU *checker; + + unsigned activity; + + // + // Caches + // +// MemInterface *icacheInterface; +// MemInterface *dcacheInterface; + + unsigned cachePorts; + + // + // Fetch + // + unsigned decodeToFetchDelay; + unsigned renameToFetchDelay; + unsigned iewToFetchDelay; + unsigned commitToFetchDelay; + unsigned fetchWidth; + + // + // Decode + // + unsigned renameToDecodeDelay; + unsigned iewToDecodeDelay; + unsigned commitToDecodeDelay; + unsigned fetchToDecodeDelay; + unsigned decodeWidth; + + // + // Rename + // + unsigned iewToRenameDelay; + unsigned commitToRenameDelay; + unsigned decodeToRenameDelay; + unsigned renameWidth; + + // + // IEW + // + unsigned commitToIEWDelay; + unsigned renameToIEWDelay; + unsigned issueToExecuteDelay; + unsigned issueWidth; + unsigned executeWidth; + unsigned executeIntWidth; + unsigned executeFloatWidth; + unsigned executeBranchWidth; + unsigned executeMemoryWidth; + FUPool *fuPool; + + // + // Commit + // + unsigned iewToCommitDelay; + unsigned renameToROBDelay; + unsigned commitWidth; + unsigned squashWidth; + Tick trapLatency; + Tick fetchTrapLatency; + + // - // Branch predictor (BP & BTB) ++ // Branch predictor (BP, BTB, RAS) + // ++ std::string predType; + unsigned localPredictorSize; + unsigned localCtrBits; + unsigned localHistoryTableSize; + unsigned localHistoryBits; + unsigned globalPredictorSize; + unsigned globalCtrBits; + unsigned globalHistoryBits; + unsigned choicePredictorSize; + unsigned choiceCtrBits; + + unsigned BTBEntries; + unsigned BTBTagSize; + + unsigned RASSize; + + // + // Load store queue + // + unsigned LQEntries; + unsigned SQEntries; + + // + // Memory dependence + // + unsigned SSITSize; + unsigned LFSTSize; + + // + // Miscellaneous + // + unsigned numPhysIntRegs; + unsigned numPhysFloatRegs; + unsigned numIQEntries; + unsigned numROBEntries; + + //SMT Parameters + unsigned smtNumFetchingThreads; + + std::string smtFetchPolicy; + + std::string smtIQPolicy; + unsigned smtIQThreshold; + + std::string 
smtLSQPolicy; + unsigned smtLSQThreshold; + + std::string smtCommitPolicy; + + std::string smtROBPolicy; + unsigned smtROBThreshold; + + // Probably can get this from somewhere. + unsigned instShiftAmt; +}; + +#endif // __CPU_O3_ALPHA_PARAMS_HH__ diff --cc src/cpu/o3/bpred_unit.cc index d0af5af92,000000000..b33543bdc mode 100644,000000..100644 --- a/src/cpu/o3/bpred_unit.cc +++ b/src/cpu/o3/bpred_unit.cc @@@ -1,39 -1,0 +1,39 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#include "cpu/o3/bpred_unit_impl.hh" +#include "cpu/o3/alpha_impl.hh" +#include "cpu/o3/alpha_dyn_inst.hh" +#include "cpu/ozone/ozone_impl.hh" +//#include "cpu/ozone/simple_impl.hh" + - template class TwobitBPredUnit; - template class TwobitBPredUnit; - //template class TwobitBPredUnit; ++template class BPredUnit; ++template class BPredUnit; ++//template class BPredUnit; diff --cc src/cpu/o3/bpred_unit.hh index 39a88a3ce,000000000..2c0a39565 mode 100644,000000..100644 --- a/src/cpu/o3/bpred_unit.hh +++ b/src/cpu/o3/bpred_unit.hh @@@ -1,228 -1,0 +1,256 @@@ +/* + * Copyright (c) 2004-2005 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_BPRED_UNIT_HH__ +#define __CPU_O3_BPRED_UNIT_HH__ + +// For Addr type. +#include "arch/isa_traits.hh" +#include "base/statistics.hh" +#include "cpu/inst_seq.hh" + +#include "cpu/o3/2bit_local_pred.hh" +#include "cpu/o3/btb.hh" +#include "cpu/o3/ras.hh" +#include "cpu/o3/tournament_pred.hh" + +#include + +/** + * Basically a wrapper class to hold both the branch predictor + * and the BTB. + */ +template - class TwobitBPredUnit ++class BPredUnit +{ - public: ++ private: + typedef typename Impl::Params Params; + typedef typename Impl::DynInstPtr DynInstPtr; + ++ enum PredType { ++ Local, ++ Tournament ++ }; ++ ++ PredType predictor; ++ ++ public: ++ + /** + * @param params The params object, that has the size of the BP and BTB. + */ - TwobitBPredUnit(Params *params); ++ BPredUnit(Params *params); + + /** + * Registers statistics. + */ + void regStats(); + + void switchOut(); + + void takeOverFrom(); + + /** + * Predicts whether or not the instruction is a taken branch, and the + * target of the branch if it is taken. + * @param inst The branch instruction. + * @param PC The predicted PC is passed back through this parameter. + * @param tid The thread id. + * @return Returns if the branch is taken or not. + */ + bool predict(DynInstPtr &inst, Addr &PC, unsigned tid); + ++ // @todo: Rename this function. ++ void BPUncond(void * &bp_history); ++ + /** + * Tells the branch predictor to commit any updates until the given + * sequence number. + * @param done_sn The sequence number to commit any older updates up until. + * @param tid The thread id. + */ + void update(const InstSeqNum &done_sn, unsigned tid); + + /** + * Squashes all outstanding updates until a given sequence number. + * @param squashed_sn The sequence number to squash any younger updates up + * until. + * @param tid The thread id. + */ + void squash(const InstSeqNum &squashed_sn, unsigned tid); + + /** + * Squashes all outstanding updates until a given sequence number, and + * corrects that sn's update with the proper address and taken/not taken. + * @param squashed_sn The sequence number to squash any younger updates up + * until. + * @param corr_target The correct branch target. + * @param actually_taken The correct branch direction. + * @param tid The thread id. + */ + void squash(const InstSeqNum &squashed_sn, const Addr &corr_target, + bool actually_taken, unsigned tid); + ++ /** ++ * @param bp_history Pointer to the history object. The predictor ++ * will need to update any state and delete the object. ++ */ ++ void BPSquash(void *bp_history); ++ + /** + * Looks up a given PC in the BP to see if it is taken or not taken. + * @param inst_PC The PC to look up. 
++ * @param bp_history Pointer that will be set to an object that ++ * has the branch predictor state associated with the lookup. + * @return Whether the branch is taken or not taken. + */ - bool BPLookup(Addr &inst_PC) - { return BP.lookup(inst_PC); } ++ bool BPLookup(Addr &inst_PC, void * &bp_history); + + /** + * Looks up a given PC in the BTB to see if a matching entry exists. + * @param inst_PC The PC to look up. + * @return Whether the BTB contains the given PC. + */ + bool BTBValid(Addr &inst_PC) + { return BTB.valid(inst_PC, 0); } + + /** + * Looks up a given PC in the BTB to get the predicted target. + * @param inst_PC The PC to look up. + * @return The address of the target of the branch. + */ + Addr BTBLookup(Addr &inst_PC) + { return BTB.lookup(inst_PC, 0); } + + /** + * Updates the BP with taken/not taken information. + * @param inst_PC The branch's PC that will be updated. + * @param taken Whether the branch was taken or not taken. ++ * @param bp_history Pointer to the branch predictor state that is ++ * associated with the branch lookup that is being updated. + * @todo Make this update flexible enough to handle a global predictor. + */ - void BPUpdate(Addr &inst_PC, bool taken) - { BP.update(inst_PC, taken); } ++ void BPUpdate(Addr &inst_PC, bool taken, void *bp_history); + + /** + * Updates the BTB with the target of a branch. + * @param inst_PC The branch's PC that will be updated. + * @param target_PC The branch's target that will be added to the BTB. + */ + void BTBUpdate(Addr &inst_PC, Addr &target_PC) + { BTB.update(inst_PC, target_PC,0); } + ++ void dump(); ++ + private: + struct PredictorHistory { + /** - * Makes a predictor history struct that contains a sequence number, - * the PC of its instruction, and whether or not it was predicted - * taken. ++ * Makes a predictor history struct that contains any ++ * information needed to update the predictor, BTB, and RAS. + */ + PredictorHistory(const InstSeqNum &seq_num, const Addr &inst_PC, - const bool pred_taken, const unsigned _tid) - : seqNum(seq_num), PC(inst_PC), RASTarget(0), globalHistory(0), ++ const bool pred_taken, void *bp_history, ++ const unsigned _tid) ++ : seqNum(seq_num), PC(inst_PC), RASTarget(0), + RASIndex(0), tid(_tid), predTaken(pred_taken), usedRAS(0), - wasCall(0) ++ wasCall(0), bpHistory(bp_history) + { } + + /** The sequence number for the predictor history entry. */ + InstSeqNum seqNum; + + /** The PC associated with the sequence number. */ + Addr PC; + + /** The RAS target (only valid if a return). */ + Addr RASTarget; + - /** The global history at the time this entry was created. */ - unsigned globalHistory; - + /** The RAS index of the instruction (only valid if a call). */ + unsigned RASIndex; + + /** The thread id. */ + unsigned tid; + + /** Whether or not it was predicted taken. */ + bool predTaken; + + /** Whether or not the RAS was used. */ + bool usedRAS; + + /** Whether or not the instruction was a call. */ + bool wasCall; ++ ++ /** Pointer to the history object passed back from the branch ++ * predictor. It is used to update or restore state of the ++ * branch predictor. ++ */ ++ void *bpHistory; + }; + + typedef std::list History; + + /** + * The per-thread predictor history. This is used to update the predictor + * as instructions are committed, or restore it to the proper state after + * a squash. + */ + History predHist[Impl::MaxThreads]; + - /** The branch predictor. */ - DefaultBP BP; ++ /** The local branch predictor. 
*/ ++ LocalBP *localBP; ++ ++ /** The tournament branch predictor. */ ++ TournamentBP *tournamentBP; + + /** The BTB. */ + DefaultBTB BTB; + + /** The per-thread return address stack. */ + ReturnAddrStack RAS[Impl::MaxThreads]; + + /** Stat for number of BP lookups. */ + Stats::Scalar<> lookups; + /** Stat for number of conditional branches predicted. */ + Stats::Scalar<> condPredicted; + /** Stat for number of conditional branches predicted incorrectly. */ + Stats::Scalar<> condIncorrect; + /** Stat for number of BTB lookups. */ + Stats::Scalar<> BTBLookups; + /** Stat for number of BTB hits. */ + Stats::Scalar<> BTBHits; + /** Stat for number of times the BTB is correct. */ + Stats::Scalar<> BTBCorrect; + /** Stat for number of times the RAS is used to get a target. */ + Stats::Scalar<> usedRAS; + /** Stat for number of times the RAS is incorrect. */ + Stats::Scalar<> RASIncorrect; +}; + +#endif // __CPU_O3_BPRED_UNIT_HH__ diff --cc src/cpu/o3/bpred_unit_impl.hh index cde9f28ab,000000000..0da02145b mode 100644,000000..100644 --- a/src/cpu/o3/bpred_unit_impl.hh +++ b/src/cpu/o3/bpred_unit_impl.hh @@@ -1,326 -1,0 +1,412 @@@ +/* + * Copyright (c) 2004-2005 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#include +#include + +#include "base/trace.hh" +#include "base/traceflags.hh" +#include "cpu/o3/bpred_unit.hh" + +using namespace std; + +template - TwobitBPredUnit::TwobitBPredUnit(Params *params) - : BP(params->localPredictorSize, - params->localCtrBits, - params->instShiftAmt), - BTB(params->BTBEntries, ++BPredUnit::BPredUnit(Params *params) ++ : BTB(params->BTBEntries, + params->BTBTagSize, + params->instShiftAmt) +{ ++ // Setup the selected predictor. 
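++ // Only the "local" and "tournament" predType values are ++ // recognized; any other value is a fatal configuration error.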
++ if (params->predType == "local") { ++ localBP = new LocalBP(params->localPredictorSize, ++ params->localCtrBits, ++ params->instShiftAmt); ++ predictor = Local; ++ } else if (params->predType == "tournament") { ++ tournamentBP = new TournamentBP(params->localPredictorSize, ++ params->localCtrBits, ++ params->localHistoryTableSize, ++ params->localHistoryBits, ++ params->globalPredictorSize, ++ params->globalHistoryBits, ++ params->globalCtrBits, ++ params->choicePredictorSize, ++ params->choiceCtrBits, ++ params->instShiftAmt); ++ predictor = Tournament; ++ } else { ++ fatal("Invalid BP selected!"); ++ } ++ + for (int i=0; i < Impl::MaxThreads; i++) + RAS[i].init(params->RASSize); +} + +template +void - TwobitBPredUnit::regStats() ++BPredUnit::regStats() +{ + lookups + .name(name() + ".BPredUnit.lookups") + .desc("Number of BP lookups") + ; + + condPredicted + .name(name() + ".BPredUnit.condPredicted") + .desc("Number of conditional branches predicted") + ; + + condIncorrect + .name(name() + ".BPredUnit.condIncorrect") + .desc("Number of conditional branches incorrect") + ; + + BTBLookups + .name(name() + ".BPredUnit.BTBLookups") + .desc("Number of BTB lookups") + ; + + BTBHits + .name(name() + ".BPredUnit.BTBHits") + .desc("Number of BTB hits") + ; + + BTBCorrect + .name(name() + ".BPredUnit.BTBCorrect") + .desc("Number of correct BTB predictions (this stat may not " + "work properly).") + ; + + usedRAS + .name(name() + ".BPredUnit.usedRAS") + .desc("Number of times the RAS was used to get a target.") + ; + + RASIncorrect + .name(name() + ".BPredUnit.RASInCorrect") + .desc("Number of incorrect RAS predictions.") + ; +} + +template +void - TwobitBPredUnit::switchOut() ++BPredUnit::switchOut() +{ ++ // Clear any state upon switch out. + for (int i = 0; i < Impl::MaxThreads; ++i) { - predHist[i].clear(); ++ squash(0, i); + } +} + +template +void - TwobitBPredUnit::takeOverFrom() ++BPredUnit::takeOverFrom() +{ ++ // Can reset all predictor state, but it's not necessarily better ++ // than leaving it be. +/* + for (int i = 0; i < Impl::MaxThreads; ++i) + RAS[i].reset(); + + BP.reset(); + BTB.reset(); +*/ +} + +template +bool - TwobitBPredUnit::predict(DynInstPtr &inst, Addr &PC, unsigned tid) ++BPredUnit::predict(DynInstPtr &inst, Addr &PC, unsigned tid) +{ + // See if branch predictor predicts taken. + // If so, get its target addr either from the BTB or the RAS. - // Once that's done, speculatively update the predictor? + // Save off record of branch stuff so the RAS can be fixed + // up once it's done. + + using TheISA::MachInst; + + bool pred_taken = false; + Addr target; + + ++lookups; + ++ void *bp_history = NULL; ++ + if (inst->isUncondCtrl()) { + DPRINTF(Fetch, "BranchPred: [tid:%i] Unconditional control.\n", tid); + pred_taken = true; ++ // Tell the BP there was an unconditional branch. ++ BPUncond(bp_history); + } else { + ++condPredicted; + - pred_taken = BPLookup(PC); ++ pred_taken = BPLookup(PC, bp_history); + + DPRINTF(Fetch, "BranchPred: [tid:%i]: Branch predictor predicted %i " + "for PC %#x\n", + tid, pred_taken, inst->readPC()); + } + - PredictorHistory predict_record(inst->seqNum, PC, pred_taken, tid); ++ PredictorHistory predict_record(inst->seqNum, PC, pred_taken, ++ bp_history, tid); + + // Now lookup in the BTB or RAS. + if (pred_taken) { + if (inst->isReturn()) { + ++usedRAS; + + // If it's a function return, then look up the address + // in the RAS. + target = RAS[tid].top(); + + // Record the top entry of the RAS, and its index.
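+ // (squash() uses the recorded index and target to restore the + // RAS exactly if this prediction turns out to be wrong.)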
+ predict_record.usedRAS = true; + predict_record.RASIndex = RAS[tid].topIdx(); + predict_record.RASTarget = target; + + assert(predict_record.RASIndex < 16); + + RAS[tid].pop(); + + DPRINTF(Fetch, "BranchPred: [tid:%i]: Instruction %#x is a return, " + "RAS predicted target: %#x, RAS index: %i.\n", + tid, inst->readPC(), target, predict_record.RASIndex); + } else { + ++BTBLookups; + + if (inst->isCall()) { + RAS[tid].push(PC + sizeof(MachInst)); + + // Record that it was a call so that the top RAS entry can + // be popped off if the speculation is incorrect. + predict_record.wasCall = true; + + DPRINTF(Fetch, "BranchPred: [tid:%i] Instruction %#x was a call" + ", adding %#x to the RAS.\n", + tid, inst->readPC(), PC + sizeof(MachInst)); + } + + if (BTB.valid(PC, tid)) { + ++BTBHits; + - //If it's anything else, use the BTB to get the target addr. ++ // If it's not a return, use the BTB to get the target addr. + target = BTB.lookup(PC, tid); + + DPRINTF(Fetch, "BranchPred: [tid:%i]: Instruction %#x predicted" + " target is %#x.\n", + tid, inst->readPC(), target); + + } else { + DPRINTF(Fetch, "BranchPred: [tid:%i]: BTB doesn't have a " + "valid entry.\n",tid); + pred_taken = false; + } + + } + } + + if (pred_taken) { + // Set the PC and the instruction's predicted target. + PC = target; + inst->setPredTarg(target); + } else { + PC = PC + sizeof(MachInst); + inst->setPredTarg(PC); + } + + predHist[tid].push_front(predict_record); + + DPRINTF(Fetch, "[tid:%i] predHist.size(): %i\n", tid, predHist[tid].size()); + + return pred_taken; +} + +template +void - TwobitBPredUnit::update(const InstSeqNum &done_sn, unsigned tid) ++BPredUnit::update(const InstSeqNum &done_sn, unsigned tid) +{ + DPRINTF(Fetch, "BranchPred: [tid:%i]: Committing branches until sequence " "number %lli.\n", tid, done_sn); + + while (!predHist[tid].empty() && + predHist[tid].back().seqNum <= done_sn) { + // Update the branch predictor with the correct results. - BP.update(predHist[tid].back().PC, - predHist[tid].back().predTaken); ++ BPUpdate(predHist[tid].back().PC, ++ predHist[tid].back().predTaken, ++ predHist[tid].back().bpHistory); + + predHist[tid].pop_back(); + } +} + +template +void - TwobitBPredUnit::squash(const InstSeqNum &squashed_sn, unsigned tid) ++BPredUnit::squash(const InstSeqNum &squashed_sn, unsigned tid) +{ + History &pred_hist = predHist[tid]; + + while (!pred_hist.empty() && + pred_hist.front().seqNum > squashed_sn) { - if (pred_hist.front().usedRAS) { ++ if (pred_hist.front().usedRAS) { + DPRINTF(Fetch, "BranchPred: [tid:%i]: Restoring top of RAS to: %i," + " target: %#x.\n", + tid, + pred_hist.front().RASIndex, + pred_hist.front().RASTarget); + + RAS[tid].restore(pred_hist.front().RASIndex, + pred_hist.front().RASTarget); + + } else if (pred_hist.front().wasCall) { - DPRINTF(Fetch, "BranchPred: [tid:%i]: Removing speculative entry added " - "to the RAS.\n",tid); ++ DPRINTF(Fetch, "BranchPred: [tid:%i]: Removing speculative entry " ++ "added to the RAS.\n",tid); + + RAS[tid].pop(); + } + ++ // This call should delete the bpHistory.
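++ // (The predictor-specific squash() implementation is presumably ++ // responsible for freeing the history object from this point on.)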
++ BPSquash(pred_hist.front().bpHistory); ++ + pred_hist.pop_front(); + } + +} + +template +void - TwobitBPredUnit::squash(const InstSeqNum &squashed_sn, - const Addr &corr_target, - const bool actually_taken, - unsigned tid) ++BPredUnit::squash(const InstSeqNum &squashed_sn, ++ const Addr &corr_target, ++ const bool actually_taken, ++ unsigned tid) +{ + // Now that we know that a branch was mispredicted, we need to undo + // all the branches that have been seen up until this branch and + // fix up everything. + + History &pred_hist = predHist[tid]; + + ++condIncorrect; + + DPRINTF(Fetch, "BranchPred: [tid:%i]: Squashing from sequence number %i, " + "setting target to %#x.\n", + tid, squashed_sn, corr_target); + - while (!pred_hist.empty() && - pred_hist.front().seqNum > squashed_sn) { - if (pred_hist.front().usedRAS) { - DPRINTF(Fetch, "BranchPred: [tid:%i]: Restoring top of RAS to: %i, " - "target: %#x.\n", - tid, - pred_hist.front().RASIndex, - pred_hist.front().RASTarget); - - RAS[tid].restore(pred_hist.front().RASIndex, - pred_hist.front().RASTarget); - } else if (pred_hist.front().wasCall) { - DPRINTF(Fetch, "BranchPred: [tid:%i]: Removing speculative entry" - " added to the RAS.\n", tid); - - RAS[tid].pop(); - } - - pred_hist.pop_front(); - } ++ squash(squashed_sn, tid); + + // If there's a squash due to a syscall, there may not be an entry + // corresponding to the squash. In that case, don't bother trying to + // fix up the entry. + if (!pred_hist.empty()) { - pred_hist.front().predTaken = actually_taken; - ++ assert(pred_hist.front().seqNum == squashed_sn); + if (pred_hist.front().usedRAS) { + ++RASIncorrect; + } + - BP.update(pred_hist.front().PC, actually_taken); ++ BPUpdate(pred_hist.front().PC, actually_taken, ++ pred_hist.front().bpHistory); + + BTB.update(pred_hist.front().PC, corr_target, tid); + pred_hist.pop_front(); + } +} ++ ++template ++void ++BPredUnit::BPUncond(void * &bp_history) ++{ ++ // Only the tournament predictor cares about unconditional branches. 
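++ // The local predictor keeps no history object for unconditional ++ // branches, so bp_history is simply left NULL in that case.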
++ if (predictor == Tournament) { ++ tournamentBP->uncondBr(bp_history); ++ } ++} ++ ++template ++void ++BPredUnit::BPSquash(void *bp_history) ++{ ++ if (predictor == Local) { ++ localBP->squash(bp_history); ++ } else if (predictor == Tournament) { ++ tournamentBP->squash(bp_history); ++ } else { ++ panic("Predictor type is unexpected value!"); ++ } ++} ++ ++template ++bool ++BPredUnit::BPLookup(Addr &inst_PC, void * &bp_history) ++{ ++ if (predictor == Local) { ++ return localBP->lookup(inst_PC, bp_history); ++ } else if (predictor == Tournament) { ++ return tournamentBP->lookup(inst_PC, bp_history); ++ } else { ++ panic("Predictor type is unexpected value!"); ++ } ++} ++ ++template ++void ++BPredUnit::BPUpdate(Addr &inst_PC, bool taken, void *bp_history) ++{ ++ if (predictor == Local) { ++ localBP->update(inst_PC, taken, bp_history); ++ } else if (predictor == Tournament) { ++ tournamentBP->update(inst_PC, taken, bp_history); ++ } else { ++ panic("Predictor type is unexpected value!"); ++ } ++} ++ ++template ++void ++BPredUnit::dump() ++{ ++ typename History::iterator pred_hist_it; ++ ++ for (int i = 0; i < Impl::MaxThreads; ++i) { ++ if (!predHist[i].empty()) { ++ pred_hist_it = predHist[i].begin(); ++ ++ cprintf("predHist[%i].size(): %i\n", i, predHist[i].size()); ++ ++ while (pred_hist_it != predHist[i].end()) { ++ cprintf("[sn:%lli], PC:%#x, tid:%i, predTaken:%i, " ++ "bpHistory:%#x\n", ++ (*pred_hist_it).seqNum, (*pred_hist_it).PC, ++ (*pred_hist_it).tid, (*pred_hist_it).predTaken, ++ (*pred_hist_it).bpHistory); ++ pred_hist_it++; ++ } ++ ++ cprintf("\n"); ++ } ++ } ++} diff --cc src/cpu/o3/comm.hh index 84ad5f6b1,000000000..bf1bd08e8 mode 100644,000000..100644 --- a/src/cpu/o3/comm.hh +++ b/src/cpu/o3/comm.hh @@@ -1,199 -1,0 +1,198 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_COMM_HH__ +#define __CPU_O3_COMM_HH__ + +#include + +#include "arch/faults.hh" +#include "arch/isa_traits.hh" +#include "cpu/inst_seq.hh" +#include "sim/host.hh" + +// Typedef for physical register index type. Although the Impl would be the +// most likely location for this, there are a few classes that need this +// typedef yet are not templated on the Impl. For now it will be defined here. +typedef short int PhysRegIndex; + ++/** Struct that defines the information passed from fetch to decode. */ +template +struct DefaultFetchDefaultDecode { + typedef typename Impl::DynInstPtr DynInstPtr; + + int size; + + DynInstPtr insts[Impl::MaxWidth]; + Fault fetchFault; + InstSeqNum fetchFaultSN; + bool clearFetchFault; +}; + ++/** Struct that defines the information passed from decode to rename. */ +template +struct DefaultDecodeDefaultRename { + typedef typename Impl::DynInstPtr DynInstPtr; + + int size; + + DynInstPtr insts[Impl::MaxWidth]; +}; + ++/** Struct that defines the information passed from rename to IEW. */ +template +struct DefaultRenameDefaultIEW { + typedef typename Impl::DynInstPtr DynInstPtr; + + int size; + + DynInstPtr insts[Impl::MaxWidth]; +}; + ++/** Struct that defines the information passed from IEW to commit. */ +template +struct DefaultIEWDefaultCommit { + typedef typename Impl::DynInstPtr DynInstPtr; + + int size; + + DynInstPtr insts[Impl::MaxWidth]; + + bool squash[Impl::MaxThreads]; + bool branchMispredict[Impl::MaxThreads]; + bool branchTaken[Impl::MaxThreads]; + uint64_t mispredPC[Impl::MaxThreads]; + uint64_t nextPC[Impl::MaxThreads]; + InstSeqNum squashedSeqNum[Impl::MaxThreads]; + + bool includeSquashInst[Impl::MaxThreads]; +}; + +template +struct IssueStruct { + typedef typename Impl::DynInstPtr DynInstPtr; + + int size; + + DynInstPtr insts[Impl::MaxWidth]; +}; + ++/** Struct that defines all backwards communication. */ +template +struct TimeBufStruct { + struct decodeComm { + bool squash; + bool predIncorrect; + uint64_t branchAddr; + + InstSeqNum doneSeqNum; + + // @todo: Might want to package this kind of branch stuff into a single + // struct as it is used pretty frequently. + bool branchMispredict; + bool branchTaken; + uint64_t mispredPC; + uint64_t nextPC; + + unsigned branchCount; + }; + + decodeComm decodeInfo[Impl::MaxThreads]; + - // Rename can't actually tell anything to squash or send a new PC back - // because it doesn't do anything along those lines. But maybe leave - // these fields in here to keep the stages mostly orthagonal. + struct renameComm { - bool squash; - - uint64_t nextPC; + }; + + renameComm renameInfo[Impl::MaxThreads]; + + struct iewComm { + // Also eventually include skid buffer space. + bool usedIQ; + unsigned freeIQEntries; + bool usedLSQ; + unsigned freeLSQEntries; + + unsigned iqCount; + unsigned ldstqCount; + + unsigned dispatched; + unsigned dispatchedToLSQ; + }; + + iewComm iewInfo[Impl::MaxThreads]; + + struct commitComm { + bool usedROB; + unsigned freeROBEntries; + bool emptyROB; + + bool squash; + bool robSquashing; + + bool branchMispredict; + bool branchTaken; + uint64_t mispredPC; + uint64_t nextPC; + + // Represents the instruction that has either been retired or + // squashed. Similar to having a single bus that broadcasts the + // retired or squashed sequence number. + InstSeqNum doneSeqNum; + + //Just in case we want to do a commit/squash on a cycle + //(necessary for multiple ROBs?) 
+ bool commitInsts; + InstSeqNum squashSeqNum; + + // Communication specifically to the IQ to tell the IQ that it can + // schedule a non-speculative instruction. + InstSeqNum nonSpecSeqNum; + + // Hack for now to send back an uncached access to the IEW stage. + typedef typename Impl::DynInstPtr DynInstPtr; + bool uncached; + DynInstPtr uncachedLoad; + + bool interruptPending; + bool clearInterrupt; + }; + + commitComm commitInfo[Impl::MaxThreads]; + + bool decodeBlock[Impl::MaxThreads]; + bool decodeUnblock[Impl::MaxThreads]; + bool renameBlock[Impl::MaxThreads]; + bool renameUnblock[Impl::MaxThreads]; + bool iewBlock[Impl::MaxThreads]; + bool iewUnblock[Impl::MaxThreads]; + bool commitBlock[Impl::MaxThreads]; + bool commitUnblock[Impl::MaxThreads]; +}; + +#endif //__CPU_O3_COMM_HH__ diff --cc src/cpu/o3/commit.hh index ae2aa2996,000000000..eef96b5fd mode 100644,000000..100644 --- a/src/cpu/o3/commit.hh +++ b/src/cpu/o3/commit.hh @@@ -1,422 -1,0 +1,454 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_COMMIT_HH__ +#define __CPU_O3_COMMIT_HH__ + +#include "arch/faults.hh" +#include "base/statistics.hh" +#include "base/timebuf.hh" +#include "cpu/exetrace.hh" +#include "cpu/inst_seq.hh" + +template +class O3ThreadState; + +/** + * DefaultCommit handles single threaded and SMT commit. Its width is + * specified by the parameters; each cycle it tries to commit that + * many instructions. The SMT policy decides which thread it tries to + * commit instructions from. Non- speculative instructions must reach + * the head of the ROB before they are ready to execute; once they + * reach the head, commit will broadcast the instruction's sequence + * number to the previous stages so that they can issue/ execute the + * instruction. Only one non-speculative instruction is handled per + * cycle. Commit is responsible for handling all back-end initiated + * redirects. 
It receives the redirect, and then broadcasts it to all + * stages, indicating the sequence number they should squash until, + * and any necessary branch misprediction information as well. It + * prioritizes redirects by instruction's age, only broadcasting a + * redirect if it corresponds to an instruction that should currently + * be in the ROB. This is done by tracking the sequence number of the + * youngest instruction in the ROB, which gets updated to any + * squashing instruction's sequence number, and only broadcasting a + * redirect if it corresponds to an older instruction. Commit also + * supports multiple cycle squashing, to model a ROB that can only + * remove a certain number of instructions per cycle. + */ +template +class DefaultCommit +{ + public: + // Typedefs from the Impl. + typedef typename Impl::FullCPU FullCPU; + typedef typename Impl::DynInstPtr DynInstPtr; + typedef typename Impl::Params Params; + typedef typename Impl::CPUPol CPUPol; + + typedef typename CPUPol::RenameMap RenameMap; + typedef typename CPUPol::ROB ROB; + + typedef typename CPUPol::TimeStruct TimeStruct; + typedef typename CPUPol::FetchStruct FetchStruct; + typedef typename CPUPol::IEWStruct IEWStruct; + typedef typename CPUPol::RenameStruct RenameStruct; + + typedef typename CPUPol::Fetch Fetch; + typedef typename CPUPol::IEW IEW; + + typedef O3ThreadState Thread; + ++ /** Event class used to schedule a squash due to a trap (fault or ++ * interrupt) to happen on a specific cycle. ++ */ + class TrapEvent : public Event { + private: + DefaultCommit *commit; + unsigned tid; + + public: + TrapEvent(DefaultCommit *_commit, unsigned _tid); + + void process(); + const char *description(); + }; + + /** Overall commit status. Used to determine if the CPU can deschedule + * itself due to a lack of activity. + */ + enum CommitStatus{ + Active, + Inactive + }; + + /** Individual thread status. */ + enum ThreadStatus { + Running, + Idle, + ROBSquashing, + TrapPending, + FetchTrapPending + }; + + /** Commit policy for SMT mode. */ + enum CommitPolicy { + Aggressive, + RoundRobin, + OldestReady + }; + + private: + /** Overall commit status. */ + CommitStatus _status; + /** Next commit status, to be set at the end of the cycle. */ + CommitStatus _nextStatus; + /** Per-thread status. */ + ThreadStatus commitStatus[Impl::MaxThreads]; + /** Commit policy used in SMT mode. */ + CommitPolicy commitPolicy; + + public: + /** Construct a DefaultCommit with the given parameters. */ + DefaultCommit(Params *params); + + /** Returns the name of the DefaultCommit. */ + std::string name() const; + + /** Registers statistics. */ + void regStats(); + + /** Sets the CPU pointer. */ + void setCPU(FullCPU *cpu_ptr); + + /** Sets the list of threads. */ + void setThreads(std::vector &threads); + + /** Sets the main time buffer pointer, used for backwards communication. */ + void setTimeBuffer(TimeBuffer *tb_ptr); + + void setFetchQueue(TimeBuffer *fq_ptr); + + /** Sets the pointer to the queue coming from rename. */ + void setRenameQueue(TimeBuffer *rq_ptr); + + /** Sets the pointer to the queue coming from IEW. */ + void setIEWQueue(TimeBuffer *iq_ptr); + + void setFetchStage(Fetch *fetch_stage); + + Fetch *fetchStage; + - /** Sets the poitner to the IEW stage. */ ++ /** Sets the pointer to the IEW stage. */ + void setIEWStage(IEW *iew_stage); + + /** The pointer to the IEW stage. Used solely to ensure that + * various events (traps, interrupts, syscalls) do not occur until + * all stores have written back.
+ */ + IEW *iewStage; + + /** Sets pointer to list of active threads. */ + void setActiveThreads(std::list *at_ptr); + + /** Sets pointer to the committed state rename map. */ + void setRenameMap(RenameMap rm_ptr[Impl::MaxThreads]); + + /** Sets pointer to the ROB. */ + void setROB(ROB *rob_ptr); + + /** Initializes stage by sending back the number of free entries. */ + void initStage(); + ++ /** Initializes the switching out of commit. */ + void switchOut(); + ++ /** Completes the switch out of commit. */ + void doSwitchOut(); + ++ /** Takes over from another CPU's thread. */ + void takeOverFrom(); + + /** Ticks the commit stage, which tries to commit instructions. */ + void tick(); + + /** Handles any squashes sent from IEW, adds instructions to the ROB, + * and tries to commit instructions. + */ + void commit(); + + /** Returns the number of free ROB entries for a specific thread. */ + unsigned numROBFreeEntries(unsigned tid); + ++ /** Generates an event to schedule a squash due to a trap. */ ++ void generateTrapEvent(unsigned tid); ++ ++ /** Records that commit needs to initiate a squash due to an ++ * external state update through the XC. ++ */ + void generateXCEvent(unsigned tid); + + private: + /** Updates the overall status of commit with the nextStatus, and - * tell the CPU if commit is active/inactive. */ ++ * tells the CPU if commit is active/inactive. ++ */ + void updateStatus(); + + /** Sets the next status based on threads' statuses, which becomes the + * current status at the end of the cycle. + */ + void setNextStatus(); + + /** Checks if the ROB has completed squashing. This is for the case + * where the ROB can take multiple cycles to complete squashing. + */ + bool robDoneSquashing(); + + /** Returns whether any of the threads have had the number of ROB entries + * change on this cycle. Used to determine if the number of free ROB + * entries needs to be sent back to previous stages. + */ + bool changedROBEntries(); + ++ /** Squashes all in-flight instructions. */ + void squashAll(unsigned tid); + ++ /** Handles squashing due to a trap. */ + void squashFromTrap(unsigned tid); + ++ /** Handles squashing due to an XC write. */ + void squashFromXC(unsigned tid); + + /** Commits as many instructions as possible. */ + void commitInsts(); + + /** Tries to commit the head ROB instruction passed in. + * @param head_inst The instruction to be committed. + */ + bool commitHead(DynInstPtr &head_inst, unsigned inst_num); + - void generateTrapEvent(unsigned tid); - + /** Gets instructions from rename and inserts them into the ROB. */ + void getInsts(); + + /** Marks completed instructions using information sent from IEW. */ + void markCompletedInsts(); + + /** Gets the thread to commit, based on the SMT policy. */ + int getCommittingThread(); + + /** Returns the thread ID to use based on a round robin policy. */ + int roundRobin(); + + /** Returns the thread ID to use based on an oldest instruction policy. */ + int oldestReady(); + + public: + /** Returns the PC of the head instruction of the ROB. + * @todo: Probably remove this function as it returns only thread 0. + */ + uint64_t readPC() { return PC[0]; } + ++ /** Returns the PC of a specific thread. */ + uint64_t readPC(unsigned tid) { return PC[tid]; } + ++ /** Sets the PC of a specific thread. */ + void setPC(uint64_t val, unsigned tid) { PC[tid] = val; } + ++ /** Reads the next PC of a specific thread. */ + uint64_t readNextPC(unsigned tid) { return nextPC[tid]; } + ++ /** Sets the next PC of a specific thread.
*/ + void setNextPC(uint64_t val, unsigned tid) { nextPC[tid] = val; } + + private: + /** Time buffer interface. */ + TimeBuffer *timeBuffer; + + /** Wire to write information heading to previous stages. */ + typename TimeBuffer::wire toIEW; + + /** Wire to read information from IEW (for ROB). */ + typename TimeBuffer::wire robInfoFromIEW; + + TimeBuffer *fetchQueue; + + typename TimeBuffer::wire fromFetch; + + /** IEW instruction queue interface. */ + TimeBuffer *iewQueue; + + /** Wire to read information from IEW queue. */ + typename TimeBuffer::wire fromIEW; + + /** Rename instruction queue interface, for ROB. */ + TimeBuffer *renameQueue; + + /** Wire to read information from rename queue. */ + typename TimeBuffer::wire fromRename; + + public: + /** ROB interface. */ + ROB *rob; + + private: + /** Pointer to FullCPU. */ + FullCPU *cpu; + ++ /** Vector of all of the threads. */ + std::vector thread; + + Fault fetchFault; + + int fetchTrapWait; + + /** Records that commit has written to the time buffer this cycle. Used for + * the CPU to determine if it can deschedule itself if there is no activity. + */ + bool wroteToTimeBuffer; + + /** Records if the number of ROB entries has changed this cycle. If it has, + * then the number of free entries must be re-broadcast. + */ + bool changedROBNumEntries[Impl::MaxThreads]; + + /** A counter of how many threads are currently squashing. */ + int squashCounter; + + /** Records if a thread has to squash this cycle due to a trap. */ + bool trapSquash[Impl::MaxThreads]; + + /** Records if a thread has to squash this cycle due to an XC write. */ + bool xcSquash[Impl::MaxThreads]; + + /** Priority List used for Commit Policy */ + std::list priority_list; + + /** IEW to Commit delay, in ticks. */ + unsigned iewToCommitDelay; + + /** Commit to IEW delay, in ticks. */ + unsigned commitToIEWDelay; + + /** Rename to ROB delay, in ticks. */ + unsigned renameToROBDelay; + + unsigned fetchToCommitDelay; + + /** Rename width, in instructions. Used so ROB knows how many + * instructions to get from the rename instruction queue. + */ + unsigned renameWidth; + + /** IEW width, in instructions. Used so ROB knows how many + * instructions to get from the IEW instruction queue. + */ + unsigned iewWidth; + + /** Commit width, in instructions. */ + unsigned commitWidth; + + /** Number of Reorder Buffers */ + unsigned numRobs; + + /** Number of Active Threads */ + unsigned numThreads; + ++ /** Is a switch out pending. */ + bool switchPending; ++ ++ /** Is commit switched out. */ + bool switchedOut; + ++ /** The latency to handle a trap. Used when scheduling trap ++ * squash event. ++ */ + Tick trapLatency; + + Tick fetchTrapLatency; + + Tick fetchFaultTick; + ++ /** The commit PC of each thread. Refers to the instruction that ++ * is currently being processed/committed. ++ */ + Addr PC[Impl::MaxThreads]; + ++ /** The next PC of each thread. */ + Addr nextPC[Impl::MaxThreads]; + + /** The sequence number of the youngest valid instruction in the ROB. */ + InstSeqNum youngestSeqNum[Impl::MaxThreads]; + + /** Pointer to the list of active threads. */ + std::list *activeThreads; + + /** Rename map interface. */ + RenameMap *renameMap[Impl::MaxThreads]; + ++ /** Updates commit stats based on this instruction. */ + void updateComInstStats(DynInstPtr &inst); + + /** Stat for the total number of committed instructions. */ + Stats::Scalar<> commitCommittedInsts; + /** Stat for the total number of squashed instructions discarded by commit. 
+ */ + Stats::Scalar<> commitSquashedInsts; + /** Stat for the total number of times commit is told to squash. + * @todo: Actually increment this stat. + */ + Stats::Scalar<> commitSquashEvents; + /** Stat for the total number of times commit has had to stall due to a non- + * speculative instruction reaching the head of the ROB. + */ + Stats::Scalar<> commitNonSpecStalls; + /** Stat for the total number of branch mispredicts that caused a squash. */ + Stats::Scalar<> branchMispredicts; + /** Distribution of the number of committed instructions each cycle. */ + Stats::Distribution<> numCommittedDist; + + /** Total number of instructions committed. */ + Stats::Vector<> statComInst; + /** Total number of software prefetches committed. */ + Stats::Vector<> statComSwp; + /** Stat for the total number of committed memory references. */ + Stats::Vector<> statComRefs; + /** Stat for the total number of committed loads. */ + Stats::Vector<> statComLoads; + /** Total number of committed memory barriers. */ + Stats::Vector<> statComMembars; + /** Total number of committed branches. */ + Stats::Vector<> statComBranches; + ++ /** Number of cycles where the commit bandwidth limit is reached. */ + Stats::Scalar<> commitEligibleSamples; ++ /** Number of instructions not committed due to bandwidth limits. */ + Stats::Vector<> commitEligible; +}; + +#endif // __CPU_O3_COMMIT_HH__ diff --cc src/cpu/o3/commit_impl.hh index 9efe30d24,000000000..f8a252b87 mode 100644,000000..100644 --- a/src/cpu/o3/commit_impl.hh +++ b/src/cpu/o3/commit_impl.hh @@@ -1,1308 -1,0 +1,1309 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Authors: Kevin Lim + */ + +#include +#include + +#include "base/loader/symtab.hh" +#include "base/timebuf.hh" +#include "cpu/checker/cpu.hh" +#include "cpu/exetrace.hh" +#include "cpu/o3/commit.hh" +#include "cpu/o3/thread_state.hh" + +using namespace std; + +template +DefaultCommit::TrapEvent::TrapEvent(DefaultCommit *_commit, + unsigned _tid) + : Event(&mainEventQueue, CPU_Tick_Pri), commit(_commit), tid(_tid) +{ + this->setFlags(Event::AutoDelete); +} + +template +void +DefaultCommit::TrapEvent::process() +{ + // This will get reset by commit if it was switched out at the + // time of this event processing. + commit->trapSquash[tid] = true; +} + +template +const char * +DefaultCommit::TrapEvent::description() +{ + return "Trap event"; +} + +template +DefaultCommit::DefaultCommit(Params *params) + : squashCounter(0), + iewToCommitDelay(params->iewToCommitDelay), + commitToIEWDelay(params->commitToIEWDelay), + renameToROBDelay(params->renameToROBDelay), + fetchToCommitDelay(params->commitToFetchDelay), + renameWidth(params->renameWidth), + iewWidth(params->executeWidth), + commitWidth(params->commitWidth), + numThreads(params->numberOfThreads), + switchedOut(false), + trapLatency(params->trapLatency), + fetchTrapLatency(params->fetchTrapLatency) +{ + _status = Active; + _nextStatus = Inactive; + string policy = params->smtCommitPolicy; + + //Convert string to lowercase + std::transform(policy.begin(), policy.end(), policy.begin(), + (int(*)(int)) tolower); + + //Assign commit policy + if (policy == "aggressive"){ + commitPolicy = Aggressive; + + DPRINTF(Commit,"Commit Policy set to Aggressive."); + } else if (policy == "roundrobin"){ + commitPolicy = RoundRobin; + + //Set-Up Priority List + for (int tid=0; tid < numThreads; tid++) { + priority_list.push_back(tid); + } + + DPRINTF(Commit,"Commit Policy set to Round Robin."); + } else if (policy == "oldestready"){ + commitPolicy = OldestReady; + + DPRINTF(Commit,"Commit Policy set to Oldest Ready."); + } else { + assert(0 && "Invalid SMT Commit Policy. 
Options Are: {Aggressive,"
+ "RoundRobin,OldestReady}");
+ }
+
+ for (int i=0; i < numThreads; i++) {
+ commitStatus[i] = Idle;
+ changedROBNumEntries[i] = false;
+ trapSquash[i] = false;
+ xcSquash[i] = false;
+ }
+
+ fetchFaultTick = 0;
+ fetchTrapWait = 0;
+}
+
+template <class Impl>
+std::string
+DefaultCommit<Impl>::name() const
+{
+ return cpu->name() + ".commit";
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::regStats()
+{
+ using namespace Stats;
+ commitCommittedInsts
+ .name(name() + ".commitCommittedInsts")
+ .desc("The number of committed instructions")
+ .prereq(commitCommittedInsts);
+ commitSquashedInsts
+ .name(name() + ".commitSquashedInsts")
+ .desc("The number of squashed insts skipped by commit")
+ .prereq(commitSquashedInsts);
+ commitSquashEvents
+ .name(name() + ".commitSquashEvents")
+ .desc("The number of times commit is told to squash")
+ .prereq(commitSquashEvents);
+ commitNonSpecStalls
+ .name(name() + ".commitNonSpecStalls")
+ .desc("The number of times commit has been forced to stall to "
+ "communicate backwards")
+ .prereq(commitNonSpecStalls);
+ branchMispredicts
+ .name(name() + ".branchMispredicts")
+ .desc("The number of times a branch was mispredicted")
+ .prereq(branchMispredicts);
+ numCommittedDist
+ .init(0,commitWidth,1)
+ .name(name() + ".COM:committed_per_cycle")
+ .desc("Number of insts committed each cycle")
+ .flags(Stats::pdf)
+ ;
+
+ statComInst
+ .init(cpu->number_of_threads)
+ .name(name() + ".COM:count")
+ .desc("Number of instructions committed")
+ .flags(total)
+ ;
+
+ statComSwp
+ .init(cpu->number_of_threads)
+ .name(name() + ".COM:swp_count")
+ .desc("Number of s/w prefetches committed")
+ .flags(total)
+ ;
+
+ statComRefs
+ .init(cpu->number_of_threads)
+ .name(name() + ".COM:refs")
+ .desc("Number of memory references committed")
+ .flags(total)
+ ;
+
+ statComLoads
+ .init(cpu->number_of_threads)
+ .name(name() + ".COM:loads")
+ .desc("Number of loads committed")
+ .flags(total)
+ ;
+
+ statComMembars
+ .init(cpu->number_of_threads)
+ .name(name() + ".COM:membars")
+ .desc("Number of memory barriers committed")
+ .flags(total)
+ ;
+
+ statComBranches
+ .init(cpu->number_of_threads)
+ .name(name() + ".COM:branches")
+ .desc("Number of branches committed")
+ .flags(total)
+ ;
+
+ //
+ // Commit-Eligible instructions...
+ //
+ // -> The number of instructions eligible to commit in those
+ // cycles where we reached our commit BW limit (less the number
+ // actually committed)
+ //
+ // -> The average value is computed over ALL CYCLES... not just
+ // the BW limited cycles
+ //
+ // -> The standard deviation is computed only over cycles where
+ // we reached the BW limit
+ //
+ commitEligible
+ .init(cpu->number_of_threads)
+ .name(name() + ".COM:bw_limited")
+ .desc("number of insts not committed due to BW limits")
+ .flags(total)
+ ;
+
+ commitEligibleSamples
+ .name(name() + ".COM:bw_lim_events")
+ .desc("number of cycles where commit BW limit reached")
+ ;
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::setCPU(FullCPU *cpu_ptr)
+{
+ DPRINTF(Commit, "Commit: Setting CPU pointer.\n");
+ cpu = cpu_ptr;
+
+ // Commit must broadcast the number of free entries it has at the start of
+ // the simulation, so it starts as active.
+ cpu->activateStage(FullCPU::CommitIdx);
+
+ trapLatency = cpu->cycles(trapLatency);
+ fetchTrapLatency = cpu->cycles(fetchTrapLatency);
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::setThreads(vector<Thread *> &threads)
+{
+ thread = threads;
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
+{
+ DPRINTF(Commit, "Commit: Setting time buffer pointer.\n");
+ timeBuffer = tb_ptr;
+
+ // Setup wire to send information back to IEW.
+ toIEW = timeBuffer->getWire(0);
+
+ // Setup wire to read data from IEW (for the ROB).
+ robInfoFromIEW = timeBuffer->getWire(-iewToCommitDelay);
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::setFetchQueue(TimeBuffer<FetchStruct> *fq_ptr)
+{
+ DPRINTF(Commit, "Commit: Setting fetch queue pointer.\n");
+ fetchQueue = fq_ptr;
+
+ // Setup wire to get instructions from fetch (for the ROB).
+ fromFetch = fetchQueue->getWire(-fetchToCommitDelay);
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::setRenameQueue(TimeBuffer<RenameStruct> *rq_ptr)
+{
+ DPRINTF(Commit, "Commit: Setting rename queue pointer.\n");
+ renameQueue = rq_ptr;
+
+ // Setup wire to get instructions from rename (for the ROB).
+ fromRename = renameQueue->getWire(-renameToROBDelay);
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::setIEWQueue(TimeBuffer<IEWStruct> *iq_ptr)
+{
+ DPRINTF(Commit, "Commit: Setting IEW queue pointer.\n");
+ iewQueue = iq_ptr;
+
+ // Setup wire to get instructions from IEW.
+ fromIEW = iewQueue->getWire(-iewToCommitDelay);
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::setFetchStage(Fetch *fetch_stage)
+{
+ fetchStage = fetch_stage;
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::setIEWStage(IEW *iew_stage)
+{
+ iewStage = iew_stage;
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::setActiveThreads(list<unsigned> *at_ptr)
+{
+ DPRINTF(Commit, "Commit: Setting active threads list pointer.\n");
+ activeThreads = at_ptr;
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::setRenameMap(RenameMap rm_ptr[])
+{
+ DPRINTF(Commit, "Setting rename map pointers.\n");
+
+ for (int i=0; i < numThreads; i++) {
+ renameMap[i] = &rm_ptr[i];
+ }
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::setROB(ROB *rob_ptr)
+{
+ DPRINTF(Commit, "Commit: Setting ROB pointer.\n");
+ rob = rob_ptr;
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::initStage()
+{
+ rob->setActiveThreads(activeThreads);
+ rob->resetEntries();
+
+ // Broadcast the number of free entries.
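+ // Rename cannot allow new instructions toward the ROB until it has
+ // heard how much space exists, so this initial broadcast avoids a
+ // startup stall.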
+ for (int i=0; i < numThreads; i++) {
+ toIEW->commitInfo[i].usedROB = true;
+ toIEW->commitInfo[i].freeROBEntries = rob->numFreeEntries(i);
+ }
+
+ cpu->activityThisCycle();
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::switchOut()
+{
+ switchPending = true;
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::doSwitchOut()
+{
+ switchedOut = true;
+ switchPending = false;
+ rob->switchOut();
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::takeOverFrom()
+{
+ switchedOut = false;
+ _status = Active;
+ _nextStatus = Inactive;
+ for (int i=0; i < numThreads; i++) {
+ commitStatus[i] = Idle;
+ changedROBNumEntries[i] = false;
+ trapSquash[i] = false;
+ xcSquash[i] = false;
+ }
+ squashCounter = 0;
+ rob->takeOverFrom();
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::updateStatus()
+{
+ // reset ROB changed variable
+ list<unsigned>::iterator threads = (*activeThreads).begin();
+ while (threads != (*activeThreads).end()) {
+ unsigned tid = *threads++;
+ changedROBNumEntries[tid] = false;
+
+ // Also check if any of the threads has a trap pending
+ if (commitStatus[tid] == TrapPending ||
+ commitStatus[tid] == FetchTrapPending) {
+ _nextStatus = Active;
+ }
+ }
+
+ if (_nextStatus == Inactive && _status == Active) {
+ DPRINTF(Activity, "Deactivating stage.\n");
+ cpu->deactivateStage(FullCPU::CommitIdx);
+ } else if (_nextStatus == Active && _status == Inactive) {
+ DPRINTF(Activity, "Activating stage.\n");
+ cpu->activateStage(FullCPU::CommitIdx);
+ }
+
+ _status = _nextStatus;
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::setNextStatus()
+{
+ int squashes = 0;
+
+ list<unsigned>::iterator threads = (*activeThreads).begin();
+
+ while (threads != (*activeThreads).end()) {
+ unsigned tid = *threads++;
+
+ if (commitStatus[tid] == ROBSquashing) {
+ squashes++;
+ }
+ }
+
+ assert(squashes == squashCounter);
+
+ // If commit is currently squashing, then it will have activity for the
+ // next cycle. Set its next status as active.
+ if (squashCounter) {
+ _nextStatus = Active;
+ }
+}
+
+template <class Impl>
+bool
+DefaultCommit<Impl>::changedROBEntries()
+{
+ list<unsigned>::iterator threads = (*activeThreads).begin();
+
+ while (threads != (*activeThreads).end()) {
+ unsigned tid = *threads++;
+
+ if (changedROBNumEntries[tid]) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+template <class Impl>
+unsigned
+DefaultCommit<Impl>::numROBFreeEntries(unsigned tid)
+{
+ return rob->numFreeEntries(tid);
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::generateTrapEvent(unsigned tid)
+{
+ DPRINTF(Commit, "Generating trap event for [tid:%i]\n", tid);
+
+ TrapEvent *trap = new TrapEvent(this, tid);
+
+ trap->schedule(curTick + trapLatency);
+
+ thread[tid]->trapPending = true;
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::generateXCEvent(unsigned tid)
+{
+ DPRINTF(Commit, "Generating XC squash event for [tid:%i]\n", tid);
+
+ xcSquash[tid] = true;
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::squashAll(unsigned tid)
+{
+ // If we want to include the squashing instruction in the squash,
+ // then use one older sequence number.
+ // Hopefully this doesn't mess things up. Basically I want to squash
+ // all instructions of this thread.
+ InstSeqNum squashed_inst = rob->isEmpty() ?
+ 0 : rob->readHeadInst(tid)->seqNum - 1;
+
+ // All younger instructions will be squashed. Set the sequence
+ // number as the youngest instruction in the ROB (0 in this case;
+ // hopefully nothing breaks).
+ youngestSeqNum[tid] = 0;
+
+ rob->squash(squashed_inst, tid);
+ changedROBNumEntries[tid] = true;
+
+ // Send back the sequence number of the squashed instruction.
+ toIEW->commitInfo[tid].doneSeqNum = squashed_inst;
+
+ // Send back the squash signal to tell stages that they should
+ // squash.
+ toIEW->commitInfo[tid].squash = true;
+
+ // Send back the rob squashing signal so other stages know that
+ // the ROB is in the process of squashing.
+ toIEW->commitInfo[tid].robSquashing = true;
+
+ toIEW->commitInfo[tid].branchMispredict = false;
+
+ toIEW->commitInfo[tid].nextPC = PC[tid];
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::squashFromTrap(unsigned tid)
+{
+ squashAll(tid);
+
+ DPRINTF(Commit, "Squashing from trap, restarting at PC %#x\n", PC[tid]);
+
+ thread[tid]->trapPending = false;
+ thread[tid]->inSyscall = false;
+
+ trapSquash[tid] = false;
+
+ commitStatus[tid] = ROBSquashing;
+ cpu->activityThisCycle();
+
+ ++squashCounter;
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::squashFromXC(unsigned tid)
+{
+ squashAll(tid);
+
+ DPRINTF(Commit, "Squashing from XC, restarting at PC %#x\n", PC[tid]);
+
+ thread[tid]->inSyscall = false;
+ assert(!thread[tid]->trapPending);
+
+ commitStatus[tid] = ROBSquashing;
+ cpu->activityThisCycle();
+
+ xcSquash[tid] = false;
+
+ ++squashCounter;
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::tick()
+{
+ wroteToTimeBuffer = false;
+ _nextStatus = Inactive;
+
+ if (switchPending && rob->isEmpty() && !iewStage->hasStoresToWB()) {
+ cpu->signalSwitched();
+ return;
+ }
+
+ list<unsigned>::iterator threads = (*activeThreads).begin();
+
+ // Check if any of the threads are done squashing. Change the
+ // status if they are done.
+ while (threads != (*activeThreads).end()) {
+ unsigned tid = *threads++;
+
+ if (commitStatus[tid] == ROBSquashing) {
+
+ if (rob->isDoneSquashing(tid)) {
+ commitStatus[tid] = Running;
+ --squashCounter;
+ } else {
+ DPRINTF(Commit,"[tid:%u]: Still Squashing, cannot commit any "
+ "insts this cycle.\n", tid);
+ }
+ }
+ }
+
+ commit();
+
+ markCompletedInsts();
+
+ threads = (*activeThreads).begin();
+
+ while (threads != (*activeThreads).end()) {
+ unsigned tid = *threads++;
+
+ if (!rob->isEmpty(tid) && rob->readHeadInst(tid)->readyToCommit()) {
+ // The ROB has more instructions it can commit. Its next status
+ // will be active.
+ _nextStatus = Active;
+
+ DynInstPtr inst = rob->readHeadInst(tid);
+
+ DPRINTF(Commit,"[tid:%i]: Instruction [sn:%lli] PC %#x is head of"
+ " ROB and ready to commit\n",
+ tid, inst->seqNum, inst->readPC());
+
+ } else if (!rob->isEmpty(tid)) {
+ DynInstPtr inst = rob->readHeadInst(tid);
+
+ DPRINTF(Commit,"[tid:%i]: Can't commit, Instruction [sn:%lli] PC "
+ "%#x is head of ROB and not ready\n",
+ tid, inst->seqNum, inst->readPC());
+ }
+
+ DPRINTF(Commit, "[tid:%i]: ROB has %d insts & %d free entries.\n",
+ tid, rob->countInsts(tid), rob->numFreeEntries(tid));
+ }
+
+
+ if (wroteToTimeBuffer) {
+ DPRINTF(Activity, "Activity This Cycle.\n");
+ cpu->activityThisCycle();
+ }
+
+ updateStatus();
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::commit()
+{
+
+ //////////////////////////////////////
+ // Check for interrupts
+ //////////////////////////////////////
+
+#if FULL_SYSTEM
+ // Process interrupts if interrupts are enabled, not in PAL mode,
+ // and no other traps or external squashes are currently pending.
+ // @todo: Allow other threads to handle interrupts.
+ if (cpu->checkInterrupts &&
+ cpu->check_interrupts() &&
+ !cpu->inPalMode(readPC()) &&
+ !trapSquash[0] &&
+ !xcSquash[0]) {
+ // Tell fetch that there is an interrupt pending. This will
+ // make fetch wait until it sees a non PAL-mode PC, at which
+ // point it stops fetching instructions.
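+ // The interrupt itself is only taken further below, once the
+ // ROB has emptied and all stores have written back.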
+ toIEW->commitInfo[0].interruptPending = true; + + // Wait until the ROB is empty and all stores have drained in + // order to enter the interrupt. + if (rob->isEmpty() && !iewStage->hasStoresToWB()) { + // Not sure which thread should be the one to interrupt. For now + // always do thread 0. + assert(!thread[0]->inSyscall); + thread[0]->inSyscall = true; + + // CPU will handle implementation of the interrupt. + cpu->processInterrupts(); + + // Now squash or record that I need to squash this cycle. + commitStatus[0] = TrapPending; + + // Exit state update mode to avoid accidental updating. + thread[0]->inSyscall = false; + + // Generate trap squash event. + generateTrapEvent(0); + + toIEW->commitInfo[0].clearInterrupt = true; + + DPRINTF(Commit, "Interrupt detected.\n"); + } else { + DPRINTF(Commit, "Interrupt pending, waiting for ROB to empty.\n"); + } + } +#endif // FULL_SYSTEM + + //////////////////////////////////// + // Check for any possible squashes, handle them first + //////////////////////////////////// + + list::iterator threads = (*activeThreads).begin(); + + while (threads != (*activeThreads).end()) { + unsigned tid = *threads++; - ++/* + if (fromFetch->fetchFault && commitStatus[0] != TrapPending) { + // Record the fault. Wait until it's empty in the ROB. + // Then handle the trap. Ignore it if there's already a + // trap pending as fetch will be redirected. + fetchFault = fromFetch->fetchFault; + fetchFaultTick = curTick + fetchTrapLatency; + commitStatus[0] = FetchTrapPending; + DPRINTF(Commit, "Fault from fetch recorded. Will trap if the " + "ROB empties without squashing the fault.\n"); + fetchTrapWait = 0; + } + + // Fetch may tell commit to clear the trap if it's been squashed. + if (fromFetch->clearFetchFault) { + DPRINTF(Commit, "Received clear fetch fault signal\n"); + fetchTrapWait = 0; + if (commitStatus[0] == FetchTrapPending) { + DPRINTF(Commit, "Clearing fault from fetch\n"); + commitStatus[0] = Running; + } + } - ++*/ + // Not sure which one takes priority. I think if we have + // both, that's a bad sign. + if (trapSquash[tid] == true) { + assert(!xcSquash[tid]); + squashFromTrap(tid); + } else if (xcSquash[tid] == true) { + squashFromXC(tid); + } + + // Squashed sequence number must be older than youngest valid + // instruction in the ROB. This prevents squashes from younger + // instructions overriding squashes from older instructions. + if (fromIEW->squash[tid] && + commitStatus[tid] != TrapPending && + fromIEW->squashedSeqNum[tid] <= youngestSeqNum[tid]) { + + DPRINTF(Commit, "[tid:%i]: Squashing due to PC %#x [sn:%i]\n", + tid, + fromIEW->mispredPC[tid], + fromIEW->squashedSeqNum[tid]); + + DPRINTF(Commit, "[tid:%i]: Redirecting to PC %#x\n", + tid, + fromIEW->nextPC[tid]); + + commitStatus[tid] = ROBSquashing; + + ++squashCounter; + + // If we want to include the squashing instruction in the squash, + // then use one older sequence number. + InstSeqNum squashed_inst = fromIEW->squashedSeqNum[tid]; + + if (fromIEW->includeSquashInst[tid] == true) + squashed_inst--; + + // All younger instructions will be squashed. Set the sequence + // number as the youngest instruction in the ROB. + youngestSeqNum[tid] = squashed_inst; + + rob->squash(squashed_inst, tid); + changedROBNumEntries[tid] = true; + + toIEW->commitInfo[tid].doneSeqNum = squashed_inst; + + toIEW->commitInfo[tid].squash = true; + + // Send back the rob squashing signal so other stages know that + // the ROB is in the process of squashing. 
+ toIEW->commitInfo[tid].robSquashing = true; + + toIEW->commitInfo[tid].branchMispredict = + fromIEW->branchMispredict[tid]; + + toIEW->commitInfo[tid].branchTaken = + fromIEW->branchTaken[tid]; + + toIEW->commitInfo[tid].nextPC = fromIEW->nextPC[tid]; + + toIEW->commitInfo[tid].mispredPC = fromIEW->mispredPC[tid]; + + if (toIEW->commitInfo[tid].branchMispredict) { + ++branchMispredicts; + } + } + + } + + setNextStatus(); + + if (squashCounter != numThreads) { + // If we're not currently squashing, then get instructions. + getInsts(); + + // Try to commit any instructions. + commitInsts(); + } + + //Check for any activity + threads = (*activeThreads).begin(); + + while (threads != (*activeThreads).end()) { + unsigned tid = *threads++; + + if (changedROBNumEntries[tid]) { + toIEW->commitInfo[tid].usedROB = true; + toIEW->commitInfo[tid].freeROBEntries = rob->numFreeEntries(tid); + + if (rob->isEmpty(tid)) { + toIEW->commitInfo[tid].emptyROB = true; + } + + wroteToTimeBuffer = true; + changedROBNumEntries[tid] = false; + } + } +} + +template +void +DefaultCommit::commitInsts() +{ + //////////////////////////////////// + // Handle commit + // Note that commit will be handled prior to putting new + // instructions in the ROB so that the ROB only tries to commit + // instructions it has in this current cycle, and not instructions + // it is writing in during this cycle. Can't commit and squash + // things at the same time... + //////////////////////////////////// + + DPRINTF(Commit, "Trying to commit instructions in the ROB.\n"); + + unsigned num_committed = 0; + + DynInstPtr head_inst; + + // Commit as many instructions as possible until the commit bandwidth + // limit is reached, or it becomes impossible to commit any more. + while (num_committed < commitWidth) { + int commit_thread = getCommittingThread(); + + if (commit_thread == -1 || !rob->isHeadReady(commit_thread)) + break; + + head_inst = rob->readHeadInst(commit_thread); + + int tid = head_inst->threadNumber; + + assert(tid == commit_thread); + + DPRINTF(Commit, "Trying to commit head instruction, [sn:%i] [tid:%i]\n", + head_inst->seqNum, tid); + + // If the head instruction is squashed, it is ready to retire + // (be removed from the ROB) at any time. + if (head_inst->isSquashed()) { + + DPRINTF(Commit, "Retiring squashed instruction from " + "ROB.\n"); + + rob->retireHead(commit_thread); + + ++commitSquashedInsts; + + // Record that the number of ROB entries has changed. + changedROBNumEntries[tid] = true; + } else { + PC[tid] = head_inst->readPC(); + nextPC[tid] = head_inst->readNextPC(); + + // Increment the total number of non-speculative instructions + // executed. + // Hack for now: it really shouldn't happen until after the + // commit is deemed to be successful, but this count is needed + // for syscalls. + thread[tid]->funcExeInst++; + + // Try to commit the head instruction. + bool commit_success = commitHead(head_inst, num_committed); + + if (commit_success) { + ++num_committed; + + changedROBNumEntries[tid] = true; + + // Set the doneSeqNum to the youngest committed instruction. + toIEW->commitInfo[tid].doneSeqNum = head_inst->seqNum; + + ++commitCommittedInsts; + + // To match the old model, don't count nops and instruction + // prefetches towards the total commit count. + if (!head_inst->isNop() && !head_inst->isInstPrefetch()) { + cpu->instDone(tid); + } + + PC[tid] = nextPC[tid]; + nextPC[tid] = nextPC[tid] + sizeof(TheISA::MachInst); +#if FULL_SYSTEM + int count = 0; + Addr oldpc; + do { + // Debug statement. 
Checks to make sure we're not + // currently updating state while handling PC events. + if (count == 0) + assert(!thread[tid]->inSyscall && + !thread[tid]->trapPending); + oldpc = PC[tid]; + cpu->system->pcEventQueue.service( + thread[tid]->getXCProxy()); + count++; + } while (oldpc != PC[tid]); + if (count > 1) { + DPRINTF(Commit, "PC skip function event, stopping commit\n"); + break; + } +#endif + } else { + DPRINTF(Commit, "Unable to commit head instruction PC:%#x " + "[tid:%i] [sn:%i].\n", + head_inst->readPC(), tid ,head_inst->seqNum); + break; + } + } + } + + DPRINTF(CommitRate, "%i\n", num_committed); + numCommittedDist.sample(num_committed); + + if (num_committed == commitWidth) { - commitEligible[0]++; ++ commitEligibleSamples++; + } +} + +template +bool +DefaultCommit::commitHead(DynInstPtr &head_inst, unsigned inst_num) +{ + assert(head_inst); + + int tid = head_inst->threadNumber; + + // If the instruction is not executed yet, then it will need extra + // handling. Signal backwards that it should be executed. + if (!head_inst->isExecuted()) { + // Keep this number correct. We have not yet actually executed + // and committed this instruction. + thread[tid]->funcExeInst--; + + head_inst->reachedCommit = true; + + if (head_inst->isNonSpeculative() || ++ head_inst->isStoreConditional() || + head_inst->isMemBarrier() || + head_inst->isWriteBarrier()) { + + DPRINTF(Commit, "Encountered a barrier or non-speculative " + "instruction [sn:%lli] at the head of the ROB, PC %#x.\n", + head_inst->seqNum, head_inst->readPC()); + +#if !FULL_SYSTEM + // Hack to make sure syscalls/memory barriers/quiesces + // aren't executed until all stores write back their data. + // This direct communication shouldn't be used for + // anything other than this. + if (inst_num > 0 || iewStage->hasStoresToWB()) +#else + if ((head_inst->isMemBarrier() || head_inst->isWriteBarrier() || + head_inst->isQuiesce()) && + iewStage->hasStoresToWB()) +#endif + { + DPRINTF(Commit, "Waiting for all stores to writeback.\n"); + return false; + } + + toIEW->commitInfo[tid].nonSpecSeqNum = head_inst->seqNum; + + // Change the instruction so it won't try to commit again until + // it is executed. + head_inst->clearCanCommit(); + + ++commitNonSpecStalls; + + return false; + } else if (head_inst->isLoad()) { + DPRINTF(Commit, "[sn:%lli]: Uncached load, PC %#x.\n", + head_inst->seqNum, head_inst->readPC()); + + // Send back the non-speculative instruction's sequence + // number. Tell the lsq to re-execute the load. + toIEW->commitInfo[tid].nonSpecSeqNum = head_inst->seqNum; + toIEW->commitInfo[tid].uncached = true; + toIEW->commitInfo[tid].uncachedLoad = head_inst; + + head_inst->clearCanCommit(); + + return false; + } else { + panic("Trying to commit un-executed instruction " + "of unknown type!\n"); + } + } + + if (head_inst->isThreadSync()) { + // Not handled for now. + panic("Thread sync instructions are not handled yet.\n"); + } + + // Stores mark themselves as completed. + if (!head_inst->isStore()) { + head_inst->setCompleted(); + } + + // Use checker prior to updating anything due to traps or PC + // based events. + if (cpu->checker) { + cpu->checker->tick(head_inst); + } + + // Check if the instruction caused a fault. If so, trap. 
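+ // The trap itself is executed right away (with inSyscall set so
+ // the state update cannot trigger further squashes), while the
+ // corresponding squash is delayed by trapLatency via
+ // generateTrapEvent().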
+ Fault inst_fault = head_inst->getFault();
+
+ if (inst_fault != NoFault) {
+ head_inst->setCompleted();
+#if FULL_SYSTEM
+ DPRINTF(Commit, "Inst [sn:%lli] PC %#x has a fault\n",
+ head_inst->seqNum, head_inst->readPC());
+
+ if (iewStage->hasStoresToWB() || inst_num > 0) {
+ DPRINTF(Commit, "Stores outstanding, fault must wait.\n");
+ return false;
+ }
+
+ if (cpu->checker && head_inst->isStore()) {
+ cpu->checker->tick(head_inst);
+ }
+
+ assert(!thread[tid]->inSyscall);
+
+ // Mark that we're in state update mode so that the trap's
+ // execution doesn't generate extra squashes.
+ thread[tid]->inSyscall = true;
+
+ // DTB will sometimes need the machine instruction for when
+ // faults happen. So we will set it here, prior to the DTB
+ // possibly needing it for its fault.
+ thread[tid]->setInst(
+ static_cast<TheISA::MachInst>(head_inst->staticInst->machInst));
+
+ // Execute the trap. Although it's slightly unrealistic in
+ // terms of timing (as it doesn't wait for the full timing of
+ // the trap event to complete before updating state), it's
+ // needed to update the state as soon as possible. This
+ // prevents external agents from changing any specific state
+ // that the trap needs.
+ cpu->trap(inst_fault, tid);
+
+ // Exit state update mode to avoid accidental updating.
+ thread[tid]->inSyscall = false;
+
+ commitStatus[tid] = TrapPending;
+
+ // Generate trap squash event.
+ generateTrapEvent(tid);
+
+ return false;
+#else // !FULL_SYSTEM
+ panic("fault (%d) detected @ PC %08p", inst_fault,
+ head_inst->PC);
+#endif // FULL_SYSTEM
+ }
+
+ updateComInstStats(head_inst);
+
+ if (head_inst->traceData) {
+ head_inst->traceData->setFetchSeq(head_inst->seqNum);
+ head_inst->traceData->setCPSeq(thread[tid]->numInst);
+ head_inst->traceData->finalize();
+ head_inst->traceData = NULL;
+ }
+
+ // Update the commit rename map
+ for (int i = 0; i < head_inst->numDestRegs(); i++) {
+ renameMap[tid]->setEntry(head_inst->destRegIdx(i),
+ head_inst->renamedDestRegIdx(i));
+ }
+
+ // Finally clear the head ROB entry.
+ rob->retireHead(tid);
+
+ // Return true to indicate that we have committed an instruction.
+ return true;
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::getInsts()
+{
+ // Read any renamed instructions and place them into the ROB.
+ int insts_to_process = min((int)renameWidth, fromRename->size);
+
+ for (int inst_num = 0; inst_num < insts_to_process; ++inst_num)
+ {
+ DynInstPtr inst = fromRename->insts[inst_num];
+ int tid = inst->threadNumber;
+
+ if (!inst->isSquashed() &&
+ commitStatus[tid] != ROBSquashing) {
+ changedROBNumEntries[tid] = true;
+
+ DPRINTF(Commit, "Inserting PC %#x [sn:%i] [tid:%i] into ROB.\n",
+ inst->readPC(), inst->seqNum, tid);
+
+ rob->insertInst(inst);
+
+ assert(rob->getThreadEntries(tid) <= rob->getMaxEntries(tid));
+
+ youngestSeqNum[tid] = inst->seqNum;
+ } else {
+ DPRINTF(Commit, "Instruction PC %#x [sn:%i] [tid:%i] was "
+ "squashed, skipping.\n",
+ inst->readPC(), inst->seqNum, tid);
+ }
+ }
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::markCompletedInsts()
+{
+ // Grab completed insts out of the IEW instruction queue, and mark
+ // instructions completed within the ROB.
+ for (int inst_num = 0;
+ inst_num < fromIEW->size && fromIEW->insts[inst_num];
+ ++inst_num)
+ {
+ if (!fromIEW->insts[inst_num]->isSquashed()) {
+ DPRINTF(Commit, "[tid:%i]: Marking PC %#x, [sn:%lli] ready "
+ "within ROB.\n",
+ fromIEW->insts[inst_num]->threadNumber,
+ fromIEW->insts[inst_num]->readPC(),
+ fromIEW->insts[inst_num]->seqNum);
+
+ // Mark the instruction as ready to commit.
+ fromIEW->insts[inst_num]->setCanCommit();
+ }
+ }
+}
+
+template <class Impl>
+bool
+DefaultCommit<Impl>::robDoneSquashing()
+{
+ list<unsigned>::iterator threads = (*activeThreads).begin();
+
+ while (threads != (*activeThreads).end()) {
+ unsigned tid = *threads++;
+
+ if (!rob->isDoneSquashing(tid))
+ return false;
+ }
+
+ return true;
+}
+
+template <class Impl>
+void
+DefaultCommit<Impl>::updateComInstStats(DynInstPtr &inst)
+{
+ unsigned thread = inst->threadNumber;
+
+ //
+ // Pick off the software prefetches
+ //
+#ifdef TARGET_ALPHA
+ if (inst->isDataPrefetch()) {
+ statComSwp[thread]++;
+ } else {
+ statComInst[thread]++;
+ }
+#else
+ statComInst[thread]++;
+#endif
+
+ //
+ // Control Instructions
+ //
+ if (inst->isControl())
+ statComBranches[thread]++;
+
+ //
+ // Memory references
+ //
+ if (inst->isMemRef()) {
+ statComRefs[thread]++;
+
+ if (inst->isLoad()) {
+ statComLoads[thread]++;
+ }
+ }
+
+ if (inst->isMemBarrier()) {
+ statComMembars[thread]++;
+ }
+}
+
+////////////////////////////////////////
+//                                    //
+// SMT COMMIT POLICY MAINTAINED HERE  //
+//                                    //
+////////////////////////////////////////
+template <class Impl>
+int
+DefaultCommit<Impl>::getCommittingThread()
+{
+ if (numThreads > 1) {
+ switch (commitPolicy) {
+
+ case Aggressive:
+ //If Policy is Aggressive, commit will call
+ //this function multiple times per
+ //cycle
+ return oldestReady();
+
+ case RoundRobin:
+ return roundRobin();
+
+ case OldestReady:
+ return oldestReady();
+
+ default:
+ return -1;
+ }
+ } else {
+ int tid = (*activeThreads).front();
+
+ if (commitStatus[tid] == Running ||
+ commitStatus[tid] == Idle ||
+ commitStatus[tid] == FetchTrapPending) {
+ return tid;
+ } else {
+ return -1;
+ }
+ }
+}
+
+template <class Impl>
+int
+DefaultCommit<Impl>::roundRobin()
+{
+ list<unsigned>::iterator pri_iter = priority_list.begin();
+ list<unsigned>::iterator end = priority_list.end();
+
+ while (pri_iter != end) {
+ unsigned tid = *pri_iter;
+
+ if (commitStatus[tid] == Running ||
+ commitStatus[tid] == Idle) {
+
+ if (rob->isHeadReady(tid)) {
+ priority_list.erase(pri_iter);
+ priority_list.push_back(tid);
+
+ return tid;
+ }
+ }
+
+ pri_iter++;
+ }
+
+ return -1;
+}
+
+template <class Impl>
+int
+DefaultCommit<Impl>::oldestReady()
+{
+ // oldest holds the thread id of the oldest ready head instruction
+ // seen so far; oldest_seq_num tracks its sequence number so that
+ // ages are compared against a sequence number, not a thread id.
+ unsigned oldest = 0;
+ InstSeqNum oldest_seq_num = 0;
+ bool first = true;
+
+ list<unsigned>::iterator threads = (*activeThreads).begin();
+
+ while (threads != (*activeThreads).end()) {
+ unsigned tid = *threads++;
+
+ if (!rob->isEmpty(tid) &&
+ (commitStatus[tid] == Running ||
+ commitStatus[tid] == Idle ||
+ commitStatus[tid] == FetchTrapPending)) {
+
+ if (rob->isHeadReady(tid)) {
+
+ DynInstPtr head_inst = rob->readHeadInst(tid);
+
+ if (first) {
+ oldest = tid;
+ oldest_seq_num = head_inst->seqNum;
+ first = false;
+ } else if (head_inst->seqNum < oldest_seq_num) {
+ oldest = tid;
+ oldest_seq_num = head_inst->seqNum;
+ }
+ }
+ }
+ }
+
+ if (!first) {
+ return oldest;
+ } else {
+ return -1;
+ }
+}
diff --cc src/cpu/o3/cpu.hh
index 51fcb1adb,000000000..c2c5289bf
mode 100644,000000..100644
--- a/src/cpu/o3/cpu.hh
+++ b/src/cpu/o3/cpu.hh
@@@ -1,525 -1,0 +1,544 @@@
+/*
+ * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_CPU_HH__ +#define __CPU_O3_CPU_HH__ + +#include +#include +#include +#include +#include + +#include "arch/isa_traits.hh" +#include "base/statistics.hh" +#include "base/timebuf.hh" +#include "config/full_system.hh" +#include "cpu/activity.hh" +#include "cpu/base.hh" +#include "cpu/cpu_exec_context.hh" +#include "cpu/o3/comm.hh" +#include "cpu/o3/cpu_policy.hh" +#include "cpu/o3/scoreboard.hh" +#include "cpu/o3/thread_state.hh" +#include "sim/process.hh" + +template +class Checker; +class ExecContext; +class MemObject; +class Process; + +class BaseFullCPU : public BaseCPU +{ + //Stuff that's pretty ISA independent will go here. + public: + typedef BaseCPU::Params Params; + + BaseFullCPU(Params *params); + + void regStats(); + + int readCpuId() { return cpu_id; } + + protected: + int cpu_id; +}; + ++/** ++ * FullO3CPU class, has each of the stages (fetch through commit) ++ * within it, as well as all of the time buffers between stages. The ++ * tick() function for the CPU is defined here. ++ */ +template +class FullO3CPU : public BaseFullCPU +{ + public: + typedef TheISA::FloatReg FloatReg; + typedef TheISA::FloatRegBits FloatRegBits; + + // Typedefs from the Impl here. + typedef typename Impl::CPUPol CPUPolicy; + typedef typename Impl::Params Params; + typedef typename Impl::DynInstPtr DynInstPtr; + + typedef O3ThreadState Thread; + + typedef typename std::list::iterator ListIt; + + public: + enum Status { + Running, + Idle, + Halted, + Blocked, + SwitchedOut + }; + + /** Overall CPU status. */ + Status _status; + + private: + class TickEvent : public Event + { + private: + /** Pointer to the CPU. */ + FullO3CPU *cpu; + + public: + /** Constructs a tick event. */ + TickEvent(FullO3CPU *c); + + /** Processes a tick event, calling tick() on the CPU. */ + void process(); + /** Returns the description of the tick event. */ + const char *description(); + }; + + /** The tick event used for scheduling CPU ticks. 
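+ * Note that scheduleTickEvent() below reschedules a squashed event
+ * rather than allocating a new one, e.g.:
+ * @code
+ * if (tickEvent.squashed())
+ *     tickEvent.reschedule(curTick + cycles(delay));
+ * @endcode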
*/ + TickEvent tickEvent; + + /** Schedule tick event, regardless of its current state. */ + void scheduleTickEvent(int delay) + { + if (tickEvent.squashed()) + tickEvent.reschedule(curTick + cycles(delay)); + else if (!tickEvent.scheduled()) + tickEvent.schedule(curTick + cycles(delay)); + } + + /** Unschedule tick event, regardless of its current state. */ + void unscheduleTickEvent() + { + if (tickEvent.scheduled()) + tickEvent.squash(); + } + + public: + /** Constructs a CPU with the given parameters. */ + FullO3CPU(Params *params); + /** Destructor. */ + ~FullO3CPU(); + + /** Registers statistics. */ + void fullCPURegStats(); + + /** Ticks CPU, calling tick() on each stage, and checking the overall + * activity to see if the CPU should deschedule itself. + */ + void tick(); + + /** Initialize the CPU */ + void init(); + + /** Setup CPU to insert a thread's context */ + void insertThread(unsigned tid); + + /** Remove all of a thread's context from CPU */ + void removeThread(unsigned tid); + + /** Count the Total Instructions Committed in the CPU. */ + virtual Counter totalInstructions() const + { + Counter total(0); + + for (int i=0; i < thread.size(); i++) + total += thread[i]->numInst; + + return total; + } + + /** Add Thread to Active Threads List. */ + void activateContext(int tid, int delay); + + /** Remove Thread from Active Threads List */ + void suspendContext(int tid); + + /** Remove Thread from Active Threads List && + * Remove Thread Context from CPU. + */ + void deallocateContext(int tid); + + /** Remove Thread from Active Threads List && + * Remove Thread Context from CPU. + */ + void haltContext(int tid); + + /** Activate a Thread When CPU Resources are Available. */ + void activateWhenReady(int tid); + + /** Add or Remove a Thread Context in the CPU. */ + void doContextSwitch(); + + /** Update The Order In Which We Process Threads. */ + void updateThreadPriority(); + + /** Executes a syscall on this cycle. + * --------------------------------------- + * Note: this is a virtual function. CPU-Specific + * functionality defined in derived classes + */ + virtual void syscall(int tid) { panic("Unimplemented!"); } + - /** Check if there are any system calls pending. */ - void checkSyscalls(); - - /** Switches out this CPU. - */ ++ /** Switches out this CPU. */ + void switchOut(Sampler *sampler); + ++ /** Signals to this CPU that a stage has completed switching out. */ + void signalSwitched(); + - /** Takes over from another CPU. - */ ++ /** Takes over from another CPU. */ + void takeOverFrom(BaseCPU *oldCPU); + + /** Get the current instruction sequence number, and increment it. */ + InstSeqNum getAndIncrementInstSeq() + { return globalSeqNum++; } + +#if FULL_SYSTEM + /** Check if this address is a valid instruction address. */ + bool validInstAddr(Addr addr) { return true; } + + /** Check if this address is a valid data address. */ + bool validDataAddr(Addr addr) { return true; } + + /** Get instruction asid. */ + int getInstAsid(unsigned tid) + { return regFile.miscRegs[tid].getInstAsid(); } + + /** Get data asid. */ + int getDataAsid(unsigned tid) + { return regFile.miscRegs[tid].getDataAsid(); } +#else + /** Get instruction asid. */ + int getInstAsid(unsigned tid) + { return thread[tid]->asid; } + + /** Get data asid. */ + int getDataAsid(unsigned tid) + { return thread[tid]->asid; } + +#endif + - // - // New accessors for new decoder. - // ++ /** Register accessors. Index refers to the physical register index. 
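++ * A short, hypothetical contrast of the two flavors (phys_idx and
++ * the architectural index 2 are illustrative only):
++ * @code
++ * uint64_t p = cpu->readIntReg(phys_idx);   // direct physical access
++ * uint64_t a = cpu->readArchIntReg(2, tid); // mapped through the
++ *                                           // commit rename table
++ * @endcode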
*/ + uint64_t readIntReg(int reg_idx); + + FloatReg readFloatReg(int reg_idx); + + FloatReg readFloatReg(int reg_idx, int width); + + FloatRegBits readFloatRegBits(int reg_idx); + + FloatRegBits readFloatRegBits(int reg_idx, int width); + + void setIntReg(int reg_idx, uint64_t val); + + void setFloatReg(int reg_idx, FloatReg val); + + void setFloatReg(int reg_idx, FloatReg val, int width); + + void setFloatRegBits(int reg_idx, FloatRegBits val); + + void setFloatRegBits(int reg_idx, FloatRegBits val, int width); + + uint64_t readArchIntReg(int reg_idx, unsigned tid); + + float readArchFloatRegSingle(int reg_idx, unsigned tid); + + double readArchFloatRegDouble(int reg_idx, unsigned tid); + + uint64_t readArchFloatRegInt(int reg_idx, unsigned tid); + ++ /** Architectural register accessors. Looks up in the commit ++ * rename table to obtain the true physical index of the ++ * architected register first, then accesses that physical ++ * register. ++ */ + void setArchIntReg(int reg_idx, uint64_t val, unsigned tid); + + void setArchFloatRegSingle(int reg_idx, float val, unsigned tid); + + void setArchFloatRegDouble(int reg_idx, double val, unsigned tid); + + void setArchFloatRegInt(int reg_idx, uint64_t val, unsigned tid); + ++ /** Reads the commit PC of a specific thread. */ + uint64_t readPC(unsigned tid); + - void setPC(Addr new_PC,unsigned tid); ++ /** Sets the commit PC of a specific thread. */ ++ void setPC(Addr new_PC, unsigned tid); + ++ /** Reads the next PC of a specific thread. */ + uint64_t readNextPC(unsigned tid); + - void setNextPC(uint64_t val,unsigned tid); ++ /** Sets the next PC of a specific thread. */ ++ void setNextPC(uint64_t val, unsigned tid); + + /** Function to add instruction onto the head of the list of the + * instructions. Used when new instructions are fetched. + */ + ListIt addInst(DynInstPtr &inst); + + /** Function to tell the CPU that an instruction has completed. */ + void instDone(unsigned tid); + + /** Add Instructions to the CPU Remove List*/ + void addToRemoveList(DynInstPtr &inst); + + /** Remove an instruction from the front end of the list. There's + * no restriction on location of the instruction. + */ + void removeFrontInst(DynInstPtr &inst); + + /** Remove all instructions that are not currently in the ROB. */ + void removeInstsNotInROB(unsigned tid); + + /** Remove all instructions younger than the given sequence number. */ + void removeInstsUntil(const InstSeqNum &seq_num,unsigned tid); + ++ /** Removes the instruction pointed to by the iterator. */ + inline void squashInstIt(const ListIt &instIt, const unsigned &tid); + ++ /** Cleans up all instructions on the remove list. */ + void cleanUpRemovedInsts(); + - /** Remove all instructions from the list. */ - // void removeAllInsts(); - ++ /** Debug function to print all instructions on the list. */ + void dumpInsts(); + - /** Basically a wrapper function so that instructions executed at - * commit can tell the instruction queue that they have - * completed. Eventually this hack should be removed. - */ - // void wakeDependents(DynInstPtr &inst); - + public: + /** List of all the instructions in flight. */ + std::list instList; + + /** List of all the instructions that will be removed at the end of this + * cycle. + */ + std::queue removeList; + +#ifdef DEBUG ++ /** Debug structure to keep track of the sequence numbers still in ++ * flight. ++ */ + std::set snList; +#endif + + /** Records if instructions need to be removed this cycle due to + * being retired or squashed. 
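+ * When set, the instructions queued on removeList are presumably
+ * freed at the end of the cycle by cleanUpRemovedInsts().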
+ */ + bool removeInstsThisCycle; + + protected: + /** The fetch stage. */ + typename CPUPolicy::Fetch fetch; + + /** The decode stage. */ + typename CPUPolicy::Decode decode; + + /** The dispatch stage. */ + typename CPUPolicy::Rename rename; + + /** The issue/execute/writeback stages. */ + typename CPUPolicy::IEW iew; + + /** The commit stage. */ + typename CPUPolicy::Commit commit; + + /** The register file. */ + typename CPUPolicy::RegFile regFile; + + /** The free list. */ + typename CPUPolicy::FreeList freeList; + + /** The rename map. */ + typename CPUPolicy::RenameMap renameMap[Impl::MaxThreads]; + + /** The commit rename map. */ + typename CPUPolicy::RenameMap commitRenameMap[Impl::MaxThreads]; + + /** The re-order buffer. */ + typename CPUPolicy::ROB rob; + + /** Active Threads List */ + std::list activeThreads; + + /** Integer Register Scoreboard */ + Scoreboard scoreboard; + + public: + /** Enum to give each stage a specific index, so when calling + * activateStage() or deactivateStage(), they can specify which stage + * is being activated/deactivated. + */ + enum StageIdx { + FetchIdx, + DecodeIdx, + RenameIdx, + IEWIdx, + CommitIdx, + NumStages }; + + /** Typedefs from the Impl to get the structs that each of the + * time buffers should use. + */ + typedef typename CPUPolicy::TimeStruct TimeStruct; + + typedef typename CPUPolicy::FetchStruct FetchStruct; + + typedef typename CPUPolicy::DecodeStruct DecodeStruct; + + typedef typename CPUPolicy::RenameStruct RenameStruct; + + typedef typename CPUPolicy::IEWStruct IEWStruct; + + /** The main time buffer to do backwards communication. */ + TimeBuffer timeBuffer; + + /** The fetch stage's instruction queue. */ + TimeBuffer fetchQueue; + + /** The decode stage's instruction queue. */ + TimeBuffer decodeQueue; + + /** The rename stage's instruction queue. */ + TimeBuffer renameQueue; + + /** The IEW stage's instruction queue. */ + TimeBuffer iewQueue; + - public: ++ private: ++ /** The activity recorder; used to tell if the CPU has any ++ * activity remaining or if it can go to idle and deschedule ++ * itself. ++ */ + ActivityRecorder activityRec; + ++ public: ++ /** Records that there was time buffer activity this cycle. */ + void activityThisCycle() { activityRec.activity(); } + ++ /** Changes a stage's status to active within the activity recorder. */ + void activateStage(const StageIdx idx) + { activityRec.activateStage(idx); } + ++ /** Changes a stage's status to inactive within the activity recorder. */ + void deactivateStage(const StageIdx idx) + { activityRec.deactivateStage(idx); } + + /** Wakes the CPU, rescheduling the CPU if it's not already active. */ + void wakeCPU(); + + /** Gets a free thread id. Use if thread ids change across system. */ + int getFreeTid(); + + public: - /** Temporary function to get pointer to exec context. */ ++ /** Returns a pointer to a thread's exec context. */ + ExecContext *xcBase(unsigned tid) + { + return thread[tid]->getXCProxy(); + } + + /** The global sequence number counter. */ + InstSeqNum globalSeqNum; + ++ /** Pointer to the checker, which can dynamically verify ++ * instruction results at run time. This can be set to NULL if it ++ * is not being used. ++ */ + Checker *checker; + +#if FULL_SYSTEM + /** Pointer to the system. */ + System *system; + + /** Pointer to the memory controller. */ + MemoryController *memCtrl; + /** Pointer to physical memory. */ + PhysicalMemory *physmem; +#endif + + /** Pointer to memory. 
*/ + MemObject *mem; + ++ /** Pointer to the sampler */ + Sampler *sampler; + ++ /** Counter of how many stages have completed switching out. */ + int switchCount; + - // List of all ExecContexts. ++ /** Pointers to all of the threads in the CPU. */ + std::vector thread; + +#if 0 + /** Page table pointer. */ + PageTable *pTable; +#endif + + /** Pointer to the icache interface. */ + MemInterface *icacheInterface; + /** Pointer to the dcache interface. */ + MemInterface *dcacheInterface; + + /** Whether or not the CPU should defer its registration. */ + bool deferRegistration; + + /** Is there a context switch pending? */ + bool contextSwitch; + + /** Threads Scheduled to Enter CPU */ + std::list cpuWaitList; + + /** The cycle that the CPU was last running, used for statistics. */ + Tick lastRunningCycle; + + /** Number of Threads CPU can process */ + unsigned numThreads; + + /** Mapping for system thread id to cpu id */ + std::map threadMap; + + /** Available thread ids in the cpu*/ + std::vector tids; + + /** Stat for total number of times the CPU is descheduled. */ + Stats::Scalar<> timesIdled; + /** Stat for total number of cycles the CPU spends descheduled. */ + Stats::Scalar<> idleCycles; + /** Stat for the number of committed instructions per thread. */ + Stats::Vector<> committedInsts; + /** Stat for the total number of committed instructions. */ + Stats::Scalar<> totalCommittedInsts; + /** Stat for the CPI per thread. */ + Stats::Formula cpi; + /** Stat for the total CPI. */ + Stats::Formula totalCpi; + /** Stat for the IPC per thread. */ + Stats::Formula ipc; + /** Stat for the total IPC. */ + Stats::Formula totalIpc; +}; + +#endif // __CPU_O3_CPU_HH__ diff --cc src/cpu/o3/cpu_policy.hh index 4ea4daee6,000000000..32a0adcf1 mode 100644,000000..100644 --- a/src/cpu/o3/cpu_policy.hh +++ b/src/cpu/o3/cpu_policy.hh @@@ -1,93 -1,0 +1,119 @@@ +/* + * Copyright (c) 2004-2005 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_CPU_POLICY_HH__ +#define __CPU_O3_CPU_POLICY_HH__ + +#include "cpu/o3/bpred_unit.hh" +#include "cpu/o3/free_list.hh" +#include "cpu/o3/inst_queue.hh" +#include "cpu/o3/lsq.hh" +#include "cpu/o3/lsq_unit.hh" +#include "cpu/o3/mem_dep_unit.hh" +#include "cpu/o3/regfile.hh" +#include "cpu/o3/rename_map.hh" +#include "cpu/o3/rob.hh" +#include "cpu/o3/store_set.hh" + +#include "cpu/o3/commit.hh" +#include "cpu/o3/decode.hh" +#include "cpu/o3/fetch.hh" +#include "cpu/o3/iew.hh" +#include "cpu/o3/rename.hh" + +#include "cpu/o3/comm.hh" + ++/** ++ * Struct that defines the key classes to be used by the CPU. All ++ * classes use the typedefs defined here to determine what are the ++ * classes of the other stages and communication buffers. In order to ++ * change a structure such as the IQ, simply change the typedef here ++ * to use the desired class instead, and recompile. In order to ++ * create a different CPU to be used simultaneously with this one, see ++ * the alpha_impl.hh file for instructions. ++ */ +template +struct SimpleCPUPolicy +{ - typedef TwobitBPredUnit BPredUnit; ++ /** Typedef for the branch prediction unit (which includes the BP, ++ * RAS, and BTB). ++ */ ++ typedef BPredUnit BPredUnit; ++ /** Typedef for the register file. Most classes assume a unified ++ * physical register file. ++ */ + typedef PhysRegFile RegFile; ++ /** Typedef for the freelist of registers. */ + typedef SimpleFreeList FreeList; ++ /** Typedef for the rename map. */ + typedef SimpleRenameMap RenameMap; ++ /** Typedef for the ROB. */ + typedef ROB ROB; ++ /** Typedef for the instruction queue/scheduler. */ + typedef InstructionQueue IQ; ++ /** Typedef for the memory dependence unit. */ + typedef MemDepUnit MemDepUnit; ++ /** Typedef for the LSQ. */ + typedef LSQ LSQ; ++ /** Typedef for the thread-specific LSQ units. */ + typedef LSQUnit LSQUnit; + - ++ /** Typedef for fetch. */ + typedef DefaultFetch Fetch; ++ /** Typedef for decode. */ + typedef DefaultDecode Decode; ++ /** Typedef for rename. */ + typedef DefaultRename Rename; ++ /** Typedef for Issue/Execute/Writeback. */ + typedef DefaultIEW IEW; ++ /** Typedef for commit. */ + typedef DefaultCommit Commit; + + /** The struct for communication between fetch and decode. */ + typedef DefaultFetchDefaultDecode FetchStruct; + + /** The struct for communication between decode and rename. */ + typedef DefaultDecodeDefaultRename DecodeStruct; + + /** The struct for communication between rename and IEW. */ + typedef DefaultRenameDefaultIEW RenameStruct; + + /** The struct for communication between IEW and commit. */ + typedef DefaultIEWDefaultCommit IEWStruct; + + /** The struct for communication within the IEW stage. */ + typedef IssueStruct IssueStruct; + + /** The struct for all backwards communication. */ + typedef TimeBufStruct TimeStruct; + +}; + +#endif //__CPU_O3_CPU_POLICY_HH__ diff --cc src/cpu/o3/decode.hh index 8abe1d480,000000000..ff88358d6 mode 100644,000000..100644 --- a/src/cpu/o3/decode.hh +++ b/src/cpu/o3/decode.hh @@@ -1,294 -1,0 +1,297 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_DECODE_HH__ +#define __CPU_O3_DECODE_HH__ + +#include + +#include "base/statistics.hh" +#include "base/timebuf.hh" + +/** + * DefaultDecode class handles both single threaded and SMT + * decode. Its width is specified by the parameters; each cycles it + * tries to decode that many instructions. Because instructions are + * actually decoded when the StaticInst is created, this stage does + * not do much other than check any PC-relative branches. + */ +template +class DefaultDecode +{ + private: + // Typedefs from the Impl. + typedef typename Impl::FullCPU FullCPU; + typedef typename Impl::DynInstPtr DynInstPtr; + typedef typename Impl::Params Params; + typedef typename Impl::CPUPol CPUPol; + + // Typedefs from the CPU policy. + typedef typename CPUPol::FetchStruct FetchStruct; + typedef typename CPUPol::DecodeStruct DecodeStruct; + typedef typename CPUPol::TimeStruct TimeStruct; + + public: + /** Overall decode stage status. Used to determine if the CPU can + * deschedule itself due to a lack of activity. + */ + enum DecodeStatus { + Active, + Inactive + }; + + /** Individual thread status. */ + enum ThreadStatus { + Running, + Idle, + StartSquash, + Squashing, + Blocked, + Unblocking + }; + + private: + /** Decode status. */ + DecodeStatus _status; + + /** Per-thread status. */ + ThreadStatus decodeStatus[Impl::MaxThreads]; + + public: + /** DefaultDecode constructor. */ + DefaultDecode(Params *params); + + /** Returns the name of decode. */ + std::string name() const; + + /** Registers statistics. */ + void regStats(); + + /** Sets CPU pointer. */ + void setCPU(FullCPU *cpu_ptr); + + /** Sets the main backwards communication time buffer pointer. */ + void setTimeBuffer(TimeBuffer *tb_ptr); + + /** Sets pointer to time buffer used to communicate to the next stage. */ + void setDecodeQueue(TimeBuffer *dq_ptr); + + /** Sets pointer to time buffer coming from fetch. */ + void setFetchQueue(TimeBuffer *fq_ptr); + + /** Sets pointer to list of active threads. 
*/ + void setActiveThreads(std::list *at_ptr); + ++ /** Switches out the decode stage. */ + void switchOut(); + ++ /** Takes over from another CPU's thread. */ + void takeOverFrom(); ++ + /** Ticks decode, processing all input signals and decoding as many + * instructions as possible. + */ + void tick(); + + /** Determines what to do based on decode's current status. + * @param status_change decode() sets this variable if there was a status + * change (ie switching from from blocking to unblocking). + * @param tid Thread id to decode instructions from. + */ + void decode(bool &status_change, unsigned tid); + + /** Processes instructions from fetch and passes them on to rename. + * Decoding of instructions actually happens when they are created in + * fetch, so this function mostly checks if PC-relative branches are + * correct. + */ + void decodeInsts(unsigned tid); + + private: + /** Inserts a thread's instructions into the skid buffer, to be decoded + * once decode unblocks. + */ + void skidInsert(unsigned tid); + + /** Returns if all of the skid buffers are empty. */ + bool skidsEmpty(); + + /** Updates overall decode status based on all of the threads' statuses. */ + void updateStatus(); + + /** Separates instructions from fetch into individual lists of instructions + * sorted by thread. + */ + void sortInsts(); + + /** Reads all stall signals from the backwards communication timebuffer. */ + void readStallSignals(unsigned tid); + + /** Checks all input signals and updates decode's status appropriately. */ + bool checkSignalsAndUpdate(unsigned tid); + + /** Checks all stall signals, and returns if any are true. */ + bool checkStall(unsigned tid) const; + + /** Returns if there any instructions from fetch on this cycle. */ + inline bool fetchInstsValid(); + + /** Switches decode to blocking, and signals back that decode has + * become blocked. + * @return Returns true if there is a status change. + */ + bool block(unsigned tid); + + /** Switches decode to unblocking if the skid buffer is empty, and + * signals back that decode has unblocked. + * @return Returns true if there is a status change. + */ + bool unblock(unsigned tid); + + /** Squashes if there is a PC-relative branch that was predicted + * incorrectly. Sends squash information back to fetch. + */ + void squash(DynInstPtr &inst, unsigned tid); + + public: + /** Squashes due to commit signalling a squash. Changes status to + * squashing and clears block/unblock signals as needed. + */ + unsigned squash(unsigned tid); + + private: + // Interfaces to objects outside of decode. + /** CPU interface. */ + FullCPU *cpu; + + /** Time buffer interface. */ + TimeBuffer *timeBuffer; + + /** Wire to get rename's output from backwards time buffer. */ + typename TimeBuffer::wire fromRename; + + /** Wire to get iew's information from backwards time buffer. */ + typename TimeBuffer::wire fromIEW; + + /** Wire to get commit's information from backwards time buffer. */ + typename TimeBuffer::wire fromCommit; + + /** Wire to write information heading to previous stages. */ + // Might not be the best name as not only fetch will read it. + typename TimeBuffer::wire toFetch; + + /** Decode instruction queue. */ + TimeBuffer *decodeQueue; + + /** Wire used to write any information heading to rename. */ + typename TimeBuffer::wire toRename; + + /** Fetch instruction queue interface. */ + TimeBuffer *fetchQueue; + + /** Wire to get fetch's output from fetch queue. 
*/ + typename TimeBuffer::wire fromFetch; + + /** Queue of all instructions coming from fetch this cycle. */ + std::queue insts[Impl::MaxThreads]; + + /** Skid buffer between fetch and decode. */ + std::queue skidBuffer[Impl::MaxThreads]; + + /** Variable that tracks if decode has written to the time buffer this + * cycle. Used to tell CPU if there is activity this cycle. + */ + bool wroteToTimeBuffer; + + /** Source of possible stalls. */ + struct Stalls { + bool rename; + bool iew; + bool commit; + }; + + /** Tracks which stages are telling decode to stall. */ + Stalls stalls[Impl::MaxThreads]; + + /** Rename to decode delay, in ticks. */ + unsigned renameToDecodeDelay; + + /** IEW to decode delay, in ticks. */ + unsigned iewToDecodeDelay; + + /** Commit to decode delay, in ticks. */ + unsigned commitToDecodeDelay; + + /** Fetch to decode delay, in ticks. */ + unsigned fetchToDecodeDelay; + + /** The width of decode, in instructions. */ + unsigned decodeWidth; + + /** Index of instructions being sent to rename. */ + unsigned toRenameIndex; + + /** number of Active Threads*/ + unsigned numThreads; + + /** List of active thread ids */ + std::list *activeThreads; + + /** Number of branches in flight. */ + unsigned branchCount[Impl::MaxThreads]; + + /** Maximum size of the skid buffer. */ + unsigned skidBufferMax; + + /** Stat for total number of idle cycles. */ + Stats::Scalar<> decodeIdleCycles; + /** Stat for total number of blocked cycles. */ + Stats::Scalar<> decodeBlockedCycles; + /** Stat for total number of normal running cycles. */ + Stats::Scalar<> decodeRunCycles; + /** Stat for total number of unblocking cycles. */ + Stats::Scalar<> decodeUnblockCycles; + /** Stat for total number of squashing cycles. */ + Stats::Scalar<> decodeSquashCycles; + /** Stat for number of times a branch is resolved at decode. */ + Stats::Scalar<> decodeBranchResolved; + /** Stat for number of times a branch mispredict is detected. */ + Stats::Scalar<> decodeBranchMispred; + /** Stat for number of times decode detected a non-control instruction + * incorrectly predicted as a branch. + */ + Stats::Scalar<> decodeControlMispred; + /** Stat for total number of decoded instructions. */ + Stats::Scalar<> decodeDecodedInsts; + /** Stat for total number of squashed instructions. */ + Stats::Scalar<> decodeSquashedInsts; +}; + +#endif // __CPU_O3_DECODE_HH__ diff --cc src/cpu/o3/decode_impl.hh index b03daff11,000000000..64b04bc3d mode 100644,000000..100644 --- a/src/cpu/o3/decode_impl.hh +++ b/src/cpu/o3/decode_impl.hh @@@ -1,743 -1,0 +1,752 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#include "cpu/o3/decode.hh" + +using namespace std; + +template +DefaultDecode::DefaultDecode(Params *params) + : renameToDecodeDelay(params->renameToDecodeDelay), + iewToDecodeDelay(params->iewToDecodeDelay), + commitToDecodeDelay(params->commitToDecodeDelay), + fetchToDecodeDelay(params->fetchToDecodeDelay), + decodeWidth(params->decodeWidth), + numThreads(params->numberOfThreads) +{ + _status = Inactive; + ++ // Setup status, make sure stall signals are clear. + for (int i = 0; i < numThreads; ++i) { + decodeStatus[i] = Idle; + + stalls[i].rename = false; + stalls[i].iew = false; + stalls[i].commit = false; + } + + // @todo: Make into a parameter + skidBufferMax = (fetchToDecodeDelay * params->fetchWidth) + decodeWidth; +} + +template +std::string +DefaultDecode::name() const +{ + return cpu->name() + ".decode"; +} + +template +void +DefaultDecode::regStats() +{ + decodeIdleCycles + .name(name() + ".DECODE:IdleCycles") + .desc("Number of cycles decode is idle") + .prereq(decodeIdleCycles); + decodeBlockedCycles + .name(name() + ".DECODE:BlockedCycles") + .desc("Number of cycles decode is blocked") + .prereq(decodeBlockedCycles); + decodeRunCycles + .name(name() + ".DECODE:RunCycles") + .desc("Number of cycles decode is running") + .prereq(decodeRunCycles); + decodeUnblockCycles + .name(name() + ".DECODE:UnblockCycles") + .desc("Number of cycles decode is unblocking") + .prereq(decodeUnblockCycles); + decodeSquashCycles + .name(name() + ".DECODE:SquashCycles") + .desc("Number of cycles decode is squashing") + .prereq(decodeSquashCycles); + decodeBranchResolved + .name(name() + ".DECODE:BranchResolved") + .desc("Number of times decode resolved a branch") + .prereq(decodeBranchResolved); + decodeBranchMispred + .name(name() + ".DECODE:BranchMispred") + .desc("Number of times decode detected a branch misprediction") + .prereq(decodeBranchMispred); + decodeControlMispred + .name(name() + ".DECODE:ControlMispred") + .desc("Number of times decode detected an instruction incorrectly" + " predicted as a control") + .prereq(decodeControlMispred); + decodeDecodedInsts + .name(name() + ".DECODE:DecodedInsts") + .desc("Number of instructions handled by decode") + .prereq(decodeDecodedInsts); + decodeSquashedInsts + .name(name() + ".DECODE:SquashedInsts") + .desc("Number of squashed instructions handled by decode") + .prereq(decodeSquashedInsts); +} + +template +void +DefaultDecode::setCPU(FullCPU *cpu_ptr) +{ + DPRINTF(Decode, "Setting CPU pointer.\n"); + cpu = cpu_ptr; +} + +template +void +DefaultDecode::setTimeBuffer(TimeBuffer *tb_ptr) +{ + DPRINTF(Decode, "Setting time buffer pointer.\n"); + timeBuffer = tb_ptr; + + // Setup wire to write information back to fetch. 
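++    // getWire(0) returns the time buffer entry being written this cycle;
++    // the negative indices used below read entries written that many
++    // cycles earlier, which is how the inter-stage delays are modeled.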
+    toFetch = timeBuffer->getWire(0);
+
+    // Create wires to get information from proper places in time buffer.
+    fromRename = timeBuffer->getWire(-renameToDecodeDelay);
+    fromIEW = timeBuffer->getWire(-iewToDecodeDelay);
+    fromCommit = timeBuffer->getWire(-commitToDecodeDelay);
+}
+
+template<class Impl>
+void
+DefaultDecode<Impl>::setDecodeQueue(TimeBuffer<DecodeStruct> *dq_ptr)
+{
+    DPRINTF(Decode, "Setting decode queue pointer.\n");
+    decodeQueue = dq_ptr;
+
+    // Setup wire to write information to proper place in decode queue.
+    toRename = decodeQueue->getWire(0);
+}
+
+template<class Impl>
+void
+DefaultDecode<Impl>::setFetchQueue(TimeBuffer<FetchStruct> *fq_ptr)
+{
+    DPRINTF(Decode, "Setting fetch queue pointer.\n");
+    fetchQueue = fq_ptr;
+
+    // Setup wire to read information from fetch queue.
+    fromFetch = fetchQueue->getWire(-fetchToDecodeDelay);
+}
+
+template<class Impl>
+void
+DefaultDecode<Impl>::setActiveThreads(list<unsigned> *at_ptr)
+{
+    DPRINTF(Decode, "Setting active threads list pointer.\n");
+    activeThreads = at_ptr;
+}
+
+template<class Impl>
+void
+DefaultDecode<Impl>::switchOut()
+{
++    // Decode can immediately switch out.
+    cpu->signalSwitched();
+}
+
+template<class Impl>
+void
+DefaultDecode<Impl>::takeOverFrom()
+{
+    _status = Inactive;
+
++    // Be sure to reset state and clear out any old instructions.
+    for (int i = 0; i < numThreads; ++i) {
+        decodeStatus[i] = Idle;
+
+        stalls[i].rename = false;
+        stalls[i].iew = false;
+        stalls[i].commit = false;
+        while (!insts[i].empty())
+            insts[i].pop();
+        while (!skidBuffer[i].empty())
+            skidBuffer[i].pop();
+        branchCount[i] = 0;
+    }
+    wroteToTimeBuffer = false;
+}
+
+template<class Impl>
+bool
+DefaultDecode<Impl>::checkStall(unsigned tid) const
+{
+    bool ret_val = false;
+
+    if (stalls[tid].rename) {
+        DPRINTF(Decode,"[tid:%i]: Stall from Rename stage detected.\n", tid);
+        ret_val = true;
+    } else if (stalls[tid].iew) {
+        DPRINTF(Decode,"[tid:%i]: Stall from IEW stage detected.\n", tid);
+        ret_val = true;
+    } else if (stalls[tid].commit) {
+        DPRINTF(Decode,"[tid:%i]: Stall from Commit stage detected.\n", tid);
+        ret_val = true;
+    }
+
+    return ret_val;
+}
+
+template<class Impl>
+inline bool
+DefaultDecode<Impl>::fetchInstsValid()
+{
+    return fromFetch->size > 0;
+}
+
+template<class Impl>
+bool
+DefaultDecode<Impl>::block(unsigned tid)
+{
+    DPRINTF(Decode, "[tid:%u]: Blocking.\n", tid);
+
-    // If the decode status is blocked or unblocking then decode has not yet
-    // signalled fetch to unblock. In that case, there is no need to tell
-    // fetch to block.
-    if (decodeStatus[tid] != Blocked &&
-        decodeStatus[tid] != Unblocking) {
-        toFetch->decodeBlock[tid] = true;
-        wroteToTimeBuffer = true;
-    }
-
+    // Add the current inputs to the skid buffer so they can be
+    // reprocessed when this stage unblocks.
+    skidInsert(tid);
+
++    // If the decode status is blocked or unblocking then decode has not yet
++    // signalled fetch to unblock. In that case, there is no need to tell
++    // fetch to block.
+    if (decodeStatus[tid] != Blocked) {
+        // Set the status to Blocked.
+        decodeStatus[tid] = Blocked;
++
++        if (decodeStatus[tid] != Unblocking) {
++            toFetch->decodeBlock[tid] = true;
++            wroteToTimeBuffer = true;
++        }
++
+        return true;
+    }
+
+    return false;
+}
+
+template<class Impl>
+bool
+DefaultDecode<Impl>::unblock(unsigned tid)
+{
+    // Decode is done unblocking only if the skid buffer is empty.
+    if (skidBuffer[tid].empty()) {
+        DPRINTF(Decode, "[tid:%u]: Done unblocking.\n", tid);
+        toFetch->decodeUnblock[tid] = true;
+        wroteToTimeBuffer = true;
+
+        decodeStatus[tid] = Running;
+        return true;
+    }
+
+    DPRINTF(Decode, "[tid:%u]: Currently unblocking.\n", tid);
+
+    return false;
+}
+
+template<class Impl>
+void
+DefaultDecode<Impl>::squash(DynInstPtr &inst, unsigned tid)
+{
+    DPRINTF(Decode, "[tid:%i]: Squashing due to incorrect branch prediction "
+            "detected at decode.\n", tid);
+
++    // Send back mispredict information.
+    toFetch->decodeInfo[tid].branchMispredict = true;
+    toFetch->decodeInfo[tid].doneSeqNum = inst->seqNum;
+    toFetch->decodeInfo[tid].predIncorrect = true;
+    toFetch->decodeInfo[tid].squash = true;
+    toFetch->decodeInfo[tid].nextPC = inst->readNextPC();
-    toFetch->decodeInfo[tid].branchTaken = true;
++    toFetch->decodeInfo[tid].branchTaken =
++        inst->readNextPC() != (inst->readPC() + sizeof(TheISA::MachInst));
+
++    // Might have to tell fetch to unblock.
+    if (decodeStatus[tid] == Blocked ||
+        decodeStatus[tid] == Unblocking) {
+        toFetch->decodeUnblock[tid] = 1;
+    }
+
+    // Set status to squashing.
+    decodeStatus[tid] = Squashing;
+
+    for (int i = 0; i < fromFetch->size; i++) {
+        if (fromFetch->insts[i]->threadNumber == tid &&
+            fromFetch->insts[i]->seqNum > inst->seqNum) {
+            fromFetch->insts[i]->squashed = true;
+        }
+    }
+
++    // Clear the instruction list and skid buffer in case they have any
++    // insts in them.
+    while (!insts[tid].empty()) {
+        insts[tid].pop();
+    }
+
-    // Clear the skid buffer in case it has any data in it.
+    while (!skidBuffer[tid].empty()) {
+        skidBuffer[tid].pop();
+    }
+
+    // Squash instructions up until this one
+    cpu->removeInstsUntil(inst->seqNum, tid);
+}
+
+template<class Impl>
+unsigned
+DefaultDecode<Impl>::squash(unsigned tid)
+{
+    DPRINTF(Decode, "[tid:%i]: Squashing.\n", tid);
+
+    if (decodeStatus[tid] == Blocked ||
+        decodeStatus[tid] == Unblocking) {
+#if !FULL_SYSTEM
+        // In syscall emulation, we can have both a block and a squash due
+        // to a syscall in the same cycle. This would cause both signals to
+        // be high. This shouldn't happen in full system.
+        // @todo: Determine if this still happens.
+        if (toFetch->decodeBlock[tid]) {
+            toFetch->decodeBlock[tid] = 0;
+        } else {
+            toFetch->decodeUnblock[tid] = 1;
+        }
+#else
+        toFetch->decodeUnblock[tid] = 1;
+#endif
+    }
+
+    // Set status to squashing.
+    decodeStatus[tid] = Squashing;
+
+    // Go through incoming instructions from fetch and squash them.
+    unsigned squash_count = 0;
+
+    for (int i = 0; i < fromFetch->size; i++) {
+        if (fromFetch->insts[i]->threadNumber == tid) {
+            fromFetch->insts[i]->squashed = true;
+            squash_count++;
+        }
+    }
+
++    // Clear the instruction list and skid buffer in case they have any
++    // insts in them.
+    while (!insts[tid].empty()) {
+        insts[tid].pop();
+    }
+
-    // Clear the skid buffer in case it has any data in it.
+    while (!skidBuffer[tid].empty()) {
+        skidBuffer[tid].pop();
+    }
+
+    return squash_count;
+}
+
+template<class Impl>
+void
+DefaultDecode<Impl>::skidInsert(unsigned tid)
+{
+    DynInstPtr inst = NULL;
+
+    while (!insts[tid].empty()) {
+        inst = insts[tid].front();
+
+        insts[tid].pop();
+
+        assert(tid == inst->threadNumber);
+
+        DPRINTF(Decode,"Inserting [sn:%lli] PC:%#x into decode skidBuffer %i\n",
+                inst->seqNum, inst->readPC(), inst->threadNumber);
+
+        skidBuffer[tid].push(inst);
+    }
+
+    // @todo: Eventually need to enforce this by not letting a thread
+    // fetch past its skidbuffer
+    assert(skidBuffer[tid].size() <= skidBufferMax);
+}
+
+template<class Impl>
+bool
+DefaultDecode<Impl>::skidsEmpty()
+{
+    list<unsigned>::iterator threads = (*activeThreads).begin();
+
+    while (threads != (*activeThreads).end()) {
+        if (!skidBuffer[*threads++].empty())
+            return false;
+    }
+
+    return true;
+}
+
+template<class Impl>
+void
+DefaultDecode<Impl>::updateStatus()
+{
+    bool any_unblocking = false;
+
+    list<unsigned>::iterator threads = (*activeThreads).begin();
+
+    while (threads != (*activeThreads).end()) {
+        unsigned tid = *threads++;
+
+        if (decodeStatus[tid] == Unblocking) {
+            any_unblocking = true;
+            break;
+        }
+    }
+
+    // Decode will have activity if it's unblocking.
+    if (any_unblocking) {
+        if (_status == Inactive) {
+            _status = Active;
+
+            DPRINTF(Activity, "Activating stage.\n");
+
+            cpu->activateStage(FullCPU::DecodeIdx);
+        }
+    } else {
+        // If it's not unblocking, then decode will not have any internal
+        // activity. Switch it to inactive.
+        if (_status == Active) {
+            _status = Inactive;
+            DPRINTF(Activity, "Deactivating stage.\n");
+
+            cpu->deactivateStage(FullCPU::DecodeIdx);
+        }
+    }
+}
+
+template<class Impl>
+void
+DefaultDecode<Impl>::sortInsts()
+{
+    int insts_from_fetch = fromFetch->size;
+#ifdef DEBUG
+    for (int i = 0; i < numThreads; i++)
+        assert(insts[i].empty());
+#endif
+    for (int i = 0; i < insts_from_fetch; ++i) {
+        insts[fromFetch->insts[i]->threadNumber].push(fromFetch->insts[i]);
+    }
+}
+
+template<class Impl>
+void
+DefaultDecode<Impl>::readStallSignals(unsigned tid)
+{
+    if (fromRename->renameBlock[tid]) {
+        stalls[tid].rename = true;
+    }
+
+    if (fromRename->renameUnblock[tid]) {
+        assert(stalls[tid].rename);
+        stalls[tid].rename = false;
+    }
+
+    if (fromIEW->iewBlock[tid]) {
+        stalls[tid].iew = true;
+    }
+
+    if (fromIEW->iewUnblock[tid]) {
+        assert(stalls[tid].iew);
+        stalls[tid].iew = false;
+    }
+
+    if (fromCommit->commitBlock[tid]) {
+        stalls[tid].commit = true;
+    }
+
+    if (fromCommit->commitUnblock[tid]) {
+        assert(stalls[tid].commit);
+        stalls[tid].commit = false;
+    }
+}
+
+template<class Impl>
+bool
+DefaultDecode<Impl>::checkSignalsAndUpdate(unsigned tid)
+{
+    // Check if there's a squash signal, squash if there is.
+    // Check stall signals, block if necessary.
+    // If status was blocked
+    //     Check if stall conditions have passed
+    //         if so then go to unblocking
+    // If status was Squashing
+    //     check if squashing is not high. Switch to running this cycle.
+
+    // Update the per thread stall statuses.
+    readStallSignals(tid);
+
+    // Check squash signals from commit.
+    if (fromCommit->commitInfo[tid].squash) {
+
+        DPRINTF(Decode, "[tid:%u]: Squashing instructions due to squash "
+                "from commit.\n", tid);
+
+        squash(tid);
+
+        return true;
+    }
+
+    // Check ROB squash signals from commit.
+    if (fromCommit->commitInfo[tid].robSquashing) {
+        DPRINTF(Decode, "[tid:%i]: ROB is still squashing.\n", tid);
+
+        // Continue to squash.
+ decodeStatus[tid] = Squashing; + + return true; + } + + if (checkStall(tid)) { + return block(tid); + } + + if (decodeStatus[tid] == Blocked) { + DPRINTF(Decode, "[tid:%u]: Done blocking, switching to unblocking.\n", + tid); + + decodeStatus[tid] = Unblocking; + + unblock(tid); + + return true; + } + + if (decodeStatus[tid] == Squashing) { + // Switch status to running if decode isn't being told to block or + // squash this cycle. + DPRINTF(Decode, "[tid:%u]: Done squashing, switching to running.\n", + tid); + + decodeStatus[tid] = Running; + + return false; + } + + // If we've reached this point, we have not gotten any signals that + // cause decode to change its status. Decode remains the same as before. + return false; +} + +template +void +DefaultDecode::tick() +{ + wroteToTimeBuffer = false; + + bool status_change = false; + + toRenameIndex = 0; + + list::iterator threads = (*activeThreads).begin(); + + sortInsts(); + + //Check stall and squash signals. + while (threads != (*activeThreads).end()) { + unsigned tid = *threads++; + + DPRINTF(Decode,"Processing [tid:%i]\n",tid); + status_change = checkSignalsAndUpdate(tid) || status_change; + + decode(status_change, tid); + } + + if (status_change) { + updateStatus(); + } + + if (wroteToTimeBuffer) { + DPRINTF(Activity, "Activity this cycle.\n"); + + cpu->activityThisCycle(); + } +} + +template +void +DefaultDecode::decode(bool &status_change, unsigned tid) +{ + // If status is Running or idle, + // call decodeInsts() + // If status is Unblocking, + // buffer any instructions coming from fetch + // continue trying to empty skid buffer + // check if stall conditions have passed + + if (decodeStatus[tid] == Blocked) { + ++decodeBlockedCycles; + } else if (decodeStatus[tid] == Squashing) { + ++decodeSquashCycles; + } + + // Decode should try to decode as many instructions as its bandwidth + // will allow, as long as it is not currently blocked. + if (decodeStatus[tid] == Running || + decodeStatus[tid] == Idle) { + DPRINTF(Decode, "[tid:%u] Not blocked, so attempting to run " + "stage.\n",tid); + + decodeInsts(tid); + } else if (decodeStatus[tid] == Unblocking) { + // Make sure that the skid buffer has something in it if the + // status is unblocking. + assert(!skidsEmpty()); + + // If the status was unblocking, then instructions from the skid + // buffer were used. Remove those instructions and handle + // the rest of unblocking. + decodeInsts(tid); + + if (fetchInstsValid()) { + // Add the current inputs to the skid buffer so they can be + // reprocessed when this stage unblocks. + skidInsert(tid); + } + + status_change = unblock(tid) || status_change; + } +} + +template +void +DefaultDecode::decodeInsts(unsigned tid) +{ + // Instructions can come either from the skid buffer or the list of + // instructions coming from fetch, depending on decode's status. + int insts_available = decodeStatus[tid] == Unblocking ? + skidBuffer[tid].size() : insts[tid].size(); + + if (insts_available == 0) { + DPRINTF(Decode, "[tid:%u] Nothing to do, breaking out" + " early.\n",tid); + // Should I change the status to idle? + ++decodeIdleCycles; + return; + } else if (decodeStatus[tid] == Unblocking) { + DPRINTF(Decode, "[tid:%u] Unblocking, removing insts from skid " + "buffer.\n",tid); + ++decodeUnblockCycles; + } else if (decodeStatus[tid] == Running) { + ++decodeRunCycles; + } + + DynInstPtr inst; + + std::queue + &insts_to_decode = decodeStatus[tid] == Unblocking ? 
+ skidBuffer[tid] : insts[tid]; + + DPRINTF(Decode, "[tid:%u]: Sending instruction to rename.\n",tid); + + while (insts_available > 0 && toRenameIndex < decodeWidth) { + assert(!insts_to_decode.empty()); + + inst = insts_to_decode.front(); + + insts_to_decode.pop(); + + DPRINTF(Decode, "[tid:%u]: Processing instruction [sn:%lli] with " + "PC %#x\n", + tid, inst->seqNum, inst->readPC()); + + if (inst->isSquashed()) { + DPRINTF(Decode, "[tid:%u]: Instruction %i with PC %#x is " + "squashed, skipping.\n", + tid, inst->seqNum, inst->readPC()); + + ++decodeSquashedInsts; + + --insts_available; + + continue; + } + + // Also check if instructions have no source registers. Mark + // them as ready to issue at any time. Not sure if this check + // should exist here or at a later stage; however it doesn't matter + // too much for function correctness. + if (inst->numSrcRegs() == 0) { + inst->setCanIssue(); + } + + // This current instruction is valid, so add it into the decode + // queue. The next instruction may not be valid, so check to + // see if branches were predicted correctly. + toRename->insts[toRenameIndex] = inst; + + ++(toRename->size); + ++toRenameIndex; + ++decodeDecodedInsts; + --insts_available; + + // Ensure that if it was predicted as a branch, it really is a + // branch. + if (inst->predTaken() && !inst->isControl()) { + panic("Instruction predicted as a branch!"); + + ++decodeControlMispred; + + // Might want to set some sort of boolean and just do + // a check at the end + squash(inst, inst->threadNumber); + + break; + } + + // Go ahead and compute any PC-relative branches. + if (inst->isDirectCtrl() && inst->isUncondCtrl()) { + ++decodeBranchResolved; + inst->setNextPC(inst->branchTarget()); + + if (inst->mispredicted()) { + ++decodeBranchMispred; + + // Might want to set some sort of boolean and just do + // a check at the end + squash(inst, inst->threadNumber); ++ inst->setPredTarg(inst->branchTarget()); + + break; + } + } + } + + // If we didn't process all instructions, then we will need to block + // and put all those instructions into the skid buffer. + if (!insts_to_decode.empty()) { + block(tid); + } + + // Record that decode has written to the time buffer for activity + // tracking. + if (toRenameIndex) { + wroteToTimeBuffer = true; + } +} diff --cc src/cpu/o3/dep_graph.hh index f8ae38da4,000000000..b6c5f1ab1 mode 100644,000000..100644 --- a/src/cpu/o3/dep_graph.hh +++ b/src/cpu/o3/dep_graph.hh @@@ -1,213 -1,0 +1,235 @@@ + +#ifndef __CPU_O3_DEP_GRAPH_HH__ +#define __CPU_O3_DEP_GRAPH_HH__ + +#include "cpu/o3/comm.hh" + ++/** Node in a linked list. */ +template +class DependencyEntry +{ + public: + DependencyEntry() + : inst(NULL), next(NULL) + { } + + DynInstPtr inst; + //Might want to include data about what arch. register the + //dependence is waiting on. + DependencyEntry *next; +}; + ++/** Array of linked list that maintains the dependencies between ++ * producing instructions and consuming instructions. Each linked ++ * list represents a single physical register, having the future ++ * producer of the register's value, and all consumers waiting on that ++ * value on the list. The head node of each linked list represents ++ * the producing instruction of that register. Instructions are put ++ * on the list upon reaching the IQ, and are removed from the list ++ * either when the producer completes, or the instruction is squashed. ++*/ +template +class DependencyGraph +{ + public: + typedef DependencyEntry DepEntry; + ++ /** Default construction. 
Must call resize() prior to use. */ + DependencyGraph() + : numEntries(0), memAllocCounter(0), nodesTraversed(0), nodesRemoved(0) + { } + ++ /** Resize the dependency graph to have num_entries registers. */ + void resize(int num_entries); + ++ /** Clears all of the linked lists. */ + void reset(); + ++ /** Inserts an instruction to be dependent on the given index. */ + void insert(PhysRegIndex idx, DynInstPtr &new_inst); + ++ /** Sets the producing instruction of a given register. */ + void setInst(PhysRegIndex idx, DynInstPtr &new_inst) + { dependGraph[idx].inst = new_inst; } + ++ /** Clears the producing instruction. */ + void clearInst(PhysRegIndex idx) + { dependGraph[idx].inst = NULL; } + ++ /** Removes an instruction from a single linked list. */ + void remove(PhysRegIndex idx, DynInstPtr &inst_to_remove); + ++ /** Removes and returns the newest dependent of a specific register. */ + DynInstPtr pop(PhysRegIndex idx); + ++ /** Checks if there are any dependents on a specific register. */ + bool empty(PhysRegIndex idx) { return !dependGraph[idx].next; } + + /** Debugging function to dump out the dependency graph. + */ + void dump(); + + private: + /** Array of linked lists. Each linked list is a list of all the + * instructions that depend upon a given register. The actual + * register's index is used to index into the graph; ie all + * instructions in flight that are dependent upon r34 will be + * in the linked list of dependGraph[34]. + */ + DepEntry *dependGraph; + ++ /** Number of linked lists; identical to the number of registers. */ + int numEntries; + + // Debug variable, remove when done testing. + unsigned memAllocCounter; + + public: ++ // Debug variable, remove when done testing. + uint64_t nodesTraversed; ++ // Debug variable, remove when done testing. + uint64_t nodesRemoved; +}; + +template +void +DependencyGraph::resize(int num_entries) +{ + numEntries = num_entries; + dependGraph = new DepEntry[numEntries]; +} + +template +void +DependencyGraph::reset() +{ + // Clear the dependency graph + DepEntry *curr; + DepEntry *prev; + + for (int i = 0; i < numEntries; ++i) { + curr = dependGraph[i].next; + + while (curr) { + memAllocCounter--; + + prev = curr; + curr = prev->next; + prev->inst = NULL; + + delete prev; + } + + if (dependGraph[i].inst) { + dependGraph[i].inst = NULL; + } + + dependGraph[i].next = NULL; + } +} + +template +void +DependencyGraph::insert(PhysRegIndex idx, DynInstPtr &new_inst) +{ + //Add this new, dependent instruction at the head of the dependency + //chain. + + // First create the entry that will be added to the head of the + // dependency chain. + DepEntry *new_entry = new DepEntry; + new_entry->next = dependGraph[idx].next; + new_entry->inst = new_inst; + + // Then actually add it to the chain. + dependGraph[idx].next = new_entry; + + ++memAllocCounter; +} + + +template +void +DependencyGraph::remove(PhysRegIndex idx, + DynInstPtr &inst_to_remove) +{ + DepEntry *prev = &dependGraph[idx]; + DepEntry *curr = dependGraph[idx].next; + + // Make sure curr isn't NULL. Because this instruction is being + // removed from a dependency list, it must have been placed there at + // an earlier time. The dependency chain should not be empty, + // unless the instruction dependent upon it is already ready. + if (curr == NULL) { + return; + } + + nodesRemoved++; + + // Find the instruction to remove within the dependency linked list. 
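++    // A linear walk suffices here: each list holds only the consumers of a
++    // single physical register, and the nodesTraversed counter below tracks
++    // how much scanning actually happens.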
+ while (curr->inst != inst_to_remove) { + prev = curr; + curr = curr->next; + nodesTraversed++; + + assert(curr != NULL); + } + + // Now remove this instruction from the list. + prev->next = curr->next; + + --memAllocCounter; + + // Could push this off to the destructor of DependencyEntry + curr->inst = NULL; + + delete curr; +} + +template +DynInstPtr +DependencyGraph::pop(PhysRegIndex idx) +{ + DepEntry *node; + node = dependGraph[idx].next; + DynInstPtr inst = NULL; + if (node) { + inst = node->inst; + dependGraph[idx].next = node->next; + node->inst = NULL; + memAllocCounter--; + delete node; + } + return inst; +} + +template +void +DependencyGraph::dump() +{ + DepEntry *curr; + + for (int i = 0; i < numEntries; ++i) + { + curr = &dependGraph[i]; + + if (curr->inst) { + cprintf("dependGraph[%i]: producer: %#x [sn:%lli] consumer: ", + i, curr->inst->readPC(), curr->inst->seqNum); + } else { + cprintf("dependGraph[%i]: No producer. consumer: ", i); + } + + while (curr->next != NULL) { + curr = curr->next; + + cprintf("%#x [sn:%lli] ", + curr->inst->readPC(), curr->inst->seqNum); + } + + cprintf("\n"); + } + cprintf("memAllocCounter: %i\n", memAllocCounter); +} + +#endif // __CPU_O3_DEP_GRAPH_HH__ diff --cc src/cpu/o3/fetch.hh index 3c4fc7d93,000000000..23328c534 mode 100644,000000..100644 --- a/src/cpu/o3/fetch.hh +++ b/src/cpu/o3/fetch.hh @@@ -1,422 -1,0 +1,439 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_FETCH_HH__ +#define __CPU_O3_FETCH_HH__ + +#include "arch/utility.hh" +#include "base/statistics.hh" +#include "base/timebuf.hh" +#include "cpu/pc_event.hh" +#include "mem/packet.hh" +#include "mem/port.hh" +#include "sim/eventq.hh" + +class Sampler; + +/** + * DefaultFetch class handles both single threaded and SMT fetch. Its + * width is specified by the parameters; each cycle it tries to fetch + * that many instructions. It supports using a branch predictor to + * predict direction and targets. 
- * It supports the idling functionalitiy of the CPU by indicating to ++ * It supports the idling functionality of the CPU by indicating to + * the CPU when it is active and inactive. + */ +template +class DefaultFetch +{ + public: + /** Typedefs from Impl. */ + typedef typename Impl::CPUPol CPUPol; + typedef typename Impl::DynInst DynInst; + typedef typename Impl::DynInstPtr DynInstPtr; + typedef typename Impl::FullCPU FullCPU; + typedef typename Impl::Params Params; + + /** Typedefs from the CPU policy. */ + typedef typename CPUPol::BPredUnit BPredUnit; + typedef typename CPUPol::FetchStruct FetchStruct; + typedef typename CPUPol::TimeStruct TimeStruct; + + /** Typedefs from ISA. */ + typedef TheISA::MachInst MachInst; + typedef TheISA::ExtMachInst ExtMachInst; + + class IcachePort : public Port + { + protected: + DefaultFetch *fetch; + + public: + IcachePort(DefaultFetch *_fetch) + : Port(_fetch->name() + "-iport"), fetch(_fetch) + { } + + protected: + virtual Tick recvAtomic(PacketPtr pkt); + + virtual void recvFunctional(PacketPtr pkt); + + virtual void recvStatusChange(Status status); + + virtual void getDeviceAddressRanges(AddrRangeList &resp, + AddrRangeList &snoop) + { resp.clear(); snoop.clear(); } + + virtual bool recvTiming(PacketPtr pkt); + + virtual void recvRetry(); + }; + + public: + /** Overall fetch status. Used to determine if the CPU can + * deschedule itsef due to a lack of activity. + */ + enum FetchStatus { + Active, + Inactive + }; + + /** Individual thread status. */ + enum ThreadStatus { + Running, + Idle, + Squashing, + Blocked, + Fetching, + TrapPending, + QuiescePending, + SwitchOut, + IcacheWaitResponse, + IcacheRetry, + IcacheAccessComplete + }; + + /** Fetching Policy, Add new policies here.*/ + enum FetchPriority { + SingleThread, + RoundRobin, + Branch, + IQ, + LSQ + }; + + private: + /** Fetch status. */ + FetchStatus _status; + + /** Per-thread status. */ + ThreadStatus fetchStatus[Impl::MaxThreads]; + + /** Fetch policy. */ + FetchPriority fetchPolicy; + + /** List that has the threads organized by priority. */ + std::list priorityList; + + public: + /** DefaultFetch constructor. */ + DefaultFetch(Params *params); + + /** Returns the name of fetch. */ + std::string name() const; + + /** Registers statistics. */ + void regStats(); + + /** Sets CPU pointer. */ + void setCPU(FullCPU *cpu_ptr); + + /** Sets the main backwards communication time buffer pointer. */ + void setTimeBuffer(TimeBuffer *time_buffer); + + /** Sets pointer to list of active threads. */ + void setActiveThreads(std::list *at_ptr); + + /** Sets pointer to time buffer used to communicate to the next stage. */ + void setFetchQueue(TimeBuffer *fq_ptr); + + /** Sets pointer to page table. */ +// void setPageTable(PageTable *pt_ptr); + + /** Initialize stage. */ + void initStage(); + + /** Processes cache completion event. */ + void processCacheCompletion(PacketPtr pkt); + ++ /** Begins the switch out of the fetch stage. */ + void switchOut(); + ++ /** Completes the switch out of the fetch stage. */ + void doSwitchOut(); + ++ /** Takes over from another CPU's thread. */ + void takeOverFrom(); + ++ /** Checks if the fetch stage is switched out. */ + bool isSwitchedOut() { return switchedOut; } + ++ /** Tells fetch to wake up from a quiesce instruction. */ + void wakeFromQuiesce(); + + private: + /** Changes the status of this stage to active, and indicates this + * to the CPU. 
+     */
+    inline void switchToActive();
+
+    /** Changes the status of this stage to inactive, and indicates
+     * this to the CPU.
+     */
+    inline void switchToInactive();
+
+    /**
+     * Looks up in the branch predictor to see if the next PC should be
+     * either next PC+=MachInst or a branch target.
+     * @param next_PC Next PC variable passed in by reference. It is
+     * expected to be set to the current PC; it will be updated with what
+     * the next PC will be.
+     * @return Whether or not a branch was predicted as taken.
+     */
+    bool lookupAndUpdateNextPC(DynInstPtr &inst, Addr &next_PC);
+
+    /**
+     * Fetches the cache line that contains fetch_PC. Returns any
+     * fault that happened. Puts the data into the class variable
+     * cacheData.
+     * @param fetch_PC The PC address that is being fetched from.
+     * @param ret_fault The fault reference that will be set to the result of
+     * the icache access.
+     * @param tid Thread id.
+     * @return Any fault that occurred.
+     */
+    bool fetchCacheLine(Addr fetch_PC, Fault &ret_fault, unsigned tid);
+
+    /** Squashes a specific thread and resets the PC. */
+    inline void doSquash(const Addr &new_PC, unsigned tid);
+
+    /** Squashes a specific thread and resets the PC. Also tells the CPU to
+     * remove any instructions between fetch and decode that should be
+     * squashed.
+     */
+    void squashFromDecode(const Addr &new_PC, const InstSeqNum &seq_num,
+                          unsigned tid);
+
+    /** Checks if a thread is stalled. */
+    bool checkStall(unsigned tid) const;
+
+    /** Updates overall fetch stage status; to be called at the end of each
+     * cycle. */
+    FetchStatus updateFetchStatus();
+
+  public:
+    /** Squashes a specific thread and resets the PC. Also tells the CPU to
+     * remove any instructions that are not in the ROB. The source of this
+     * squash should be the commit stage.
+     */
+    void squash(const Addr &new_PC, unsigned tid);
+
+    /** Ticks the fetch stage, processing all input signals and fetching
+     * as many instructions as possible.
+     */
+    void tick();
+
+    /** Checks all input signals and updates the status as necessary.
+     * @return: Returns if the status has changed due to input signals.
+     */
+    bool checkSignalsAndUpdate(unsigned tid);
+
+    /** Does the actual fetching of instructions and passing them on to the
+     * next stage.
+     * @param status_change fetch() sets this variable if there was a status
+     * change (ie switching to IcacheMissStall).
+     */
+    void fetch(bool &status_change);
+
+    /** Align a PC to the start of an I-cache block. */
+    Addr icacheBlockAlignPC(Addr addr)
+    {
+        addr = TheISA::realPCToFetchPC(addr);
+        return (addr & ~(cacheBlkMask));
+    }
+
+  private:
+    /** Returns the appropriate thread to fetch, given the fetch policy. */
+    int getFetchingThread(FetchPriority &fetch_priority);
+
+    /** Returns the appropriate thread to fetch using a round robin policy. */
+    int roundRobin();
+
+    /** Returns the appropriate thread to fetch using the IQ count policy. */
+    int iqCount();
+
+    /** Returns the appropriate thread to fetch using the LSQ count policy. */
+    int lsqCount();
+
+    /** Returns the appropriate thread to fetch using the branch count
+     * policy. */
+    int branchCount();
+
+  private:
+    /** Pointer to the FullCPU. */
+    FullCPU *cpu;
+
+    /** Time buffer interface. */
+    TimeBuffer<TimeStruct> *timeBuffer;
+
+    /** Wire to get decode's information from backwards time buffer. */
+    typename TimeBuffer<TimeStruct>::wire fromDecode;
+
+    /** Wire to get rename's information from backwards time buffer. */
+    typename TimeBuffer<TimeStruct>::wire fromRename;
+
+    /** Wire to get iew's information from backwards time buffer.
*/ + typename TimeBuffer::wire fromIEW; + + /** Wire to get commit's information from backwards time buffer. */ + typename TimeBuffer::wire fromCommit; + + /** Internal fetch instruction queue. */ + TimeBuffer *fetchQueue; + + //Might be annoying how this name is different than the queue. + /** Wire used to write any information heading to decode. */ + typename TimeBuffer::wire toDecode; + + MemObject *mem; + + /** Icache interface. */ + IcachePort *icachePort; + + /** BPredUnit. */ + BPredUnit branchPred; + ++ /** Per-thread fetch PC. */ + Addr PC[Impl::MaxThreads]; + ++ /** Per-thread next PC. */ + Addr nextPC[Impl::MaxThreads]; + + /** Memory packet used to access cache. */ + PacketPtr memPkt[Impl::MaxThreads]; + + /** Variable that tracks if fetch has written to the time buffer this + * cycle. Used to tell CPU if there is activity this cycle. + */ + bool wroteToTimeBuffer; + + /** Tracks how many instructions has been fetched this cycle. */ + int numInst; + + /** Source of possible stalls. */ + struct Stalls { + bool decode; + bool rename; + bool iew; + bool commit; + }; + + /** Tracks which stages are telling fetch to stall. */ + Stalls stalls[Impl::MaxThreads]; + + /** Decode to fetch delay, in ticks. */ + unsigned decodeToFetchDelay; + + /** Rename to fetch delay, in ticks. */ + unsigned renameToFetchDelay; + + /** IEW to fetch delay, in ticks. */ + unsigned iewToFetchDelay; + + /** Commit to fetch delay, in ticks. */ + unsigned commitToFetchDelay; + + /** The width of fetch in instructions. */ + unsigned fetchWidth; + + /** Cache block size. */ + int cacheBlkSize; + + /** Mask to get a cache block's address. */ + Addr cacheBlkMask; + + /** The cache line being fetched. */ + uint8_t *cacheData[Impl::MaxThreads]; + + /** Size of instructions. */ + int instSize; + + /** Icache stall statistics. */ + Counter lastIcacheStall[Impl::MaxThreads]; + + /** List of Active Threads */ + std::list *activeThreads; + + /** Number of threads. */ + unsigned numThreads; + + /** Number of threads that are actively fetching. */ + unsigned numFetchingThreads; + + /** Thread ID being fetched. */ + int threadFetched; + ++ /** Checks if there is an interrupt pending. If there is, fetch ++ * must stop once it is not fetching PAL instructions. ++ */ + bool interruptPending; + ++ /** Records if fetch is switched out. */ + bool switchedOut; + +#if !FULL_SYSTEM + /** Page table pointer. */ +// PageTable *pTable; +#endif + + // @todo: Consider making these vectors and tracking on a per thread basis. + /** Stat for total number of cycles stalled due to an icache miss. */ + Stats::Scalar<> icacheStallCycles; + /** Stat for total number of fetched instructions. */ + Stats::Scalar<> fetchedInsts; + Stats::Scalar<> fetchedBranches; + /** Stat for total number of predicted branches. */ + Stats::Scalar<> predictedBranches; + /** Stat for total number of cycles spent fetching. */ + Stats::Scalar<> fetchCycles; + /** Stat for total number of cycles spent squashing. */ + Stats::Scalar<> fetchSquashCycles; + /** Stat for total number of cycles spent blocked due to other stages in + * the pipeline. + */ + Stats::Scalar<> fetchIdleCycles; ++ /** Total number of cycles spent blocked. */ + Stats::Scalar<> fetchBlockedCycles; - ++ /** Total number of cycles spent in any other state. */ + Stats::Scalar<> fetchMiscStallCycles; + /** Stat for total number of fetched cache lines. */ + Stats::Scalar<> fetchedCacheLines; - ++ /** Total number of outstanding icache accesses that were dropped ++ * due to a squash. 
++ */ + Stats::Scalar<> fetchIcacheSquashes; + /** Distribution of number of instructions fetched each cycle. */ + Stats::Distribution<> fetchNisnDist; ++ /** Rate of how often fetch was idle. */ + Stats::Formula idleRate; ++ /** Number of branch fetches per cycle. */ + Stats::Formula branchRate; ++ /** Number of instruction fetched per cycle. */ + Stats::Formula fetchRate; +}; + +#endif //__CPU_O3_FETCH_HH__ diff --cc src/cpu/o3/fetch_impl.hh index 5d3164dbf,000000000..69c43a6a2 mode 100644,000000..100644 --- a/src/cpu/o3/fetch_impl.hh +++ b/src/cpu/o3/fetch_impl.hh @@@ -1,1233 -1,0 +1,1235 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Authors: Kevin Lim + */ + +#include "arch/isa_traits.hh" +#include "arch/utility.hh" +#include "cpu/exetrace.hh" +#include "cpu/o3/fetch.hh" +#include "mem/packet.hh" +#include "mem/request.hh" +#include "sim/byteswap.hh" +#include "sim/host.hh" +#include "sim/root.hh" + +#if FULL_SYSTEM +#include "arch/tlb.hh" +#include "arch/vtophys.hh" +#include "base/remote_gdb.hh" +#include "mem/functional/memory_control.hh" +#include "mem/functional/physical.hh" +#include "sim/system.hh" +#endif // FULL_SYSTEM + +#include + +using namespace std; +using namespace TheISA; + +template +Tick +DefaultFetch::IcachePort::recvAtomic(PacketPtr pkt) +{ + panic("DefaultFetch doesn't expect recvAtomic callback!"); + return curTick; +} + +template +void +DefaultFetch::IcachePort::recvFunctional(PacketPtr pkt) +{ + panic("DefaultFetch doesn't expect recvFunctional callback!"); +} + +template +void +DefaultFetch::IcachePort::recvStatusChange(Status status) +{ + if (status == RangeChange) + return; + + panic("DefaultFetch doesn't expect recvStatusChange callback!"); +} + +template +bool +DefaultFetch::IcachePort::recvTiming(Packet *pkt) +{ + fetch->processCacheCompletion(pkt); + return true; +} + +template +void +DefaultFetch::IcachePort::recvRetry() +{ + panic("DefaultFetch doesn't support retry yet."); + // we shouldn't get a retry unless we have a packet that we're + // waiting to transmit +/* + assert(cpu->dcache_pkt != NULL); + assert(cpu->_status == DcacheRetry); + Packet *tmp = cpu->dcache_pkt; + if (sendTiming(tmp)) { + cpu->_status = DcacheWaitResponse; + cpu->dcache_pkt = NULL; + } +*/ +} + +template +DefaultFetch::DefaultFetch(Params *params) + : branchPred(params), + decodeToFetchDelay(params->decodeToFetchDelay), + renameToFetchDelay(params->renameToFetchDelay), + iewToFetchDelay(params->iewToFetchDelay), + commitToFetchDelay(params->commitToFetchDelay), + fetchWidth(params->fetchWidth), + numThreads(params->numberOfThreads), + numFetchingThreads(params->smtNumFetchingThreads), + interruptPending(false) +{ + if (numThreads > Impl::MaxThreads) + fatal("numThreads is not a valid value\n"); + + DPRINTF(Fetch, "Fetch constructor called\n"); + + // Set fetch stage's status to inactive. + _status = Inactive; + + string policy = params->smtFetchPolicy; + + // Convert string to lowercase + std::transform(policy.begin(), policy.end(), policy.begin(), + (int(*)(int)) tolower); + + // Figure out fetch policy + if (policy == "singlethread") { + fetchPolicy = SingleThread; + } else if (policy == "roundrobin") { + fetchPolicy = RoundRobin; + DPRINTF(Fetch, "Fetch policy set to Round Robin\n"); + } else if (policy == "branch") { + fetchPolicy = Branch; + DPRINTF(Fetch, "Fetch policy set to Branch Count\n"); + } else if (policy == "iqcount") { + fetchPolicy = IQ; + DPRINTF(Fetch, "Fetch policy set to IQ count\n"); + } else if (policy == "lsqcount") { + fetchPolicy = LSQ; + DPRINTF(Fetch, "Fetch policy set to LSQ count\n"); + } else { + fatal("Invalid Fetch Policy. Options Are: {SingleThread," + " RoundRobin,LSQcount,IQcount}\n"); + } + + // Size of cache block. + cacheBlkSize = 64; + + // Create mask to get rid of offset bits. + cacheBlkMask = (cacheBlkSize - 1); + + for (int tid=0; tid < numThreads; tid++) { + + fetchStatus[tid] = Running; + + priorityList.push_back(tid); + + memPkt[tid] = NULL; + + // Create space to store a cache line. 
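++        // (Illustration: with cacheBlkSize = 64, cacheBlkMask = 0x3f, so a
++        // fetch PC of 0x1234 aligns to 0x1234 & ~0x3f = 0x1200; every PC in
++        // that block is served out of this one line buffer.)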
+ cacheData[tid] = new uint8_t[cacheBlkSize]; + + stalls[tid].decode = 0; + stalls[tid].rename = 0; + stalls[tid].iew = 0; + stalls[tid].commit = 0; + } + + // Get the size of an instruction. + instSize = sizeof(MachInst); +} + +template +std::string +DefaultFetch::name() const +{ + return cpu->name() + ".fetch"; +} + +template +void +DefaultFetch::regStats() +{ + icacheStallCycles - .name(name() + ".FETCH:icacheStallCycles") ++ .name(name() + ".icacheStallCycles") + .desc("Number of cycles fetch is stalled on an Icache miss") + .prereq(icacheStallCycles); + + fetchedInsts - .name(name() + ".FETCH:Insts") ++ .name(name() + ".Insts") + .desc("Number of instructions fetch has processed") + .prereq(fetchedInsts); + + fetchedBranches - .name(name() + ".FETCH:Branches") ++ .name(name() + ".Branches") + .desc("Number of branches that fetch encountered") + .prereq(fetchedBranches); + + predictedBranches - .name(name() + ".FETCH:predictedBranches") ++ .name(name() + ".predictedBranches") + .desc("Number of branches that fetch has predicted taken") + .prereq(predictedBranches); + + fetchCycles - .name(name() + ".FETCH:Cycles") ++ .name(name() + ".Cycles") + .desc("Number of cycles fetch has run and was not squashing or" + " blocked") + .prereq(fetchCycles); + + fetchSquashCycles - .name(name() + ".FETCH:SquashCycles") ++ .name(name() + ".SquashCycles") + .desc("Number of cycles fetch has spent squashing") + .prereq(fetchSquashCycles); + + fetchIdleCycles - .name(name() + ".FETCH:IdleCycles") ++ .name(name() + ".IdleCycles") + .desc("Number of cycles fetch was idle") + .prereq(fetchIdleCycles); + + fetchBlockedCycles - .name(name() + ".FETCH:BlockedCycles") ++ .name(name() + ".BlockedCycles") + .desc("Number of cycles fetch has spent blocked") + .prereq(fetchBlockedCycles); + + fetchedCacheLines - .name(name() + ".FETCH:CacheLines") ++ .name(name() + ".CacheLines") + .desc("Number of cache lines fetched") + .prereq(fetchedCacheLines); + + fetchMiscStallCycles - .name(name() + ".FETCH:MiscStallCycles") ++ .name(name() + ".MiscStallCycles") + .desc("Number of cycles fetch has spent waiting on interrupts, or " + "bad addresses, or out of MSHRs") + .prereq(fetchMiscStallCycles); + + fetchIcacheSquashes - .name(name() + ".FETCH:IcacheSquashes") ++ .name(name() + ".IcacheSquashes") + .desc("Number of outstanding Icache misses that were squashed") + .prereq(fetchIcacheSquashes); + + fetchNisnDist + .init(/* base value */ 0, + /* last value */ fetchWidth, + /* bucket size */ 1) - .name(name() + ".FETCH:rateDist") ++ .name(name() + ".rateDist") + .desc("Number of instructions fetched each cycle (Total)") + .flags(Stats::pdf); + + idleRate - .name(name() + ".FETCH:idleRate") ++ .name(name() + ".idleRate") + .desc("Percent of cycles fetch was idle") + .prereq(idleRate); + idleRate = fetchIdleCycles * 100 / cpu->numCycles; + + branchRate - .name(name() + ".FETCH:branchRate") ++ .name(name() + ".branchRate") + .desc("Number of branch fetches per cycle") + .flags(Stats::total); - branchRate = predictedBranches / cpu->numCycles; ++ branchRate = fetchedBranches / cpu->numCycles; + + fetchRate - .name(name() + ".FETCH:rate") ++ .name(name() + ".rate") + .desc("Number of inst fetches per cycle") + .flags(Stats::total); + fetchRate = fetchedInsts / cpu->numCycles; + + branchPred.regStats(); +} + +template +void +DefaultFetch::setCPU(FullCPU *cpu_ptr) +{ + DPRINTF(Fetch, "Setting the CPU pointer.\n"); + cpu = cpu_ptr; + + // Name is finally available, so create the port. 
+ icachePort = new IcachePort(this); + + // Fetch needs to start fetching instructions at the very beginning, + // so it must start up in active state. + switchToActive(); +} + +template +void +DefaultFetch::setTimeBuffer(TimeBuffer *time_buffer) +{ + DPRINTF(Fetch, "Setting the time buffer pointer.\n"); + timeBuffer = time_buffer; + + // Create wires to get information from proper places in time buffer. + fromDecode = timeBuffer->getWire(-decodeToFetchDelay); + fromRename = timeBuffer->getWire(-renameToFetchDelay); + fromIEW = timeBuffer->getWire(-iewToFetchDelay); + fromCommit = timeBuffer->getWire(-commitToFetchDelay); +} + +template +void +DefaultFetch::setActiveThreads(list *at_ptr) +{ + DPRINTF(Fetch, "Setting active threads list pointer.\n"); + activeThreads = at_ptr; +} + +template +void +DefaultFetch::setFetchQueue(TimeBuffer *fq_ptr) +{ + DPRINTF(Fetch, "Setting the fetch queue pointer.\n"); + fetchQueue = fq_ptr; + + // Create wire to write information to proper place in fetch queue. + toDecode = fetchQueue->getWire(0); +} + +#if 0 +template +void +DefaultFetch::setPageTable(PageTable *pt_ptr) +{ + DPRINTF(Fetch, "Setting the page table pointer.\n"); +#if !FULL_SYSTEM + pTable = pt_ptr; +#endif +} +#endif + +template +void +DefaultFetch::initStage() +{ ++ // Setup PC and nextPC with initial state. + for (int tid = 0; tid < numThreads; tid++) { + PC[tid] = cpu->readPC(tid); + nextPC[tid] = cpu->readNextPC(tid); + } +} + +template +void +DefaultFetch::processCacheCompletion(PacketPtr pkt) +{ + unsigned tid = pkt->req->getThreadNum(); + + DPRINTF(Fetch, "[tid:%u] Waking up from cache miss.\n",tid); + + // Only change the status if it's still waiting on the icache access + // to return. - // Can keep track of how many cache accesses go unused due to - // misspeculation here. + if (fetchStatus[tid] != IcacheWaitResponse || + pkt != memPkt[tid] || + isSwitchedOut()) { + ++fetchIcacheSquashes; + delete pkt; + return; + } + + // Wake up the CPU (if it went to sleep and was waiting on this completion + // event). + cpu->wakeCPU(); + + DPRINTF(Activity, "[tid:%u] Activating fetch due to cache completion\n", + tid); + + switchToActive(); + + // Only switch to IcacheAccessComplete if we're not stalled as well. + if (checkStall(tid)) { + fetchStatus[tid] = Blocked; + } else { + fetchStatus[tid] = IcacheAccessComplete; + } + +// memcpy(cacheData[tid], memReq[tid]->data, memReq[tid]->size); + + // Reset the mem req to NULL. + delete pkt->req; + delete pkt; + memPkt[tid] = NULL; +} + +template +void +DefaultFetch::switchOut() +{ ++ // Fetch is ready to switch out at any time. + switchedOut = true; + cpu->signalSwitched(); +} + +template +void +DefaultFetch::doSwitchOut() +{ ++ // Branch predictor needs to have its state cleared. + branchPred.switchOut(); +} + +template +void +DefaultFetch::takeOverFrom() +{ + // Reset all state + for (int i = 0; i < Impl::MaxThreads; ++i) { + stalls[i].decode = 0; + stalls[i].rename = 0; + stalls[i].iew = 0; + stalls[i].commit = 0; + PC[i] = cpu->readPC(i); + nextPC[i] = cpu->readNextPC(i); + fetchStatus[i] = Running; + } + numInst = 0; + wroteToTimeBuffer = false; + _status = Inactive; + switchedOut = false; + branchPred.takeOverFrom(); +} + +template +void +DefaultFetch::wakeFromQuiesce() +{ + DPRINTF(Fetch, "Waking up from quiesce\n"); + // Hopefully this is safe ++ // @todo: Allow other threads to wake from quiesce. 
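+    // (Note: only thread 0 is woken here; per the todo above, an SMT
+    //  build would need to record which thread actually quiesced.)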
+ fetchStatus[0] = Running; +} + +template +inline void +DefaultFetch::switchToActive() +{ + if (_status == Inactive) { + DPRINTF(Activity, "Activating stage.\n"); + + cpu->activateStage(FullCPU::FetchIdx); + + _status = Active; + } +} + +template +inline void +DefaultFetch::switchToInactive() +{ + if (_status == Active) { + DPRINTF(Activity, "Deactivating stage.\n"); + + cpu->deactivateStage(FullCPU::FetchIdx); + + _status = Inactive; + } +} + +template +bool +DefaultFetch::lookupAndUpdateNextPC(DynInstPtr &inst, Addr &next_PC) +{ + // Do branch prediction check here. + // A bit of a misnomer...next_PC is actually the current PC until + // this function updates it. + bool predict_taken; + + if (!inst->isControl()) { + next_PC = next_PC + instSize; + inst->setPredTarg(next_PC); + return false; + } + + predict_taken = branchPred.predict(inst, next_PC, inst->threadNumber); + + ++fetchedBranches; + + if (predict_taken) { + ++predictedBranches; + } + + return predict_taken; +} + +template +bool +DefaultFetch::fetchCacheLine(Addr fetch_PC, Fault &ret_fault, unsigned tid) +{ + Fault fault = NoFault; + +#if FULL_SYSTEM + // Flag to say whether or not address is physical addr. + unsigned flags = cpu->inPalMode(fetch_PC) ? PHYSICAL : 0; +#else + unsigned flags = 0; +#endif // FULL_SYSTEM + + if (interruptPending && flags == 0 || switchedOut) { + // Hold off fetch from getting new instructions while an interrupt + // is pending. + return false; + } + + // Align the fetch PC so it's at the start of a cache block. + fetch_PC = icacheBlockAlignPC(fetch_PC); + + // Setup the memReq to do a read of the first instruction's address. + // Set the appropriate read size and flags as well. + // Build request here. + RequestPtr mem_req = new Request(tid, fetch_PC, cacheBlkSize, flags, + fetch_PC, cpu->readCpuId(), tid); + + memPkt[tid] = NULL; + + // Translate the instruction request. +//#if FULL_SYSTEM + fault = cpu->translateInstReq(mem_req); +//#else +// fault = pTable->translate(memReq[tid]); +//#endif + + // In the case of faults, the fetch stage may need to stall and wait + // for the ITB miss to be handled. + + // If translation was successful, attempt to read the first + // instruction. + if (fault == NoFault) { +#if FULL_SYSTEM + if (cpu->system->memctrl->badaddr(memReq[tid]->paddr) || + memReq[tid]->flags & UNCACHEABLE) { + DPRINTF(Fetch, "Fetch: Bad address %#x (hopefully on a " + "misspeculating path)!", + memReq[tid]->paddr); + ret_fault = TheISA::genMachineCheckFault(); + return false; + } +#endif + + // Build packet here. + PacketPtr data_pkt = new Packet(mem_req, + Packet::ReadReq, Packet::Broadcast); + data_pkt->dataStatic(cacheData[tid]); + + DPRINTF(Fetch, "Fetch: Doing instruction read.\n"); + + fetchedCacheLines++; + + // Now do the timing access to see whether or not the instruction + // exists within the cache. + if (!icachePort->sendTiming(data_pkt)) { + DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid); + ret_fault = NoFault; + return false; + } + + DPRINTF(Fetch, "Doing cache access.\n"); + + lastIcacheStall[tid] = curTick; + + DPRINTF(Activity, "[tid:%i]: Activity: Waiting on I-cache " + "response.\n", tid); + + fetchStatus[tid] = IcacheWaitResponse; + } + + ret_fault = fault; + return true; +} + +template +inline void +DefaultFetch::doSquash(const Addr &new_PC, unsigned tid) +{ + DPRINTF(Fetch, "[tid:%i]: Squashing, setting PC to: %#x.\n", + tid, new_PC); + + PC[tid] = new_PC; + nextPC[tid] = new_PC + instSize; + + // Clear the icache miss if it's outstanding. 
+ if (fetchStatus[tid] == IcacheWaitResponse) { + DPRINTF(Fetch, "[tid:%i]: Squashing outstanding Icache miss.\n", + tid); + delete memPkt[tid]; + memPkt[tid] = NULL; + } + + fetchStatus[tid] = Squashing; + + ++fetchSquashCycles; +} + +template +void +DefaultFetch::squashFromDecode(const Addr &new_PC, + const InstSeqNum &seq_num, + unsigned tid) +{ + DPRINTF(Fetch, "[tid:%i]: Squashing from decode.\n",tid); + + doSquash(new_PC, tid); + + // Tell the CPU to remove any instructions that are in flight between + // fetch and decode. + cpu->removeInstsUntil(seq_num, tid); +} + +template +bool +DefaultFetch::checkStall(unsigned tid) const +{ + bool ret_val = false; + + if (cpu->contextSwitch) { + DPRINTF(Fetch,"[tid:%i]: Stalling for a context switch.\n",tid); + ret_val = true; + } else if (stalls[tid].decode) { + DPRINTF(Fetch,"[tid:%i]: Stall from Decode stage detected.\n",tid); + ret_val = true; + } else if (stalls[tid].rename) { + DPRINTF(Fetch,"[tid:%i]: Stall from Rename stage detected.\n",tid); + ret_val = true; + } else if (stalls[tid].iew) { + DPRINTF(Fetch,"[tid:%i]: Stall from IEW stage detected.\n",tid); + ret_val = true; + } else if (stalls[tid].commit) { + DPRINTF(Fetch,"[tid:%i]: Stall from Commit stage detected.\n",tid); + ret_val = true; + } + + return ret_val; +} + +template +typename DefaultFetch::FetchStatus +DefaultFetch::updateFetchStatus() +{ + //Check Running + list::iterator threads = (*activeThreads).begin(); + + while (threads != (*activeThreads).end()) { + + unsigned tid = *threads++; + + if (fetchStatus[tid] == Running || + fetchStatus[tid] == Squashing || + fetchStatus[tid] == IcacheAccessComplete) { + + if (_status == Inactive) { + DPRINTF(Activity, "[tid:%i]: Activating stage.\n",tid); + + if (fetchStatus[tid] == IcacheAccessComplete) { + DPRINTF(Activity, "[tid:%i]: Activating fetch due to cache" + "completion\n",tid); + } + + cpu->activateStage(FullCPU::FetchIdx); + } + + return Active; + } + } + + // Stage is switching from active to inactive, notify CPU of it. + if (_status == Active) { + DPRINTF(Activity, "Deactivating stage.\n"); + + cpu->deactivateStage(FullCPU::FetchIdx); + } + + return Inactive; +} + +template +void +DefaultFetch::squash(const Addr &new_PC, unsigned tid) +{ + DPRINTF(Fetch, "[tid:%u]: Squash from commit.\n",tid); + + doSquash(new_PC, tid); + + // Tell the CPU to remove any instructions that are not in the ROB. + cpu->removeInstsNotInROB(tid); +} + +template +void +DefaultFetch::tick() +{ + list::iterator threads = (*activeThreads).begin(); + bool status_change = false; + + wroteToTimeBuffer = false; + + while (threads != (*activeThreads).end()) { + unsigned tid = *threads++; + + // Check the signals for each thread to determine the proper status + // for each thread. + bool updated_status = checkSignalsAndUpdate(tid); + status_change = status_change || updated_status; + } + + DPRINTF(Fetch, "Running stage.\n"); + + // Reset the number of the instruction we're fetching. + numInst = 0; + + if (fromCommit->commitInfo[0].interruptPending) { + interruptPending = true; + } + if (fromCommit->commitInfo[0].clearInterrupt) { + interruptPending = false; + } + + for (threadFetched = 0; threadFetched < numFetchingThreads; + threadFetched++) { + // Fetch each of the actively fetching threads. + fetch(status_change); + } + + // Record number of instructions fetched this cycle for distribution. + fetchNisnDist.sample(numInst); + + if (status_change) { + // Change the fetch stage status if there was a status change. 
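+        // (updateFetchStatus() keeps the stage Active only while some
+        //  thread is Running, Squashing, or has a completed I-cache
+        //  access; otherwise fetch deschedules and waits to be
+        //  reactivated by a cache completion or squash.)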
+        _status = updateFetchStatus();
+    }
+
+    // If there was activity this cycle, inform the CPU of it.
+    if (wroteToTimeBuffer || cpu->contextSwitch) {
+        DPRINTF(Activity, "Activity this cycle.\n");
+
+        cpu->activityThisCycle();
+    }
+}
+
+template <class Impl>
+bool
+DefaultFetch<Impl>::checkSignalsAndUpdate(unsigned tid)
+{
+    // Update the per thread stall statuses.
+    if (fromDecode->decodeBlock[tid]) {
+        stalls[tid].decode = true;
+    }
+
+    if (fromDecode->decodeUnblock[tid]) {
+        assert(stalls[tid].decode);
+        assert(!fromDecode->decodeBlock[tid]);
+        stalls[tid].decode = false;
+    }
+
+    if (fromRename->renameBlock[tid]) {
+        stalls[tid].rename = true;
+    }
+
+    if (fromRename->renameUnblock[tid]) {
+        assert(stalls[tid].rename);
+        assert(!fromRename->renameBlock[tid]);
+        stalls[tid].rename = false;
+    }
+
+    if (fromIEW->iewBlock[tid]) {
+        stalls[tid].iew = true;
+    }
+
+    if (fromIEW->iewUnblock[tid]) {
+        assert(stalls[tid].iew);
+        assert(!fromIEW->iewBlock[tid]);
+        stalls[tid].iew = false;
+    }
+
+    if (fromCommit->commitBlock[tid]) {
+        stalls[tid].commit = true;
+    }
+
+    if (fromCommit->commitUnblock[tid]) {
+        assert(stalls[tid].commit);
+        assert(!fromCommit->commitBlock[tid]);
+        stalls[tid].commit = false;
+    }
+
+    // Check squash signals from commit.
+    if (fromCommit->commitInfo[tid].squash) {
+
+        DPRINTF(Fetch, "[tid:%u]: Squashing instructions due to squash "
+                "from commit.\n",tid);
+
+        // In any case, squash.
+        squash(fromCommit->commitInfo[tid].nextPC,tid);
+
+        // Also check if there's a mispredict that happened.
+        if (fromCommit->commitInfo[tid].branchMispredict) {
+            branchPred.squash(fromCommit->commitInfo[tid].doneSeqNum,
+                              fromCommit->commitInfo[tid].nextPC,
+                              fromCommit->commitInfo[tid].branchTaken,
+                              tid);
+        } else {
+            branchPred.squash(fromCommit->commitInfo[tid].doneSeqNum,
+                              tid);
+        }
+
+        return true;
+    } else if (fromCommit->commitInfo[tid].doneSeqNum) {
+        // Update the branch predictor if it wasn't a squashed instruction
+        // that was broadcasted.
+        branchPred.update(fromCommit->commitInfo[tid].doneSeqNum, tid);
+    }
+
+    // Check ROB squash signals from commit.
+    if (fromCommit->commitInfo[tid].robSquashing) {
+        DPRINTF(Fetch, "[tid:%u]: ROB is still squashing.\n", tid);
+
+        // Continue to squash.
+        fetchStatus[tid] = Squashing;
+
+        return true;
+    }
+
+    // Check squash signals from decode.
+    if (fromDecode->decodeInfo[tid].squash) {
+        DPRINTF(Fetch, "[tid:%u]: Squashing instructions due to squash "
+                "from decode.\n",tid);
+
+        // Update the branch predictor.
+        if (fromDecode->decodeInfo[tid].branchMispredict) {
+            branchPred.squash(fromDecode->decodeInfo[tid].doneSeqNum,
+                              fromDecode->decodeInfo[tid].nextPC,
+                              fromDecode->decodeInfo[tid].branchTaken,
+                              tid);
+        } else {
+            branchPred.squash(fromDecode->decodeInfo[tid].doneSeqNum,
+                              tid);
+        }
+
+        if (fetchStatus[tid] != Squashing) {
+            // Squash unless we're already squashing
+            squashFromDecode(fromDecode->decodeInfo[tid].nextPC,
+                             fromDecode->decodeInfo[tid].doneSeqNum,
+                             tid);
+
+            return true;
+        }
+    }
+
+    if (checkStall(tid) && fetchStatus[tid] != IcacheWaitResponse) {
+        DPRINTF(Fetch, "[tid:%i]: Setting to blocked\n",tid);
+
+        fetchStatus[tid] = Blocked;
+
+        return true;
+    }
+
+    if (fetchStatus[tid] == Blocked ||
+        fetchStatus[tid] == Squashing) {
+        // Switch status to running if fetch isn't being told to block or
+        // squash this cycle.
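+        // (Blocked and Squashing are transient states: with no fresh
+        //  block or squash signal this cycle, the thread drops back to
+        //  Running below.)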
+ DPRINTF(Fetch, "[tid:%i]: Done squashing, switching to running.\n", + tid); + + fetchStatus[tid] = Running; + + return true; + } + + // If we've reached this point, we have not gotten any signals that + // cause fetch to change its status. Fetch remains the same as before. + return false; +} + +template +void +DefaultFetch::fetch(bool &status_change) +{ + ////////////////////////////////////////// + // Start actual fetch + ////////////////////////////////////////// + int tid = getFetchingThread(fetchPolicy); + + if (tid == -1) { + DPRINTF(Fetch,"There are no more threads available to fetch from.\n"); + + // Breaks looping condition in tick() + threadFetched = numFetchingThreads; + return; + } + + // The current PC. + Addr &fetch_PC = PC[tid]; + + // Fault code for memory access. + Fault fault = NoFault; + + // If returning from the delay of a cache miss, then update the status + // to running, otherwise do the cache access. Possibly move this up + // to tick() function. + if (fetchStatus[tid] == IcacheAccessComplete) { + DPRINTF(Fetch, "[tid:%i]: Icache miss is complete.\n", + tid); + + fetchStatus[tid] = Running; + status_change = true; + } else if (fetchStatus[tid] == Running) { + DPRINTF(Fetch, "[tid:%i]: Attempting to translate and read " + "instruction, starting at PC %08p.\n", + tid, fetch_PC); + + bool fetch_success = fetchCacheLine(fetch_PC, fault, tid); + if (!fetch_success) { + ++fetchMiscStallCycles; + return; + } + } else { + if (fetchStatus[tid] == Idle) { + ++fetchIdleCycles; + } else if (fetchStatus[tid] == Blocked) { + ++fetchBlockedCycles; + } else if (fetchStatus[tid] == Squashing) { + ++fetchSquashCycles; + } else if (fetchStatus[tid] == IcacheWaitResponse) { + ++icacheStallCycles; + } + + // Status is Idle, Squashing, Blocked, or IcacheWaitResponse, so + // fetch should do nothing. + return; + } + + ++fetchCycles; + + // If we had a stall due to an icache miss, then return. + if (fetchStatus[tid] == IcacheWaitResponse) { + ++icacheStallCycles; + status_change = true; + return; + } + + Addr next_PC = fetch_PC; + InstSeqNum inst_seq; + MachInst inst; + ExtMachInst ext_inst; + // @todo: Fix this hack. + unsigned offset = (fetch_PC & cacheBlkMask) & ~3; + + if (fault == NoFault) { + // If the read of the first instruction was successful, then grab the + // instructions from the rest of the cache line and put them into the + // queue heading to decode. + + DPRINTF(Fetch, "[tid:%i]: Adding instructions to queue to " + "decode.\n",tid); + + // Need to keep track of whether or not a predicted branch + // ended this fetch block. + bool predicted_branch = false; + + for (; + offset < cacheBlkSize && + numInst < fetchWidth && + !predicted_branch; + ++numInst) { + + // Get a sequence number. + inst_seq = cpu->getAndIncrementInstSeq(); + + // Make sure this is a valid index. + assert(offset <= cacheBlkSize - instSize); + + // Get the instruction from the array of the cache line. + inst = gtoh(*reinterpret_cast + (&cacheData[tid][offset])); + + ext_inst = TheISA::makeExtMI(inst, fetch_PC); + + // Create a new DynInst from the instruction fetched. 
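+            // (Each DynInst carries ext_inst, both PCs, and its sequence
+            //  number, so later squashes and branch-predictor updates can
+            //  identify it precisely.)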
+ DynInstPtr instruction = new DynInst(ext_inst, fetch_PC, + next_PC, + inst_seq, cpu); + instruction->setThread(tid); + + instruction->setASID(tid); + + instruction->setState(cpu->thread[tid]); + + DPRINTF(Fetch, "[tid:%i]: Instruction PC %#x created " + "[sn:%lli]\n", + tid, instruction->readPC(), inst_seq); + + DPRINTF(Fetch, "[tid:%i]: Instruction is: %s\n", + tid, instruction->staticInst->disassemble(fetch_PC)); + + instruction->traceData = + Trace::getInstRecord(curTick, cpu->xcBase(tid), cpu, + instruction->staticInst, + instruction->readPC(),tid); + + predicted_branch = lookupAndUpdateNextPC(instruction, next_PC); + + // Add instruction to the CPU's list of instructions. + instruction->setInstListIt(cpu->addInst(instruction)); + + // Write the instruction to the first slot in the queue + // that heads to decode. + toDecode->insts[numInst] = instruction; + + toDecode->size++; + + // Increment stat of fetched instructions. + ++fetchedInsts; + + // Move to the next instruction, unless we have a branch. + fetch_PC = next_PC; + + if (instruction->isQuiesce()) { + warn("%lli: Quiesce instruction encountered, halting fetch!", + curTick); + fetchStatus[tid] = QuiescePending; + ++numInst; + status_change = true; + break; + } + + offset+= instSize; + } + } + + if (numInst > 0) { + wroteToTimeBuffer = true; + } + + // Now that fetching is completed, update the PC to signify what the next + // cycle will be. + if (fault == NoFault) { + DPRINTF(Fetch, "[tid:%i]: Setting PC to %08p.\n",tid, next_PC); + + PC[tid] = next_PC; + nextPC[tid] = next_PC + instSize; + } else { + // We shouldn't be in an icache miss and also have a fault (an ITB + // miss) + if (fetchStatus[tid] == IcacheWaitResponse) { + panic("Fetch should have exited prior to this!"); + } + + // Send the fault to commit. This thread will not do anything + // until commit handles the fault. The only other way it can + // wake up is if a squash comes along and changes the PC. +#if FULL_SYSTEM + assert(numInst != fetchWidth); + // Get a sequence number. + inst_seq = cpu->getAndIncrementInstSeq(); + // We will use a nop in order to carry the fault. + ext_inst = TheISA::NoopMachInst; + + // Create a new DynInst from the dummy nop. 
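+        // (The nop is only a vehicle: commit notices instruction->fault
+        //  != NoFault and handles the trap, while fetch waits in
+        //  TrapPending until then.)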
+ DynInstPtr instruction = new DynInst(ext_inst, fetch_PC, + next_PC, + inst_seq, cpu); + instruction->setPredTarg(next_PC + instSize); + instruction->setThread(tid); + + instruction->setASID(tid); + + instruction->setState(cpu->thread[tid]); + + instruction->traceData = NULL; + + instruction->setInstListIt(cpu->addInst(instruction)); + + instruction->fault = fault; + + toDecode->insts[numInst] = instruction; + toDecode->size++; + + DPRINTF(Fetch, "[tid:%i]: Blocked, need to handle the trap.\n",tid); + + fetchStatus[tid] = TrapPending; + status_change = true; + + warn("%lli fault (%d) detected @ PC %08p", curTick, fault, PC[tid]); +#else // !FULL_SYSTEM + fatal("fault (%d) detected @ PC %08p", fault, PC[tid]); +#endif // FULL_SYSTEM + } +} + + +/////////////////////////////////////// +// // +// SMT FETCH POLICY MAINTAINED HERE // +// // +/////////////////////////////////////// +template +int +DefaultFetch::getFetchingThread(FetchPriority &fetch_priority) +{ + if (numThreads > 1) { + switch (fetch_priority) { + + case SingleThread: + return 0; + + case RoundRobin: + return roundRobin(); + + case IQ: + return iqCount(); + + case LSQ: + return lsqCount(); + + case Branch: + return branchCount(); + + default: + return -1; + } + } else { + int tid = *((*activeThreads).begin()); + + if (fetchStatus[tid] == Running || + fetchStatus[tid] == IcacheAccessComplete || + fetchStatus[tid] == Idle) { + return tid; + } else { + return -1; + } + } + +} + + +template +int +DefaultFetch::roundRobin() +{ + list::iterator pri_iter = priorityList.begin(); + list::iterator end = priorityList.end(); + + int high_pri; + + while (pri_iter != end) { + high_pri = *pri_iter; + + assert(high_pri <= numThreads); + + if (fetchStatus[high_pri] == Running || + fetchStatus[high_pri] == IcacheAccessComplete || + fetchStatus[high_pri] == Idle) { + + priorityList.erase(pri_iter); + priorityList.push_back(high_pri); + + return high_pri; + } + + pri_iter++; + } + + return -1; +} + +template +int +DefaultFetch::iqCount() +{ + priority_queue PQ; + + list::iterator threads = (*activeThreads).begin(); + + while (threads != (*activeThreads).end()) { + unsigned tid = *threads++; + + PQ.push(fromIEW->iewInfo[tid].iqCount); + } + + while (!PQ.empty()) { + + unsigned high_pri = PQ.top(); + + if (fetchStatus[high_pri] == Running || + fetchStatus[high_pri] == IcacheAccessComplete || + fetchStatus[high_pri] == Idle) + return high_pri; + else + PQ.pop(); + + } + + return -1; +} + +template +int +DefaultFetch::lsqCount() +{ + priority_queue PQ; + + + list::iterator threads = (*activeThreads).begin(); + + while (threads != (*activeThreads).end()) { + unsigned tid = *threads++; + + PQ.push(fromIEW->iewInfo[tid].ldstqCount); + } + + while (!PQ.empty()) { + + unsigned high_pri = PQ.top(); + + if (fetchStatus[high_pri] == Running || + fetchStatus[high_pri] == IcacheAccessComplete || - fetchStatus[high_pri] == Idle) ++ fetchStatus[high_pri] == Idle) + return high_pri; + else + PQ.pop(); + + } + + return -1; +} + +template +int +DefaultFetch::branchCount() +{ + list::iterator threads = (*activeThreads).begin(); + + return *threads; +} diff --cc src/cpu/o3/fu_pool.cc index fb2b5c00d,000000000..b28b5d37f mode 100644,000000..100644 --- a/src/cpu/o3/fu_pool.cc +++ b/src/cpu/o3/fu_pool.cc @@@ -1,295 -1,0 +1,297 @@@ +/* + * Copyright (c) 2002-2005 The Regents of The University of Michigan + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include "cpu/o3/fu_pool.hh" +#include "encumbered/cpu/full/fu_pool.hh" +#include "sim/builder.hh" + +using namespace std; + +//////////////////////////////////////////////////////////////////////////// +// +// A pool of function units +// + +inline void +FUPool::FUIdxQueue::addFU(int fu_idx) +{ + funcUnitsIdx.push_back(fu_idx); + ++size; +} + +inline int +FUPool::FUIdxQueue::getFU() +{ + int retval = funcUnitsIdx[idx++]; + + if (idx == size) + idx = 0; + + return retval; +} + +FUPool::~FUPool() +{ + fuListIterator i = funcUnits.begin(); + fuListIterator end = funcUnits.end(); + for (; i != end; ++i) + delete *i; +} + + +// Constructor +FUPool::FUPool(string name, vector paramList) + : SimObject(name) +{ + numFU = 0; + + funcUnits.clear(); + + for (int i = 0; i < Num_OpClasses; ++i) { + maxOpLatencies[i] = 0; + maxIssueLatencies[i] = 0; + } + + // + // Iterate through the list of FUDescData structures + // + for (FUDDiterator i = paramList.begin(); i != paramList.end(); ++i) { + + // + // Don't bother with this if we're not going to create any FU's + // + if ((*i)->number) { + // + // Create the FuncUnit object from this structure + // - add the capabilities listed in the FU's operation + // description + // + // We create the first unit, then duplicate it as needed + // + FuncUnit *fu = new FuncUnit; + + OPDDiterator j = (*i)->opDescList.begin(); + OPDDiterator end = (*i)->opDescList.end(); + for (; j != end; ++j) { + // indicate that this pool has this capability + capabilityList.set((*j)->opClass); + + // Add each of the FU's that will have this capability to the + // appropriate queue. 
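+            // (For example, an FUDesc with number == 2 appends indices
+            //  numFU and numFU + 1 here, so getUnit() for that op class
+            //  cycles over both copies.)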
+ for (int k = 0; k < (*i)->number; ++k) + fuPerCapList[(*j)->opClass].addFU(numFU + k); + + // indicate that this FU has the capability + fu->addCapability((*j)->opClass, (*j)->opLat, (*j)->issueLat); + + if ((*j)->opLat > maxOpLatencies[(*j)->opClass]) + maxOpLatencies[(*j)->opClass] = (*j)->opLat; + + if ((*j)->issueLat > maxIssueLatencies[(*j)->opClass]) + maxIssueLatencies[(*j)->opClass] = (*j)->issueLat; + } + + numFU++; + + // Add the appropriate number of copies of this FU to the list + ostringstream s; + + s << (*i)->name() << "(0)"; + fu->name = s.str(); + funcUnits.push_back(fu); + + for (int c = 1; c < (*i)->number; ++c) { + ostringstream s; + numFU++; + FuncUnit *fu2 = new FuncUnit(*fu); + + s << (*i)->name() << "(" << c << ")"; + fu2->name = s.str(); + funcUnits.push_back(fu2); + } + } + } + + unitBusy.resize(numFU); + + for (int i = 0; i < numFU; i++) { + unitBusy[i] = false; + } +} + +void +FUPool::annotateMemoryUnits(unsigned hit_latency) +{ + maxOpLatencies[MemReadOp] = hit_latency; + + fuListIterator i = funcUnits.begin(); + fuListIterator iend = funcUnits.end(); + for (; i != iend; ++i) { + if ((*i)->provides(MemReadOp)) + (*i)->opLatency(MemReadOp) = hit_latency; + + if ((*i)->provides(MemWriteOp)) + (*i)->opLatency(MemWriteOp) = hit_latency; + } +} + +int +FUPool::getUnit(OpClass capability) +{ + // If this pool doesn't have the specified capability, + // return this information to the caller + if (!capabilityList[capability]) + return -2; + + int fu_idx = fuPerCapList[capability].getFU(); + int start_idx = fu_idx; + + // Iterate through the circular queue if needed, stopping if we've reached + // the first element again. + while (unitBusy[fu_idx]) { + fu_idx = fuPerCapList[capability].getFU(); + if (fu_idx == start_idx) { + // No FU available + return -1; + } + } + ++ assert(fu_idx < numFU); ++ + unitBusy[fu_idx] = true; + + return fu_idx; +} + +void +FUPool::freeUnitNextCycle(int fu_idx) +{ + assert(unitBusy[fu_idx]); + unitsToBeFreed.push_back(fu_idx); +} + +void +FUPool::processFreeUnits() +{ + while (!unitsToBeFreed.empty()) { + int fu_idx = unitsToBeFreed.back(); + unitsToBeFreed.pop_back(); + + assert(unitBusy[fu_idx]); + + unitBusy[fu_idx] = false; + } +} + +void +FUPool::dump() +{ + cout << "Function Unit Pool (" << name() << ")\n"; + cout << "======================================\n"; + cout << "Free List:\n"; + + for (int i = 0; i < numFU; ++i) { + if (unitBusy[i]) { + continue; + } + + cout << " [" << i << "] : "; + + cout << funcUnits[i]->name << " "; + + cout << "\n"; + } + + cout << "======================================\n"; + cout << "Busy List:\n"; + for (int i = 0; i < numFU; ++i) { + if (!unitBusy[i]) { + continue; + } + + cout << " [" << i << "] : "; + + cout << funcUnits[i]->name << " "; + + cout << "\n"; + } +} + +void +FUPool::switchOut() +{ +} + +void +FUPool::takeOverFrom() +{ + for (int i = 0; i < numFU; i++) { + unitBusy[i] = false; + } + unitsToBeFreed.clear(); +} + +// + +//////////////////////////////////////////////////////////////////////////// +// +// The SimObjects we use to get the FU information into the simulator +// +//////////////////////////////////////////////////////////////////////////// + +// +// FUPool - Contails a list of FUDesc objects to make available +// + +// +// The FuPool object +// + +BEGIN_DECLARE_SIM_OBJECT_PARAMS(FUPool) + + SimObjectVectorParam FUList; + +END_DECLARE_SIM_OBJECT_PARAMS(FUPool) + + +BEGIN_INIT_SIM_OBJECT_PARAMS(FUPool) + + INIT_PARAM(FUList, "list of FU's for this pool") + 
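+// (FUList is the pool's only parameter: a vector of FUDesc objects, each
+//  naming its op classes, latencies, and how many copies to instantiate.)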
+END_INIT_SIM_OBJECT_PARAMS(FUPool)
+
+
+CREATE_SIM_OBJECT(FUPool)
+{
+    return new FUPool(getInstanceName(), FUList);
+}
+
+REGISTER_SIM_OBJECT("FUPool", FUPool)
+
diff --cc src/cpu/o3/fu_pool.hh
index f590c4149,000000000..1d4c76690
mode 100644,000000..100644
--- a/src/cpu/o3/fu_pool.hh
+++ b/src/cpu/o3/fu_pool.hh
@@@ -1,162 -1,0 +1,165 @@@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CPU_O3_FU_POOL_HH__
+#define __CPU_O3_FU_POOL_HH__
+
+#include <bitset>
+#include <list>
+#include <string>
+#include <vector>
+
+#include "base/sched_list.hh"
+#include "cpu/op_class.hh"
+#include "sim/sim_object.hh"
+
+class FUDesc;
+class FuncUnit;
+
+/**
+ * Pool of FUs, specific to the new CPU model. The old FU pool had lists of
+ * free units and busy units, and whenever a FU was needed it would iterate
+ * through the free units to find a FU that provided the capability. This pool
+ * has lists of units specific to each of the capabilities, and whenever a FU
+ * is needed, it iterates through that list to find a free unit. The previous
+ * FU pool would have to be ticked each cycle to update which units became
+ * free. This FU pool lets the IEW stage handle freeing units, which frees
+ * them as their scheduled execution events complete. This limits units in
+ * this model to either have identical issue and op latencies, or 1 cycle
+ * issue latencies.
+ */
+class FUPool : public SimObject
+{
+  private:
+    /** Maximum op execution latencies, per op class. */
+    unsigned maxOpLatencies[Num_OpClasses];
+    /** Maximum issue latencies, per op class. */
+    unsigned maxIssueLatencies[Num_OpClasses];
+
+    /** Bitvector listing capabilities of this FU pool. */
+    std::bitset<Num_OpClasses> capabilityList;
+
+    /** Bitvector listing which FUs are busy. */
+    std::vector<bool> unitBusy;
+
+    /** List of units to be freed at the end of this cycle. */
+    std::vector<int> unitsToBeFreed;
+
+    /**
+     * Class that implements a circular queue to hold FU indices.
The hope is + * that FUs that have been just used will be moved to the end of the queue + * by iterating through it, thus leaving free units at the head of the + * queue. + */ + class FUIdxQueue { + public: + /** Constructs a circular queue of FU indices. */ + FUIdxQueue() + : idx(0), size(0) + { } + + /** Adds a FU to the queue. */ + inline void addFU(int fu_idx); + + /** Returns the index of the FU at the head of the queue, and changes + * the index to the next element. + */ + inline int getFU(); + + private: + /** Circular queue index. */ + int idx; + + /** Size of the queue. */ + int size; + + /** Queue of FU indices. */ + std::vector funcUnitsIdx; + }; + + /** Per op class queues of FUs that provide that capability. */ + FUIdxQueue fuPerCapList[Num_OpClasses]; + + /** Number of FUs. */ + int numFU; + + /** Functional units. */ + std::vector funcUnits; + + typedef std::vector::iterator fuListIterator; + + public: + + /** Constructs a FU pool. */ + FUPool(std::string name, std::vector l); + ~FUPool(); + + /** Annotates units that provide memory operations. Included only because + * old FU pool provided this function. + */ + void annotateMemoryUnits(unsigned hit_latency); + + /** + * Gets a FU providing the requested capability. Will mark the unit as busy, + * but leaves the freeing of the unit up to the IEW stage. + * @param capability The capability requested. + * @return Returns -2 if the FU pool does not have the capability, -1 if + * there is no free FU, and the FU's index otherwise. + */ + int getUnit(OpClass capability); + + /** Frees a FU at the end of this cycle. */ + void freeUnitNextCycle(int fu_idx); + + /** Frees all FUs on the list. */ + void processFreeUnits(); + + /** Returns the total number of FUs. */ + int size() { return numFU; } + + /** Debugging function used to dump FU information. */ + void dump(); + + /** Returns the operation execution latency of the given capability. */ + unsigned getOpLatency(OpClass capability) { + return maxOpLatencies[capability]; + } + + /** Returns the issue latency of the given capability. */ + unsigned getIssueLatency(OpClass capability) { + return maxIssueLatencies[capability]; + } + ++ /** Switches out functional unit pool. */ + void switchOut(); ++ ++ /** Takes over from another CPU's thread. */ + void takeOverFrom(); +}; + +#endif // __CPU_O3_FU_POOL_HH__ diff --cc src/cpu/o3/iew.hh index ae86536c9,000000000..7e79d5311 mode 100644,000000..100644 --- a/src/cpu/o3/iew.hh +++ b/src/cpu/o3/iew.hh @@@ -1,484 -1,0 +1,505 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_IEW_HH__ +#define __CPU_O3_IEW_HH__ + +#include + +#include "base/statistics.hh" +#include "base/timebuf.hh" +#include "config/full_system.hh" +#include "cpu/o3/comm.hh" +#include "cpu/o3/scoreboard.hh" +#include "cpu/o3/lsq.hh" + +class FUPool; + +/** + * DefaultIEW handles both single threaded and SMT IEW + * (issue/execute/writeback). It handles the dispatching of + * instructions to the LSQ/IQ as part of the issue stage, and has the + * IQ try to issue instructions each cycle. The execute latency is + * actually tied into the issue latency to allow the IQ to be able to + * do back-to-back scheduling without having to speculatively schedule + * instructions. This happens by having the IQ have access to the + * functional units, and the IQ gets the execution latencies from the + * FUs when it issues instructions. Instructions reach the execute + * stage on the last cycle of their execution, which is when the IQ + * knows to wake up any dependent instructions, allowing back to back + * scheduling. The execute portion of IEW separates memory + * instructions from non-memory instructions, either telling the LSQ + * to execute the instruction, or executing the instruction directly. + * The writeback portion of IEW completes the instructions by waking + * up any dependents, and marking the register ready on the + * scoreboard. + */ +template +class DefaultIEW +{ + private: + //Typedefs from Impl + typedef typename Impl::CPUPol CPUPol; + typedef typename Impl::DynInstPtr DynInstPtr; + typedef typename Impl::FullCPU FullCPU; + typedef typename Impl::Params Params; + + typedef typename CPUPol::IQ IQ; + typedef typename CPUPol::RenameMap RenameMap; + typedef typename CPUPol::LSQ LSQ; + + typedef typename CPUPol::TimeStruct TimeStruct; + typedef typename CPUPol::IEWStruct IEWStruct; + typedef typename CPUPol::RenameStruct RenameStruct; + typedef typename CPUPol::IssueStruct IssueStruct; + + friend class Impl::FullCPU; + friend class CPUPol::IQ; + + public: + /** Overall IEW stage status. Used to determine if the CPU can + * deschedule itself due to a lack of activity. + */ + enum Status { + Active, + Inactive + }; + + /** Status for Issue, Execute, and Writeback stages. */ + enum StageStatus { + Running, + Blocked, + Idle, + StartSquash, + Squashing, + Unblocking + }; + + private: + /** Overall stage status. */ + Status _status; + /** Dispatch status. */ + StageStatus dispatchStatus[Impl::MaxThreads]; + /** Execute status. */ + StageStatus exeStatus; + /** Writeback status. */ + StageStatus wbStatus; + + public: + /** Constructs a DefaultIEW with the given parameters. */ + DefaultIEW(Params *params); + + /** Returns the name of the DefaultIEW stage. 
*/ + std::string name() const; + + /** Registers statistics. */ + void regStats(); + + /** Initializes stage; sends back the number of free IQ and LSQ entries. */ + void initStage(); + + /** Sets CPU pointer for IEW, IQ, and LSQ. */ + void setCPU(FullCPU *cpu_ptr); + + /** Sets main time buffer used for backwards communication. */ + void setTimeBuffer(TimeBuffer *tb_ptr); + + /** Sets time buffer for getting instructions coming from rename. */ + void setRenameQueue(TimeBuffer *rq_ptr); + + /** Sets time buffer to pass on instructions to commit. */ + void setIEWQueue(TimeBuffer *iq_ptr); + + /** Sets pointer to list of active threads. */ + void setActiveThreads(std::list *at_ptr); + + /** Sets pointer to the scoreboard. */ + void setScoreboard(Scoreboard *sb_ptr); + ++ /** Starts switch out of IEW stage. */ + void switchOut(); + ++ /** Completes switch out of IEW stage. */ + void doSwitchOut(); + ++ /** Takes over from another CPU's thread. */ + void takeOverFrom(); + ++ /** Returns if IEW is switched out. */ + bool isSwitchedOut() { return switchedOut; } + + /** Sets page table pointer within LSQ. */ +// void setPageTable(PageTable *pt_ptr); + + /** Squashes instructions in IEW for a specific thread. */ + void squash(unsigned tid); + + /** Wakes all dependents of a completed instruction. */ + void wakeDependents(DynInstPtr &inst); + + /** Tells memory dependence unit that a memory instruction needs to be + * rescheduled. It will re-execute once replayMemInst() is called. + */ + void rescheduleMemInst(DynInstPtr &inst); + + /** Re-executes all rescheduled memory instructions. */ + void replayMemInst(DynInstPtr &inst); + + /** Sends an instruction to commit through the time buffer. */ + void instToCommit(DynInstPtr &inst); + + /** Inserts unused instructions of a thread into the skid buffer. */ + void skidInsert(unsigned tid); + + /** Returns the max of the number of entries in all of the skid buffers. */ + int skidCount(); + + /** Returns if all of the skid buffers are empty. */ + bool skidsEmpty(); + + /** Updates overall IEW status based on all of the stages' statuses. */ + void updateStatus(); + + /** Resets entries of the IQ and the LSQ. */ + void resetEntries(); + + /** Tells the CPU to wakeup if it has descheduled itself due to no + * activity. Used mainly by the LdWritebackEvent. + */ + void wakeCPU(); + + /** Reports to the CPU that there is activity this cycle. */ + void activityThisCycle(); + + /** Tells CPU that the IEW stage is active and running. */ + inline void activateStage(); + + /** Tells CPU that the IEW stage is inactive and idle. */ + inline void deactivateStage(); + + /** Returns if the LSQ has any stores to writeback. */ + bool hasStoresToWB() { return ldstQueue.hasStoresToWB(); } + + private: + /** Sends commit proper information for a squash due to a branch + * mispredict. + */ + void squashDueToBranch(DynInstPtr &inst, unsigned thread_id); + + /** Sends commit proper information for a squash due to a memory order + * violation. + */ + void squashDueToMemOrder(DynInstPtr &inst, unsigned thread_id); + + /** Sends commit proper information for a squash due to memory becoming + * blocked (younger issued instructions must be retried). + */ + void squashDueToMemBlocked(DynInstPtr &inst, unsigned thread_id); + + /** Sets Dispatch to blocked, and signals back to other stages to block. */ + void block(unsigned thread_id); + + /** Unblocks Dispatch if the skid buffer is empty, and signals back to + * other stages to unblock. 
+ */ + void unblock(unsigned thread_id); + + /** Determines proper actions to take given Dispatch's status. */ + void dispatch(unsigned tid); + + /** Dispatches instructions to IQ and LSQ. */ + void dispatchInsts(unsigned tid); + + /** Executes instructions. In the case of memory operations, it informs the + * LSQ to execute the instructions. Also handles any redirects that occur + * due to the executed instructions. + */ + void executeInsts(); + + /** Writebacks instructions. In our model, the instruction's execute() + * function atomically reads registers, executes, and writes registers. + * Thus this writeback only wakes up dependent instructions, and informs + * the scoreboard of registers becoming ready. + */ + void writebackInsts(); + + /** Returns the number of valid, non-squashed instructions coming from + * rename to dispatch. + */ + unsigned validInstsFromRename(); + + /** Reads the stall signals. */ + void readStallSignals(unsigned tid); + + /** Checks if any of the stall conditions are currently true. */ + bool checkStall(unsigned tid); + + /** Processes inputs and changes state accordingly. */ + void checkSignalsAndUpdate(unsigned tid); + + /** Sorts instructions coming from rename into lists separated by thread. */ + void sortInsts(); + + public: + /** Ticks IEW stage, causing Dispatch, the IQ, the LSQ, Execute, and + * Writeback to run for one cycle. + */ + void tick(); + + private: ++ /** Updates execution stats based on the instruction. */ + void updateExeInstStats(DynInstPtr &inst); + + /** Pointer to main time buffer used for backwards communication. */ + TimeBuffer *timeBuffer; + + /** Wire to write information heading to previous stages. */ + typename TimeBuffer::wire toFetch; + + /** Wire to get commit's output from backwards time buffer. */ + typename TimeBuffer::wire fromCommit; + + /** Wire to write information heading to previous stages. */ + typename TimeBuffer::wire toRename; + + /** Rename instruction queue interface. */ + TimeBuffer *renameQueue; + + /** Wire to get rename's output from rename queue. */ + typename TimeBuffer::wire fromRename; + + /** Issue stage queue. */ + TimeBuffer issueToExecQueue; + + /** Wire to read information from the issue stage time queue. */ + typename TimeBuffer::wire fromIssue; + + /** + * IEW stage time buffer. Holds ROB indices of instructions that + * can be marked as completed. + */ + TimeBuffer *iewQueue; + + /** Wire to write infromation heading to commit. */ + typename TimeBuffer::wire toCommit; + + /** Queue of all instructions coming from rename this cycle. */ + std::queue insts[Impl::MaxThreads]; + + /** Skid buffer between rename and IEW. */ + std::queue skidBuffer[Impl::MaxThreads]; + + /** Scoreboard pointer. */ + Scoreboard* scoreboard; + + public: + /** Instruction queue. */ + IQ instQueue; + + /** Load / store queue. */ + LSQ ldstQueue; + + /** Pointer to the functional unit pool. */ + FUPool *fuPool; + + private: + /** CPU pointer. */ + FullCPU *cpu; + + /** Records if IEW has written to the time buffer this cycle, so that the + * CPU can deschedule itself if there is no activity. + */ + bool wroteToTimeBuffer; + + /** Source of possible stalls. */ + struct Stalls { + bool commit; + }; + + /** Stages that are telling IEW to stall. */ + Stalls stalls[Impl::MaxThreads]; + + /** Debug function to print instructions that are issued this cycle. */ + void printAvailableInsts(); + + public: + /** Records if the LSQ needs to be updated on the next cycle, so that + * IEW knows if there will be activity on the next cycle. 
+ */ + bool updateLSQNextCycle; + + private: + /** Records if there is a fetch redirect on this cycle for each thread. */ + bool fetchRedirect[Impl::MaxThreads]; + + /** Used to track if all instructions have been dispatched this cycle. + * If they have not, then blocking must have occurred, and the instructions + * would already be added to the skid buffer. + * @todo: Fix this hack. + */ + bool dispatchedAllInsts; + + /** Records if the queues have been changed (inserted or issued insts), + * so that IEW knows to broadcast the updated amount of free entries. + */ + bool updatedQueues; + + /** Commit to IEW delay, in ticks. */ + unsigned commitToIEWDelay; + + /** Rename to IEW delay, in ticks. */ + unsigned renameToIEWDelay; + + /** + * Issue to execute delay, in ticks. What this actually represents is + * the amount of time it takes for an instruction to wake up, be + * scheduled, and sent to a FU for execution. + */ + unsigned issueToExecuteDelay; + + /** Width of issue's read path, in instructions. The read path is both + * the skid buffer and the rename instruction queue. + * Note to self: is this really different than issueWidth? + */ + unsigned issueReadWidth; + + /** Width of issue, in instructions. */ + unsigned issueWidth; + + /** Width of execute, in instructions. Might make more sense to break + * down into FP vs int. + */ + unsigned executeWidth; + + /** Index into queue of instructions being written back. */ + unsigned wbNumInst; + + /** Cycle number within the queue of instructions being written back. + * Used in case there are too many instructions writing back at the current + * cycle and writesbacks need to be scheduled for the future. See comments + * in instToCommit(). + */ + unsigned wbCycle; + + /** Number of active threads. */ + unsigned numThreads; + + /** Pointer to list of active threads. */ + std::list *activeThreads; + + /** Maximum size of the skid buffer. */ + unsigned skidBufferMax; + ++ /** Is this stage switched out. */ + bool switchedOut; + + /** Stat for total number of idle cycles. */ + Stats::Scalar<> iewIdleCycles; + /** Stat for total number of squashing cycles. */ + Stats::Scalar<> iewSquashCycles; + /** Stat for total number of blocking cycles. */ + Stats::Scalar<> iewBlockCycles; + /** Stat for total number of unblocking cycles. */ + Stats::Scalar<> iewUnblockCycles; + /** Stat for total number of instructions dispatched. */ + Stats::Scalar<> iewDispatchedInsts; + /** Stat for total number of squashed instructions dispatch skips. */ + Stats::Scalar<> iewDispSquashedInsts; + /** Stat for total number of dispatched load instructions. */ + Stats::Scalar<> iewDispLoadInsts; + /** Stat for total number of dispatched store instructions. */ + Stats::Scalar<> iewDispStoreInsts; + /** Stat for total number of dispatched non speculative instructions. */ + Stats::Scalar<> iewDispNonSpecInsts; + /** Stat for number of times the IQ becomes full. */ + Stats::Scalar<> iewIQFullEvents; + /** Stat for number of times the LSQ becomes full. */ + Stats::Scalar<> iewLSQFullEvents; + /** Stat for total number of executed instructions. */ + Stats::Scalar<> iewExecutedInsts; + /** Stat for total number of executed load instructions. */ + Stats::Vector<> iewExecLoadInsts; + /** Stat for total number of executed store instructions. */ +// Stats::Scalar<> iewExecStoreInsts; + /** Stat for total number of squashed instructions skipped at execute. */ + Stats::Scalar<> iewExecSquashedInsts; + /** Stat for total number of memory ordering violation events. 
*/
+    Stats::Scalar<> memOrderViolationEvents;
+    /** Stat for total number of incorrect predicted taken branches. */
+    Stats::Scalar<> predictedTakenIncorrect;
+    /** Stat for total number of incorrect predicted not taken branches. */
+    Stats::Scalar<> predictedNotTakenIncorrect;
+    /** Stat for total number of mispredicted branches detected at execute. */
+    Stats::Formula branchMispredicts;
+
++    /** Number of executed software prefetches. */
+    Stats::Vector<> exeSwp;
++    /** Number of executed nops. */
+    Stats::Vector<> exeNop;
++    /** Number of executed memory references. */
+    Stats::Vector<> exeRefs;
++    /** Number of executed branches. */
+    Stats::Vector<> exeBranches;
+
+//    Stats::Vector<> issued_ops;
+/*
+    Stats::Vector<> stat_fu_busy;
+    Stats::Vector2d<> stat_fuBusy;
+    Stats::Vector<> dist_unissued;
+    Stats::Vector2d<> stat_issued_inst_type;
+*/
++    /** Number of instructions issued per cycle. */
+    Stats::Formula issueRate;
++    /** Number of executed store instructions. */
+    Stats::Formula iewExecStoreInsts;
+//    Stats::Formula issue_op_rate;
+//    Stats::Formula fu_busy_rate;
-
++    /** Number of instructions sent to commit. */
+    Stats::Vector<> iewInstsToCommit;
++    /** Number of instructions that writeback. */
+    Stats::Vector<> writebackCount;
++    /** Number of instructions that wake consumers. */
+    Stats::Vector<> producerInst;
++    /** Number of instructions that wake up from producers. */
+    Stats::Vector<> consumerInst;
++    /** Number of instructions that were delayed in writing back due
++     * to resource contention.
++     */
+    Stats::Vector<> wbPenalized;
+
++    /** Number of instructions per cycle written back. */
+    Stats::Formula wbRate;
++    /** Average number of woken instructions per writeback. */
+    Stats::Formula wbFanout;
++    /** Number of instructions per cycle delayed in writing back. */
+    Stats::Formula wbPenalizedRate;
+};
+
+#endif // __CPU_O3_IEW_HH__
diff --cc src/cpu/o3/iew_impl.hh
index 6b61b1b0e,000000000..23f101517
mode 100644,000000..100644
--- a/src/cpu/o3/iew_impl.hh
+++ b/src/cpu/o3/iew_impl.hh
@@@ -1,1528 -1,0 +1,1537 @@@
+/*
+ * Copyright (c) 2004-2006 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +// @todo: Fix the instantaneous communication among all the stages within +// iew. There's a clear delay between issue and execute, yet backwards +// communication happens simultaneously. + +#include + +#include "base/timebuf.hh" +#include "cpu/o3/fu_pool.hh" +#include "cpu/o3/iew.hh" + +using namespace std; + +template +DefaultIEW::DefaultIEW(Params *params) + : // @todo: Make this into a parameter. + issueToExecQueue(5, 5), + instQueue(params), + ldstQueue(params), + fuPool(params->fuPool), + commitToIEWDelay(params->commitToIEWDelay), + renameToIEWDelay(params->renameToIEWDelay), + issueToExecuteDelay(params->issueToExecuteDelay), + issueReadWidth(params->issueWidth), + issueWidth(params->issueWidth), + executeWidth(params->executeWidth), + numThreads(params->numberOfThreads), + switchedOut(false) +{ + _status = Active; + exeStatus = Running; + wbStatus = Idle; + + // Setup wire to read instructions coming from issue. + fromIssue = issueToExecQueue.getWire(-issueToExecuteDelay); + + // Instruction queue needs the queue between issue and execute. + instQueue.setIssueToExecuteQueue(&issueToExecQueue); + + instQueue.setIEW(this); + ldstQueue.setIEW(this); + + for (int i=0; i < numThreads; i++) { + dispatchStatus[i] = Running; + stalls[i].commit = false; + fetchRedirect[i] = false; + } + + updateLSQNextCycle = false; + + skidBufferMax = (3 * (renameToIEWDelay * params->renameWidth)) + issueWidth; +} + +template +std::string +DefaultIEW::name() const +{ + return cpu->name() + ".iew"; +} + +template +void +DefaultIEW::regStats() +{ + using namespace Stats; + + instQueue.regStats(); + + iewIdleCycles + .name(name() + ".iewIdleCycles") + .desc("Number of cycles IEW is idle"); + + iewSquashCycles + .name(name() + ".iewSquashCycles") + .desc("Number of cycles IEW is squashing"); + + iewBlockCycles + .name(name() + ".iewBlockCycles") + .desc("Number of cycles IEW is blocking"); + + iewUnblockCycles + .name(name() + ".iewUnblockCycles") + .desc("Number of cycles IEW is unblocking"); + + iewDispatchedInsts + .name(name() + ".iewDispatchedInsts") + .desc("Number of instructions dispatched to IQ"); + + iewDispSquashedInsts + .name(name() + ".iewDispSquashedInsts") + .desc("Number of squashed instructions skipped by dispatch"); + + iewDispLoadInsts + .name(name() + ".iewDispLoadInsts") + .desc("Number of dispatched load instructions"); + + iewDispStoreInsts + .name(name() + ".iewDispStoreInsts") + .desc("Number of dispatched store instructions"); + + iewDispNonSpecInsts + .name(name() + ".iewDispNonSpecInsts") + .desc("Number of dispatched non-speculative instructions"); + + iewIQFullEvents + .name(name() + ".iewIQFullEvents") + .desc("Number of times the IQ has become full, causing a stall"); + + iewLSQFullEvents + .name(name() + ".iewLSQFullEvents") + .desc("Number of times the LSQ has become full, causing a stall"); + + iewExecutedInsts + .name(name() + ".iewExecutedInsts") + .desc("Number of executed instructions"); + + 
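+    // (The per-thread Stats::Vector entries below use .flags(total) so
+    //  the stats dump also prints a sum across threads.)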
    iewExecLoadInsts
+        .init(cpu->number_of_threads)
+        .name(name() + ".iewExecLoadInsts")
+        .desc("Number of load instructions executed")
+        .flags(total);
+
+    iewExecSquashedInsts
+        .name(name() + ".iewExecSquashedInsts")
+        .desc("Number of squashed instructions skipped in execute");
+
+    memOrderViolationEvents
+        .name(name() + ".memOrderViolationEvents")
+        .desc("Number of memory order violations");
+
+    predictedTakenIncorrect
+        .name(name() + ".predictedTakenIncorrect")
+        .desc("Number of branches that were predicted taken incorrectly");
+
+    predictedNotTakenIncorrect
+        .name(name() + ".predictedNotTakenIncorrect")
+        .desc("Number of branches that were predicted not taken incorrectly");
+
+    branchMispredicts
+        .name(name() + ".branchMispredicts")
+        .desc("Number of branch mispredicts detected at execute");
+
+    branchMispredicts = predictedTakenIncorrect + predictedNotTakenIncorrect;
+
+    exeSwp
+        .init(cpu->number_of_threads)
+        .name(name() + ".EXEC:swp")
+        .desc("number of swp insts executed")
+        .flags(total)
+        ;
+
+    exeNop
+        .init(cpu->number_of_threads)
+        .name(name() + ".EXEC:nop")
+        .desc("number of nop insts executed")
+        .flags(total)
+        ;
+
+    exeRefs
+        .init(cpu->number_of_threads)
+        .name(name() + ".EXEC:refs")
+        .desc("number of memory reference insts executed")
+        .flags(total)
+        ;
+
+    exeBranches
+        .init(cpu->number_of_threads)
+        .name(name() + ".EXEC:branches")
+        .desc("Number of branches executed")
+        .flags(total)
+        ;
+
+    issueRate
+        .name(name() + ".EXEC:rate")
+        .desc("Inst execution rate")
+        .flags(total)
+        ;
+    issueRate = iewExecutedInsts / cpu->numCycles;
+
+    iewExecStoreInsts
+        .name(name() + ".EXEC:stores")
+        .desc("Number of stores executed")
+        .flags(total)
+        ;
+    iewExecStoreInsts = exeRefs - iewExecLoadInsts;
+/*
+    for (int i = 0; i < ...; ++i) {
+        ...
+    }
+*/
+    iewInstsToCommit
+        .init(cpu->number_of_threads)
+        .name(name() + ".WB:sent")
+        .desc("cumulative count of insts sent to commit")
+        .flags(total)
+        ;
+
+    writebackCount
+        .init(cpu->number_of_threads)
+        .name(name() + ".WB:count")
+        .desc("cumulative count of insts written-back")
+        .flags(total)
+        ;
+
+    producerInst
+        .init(cpu->number_of_threads)
+        .name(name() + ".WB:producers")
+        .desc("num instructions producing a value")
+        .flags(total)
+        ;
+
+    consumerInst
+        .init(cpu->number_of_threads)
+        .name(name() + ".WB:consumers")
+        .desc("num instructions consuming a value")
+        .flags(total)
+        ;
+
+    wbPenalized
+        .init(cpu->number_of_threads)
+        .name(name() + ".WB:penalized")
+        .desc("number of instructions required to write to 'other' IQ")
+        .flags(total)
+        ;
+
+    wbPenalizedRate
+        .name(name() + ".WB:penalized_rate")
+        .desc("fraction of instructions written-back that wrote to 'other' IQ")
+        .flags(total)
+        ;
+
+    wbPenalizedRate = wbPenalized / writebackCount;
+
+    wbFanout
+        .name(name() + ".WB:fanout")
+        .desc("average fanout of values written-back")
+        .flags(total)
+        ;
+
+    wbFanout = producerInst / consumerInst;
+
+    wbRate
+        .name(name() + ".WB:rate")
+        .desc("insts written-back per cycle")
+        .flags(total)
+        ;
+    wbRate = writebackCount / cpu->numCycles;
+}
+
+template <class Impl>
+void
+DefaultIEW<Impl>::initStage()
+{
+    for (int tid = 0; tid < numThreads; tid++) {
+        toRename->iewInfo[tid].usedIQ = true;
+        toRename->iewInfo[tid].freeIQEntries =
+            instQueue.numFreeEntries(tid);
+
+        toRename->iewInfo[tid].usedLSQ = true;
+        toRename->iewInfo[tid].freeLSQEntries =
+            ldstQueue.numFreeEntries(tid);
+    }
+}
+
+template <class Impl>
+void
+DefaultIEW<Impl>::setCPU(FullCPU *cpu_ptr)
+{
+    DPRINTF(IEW, "Setting CPU pointer.\n");
+    cpu = cpu_ptr;
+
+    instQueue.setCPU(cpu_ptr);
+    ldstQueue.setCPU(cpu_ptr);
+
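+    // IEW starts out active so the CPU ticks it from cycle zero;
+    // updateStatus() deactivates it later if nothing is in flight.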
cpu->activateStage(FullCPU::IEWIdx);
+}
+
+template <class Impl>
+void
+DefaultIEW<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
+{
+    DPRINTF(IEW, "Setting time buffer pointer.\n");
+    timeBuffer = tb_ptr;
+
+    // Setup wire to read information from time buffer, from commit.
+    fromCommit = timeBuffer->getWire(-commitToIEWDelay);
+
+    // Setup wire to write information back to previous stages.
+    toRename = timeBuffer->getWire(0);
+
+    toFetch = timeBuffer->getWire(0);
+
+    // Instruction queue also needs main time buffer.
+    instQueue.setTimeBuffer(tb_ptr);
+}
+
+template <class Impl>
+void
+DefaultIEW<Impl>::setRenameQueue(TimeBuffer<RenameStruct> *rq_ptr)
+{
+    DPRINTF(IEW, "Setting rename queue pointer.\n");
+    renameQueue = rq_ptr;
+
+    // Setup wire to read information from rename queue.
+    fromRename = renameQueue->getWire(-renameToIEWDelay);
+}
+
+template <class Impl>
+void
+DefaultIEW<Impl>::setIEWQueue(TimeBuffer<IEWStruct> *iq_ptr)
+{
+    DPRINTF(IEW, "Setting IEW queue pointer.\n");
+    iewQueue = iq_ptr;
+
+    // Setup wire to write instructions to commit.
+    toCommit = iewQueue->getWire(0);
+}
+
+template <class Impl>
+void
+DefaultIEW<Impl>::setActiveThreads(list<unsigned> *at_ptr)
+{
+    DPRINTF(IEW, "Setting active threads list pointer.\n");
+    activeThreads = at_ptr;
+
+    ldstQueue.setActiveThreads(at_ptr);
+    instQueue.setActiveThreads(at_ptr);
+}
+
+template <class Impl>
+void
+DefaultIEW<Impl>::setScoreboard(Scoreboard *sb_ptr)
+{
+    DPRINTF(IEW, "Setting scoreboard pointer.\n");
+    scoreboard = sb_ptr;
+}
+
+#if 0
+template <class Impl>
+void
+DefaultIEW<Impl>::setPageTable(PageTable *pt_ptr)
+{
+    ldstQueue.setPageTable(pt_ptr);
+}
+#endif
+
+template <class Impl>
+void
+DefaultIEW<Impl>::switchOut()
+{
++    // IEW is ready to switch out at any time.
+    cpu->signalSwitched();
+}
+
+template <class Impl>
+void
+DefaultIEW<Impl>::doSwitchOut()
+{
++    // Clear any state.
+    switchedOut = true;
+
+    instQueue.switchOut();
+    ldstQueue.switchOut();
+    fuPool->switchOut();
+
+    for (int i = 0; i < numThreads; i++) {
+        while (!insts[i].empty())
+            insts[i].pop();
+        while (!skidBuffer[i].empty())
+            skidBuffer[i].pop();
+    }
+}
+
+template <class Impl>
+void
+DefaultIEW<Impl>::takeOverFrom()
+{
++    // Reset all state.
+    _status = Active;
+    exeStatus = Running;
+    wbStatus = Idle;
+    switchedOut = false;
+
+    instQueue.takeOverFrom();
+    ldstQueue.takeOverFrom();
+    fuPool->takeOverFrom();
+
+    initStage();
+    cpu->activityThisCycle();
+
+    for (int i=0; i < numThreads; i++) {
+        dispatchStatus[i] = Running;
+        stalls[i].commit = false;
+        fetchRedirect[i] = false;
+    }
+
+    updateLSQNextCycle = false;
+
+    // @todo: Fix hardcoded number
+    for (int i = 0; i < 6; ++i) {
+        issueToExecQueue.advance();
+    }
+}
+
+template <class Impl>
+void
+DefaultIEW<Impl>::squash(unsigned tid)
+{
+    DPRINTF(IEW, "[tid:%i]: Squashing all instructions.\n",
+            tid);
+
+    // Tell the IQ to start squashing.
+    instQueue.squash(tid);
+
+    // Tell the LDSTQ to start squashing.
+    ldstQueue.squash(fromCommit->commitInfo[tid].doneSeqNum, tid);
+
+    updatedQueues = true;
+
+    // Clear the skid buffer in case it has any data in it.
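// The two drain loops that follow report every discarded instruction back
// to rename; without that, rename's free-entry accounting for the IQ and
// LSQ would leak on every squash.  A minimal sketch of the same
// bookkeeping with simplified stand-in types (not the gem5 classes):
#if 0
#include <queue>

struct Inst { bool load, store; };
struct Credits { int dispatched = 0; int dispatchedToLSQ = 0; };

// Drain a buffer, returning dispatch credits to the previous stage.
static void drain(std::queue<Inst> &buf, Credits &toRename)
{
    while (!buf.empty()) {
        const Inst &inst = buf.front();
        if (inst.load || inst.store)
            toRename.dispatchedToLSQ++;  // the LSQ slot is freed as well
        toRename.dispatched++;           // IQ-side credit
        buf.pop();
    }
}
#endif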
+ while (!skidBuffer[tid].empty()) { + + if (skidBuffer[tid].front()->isLoad() || + skidBuffer[tid].front()->isStore() ) { + toRename->iewInfo[tid].dispatchedToLSQ++; + } + + toRename->iewInfo[tid].dispatched++; + + skidBuffer[tid].pop(); + } + + while (!insts[tid].empty()) { + if (insts[tid].front()->isLoad() || + insts[tid].front()->isStore() ) { + toRename->iewInfo[tid].dispatchedToLSQ++; + } + + toRename->iewInfo[tid].dispatched++; + + insts[tid].pop(); + } +} + +template +void +DefaultIEW::squashDueToBranch(DynInstPtr &inst, unsigned tid) +{ + DPRINTF(IEW, "[tid:%i]: Squashing from a specific instruction, PC: %#x " + "[sn:%i].\n", tid, inst->readPC(), inst->seqNum); + + toCommit->squash[tid] = true; + toCommit->squashedSeqNum[tid] = inst->seqNum; + toCommit->mispredPC[tid] = inst->readPC(); + toCommit->nextPC[tid] = inst->readNextPC(); + toCommit->branchMispredict[tid] = true; + toCommit->branchTaken[tid] = inst->readNextPC() != + (inst->readPC() + sizeof(TheISA::MachInst)); + + toCommit->includeSquashInst[tid] = false; + + wroteToTimeBuffer = true; +} + +template +void +DefaultIEW::squashDueToMemOrder(DynInstPtr &inst, unsigned tid) +{ + DPRINTF(IEW, "[tid:%i]: Squashing from a specific instruction, " + "PC: %#x [sn:%i].\n", tid, inst->readPC(), inst->seqNum); + + toCommit->squash[tid] = true; + toCommit->squashedSeqNum[tid] = inst->seqNum; + toCommit->nextPC[tid] = inst->readNextPC(); + + toCommit->includeSquashInst[tid] = false; + + wroteToTimeBuffer = true; +} + +template +void +DefaultIEW::squashDueToMemBlocked(DynInstPtr &inst, unsigned tid) +{ + DPRINTF(IEW, "[tid:%i]: Memory blocked, squashing load and younger insts, " + "PC: %#x [sn:%i].\n", tid, inst->readPC(), inst->seqNum); + + toCommit->squash[tid] = true; + toCommit->squashedSeqNum[tid] = inst->seqNum; + toCommit->nextPC[tid] = inst->readPC(); + ++ // Must include the broadcasted SN in the squash. + toCommit->includeSquashInst[tid] = true; + + ldstQueue.setLoadBlockedHandled(tid); + + wroteToTimeBuffer = true; +} + +template +void +DefaultIEW::block(unsigned tid) +{ + DPRINTF(IEW, "[tid:%u]: Blocking.\n", tid); + + if (dispatchStatus[tid] != Blocked && + dispatchStatus[tid] != Unblocking) { + toRename->iewBlock[tid] = true; + wroteToTimeBuffer = true; + } + + // Add the current inputs to the skid buffer so they can be + // reprocessed when this stage unblocks. + skidInsert(tid); + + dispatchStatus[tid] = Blocked; +} + +template +void +DefaultIEW::unblock(unsigned tid) +{ + DPRINTF(IEW, "[tid:%i]: Reading instructions out of the skid " + "buffer %u.\n",tid, tid); + + // If the skid bufffer is empty, signal back to previous stages to unblock. + // Also switch status to running. + if (skidBuffer[tid].empty()) { + toRename->iewUnblock[tid] = true; + wroteToTimeBuffer = true; + DPRINTF(IEW, "[tid:%i]: Done unblocking.\n",tid); + dispatchStatus[tid] = Running; + } +} + +template +void +DefaultIEW::wakeDependents(DynInstPtr &inst) +{ + instQueue.wakeDependents(inst); +} + +template +void +DefaultIEW::rescheduleMemInst(DynInstPtr &inst) +{ + instQueue.rescheduleMemInst(inst); +} + +template +void +DefaultIEW::replayMemInst(DynInstPtr &inst) +{ + instQueue.replayMemInst(inst); +} + +template +void +DefaultIEW::instToCommit(DynInstPtr &inst) +{ + // First check the time slot that this instruction will write + // to. If there are free write ports at the time, then go ahead + // and write the instruction to that time. If there are not, + // keep looking back to see where's the first time there's a + // free slot. 
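// A standalone sketch of the slot search implemented by the loop below:
// scan the write ports within a cycle, and spill into the next cycle's
// group once all ports are taken.  The widths are assumed example values,
// not parameters read from the configuration.
#if 0
#include <cassert>

int main()
{
    const int issueWidth = 4;            // write ports per cycle (assumed)
    const int depth = 5;                 // matches issueToExecQueue(5, 5)
    bool taken[depth][issueWidth] = {};  // true = slot already claimed

    // Cycle 0 is completely full.
    taken[0][0] = taken[0][1] = taken[0][2] = taken[0][3] = true;

    int wbCycle = 0, wbNumInst = 0;
    while (taken[wbCycle][wbNumInst]) {
        if (++wbNumInst == issueWidth) {
            ++wbCycle;
            wbNumInst = 0;
        }
        assert(wbCycle < depth);
    }

    // The first free slot is the start of the next cycle's group.
    assert(wbCycle == 1 && wbNumInst == 0);
    return 0;
}
#endif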
+    while ((*iewQueue)[wbCycle].insts[wbNumInst]) {
+        ++wbNumInst;
+        if (wbNumInst == issueWidth) {
+            ++wbCycle;
+            wbNumInst = 0;
+        }
+
+        assert(wbCycle < 5);
+    }
+
+    // Add finished instruction to queue to commit.
+    (*iewQueue)[wbCycle].insts[wbNumInst] = inst;
+    (*iewQueue)[wbCycle].size++;
+}
+
+template <class Impl>
+unsigned
+DefaultIEW<Impl>::validInstsFromRename()
+{
+    unsigned inst_count = 0;
+
+    for (int i=0; i < fromRename->size; i++) {
+        if (!fromRename->insts[i]->squashed)
+            inst_count++;
+    }
+
+    return inst_count;
+}
+
+template <class Impl>
+void
+DefaultIEW<Impl>::skidInsert(unsigned tid)
+{
+    DynInstPtr inst = NULL;
+
+    while (!insts[tid].empty()) {
+        inst = insts[tid].front();
+
+        insts[tid].pop();
+
+        DPRINTF(Decode,"[tid:%i]: Inserting [sn:%lli] PC:%#x into "
+                "dispatch skidBuffer %i\n",tid, inst->seqNum,
+                inst->readPC(),tid);
+
+        skidBuffer[tid].push(inst);
+    }
+
+    assert(skidBuffer[tid].size() <= skidBufferMax &&
+           "Skidbuffer Exceeded Max Size");
+}
+
+template <class Impl>
+int
+DefaultIEW<Impl>::skidCount()
+{
+    int max=0;
+
+    list<unsigned>::iterator threads = (*activeThreads).begin();
+
+    while (threads != (*activeThreads).end()) {
+        unsigned thread_count = skidBuffer[*threads++].size();
+        if (max < thread_count)
+            max = thread_count;
+    }
+
+    return max;
+}
+
+template <class Impl>
+bool
+DefaultIEW<Impl>::skidsEmpty()
+{
+    list<unsigned>::iterator threads = (*activeThreads).begin();
+
+    while (threads != (*activeThreads).end()) {
+        if (!skidBuffer[*threads++].empty())
+            return false;
+    }
+
+    return true;
+}
+
+template <class Impl>
+void
+DefaultIEW<Impl>::updateStatus()
+{
+    bool any_unblocking = false;
+
+    list<unsigned>::iterator threads = (*activeThreads).begin();
+
+    threads = (*activeThreads).begin();
+
+    while (threads != (*activeThreads).end()) {
+        unsigned tid = *threads++;
+
+        if (dispatchStatus[tid] == Unblocking) {
+            any_unblocking = true;
+            break;
+        }
+    }
+
+    // If there are no ready instructions waiting to be scheduled by the IQ,
+    // and there are no stores waiting to write back, and dispatch is not
+    // unblocking, then there is no internal activity for the IEW stage.
+    if (_status == Active && !instQueue.hasReadyInsts() &&
+        !ldstQueue.willWB() && !any_unblocking) {
+        DPRINTF(IEW, "IEW switching to idle\n");
+
+        deactivateStage();
+
+        _status = Inactive;
+    } else if (_status == Inactive && (instQueue.hasReadyInsts() ||
+                                       ldstQueue.willWB() ||
+                                       any_unblocking)) {
+        // Otherwise there is internal activity.  Set to active.
+ DPRINTF(IEW, "IEW switching to active\n"); + + activateStage(); + + _status = Active; + } +} + +template +void +DefaultIEW::resetEntries() +{ + instQueue.resetEntries(); + ldstQueue.resetEntries(); +} + +template +void +DefaultIEW::readStallSignals(unsigned tid) +{ + if (fromCommit->commitBlock[tid]) { + stalls[tid].commit = true; + } + + if (fromCommit->commitUnblock[tid]) { + assert(stalls[tid].commit); + stalls[tid].commit = false; + } +} + +template +bool +DefaultIEW::checkStall(unsigned tid) +{ + bool ret_val(false); + + if (stalls[tid].commit) { + DPRINTF(IEW,"[tid:%i]: Stall from Commit stage detected.\n",tid); + ret_val = true; + } else if (instQueue.isFull(tid)) { + DPRINTF(IEW,"[tid:%i]: Stall: IQ is full.\n",tid); + ret_val = true; + } else if (ldstQueue.isFull(tid)) { + DPRINTF(IEW,"[tid:%i]: Stall: LSQ is full\n",tid); + + if (ldstQueue.numLoads(tid) > 0 ) { + + DPRINTF(IEW,"[tid:%i]: LSQ oldest load: [sn:%i] \n", + tid,ldstQueue.getLoadHeadSeqNum(tid)); + } + + if (ldstQueue.numStores(tid) > 0) { + + DPRINTF(IEW,"[tid:%i]: LSQ oldest store: [sn:%i] \n", + tid,ldstQueue.getStoreHeadSeqNum(tid)); + } + + ret_val = true; + } else if (ldstQueue.isStalled(tid)) { + DPRINTF(IEW,"[tid:%i]: Stall: LSQ stall detected.\n",tid); + ret_val = true; + } + + return ret_val; +} + +template +void +DefaultIEW::checkSignalsAndUpdate(unsigned tid) +{ + // Check if there's a squash signal, squash if there is + // Check stall signals, block if there is. + // If status was Blocked + // if so then go to unblocking + // If status was Squashing + // check if squashing is not high. Switch to running this cycle. + + readStallSignals(tid); + + if (fromCommit->commitInfo[tid].squash) { + squash(tid); + + if (dispatchStatus[tid] == Blocked || + dispatchStatus[tid] == Unblocking) { + toRename->iewUnblock[tid] = true; + wroteToTimeBuffer = true; + } + + dispatchStatus[tid] = Squashing; + + fetchRedirect[tid] = false; + return; + } + + if (fromCommit->commitInfo[tid].robSquashing) { + DPRINTF(IEW, "[tid:%i]: ROB is still squashing.\n"); + + dispatchStatus[tid] = Squashing; + + return; + } + + if (checkStall(tid)) { + block(tid); + dispatchStatus[tid] = Blocked; + return; + } + + if (dispatchStatus[tid] == Blocked) { + // Status from previous cycle was blocked, but there are no more stall + // conditions. Switch over to unblocking. + DPRINTF(IEW, "[tid:%i]: Done blocking, switching to unblocking.\n", + tid); + + dispatchStatus[tid] = Unblocking; + + unblock(tid); + + return; + } + + if (dispatchStatus[tid] == Squashing) { + // Switch status to running if rename isn't being told to block or + // squash this cycle. 
+ DPRINTF(IEW, "[tid:%i]: Done squashing, switching to running.\n", + tid); + + dispatchStatus[tid] = Running; + + return; + } +} + +template +void +DefaultIEW::sortInsts() +{ + int insts_from_rename = fromRename->size; +#ifdef DEBUG + for (int i = 0; i < numThreads; i++) + assert(insts[i].empty()); +#endif + for (int i = 0; i < insts_from_rename; ++i) { + insts[fromRename->insts[i]->threadNumber].push(fromRename->insts[i]); + } +} + +template +void +DefaultIEW::wakeCPU() +{ + cpu->wakeCPU(); +} + +template +void +DefaultIEW::activityThisCycle() +{ + DPRINTF(Activity, "Activity this cycle.\n"); + cpu->activityThisCycle(); +} + +template +inline void +DefaultIEW::activateStage() +{ + DPRINTF(Activity, "Activating stage.\n"); + cpu->activateStage(FullCPU::IEWIdx); +} + +template +inline void +DefaultIEW::deactivateStage() +{ + DPRINTF(Activity, "Deactivating stage.\n"); + cpu->deactivateStage(FullCPU::IEWIdx); +} + +template +void +DefaultIEW::dispatch(unsigned tid) +{ + // If status is Running or idle, + // call dispatchInsts() + // If status is Unblocking, + // buffer any instructions coming from rename + // continue trying to empty skid buffer + // check if stall conditions have passed + + if (dispatchStatus[tid] == Blocked) { + ++iewBlockCycles; + + } else if (dispatchStatus[tid] == Squashing) { + ++iewSquashCycles; + } + + // Dispatch should try to dispatch as many instructions as its bandwidth + // will allow, as long as it is not currently blocked. + if (dispatchStatus[tid] == Running || + dispatchStatus[tid] == Idle) { + DPRINTF(IEW, "[tid:%i] Not blocked, so attempting to run " + "dispatch.\n", tid); + + dispatchInsts(tid); + } else if (dispatchStatus[tid] == Unblocking) { + // Make sure that the skid buffer has something in it if the + // status is unblocking. + assert(!skidsEmpty()); + + // If the status was unblocking, then instructions from the skid + // buffer were used. Remove those instructions and handle + // the rest of unblocking. + dispatchInsts(tid); + + ++iewUnblockCycles; + + if (validInstsFromRename() && dispatchedAllInsts) { + // Add the current inputs to the skid buffer so they can be + // reprocessed when this stage unblocks. + skidInsert(tid); + } + + unblock(tid); + } +} + +template +void +DefaultIEW::dispatchInsts(unsigned tid) +{ + dispatchedAllInsts = true; + + // Obtain instructions from skid buffer if unblocking, or queue from rename + // otherwise. + std::queue &insts_to_dispatch = + dispatchStatus[tid] == Unblocking ? + skidBuffer[tid] : insts[tid]; + + int insts_to_add = insts_to_dispatch.size(); + + DynInstPtr inst; + bool add_to_iq = false; + int dis_num_inst = 0; + + // Loop through the instructions, putting them in the instruction + // queue. + for ( ; dis_num_inst < insts_to_add && + dis_num_inst < issueReadWidth; + ++dis_num_inst) + { + inst = insts_to_dispatch.front(); + + if (dispatchStatus[tid] == Unblocking) { + DPRINTF(IEW, "[tid:%i]: Issue: Examining instruction from skid " + "buffer\n", tid); + } + + // Make sure there's a valid instruction there. + assert(inst); + + DPRINTF(IEW, "[tid:%i]: Issue: Adding PC %#x [sn:%lli] [tid:%i] to " + "IQ.\n", + tid, inst->readPC(), inst->seqNum, inst->threadNumber); + + // Be sure to mark these instructions as ready so that the + // commit stage can go ahead and execute them, and mark + // them as issued so the IQ doesn't reprocess them. + + // Check for squashed instructions. 
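// Note that the loop above reads from insts_to_dispatch, which was bound
// by reference either to the skid buffer (when unblocking) or to this
// cycle's instructions from rename.  A sketch of that selection with a
// simplified element type; the squashed-instruction check resumes below.
#if 0
#include <queue>
#include <string>

int main()
{
    std::queue<std::string> skidBuffer, instsFromRename;
    skidBuffer.push("older-inst");
    instsFromRename.push("new-inst");

    bool unblocking = true;

    // Same pattern as dispatchInsts(): pick the source once, then drain
    // everything through a single reference.
    std::queue<std::string> &insts_to_dispatch =
        unblocking ? skidBuffer : instsFromRename;

    // Buffered (older) instructions are replayed first.
    return insts_to_dispatch.front() == "older-inst" ? 0 : 1;
}
#endif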
+ if (inst->isSquashed()) { + DPRINTF(IEW, "[tid:%i]: Issue: Squashed instruction encountered, " + "not adding to IQ.\n", tid); + + ++iewDispSquashedInsts; + + insts_to_dispatch.pop(); + + //Tell Rename That An Instruction has been processed + if (inst->isLoad() || inst->isStore()) { + toRename->iewInfo[tid].dispatchedToLSQ++; + } + toRename->iewInfo[tid].dispatched++; + + continue; + } + + // Check for full conditions. + if (instQueue.isFull(tid)) { + DPRINTF(IEW, "[tid:%i]: Issue: IQ has become full.\n", tid); + + // Call function to start blocking. + block(tid); + + // Set unblock to false. Special case where we are using + // skidbuffer (unblocking) instructions but then we still + // get full in the IQ. + toRename->iewUnblock[tid] = false; + + dispatchedAllInsts = false; + + ++iewIQFullEvents; + break; + } else if (ldstQueue.isFull(tid)) { + DPRINTF(IEW, "[tid:%i]: Issue: LSQ has become full.\n",tid); + + // Call function to start blocking. + block(tid); + + // Set unblock to false. Special case where we are using + // skidbuffer (unblocking) instructions but then we still + // get full in the IQ. + toRename->iewUnblock[tid] = false; + + dispatchedAllInsts = false; + + ++iewLSQFullEvents; + break; + } + + // Otherwise issue the instruction just fine. + if (inst->isLoad()) { + DPRINTF(IEW, "[tid:%i]: Issue: Memory instruction " + "encountered, adding to LSQ.\n", tid); + + // Reserve a spot in the load store queue for this + // memory access. + ldstQueue.insertLoad(inst); + + ++iewDispLoadInsts; + + add_to_iq = true; + + toRename->iewInfo[tid].dispatchedToLSQ++; + } else if (inst->isStore()) { + DPRINTF(IEW, "[tid:%i]: Issue: Memory instruction " + "encountered, adding to LSQ.\n", tid); + + ldstQueue.insertStore(inst); + + ++iewDispStoreInsts; + + if (inst->isStoreConditional()) { + // Store conditionals need to be set as "canCommit()" + // so that commit can process them when they reach the + // head of commit. ++ // @todo: This is somewhat specific to Alpha. + inst->setCanCommit(); + instQueue.insertNonSpec(inst); + add_to_iq = false; + + ++iewDispNonSpecInsts; + } else { + add_to_iq = true; + } + + toRename->iewInfo[tid].dispatchedToLSQ++; +#if FULL_SYSTEM + } else if (inst->isMemBarrier() || inst->isWriteBarrier()) { + // Same as non-speculative stores. + inst->setCanCommit(); + instQueue.insertBarrier(inst); + add_to_iq = false; +#endif + } else if (inst->isNonSpeculative()) { + DPRINTF(IEW, "[tid:%i]: Issue: Nonspeculative instruction " + "encountered, skipping.\n", tid); + + // Same as non-speculative stores. + inst->setCanCommit(); + + // Specifically insert it as nonspeculative. + instQueue.insertNonSpec(inst); + + ++iewDispNonSpecInsts; + + add_to_iq = false; + } else if (inst->isNop()) { + DPRINTF(IEW, "[tid:%i]: Issue: Nop instruction encountered, " + "skipping.\n", tid); + + inst->setIssued(); + inst->setExecuted(); + inst->setCanCommit(); + + instQueue.recordProducer(inst); + + exeNop[tid]++; + + add_to_iq = false; + } else if (inst->isExecuted()) { + assert(0 && "Instruction shouldn't be executed.\n"); + DPRINTF(IEW, "Issue: Executed branch encountered, " + "skipping.\n"); + + inst->setIssued(); + inst->setCanCommit(); + + instQueue.recordProducer(inst); + + add_to_iq = false; + } else { + add_to_iq = true; + } + + // If the instruction queue is not full, then add the + // instruction. 
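// A summary of the classification above as a standalone predicate: only
// ordinary speculative instructions enter the IQ's scheduling lists;
// store-conditionals, barriers, and other non-speculative instructions
// wait for a signal from commit, and nops are completed at dispatch.
// Simplified stand-in type, not the gem5 DynInst interface:
#if 0
struct Inst {
    bool store_conditional, barrier, non_speculative, nop;
};

// Returns true when the instruction should be scheduled by the IQ.
static bool addToIQ(const Inst &inst)
{
    if (inst.nop)
        return false;  // marked executed/committable at dispatch
    if (inst.store_conditional || inst.barrier || inst.non_speculative)
        return false;  // inserted as non-spec; commit wakes it later
    return true;
}
#endif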
+ if (add_to_iq) { + instQueue.insert(inst); + } + + insts_to_dispatch.pop(); + + toRename->iewInfo[tid].dispatched++; + + ++iewDispatchedInsts; + } + + if (!insts_to_dispatch.empty()) { + DPRINTF(IEW,"[tid:%i]: Issue: Bandwidth Full. Blocking.\n"); + block(tid); + toRename->iewUnblock[tid] = false; + } + + if (dispatchStatus[tid] == Idle && dis_num_inst) { + dispatchStatus[tid] = Running; + + updatedQueues = true; + } + + dis_num_inst = 0; +} + +template +void +DefaultIEW::printAvailableInsts() +{ + int inst = 0; + + cout << "Available Instructions: "; + + while (fromIssue->insts[inst]) { + + if (inst%3==0) cout << "\n\t"; + + cout << "PC: " << fromIssue->insts[inst]->readPC() + << " TN: " << fromIssue->insts[inst]->threadNumber + << " SN: " << fromIssue->insts[inst]->seqNum << " | "; + + inst++; + + } + + cout << "\n"; +} + +template +void +DefaultIEW::executeInsts() +{ + wbNumInst = 0; + wbCycle = 0; + + list::iterator threads = (*activeThreads).begin(); + + while (threads != (*activeThreads).end()) { + unsigned tid = *threads++; + fetchRedirect[tid] = false; + } + +#if 0 + printAvailableInsts(); +#endif + + // Execute/writeback any instructions that are available. + int insts_to_execute = fromIssue->size; + int inst_num = 0; + for (; inst_num < insts_to_execute; + ++inst_num) { + + DPRINTF(IEW, "Execute: Executing instructions from IQ.\n"); + + DynInstPtr inst = instQueue.getInstToExecute(); + + DPRINTF(IEW, "Execute: Processing PC %#x, [tid:%i] [sn:%i].\n", + inst->readPC(), inst->threadNumber,inst->seqNum); + + // Check if the instruction is squashed; if so then skip it + if (inst->isSquashed()) { + DPRINTF(IEW, "Execute: Instruction was squashed.\n"); + + // Consider this instruction executed so that commit can go + // ahead and retire the instruction. + inst->setExecuted(); + + // Not sure if I should set this here or just let commit try to + // commit any squashed instructions. I like the latter a bit more. + inst->setCanCommit(); + + ++iewExecSquashedInsts; + + continue; + } + + Fault fault = NoFault; + + // Execute instruction. + // Note that if the instruction faults, it will be handled + // at the commit stage. + if (inst->isMemRef() && + (!inst->isDataPrefetch() && !inst->isInstPrefetch())) { + DPRINTF(IEW, "Execute: Calculating address for memory " + "reference.\n"); + + // Tell the LDSTQ to execute this instruction (if it is a load). + if (inst->isLoad()) { + // Loads will mark themselves as executed, and their writeback + // event adds the instruction to the queue to commit + fault = ldstQueue.executeLoad(inst); + } else if (inst->isStore()) { + ldstQueue.executeStore(inst); + + // If the store had a fault then it may not have a mem req + if (inst->req && !(inst->req->getFlags() & LOCKED)) { + inst->setExecuted(); + + instToCommit(inst); + } + + // Store conditionals will mark themselves as + // executed, and their writeback event will add the + // instruction to the queue to commit. + } else { + panic("Unexpected memory type!\n"); + } + + } else { + inst->execute(); + + inst->setExecuted(); + + instToCommit(inst); + } + + updateExeInstStats(inst); + + // Check if branch prediction was correct, if not then we need + // to tell commit to squash in flight instructions. Only + // handle this if there hasn't already been something that + // redirects fetch in this group of instructions. + + // This probably needs to prioritize the redirects if a different + // scheduler is used. 
Currently the scheduler schedules the oldest + // instruction first, so the branch resolution order will be correct. + unsigned tid = inst->threadNumber; + + if (!fetchRedirect[tid]) { + + if (inst->mispredicted()) { + fetchRedirect[tid] = true; + + DPRINTF(IEW, "Execute: Branch mispredict detected.\n"); + DPRINTF(IEW, "Execute: Redirecting fetch to PC: %#x.\n", + inst->nextPC); + + // If incorrect, then signal the ROB that it must be squashed. + squashDueToBranch(inst, tid); + + if (inst->predTaken()) { + predictedTakenIncorrect++; + } else { + predictedNotTakenIncorrect++; + } + } else if (ldstQueue.violation(tid)) { + fetchRedirect[tid] = true; + + // If there was an ordering violation, then get the + // DynInst that caused the violation. Note that this + // clears the violation signal. + DynInstPtr violator; + violator = ldstQueue.getMemDepViolator(tid); + + DPRINTF(IEW, "LDSTQ detected a violation. Violator PC: " + "%#x, inst PC: %#x. Addr is: %#x.\n", + violator->readPC(), inst->readPC(), inst->physEffAddr); + + // Tell the instruction queue that a violation has occured. + instQueue.violation(inst, violator); + + // Squash. + squashDueToMemOrder(inst,tid); + + ++memOrderViolationEvents; + } else if (ldstQueue.loadBlocked(tid) && + !ldstQueue.isLoadBlockedHandled(tid)) { + fetchRedirect[tid] = true; + + DPRINTF(IEW, "Load operation couldn't execute because the " + "memory system is blocked. PC: %#x [sn:%lli]\n", + inst->readPC(), inst->seqNum); + + squashDueToMemBlocked(inst, tid); + } + } + } + ++ // Update and record activity if we processed any instructions. + if (inst_num) { + if (exeStatus == Idle) { + exeStatus = Running; + } + + updatedQueues = true; + + cpu->activityThisCycle(); + } + + // Need to reset this in case a writeback event needs to write into the + // iew queue. That way the writeback event will write into the correct + // spot in the queue. + wbNumInst = 0; +} + +template +void +DefaultIEW::writebackInsts() +{ + // Loop through the head of the time buffer and wake any + // dependents. These instructions are about to write back. Also + // mark scoreboard that this instruction is finally complete. + // Either have IEW have direct access to scoreboard, or have this + // as part of backwards communication. + for (int inst_num = 0; inst_num < issueWidth && + toCommit->insts[inst_num]; inst_num++) { + DynInstPtr inst = toCommit->insts[inst_num]; + int tid = inst->threadNumber; + + DPRINTF(IEW, "Sending instructions to commit, PC %#x.\n", + inst->readPC()); + + iewInstsToCommit[tid]++; + + // Some instructions will be sent to commit without having + // executed because they need commit to handle them. + // E.g. Uncached loads have not actually executed when they + // are first sent to commit. Instead commit must tell the LSQ + // when it's ready to execute the uncached load. + if (!inst->isSquashed() && inst->isExecuted()) { + int dependents = instQueue.wakeDependents(inst); + + for (int i = 0; i < inst->numDestRegs(); i++) { + //mark as Ready + DPRINTF(IEW,"Setting Destination Register %i\n", + inst->renamedDestRegIdx(i)); + scoreboard->setReg(inst->renamedDestRegIdx(i)); + } + - producerInst[tid]++; - consumerInst[tid]+= dependents; ++ if (dependents) { ++ producerInst[tid]++; ++ consumerInst[tid]+= dependents; ++ } + writebackCount[tid]++; + } + } +} + +template +void +DefaultIEW::tick() +{ + wbNumInst = 0; + wbCycle = 0; + + wroteToTimeBuffer = false; + updatedQueues = false; + + sortInsts(); + + // Free function units marked as being freed this cycle. 
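// processFreeUnits() below applies the deferred frees queued through
// freeUnitNextCycle() (by scheduleReadyInsts() and by FUCompletion
// events), so a unit released this cycle becomes usable next cycle.  A
// minimal sketch of such a two-phase free list, with assumed container
// choices rather than the real FUPool internals:
#if 0
#include <vector>

struct FUPoolSketch {
    std::vector<bool> busy;           // indexed by FU
    std::vector<int>  freeNextCycle;  // deferred releases

    void freeUnitNextCycle(int idx) { freeNextCycle.push_back(idx); }

    // Called once at the top of tick(): make deferred units available.
    void processFreeUnits()
    {
        for (int idx : freeNextCycle)
            busy[idx] = false;
        freeNextCycle.clear();
    }
};
#endif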
+ fuPool->processFreeUnits(); + + list::iterator threads = (*activeThreads).begin(); + + // Check stall and squash signals, dispatch any instructions. + while (threads != (*activeThreads).end()) { + unsigned tid = *threads++; + + DPRINTF(IEW,"Issue: Processing [tid:%i]\n",tid); + + checkSignalsAndUpdate(tid); + dispatch(tid); + } + + if (exeStatus != Squashing) { + executeInsts(); + + writebackInsts(); + + // Have the instruction queue try to schedule any ready instructions. + // (In actuality, this scheduling is for instructions that will + // be executed next cycle.) + instQueue.scheduleReadyInsts(); + + // Also should advance its own time buffers if the stage ran. + // Not the best place for it, but this works (hopefully). + issueToExecQueue.advance(); + } + + bool broadcast_free_entries = false; + + if (updatedQueues || exeStatus == Running || updateLSQNextCycle) { + exeStatus = Idle; + updateLSQNextCycle = false; + + broadcast_free_entries = true; + } + + // Writeback any stores using any leftover bandwidth. + ldstQueue.writebackStores(); + + // Check the committed load/store signals to see if there's a load + // or store to commit. Also check if it's being told to execute a + // nonspeculative instruction. + // This is pretty inefficient... + + threads = (*activeThreads).begin(); + while (threads != (*activeThreads).end()) { + unsigned tid = (*threads++); + + DPRINTF(IEW,"Processing [tid:%i]\n",tid); + ++ // Update structures based on instructions committed. + if (fromCommit->commitInfo[tid].doneSeqNum != 0 && + !fromCommit->commitInfo[tid].squash && + !fromCommit->commitInfo[tid].robSquashing) { + + ldstQueue.commitStores(fromCommit->commitInfo[tid].doneSeqNum,tid); + + ldstQueue.commitLoads(fromCommit->commitInfo[tid].doneSeqNum,tid); + + updateLSQNextCycle = true; + instQueue.commit(fromCommit->commitInfo[tid].doneSeqNum,tid); + } + + if (fromCommit->commitInfo[tid].nonSpecSeqNum != 0) { + + //DPRINTF(IEW,"NonspecInst from thread %i",tid); + if (fromCommit->commitInfo[tid].uncached) { + instQueue.replayMemInst(fromCommit->commitInfo[tid].uncachedLoad); + } else { + instQueue.scheduleNonSpec( + fromCommit->commitInfo[tid].nonSpecSeqNum); + } + } + + if (broadcast_free_entries) { + toFetch->iewInfo[tid].iqCount = + instQueue.getCount(tid); + toFetch->iewInfo[tid].ldstqCount = + ldstQueue.getCount(tid); + + toRename->iewInfo[tid].usedIQ = true; + toRename->iewInfo[tid].freeIQEntries = + instQueue.numFreeEntries(); + toRename->iewInfo[tid].usedLSQ = true; + toRename->iewInfo[tid].freeLSQEntries = + ldstQueue.numFreeEntries(tid); + + wroteToTimeBuffer = true; + } + + DPRINTF(IEW, "[tid:%i], Dispatch dispatched %i instructions.\n", + tid, toRename->iewInfo[tid].dispatched); + } + + DPRINTF(IEW, "IQ has %i free entries (Can schedule: %i). 
" + "LSQ has %i free entries.\n", + instQueue.numFreeEntries(), instQueue.hasReadyInsts(), + ldstQueue.numFreeEntries()); + + updateStatus(); + + if (wroteToTimeBuffer) { + DPRINTF(Activity, "Activity this cycle.\n"); + cpu->activityThisCycle(); + } +} + +template +void +DefaultIEW::updateExeInstStats(DynInstPtr &inst) +{ + int thread_number = inst->threadNumber; + + // + // Pick off the software prefetches + // +#ifdef TARGET_ALPHA + if (inst->isDataPrefetch()) + exeSwp[thread_number]++; + else + iewExecutedInsts++; +#else + iewExecutedInsts++; +#endif + + // + // Control operations + // + if (inst->isControl()) + exeBranches[thread_number]++; + + // + // Memory operations + // + if (inst->isMemRef()) { + exeRefs[thread_number]++; + + if (inst->isLoad()) { + iewExecLoadInsts[thread_number]++; + } + } +} diff --cc src/cpu/o3/inst_queue.hh index 245601ccf,000000000..60a713020 mode 100644,000000..100644 --- a/src/cpu/o3/inst_queue.hh +++ b/src/cpu/o3/inst_queue.hh @@@ -1,481 -1,0 +1,503 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_INST_QUEUE_HH__ +#define __CPU_O3_INST_QUEUE_HH__ + +#include +#include +#include +#include + +#include "base/statistics.hh" +#include "base/timebuf.hh" +#include "cpu/inst_seq.hh" +#include "cpu/o3/dep_graph.hh" +#include "cpu/op_class.hh" +#include "sim/host.hh" + +class FUPool; +class MemInterface; + +/** + * A standard instruction queue class. It holds ready instructions, in + * order, in seperate priority queues to facilitate the scheduling of + * instructions. The IQ uses a separate linked list to track dependencies. + * Similar to the rename map and the free list, it expects that + * floating point registers have their indices start after the integer + * registers (ie with 96 int and 96 fp registers, regs 0-95 are integer + * and 96-191 are fp). This remains true even for both logical and + * physical register indices. 
The IQ depends on the memory dependence unit to + * track when memory operations are ready in terms of ordering; register + * dependencies are tracked normally. Right now the IQ also handles the + * execution timing; this is mainly to allow back-to-back scheduling without + * requiring IEW to be able to peek into the IQ. At the end of the execution + * latency, the instruction is put into the queue to execute, where it will + * have the execute() function called on it. + * @todo: Make IQ able to handle multiple FU pools. + */ +template +class InstructionQueue +{ + public: + //Typedefs from the Impl. + typedef typename Impl::FullCPU FullCPU; + typedef typename Impl::DynInstPtr DynInstPtr; + typedef typename Impl::Params Params; + + typedef typename Impl::CPUPol::IEW IEW; + typedef typename Impl::CPUPol::MemDepUnit MemDepUnit; + typedef typename Impl::CPUPol::IssueStruct IssueStruct; + typedef typename Impl::CPUPol::TimeStruct TimeStruct; + + // Typedef of iterator through the list of instructions. + typedef typename std::list::iterator ListIt; + + friend class Impl::FullCPU; + + /** FU completion event class. */ + class FUCompletion : public Event { + private: + /** Executing instruction. */ + DynInstPtr inst; + + /** Index of the FU used for executing. */ + int fuIdx; + + /** Pointer back to the instruction queue. */ + InstructionQueue *iqPtr; + ++ /** Should the FU be added to the list to be freed upon ++ * completing this event. ++ */ + bool freeFU; + + public: + /** Construct a FU completion event. */ + FUCompletion(DynInstPtr &_inst, int fu_idx, + InstructionQueue *iq_ptr); + + virtual void process(); + virtual const char *description(); + void setFreeFU() { freeFU = true; } + }; + + /** Constructs an IQ. */ + InstructionQueue(Params *params); + + /** Destructs the IQ. */ + ~InstructionQueue(); + + /** Returns the name of the IQ. */ + std::string name() const; + + /** Registers statistics. */ + void regStats(); + ++ /** Resets all instruction queue state. */ + void resetState(); + + /** Sets CPU pointer. */ + void setCPU(FullCPU *_cpu) { cpu = _cpu; } + + /** Sets active threads list. */ + void setActiveThreads(std::list *at_ptr); + + /** Sets the IEW pointer. */ + void setIEW(IEW *iew_ptr) { iewStage = iew_ptr; } + + /** Sets the timer buffer between issue and execute. */ + void setIssueToExecuteQueue(TimeBuffer *i2eQueue); + + /** Sets the global time buffer. */ + void setTimeBuffer(TimeBuffer *tb_ptr); + ++ /** Switches out the instruction queue. */ + void switchOut(); + ++ /** Takes over execution from another CPU's thread. */ + void takeOverFrom(); + ++ /** Returns if the IQ is switched out. */ + bool isSwitchedOut() { return switchedOut; } + + /** Number of entries needed for given amount of threads. */ + int entryAmount(int num_threads); + + /** Resets max entries for all threads. */ + void resetEntries(); + + /** Returns total number of free entries. */ + unsigned numFreeEntries(); + + /** Returns number of free entries for a thread. */ + unsigned numFreeEntries(unsigned tid); + + /** Returns whether or not the IQ is full. */ + bool isFull(); + + /** Returns whether or not the IQ is full for a specific thread. */ + bool isFull(unsigned tid); + + /** Returns if there are any ready instructions in the IQ. */ + bool hasReadyInsts(); + + /** Inserts a new instruction into the IQ. */ + void insert(DynInstPtr &new_inst); + + /** Inserts a new, non-speculative instruction into the IQ. 
*/ + void insertNonSpec(DynInstPtr &new_inst); + + /** Inserts a memory or write barrier into the IQ to make sure + * loads and stores are ordered properly. + */ + void insertBarrier(DynInstPtr &barr_inst); + ++ /** Returns the oldest scheduled instruction, and removes it from ++ * the list of instructions waiting to execute. ++ */ + DynInstPtr getInstToExecute(); + + /** + * Records the instruction as the producer of a register without + * adding it to the rest of the IQ. + */ + void recordProducer(DynInstPtr &inst) + { addToProducers(inst); } + + /** Process FU completion event. */ + void processFUCompletion(DynInstPtr &inst, int fu_idx); + + /** + * Schedules ready instructions, adding the ready ones (oldest first) to + * the queue to execute. + */ + void scheduleReadyInsts(); + + /** Schedules a single specific non-speculative instruction. */ + void scheduleNonSpec(const InstSeqNum &inst); + + /** + * Commits all instructions up to and including the given sequence number, + * for a specific thread. + */ + void commit(const InstSeqNum &inst, unsigned tid = 0); + + /** Wakes all dependents of a completed instruction. */ + int wakeDependents(DynInstPtr &completed_inst); + + /** Adds a ready memory instruction to the ready list. */ + void addReadyMemInst(DynInstPtr &ready_inst); + + /** + * Reschedules a memory instruction. It will be ready to issue once + * replayMemInst() is called. + */ + void rescheduleMemInst(DynInstPtr &resched_inst); + + /** Replays a memory instruction. It must be rescheduled first. */ + void replayMemInst(DynInstPtr &replay_inst); + + /** Completes a memory operation. */ + void completeMemInst(DynInstPtr &completed_inst); + + /** Indicates an ordering violation between a store and a load. */ + void violation(DynInstPtr &store, DynInstPtr &faulting_load); + + /** + * Squashes instructions for a thread. Squashing information is obtained + * from the time buffer. + */ + void squash(unsigned tid); + + /** Returns the number of used entries for a thread. */ + unsigned getCount(unsigned tid) { return count[tid]; }; + + /** Debug function to print all instructions. */ + void printInsts(); + + private: + /** Does the actual squashing. */ + void doSquash(unsigned tid); + + ///////////////////////// + // Various pointers + ///////////////////////// + + /** Pointer to the CPU. */ + FullCPU *cpu; + + /** Cache interface. */ + MemInterface *dcacheInterface; + + /** Pointer to IEW stage. */ + IEW *iewStage; + + /** The memory dependence unit, which tracks/predicts memory dependences + * between instructions. + */ + MemDepUnit memDepUnit[Impl::MaxThreads]; + + /** The queue to the execute stage. Issued instructions will be written + * into it. + */ + TimeBuffer *issueToExecuteQueue; + + /** The backwards time buffer. */ + TimeBuffer *timeBuffer; + + /** Wire to read information from timebuffer. */ + typename TimeBuffer::wire fromCommit; + + /** Function unit pool. */ + FUPool *fuPool; + + ////////////////////////////////////// + // Instruction lists, ready queues, and ordering + ////////////////////////////////////// + + /** List of all the instructions in the IQ (some of which may be issued). */ + std::list instList[Impl::MaxThreads]; + ++ /** List of instructions that are ready to be executed. */ + std::list instsToExecute; + + /** - * Struct for comparing entries to be added to the priority queue. 
This - * gives reverse ordering to the instructions in terms of sequence - * numbers: the instructions with smaller sequence numbers (and hence - * are older) will be at the top of the priority queue. ++ * Struct for comparing entries to be added to the priority queue. ++ * This gives reverse ordering to the instructions in terms of ++ * sequence numbers: the instructions with smaller sequence ++ * numbers (and hence are older) will be at the top of the ++ * priority queue. + */ + struct pqCompare { + bool operator() (const DynInstPtr &lhs, const DynInstPtr &rhs) const + { + return lhs->seqNum > rhs->seqNum; + } + }; + + typedef std::priority_queue, pqCompare> + ReadyInstQueue; + + /** List of ready instructions, per op class. They are separated by op + * class to allow for easy mapping to FUs. + */ + ReadyInstQueue readyInsts[Num_OpClasses]; + + /** List of non-speculative instructions that will be scheduled + * once the IQ gets a signal from commit. While it's redundant to + * have the key be a part of the value (the sequence number is stored + * inside of DynInst), when these instructions are woken up only + * the sequence number will be available. Thus it is most efficient to be + * able to search by the sequence number alone. + */ + std::map nonSpecInsts; + + typedef typename std::map::iterator NonSpecMapIt; + + /** Entry for the list age ordering by op class. */ + struct ListOrderEntry { + OpClass queueType; + InstSeqNum oldestInst; + }; + + /** List that contains the age order of the oldest instruction of each + * ready queue. Used to select the oldest instruction available + * among op classes. + * @todo: Might be better to just move these entries around instead + * of creating new ones every time the position changes due to an + * instruction issuing. Not sure std::list supports this. + */ + std::list listOrder; + + typedef typename std::list::iterator ListOrderIt; + + /** Tracks if each ready queue is on the age order list. */ + bool queueOnList[Num_OpClasses]; + + /** Iterators of each ready queue. Points to their spot in the age order + * list. + */ + ListOrderIt readyIt[Num_OpClasses]; + + /** Add an op class to the age order list. */ + void addToOrderList(OpClass op_class); + + /** + * Called when the oldest instruction has been removed from a ready queue; + * this places that ready queue into the proper spot in the age order list. + */ + void moveToYoungerInst(ListOrderIt age_order_it); + + DependencyGraph dependGraph; + + ////////////////////////////////////// + // Various parameters + ////////////////////////////////////// + + /** IQ Resource Sharing Policy */ + enum IQPolicy { + Dynamic, + Partitioned, + Threshold + }; + + /** IQ sharing policy for SMT. */ + IQPolicy iqPolicy; + + /** Number of Total Threads*/ + unsigned numThreads; + + /** Pointer to list of active threads. */ + std::list *activeThreads; + + /** Per Thread IQ count */ + unsigned count[Impl::MaxThreads]; + + /** Max IQ Entries Per Thread */ + unsigned maxEntries[Impl::MaxThreads]; + + /** Number of free IQ entries left. */ + unsigned freeEntries; + + /** The number of entries in the instruction queue. */ + unsigned numEntries; + + /** The total number of instructions that can be issued in one cycle. */ + unsigned totalWidth; + + /** The number of physical registers in the CPU. */ + unsigned numPhysRegs; + + /** The number of physical integer registers in the CPU. */ + unsigned numPhysIntRegs; + + /** The number of floating point registers in the CPU. 
*/ + unsigned numPhysFloatRegs; + + /** Delay between commit stage and the IQ. + * @todo: Make there be a distinction between the delays within IEW. + */ + unsigned commitToIEWDelay; + ++ /** Is the IQ switched out. */ + bool switchedOut; + + /** The sequence number of the squashed instruction. */ + InstSeqNum squashedSeqNum[Impl::MaxThreads]; + + /** A cache of the recently woken registers. It is 1 if the register + * has been woken up recently, and 0 if the register has been added + * to the dependency graph and has not yet received its value. It + * is basically a secondary scoreboard, and should pretty much mirror + * the scoreboard that exists in the rename map. + */ + std::vector regScoreboard; + + /** Adds an instruction to the dependency graph, as a consumer. */ + bool addToDependents(DynInstPtr &new_inst); + + /** Adds an instruction to the dependency graph, as a producer. */ + void addToProducers(DynInstPtr &new_inst); + + /** Moves an instruction to the ready queue if it is ready. */ + void addIfReady(DynInstPtr &inst); + + /** Debugging function to count how many entries are in the IQ. It does + * a linear walk through the instructions, so do not call this function + * during normal execution. + */ + int countInsts(); + + /** Debugging function to dump all the list sizes, as well as print + * out the list of nonspeculative instructions. Should not be used + * in any other capacity, but it has no harmful sideaffects. + */ + void dumpLists(); + + /** Debugging function to dump out all instructions that are in the + * IQ. + */ + void dumpInsts(); + + /** Stat for number of instructions added. */ + Stats::Scalar<> iqInstsAdded; + /** Stat for number of non-speculative instructions added. */ + Stats::Scalar<> iqNonSpecInstsAdded; + + Stats::Scalar<> iqInstsIssued; + /** Stat for number of integer instructions issued. */ + Stats::Scalar<> iqIntInstsIssued; + /** Stat for number of floating point instructions issued. */ + Stats::Scalar<> iqFloatInstsIssued; + /** Stat for number of branch instructions issued. */ + Stats::Scalar<> iqBranchInstsIssued; + /** Stat for number of memory instructions issued. */ + Stats::Scalar<> iqMemInstsIssued; + /** Stat for number of miscellaneous instructions issued. */ + Stats::Scalar<> iqMiscInstsIssued; + /** Stat for number of squashed instructions that were ready to issue. */ + Stats::Scalar<> iqSquashedInstsIssued; + /** Stat for number of squashed instructions examined when squashing. */ + Stats::Scalar<> iqSquashedInstsExamined; + /** Stat for number of squashed instruction operands examined when + * squashing. + */ + Stats::Scalar<> iqSquashedOperandsExamined; + /** Stat for number of non-speculative instructions removed due to a squash. + */ + Stats::Scalar<> iqSquashedNonSpecRemoved; + ++ /** Distribution of number of instructions in the queue. */ + Stats::VectorDistribution<> queueResDist; ++ /** Distribution of the number of instructions issued. */ + Stats::Distribution<> numIssuedDist; ++ /** Distribution of the cycles it takes to issue an instruction. */ + Stats::VectorDistribution<> issueDelayDist; + ++ /** Number of times an instruction could not be issued because a ++ * FU was busy. ++ */ + Stats::Vector<> statFuBusy; +// Stats::Vector<> dist_unissued; ++ /** Stat for total number issued for each instruction type. */ + Stats::Vector2d<> statIssuedInstType; + ++ /** Number of instructions issued per cycle. 
*/ + Stats::Formula issueRate; +// Stats::Formula issue_stores; +// Stats::Formula issue_op_rate; - Stats::Vector<> fuBusy; //cumulative fu busy - ++ /** Number of times the FU was busy. */ ++ Stats::Vector<> fuBusy; ++ /** Number of times the FU was busy per instruction issued. */ + Stats::Formula fuBusyRate; +}; + +#endif //__CPU_O3_INST_QUEUE_HH__ diff --cc src/cpu/o3/inst_queue_impl.hh index 34af8c641,000000000..2f03c6814 mode 100644,000000..100644 --- a/src/cpu/o3/inst_queue_impl.hh +++ b/src/cpu/o3/inst_queue_impl.hh @@@ -1,1368 -1,0 +1,1403 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#include +#include + +#include "sim/root.hh" + +#include "cpu/o3/fu_pool.hh" +#include "cpu/o3/inst_queue.hh" + +using namespace std; + +template +InstructionQueue::FUCompletion::FUCompletion(DynInstPtr &_inst, + int fu_idx, + InstructionQueue *iq_ptr) + : Event(&mainEventQueue, Stat_Event_Pri), + inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false) +{ + this->setFlags(Event::AutoDelete); +} + +template +void +InstructionQueue::FUCompletion::process() +{ + iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1); + inst = NULL; +} + + +template +const char * +InstructionQueue::FUCompletion::description() +{ + return "Functional unit completion event"; +} + +template +InstructionQueue::InstructionQueue(Params *params) + : fuPool(params->fuPool), + numEntries(params->numIQEntries), + totalWidth(params->issueWidth), + numPhysIntRegs(params->numPhysIntRegs), + numPhysFloatRegs(params->numPhysFloatRegs), + commitToIEWDelay(params->commitToIEWDelay) +{ + assert(fuPool); + + switchedOut = false; + + numThreads = params->numberOfThreads; + + // Set the number of physical registers as the number of int + float + numPhysRegs = numPhysIntRegs + numPhysFloatRegs; + + DPRINTF(IQ, "There are %i physical registers.\n", numPhysRegs); + + //Create an entry for each physical register within the + //dependency graph. 
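// The dependency graph resized below keeps, for each physical register,
// the chain of consumers waiting on that register's value, while
// regScoreboard records which registers are already valid.  A minimal
// sketch of that shape with stand-in containers (the real
// DependencyGraph in cpu/o3/dep_graph.hh uses intrusive linked lists):
#if 0
#include <list>
#include <vector>

struct InstSketch;

// One chain of waiting consumers per physical register.
std::vector<std::list<InstSketch *> > dependGraph;
// true = value already produced (mirrors the rename-map scoreboard).
std::vector<bool> regScoreboard;

void resize(int numPhysRegs)
{
    dependGraph.resize(numPhysRegs);
    regScoreboard.resize(numPhysRegs, false);
}
#endif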
+ dependGraph.resize(numPhysRegs); + + // Resize the register scoreboard. + regScoreboard.resize(numPhysRegs); + + //Initialize Mem Dependence Units + for (int i = 0; i < numThreads; i++) { + memDepUnit[i].init(params,i); + memDepUnit[i].setIQ(this); + } + + resetState(); + + string policy = params->smtIQPolicy; + + //Convert string to lowercase + std::transform(policy.begin(), policy.end(), policy.begin(), + (int(*)(int)) tolower); + + //Figure out resource sharing policy + if (policy == "dynamic") { + iqPolicy = Dynamic; + + //Set Max Entries to Total ROB Capacity + for (int i = 0; i < numThreads; i++) { + maxEntries[i] = numEntries; + } + + } else if (policy == "partitioned") { + iqPolicy = Partitioned; + + //@todo:make work if part_amt doesnt divide evenly. + int part_amt = numEntries / numThreads; + + //Divide ROB up evenly + for (int i = 0; i < numThreads; i++) { + maxEntries[i] = part_amt; + } + + DPRINTF(Fetch, "IQ sharing policy set to Partitioned:" + "%i entries per thread.\n",part_amt); + + } else if (policy == "threshold") { + iqPolicy = Threshold; + + double threshold = (double)params->smtIQThreshold / 100; + + int thresholdIQ = (int)((double)threshold * numEntries); + + //Divide up by threshold amount + for (int i = 0; i < numThreads; i++) { + maxEntries[i] = thresholdIQ; + } + + DPRINTF(Fetch, "IQ sharing policy set to Threshold:" + "%i entries per thread.\n",thresholdIQ); + } else { + assert(0 && "Invalid IQ Sharing Policy.Options Are:{Dynamic," + "Partitioned, Threshold}"); + } +} + +template +InstructionQueue::~InstructionQueue() +{ + dependGraph.reset(); ++#ifdef DEBUG + cprintf("Nodes traversed: %i, removed: %i\n", + dependGraph.nodesTraversed, dependGraph.nodesRemoved); ++#endif +} + +template +std::string +InstructionQueue::name() const +{ + return cpu->name() + ".iq"; +} + +template +void +InstructionQueue::regStats() +{ + using namespace Stats; + iqInstsAdded + .name(name() + ".iqInstsAdded") + .desc("Number of instructions added to the IQ (excludes non-spec)") + .prereq(iqInstsAdded); + + iqNonSpecInstsAdded + .name(name() + ".iqNonSpecInstsAdded") + .desc("Number of non-speculative instructions added to the IQ") + .prereq(iqNonSpecInstsAdded); + + iqInstsIssued + .name(name() + ".iqInstsIssued") + .desc("Number of instructions issued") + .prereq(iqInstsIssued); + + iqIntInstsIssued + .name(name() + ".iqIntInstsIssued") + .desc("Number of integer instructions issued") + .prereq(iqIntInstsIssued); + + iqFloatInstsIssued + .name(name() + ".iqFloatInstsIssued") + .desc("Number of float instructions issued") + .prereq(iqFloatInstsIssued); + + iqBranchInstsIssued + .name(name() + ".iqBranchInstsIssued") + .desc("Number of branch instructions issued") + .prereq(iqBranchInstsIssued); + + iqMemInstsIssued + .name(name() + ".iqMemInstsIssued") + .desc("Number of memory instructions issued") + .prereq(iqMemInstsIssued); + + iqMiscInstsIssued + .name(name() + ".iqMiscInstsIssued") + .desc("Number of miscellaneous instructions issued") + .prereq(iqMiscInstsIssued); + + iqSquashedInstsIssued + .name(name() + ".iqSquashedInstsIssued") + .desc("Number of squashed instructions issued") + .prereq(iqSquashedInstsIssued); + + iqSquashedInstsExamined + .name(name() + ".iqSquashedInstsExamined") + .desc("Number of squashed instructions iterated over during squash;" + " mainly for profiling") + .prereq(iqSquashedInstsExamined); + + iqSquashedOperandsExamined + .name(name() + ".iqSquashedOperandsExamined") + .desc("Number of squashed operands that are examined and possibly " + "removed 
from graph") + .prereq(iqSquashedOperandsExamined); + + iqSquashedNonSpecRemoved + .name(name() + ".iqSquashedNonSpecRemoved") + .desc("Number of squashed non-spec instructions that were removed") + .prereq(iqSquashedNonSpecRemoved); + + queueResDist + .init(Num_OpClasses, 0, 99, 2) + .name(name() + ".IQ:residence:") + .desc("cycles from dispatch to issue") + .flags(total | pdf | cdf ) + ; + for (int i = 0; i < Num_OpClasses; ++i) { + queueResDist.subname(i, opClassStrings[i]); + } + numIssuedDist + .init(0,totalWidth,1) + .name(name() + ".ISSUE:issued_per_cycle") + .desc("Number of insts issued each cycle") + .flags(pdf) + ; +/* + dist_unissued + .init(Num_OpClasses+2) + .name(name() + ".ISSUE:unissued_cause") + .desc("Reason ready instruction not issued") + .flags(pdf | dist) + ; + for (int i=0; i < (Num_OpClasses + 2); ++i) { + dist_unissued.subname(i, unissued_names[i]); + } +*/ + statIssuedInstType + .init(numThreads,Num_OpClasses) + .name(name() + ".ISSUE:FU_type") + .desc("Type of FU issued") + .flags(total | pdf | dist) + ; + statIssuedInstType.ysubnames(opClassStrings); + + // + // How long did instructions for a particular FU type wait prior to issue + // + + issueDelayDist + .init(Num_OpClasses,0,99,2) + .name(name() + ".ISSUE:") + .desc("cycles from operands ready to issue") + .flags(pdf | cdf) + ; + + for (int i=0; inumCycles; +/* + issue_stores + .name(name() + ".ISSUE:stores") + .desc("Number of stores issued") + .flags(total) + ; + issue_stores = exe_refs - exe_loads; +*/ +/* + issue_op_rate + .name(name() + ".ISSUE:op_rate") + .desc("Operation issue rate") + .flags(total) + ; + issue_op_rate = issued_ops / numCycles; +*/ + statFuBusy + .init(Num_OpClasses) + .name(name() + ".ISSUE:fu_full") + .desc("attempts to use FU when none available") + .flags(pdf | dist) + ; + for (int i=0; i < Num_OpClasses; ++i) { + statFuBusy.subname(i, opClassStrings[i]); + } + + fuBusy + .init(numThreads) + .name(name() + ".ISSUE:fu_busy_cnt") + .desc("FU busy when requested") + .flags(total) + ; + + fuBusyRate + .name(name() + ".ISSUE:fu_busy_rate") + .desc("FU busy rate (busy events/executed inst)") + .flags(total) + ; + fuBusyRate = fuBusy / iqInstsIssued; + + for ( int i=0; i < numThreads; i++) { + // Tell mem dependence unit to reg stats as well. 
+ memDepUnit[i].regStats(); + } +} + +template +void +InstructionQueue::resetState() +{ + //Initialize thread IQ counts + for (int i = 0; i +void +InstructionQueue::setActiveThreads(list *at_ptr) +{ + DPRINTF(IQ, "Setting active threads list pointer.\n"); + activeThreads = at_ptr; +} + +template +void +InstructionQueue::setIssueToExecuteQueue(TimeBuffer *i2e_ptr) +{ + DPRINTF(IQ, "Set the issue to execute queue.\n"); + issueToExecuteQueue = i2e_ptr; +} + +template +void +InstructionQueue::setTimeBuffer(TimeBuffer *tb_ptr) +{ + DPRINTF(IQ, "Set the time buffer.\n"); + timeBuffer = tb_ptr; + + fromCommit = timeBuffer->getWire(-commitToIEWDelay); +} + +template +void +InstructionQueue::switchOut() +{ + resetState(); + dependGraph.reset(); + switchedOut = true; + for (int i = 0; i < numThreads; ++i) { + memDepUnit[i].switchOut(); + } +} + +template +void +InstructionQueue::takeOverFrom() +{ + switchedOut = false; +} + +template +int +InstructionQueue::entryAmount(int num_threads) +{ + if (iqPolicy == Partitioned) { + return numEntries / num_threads; + } else { + return 0; + } +} + + +template +void +InstructionQueue::resetEntries() +{ + if (iqPolicy != Dynamic || numThreads > 1) { + int active_threads = (*activeThreads).size(); + + list::iterator threads = (*activeThreads).begin(); + list::iterator list_end = (*activeThreads).end(); + + while (threads != list_end) { + if (iqPolicy == Partitioned) { + maxEntries[*threads++] = numEntries / active_threads; + } else if(iqPolicy == Threshold && active_threads == 1) { + maxEntries[*threads++] = numEntries; + } + } + } +} + +template +unsigned +InstructionQueue::numFreeEntries() +{ + return freeEntries; +} + +template +unsigned +InstructionQueue::numFreeEntries(unsigned tid) +{ + return maxEntries[tid] - count[tid]; +} + +// Might want to do something more complex if it knows how many instructions +// will be issued this cycle. +template +bool +InstructionQueue::isFull() +{ + if (freeEntries == 0) { + return(true); + } else { + return(false); + } +} + +template +bool +InstructionQueue::isFull(unsigned tid) +{ + if (numFreeEntries(tid) == 0) { + return(true); + } else { + return(false); + } +} + +template +bool +InstructionQueue::hasReadyInsts() +{ + if (!listOrder.empty()) { + return true; + } + + for (int i = 0; i < Num_OpClasses; ++i) { + if (!readyInsts[i].empty()) { + return true; + } + } + + return false; +} + +template +void +InstructionQueue::insert(DynInstPtr &new_inst) +{ + // Make sure the instruction is valid + assert(new_inst); + + DPRINTF(IQ, "Adding instruction [sn:%lli] PC %#x to the IQ.\n", + new_inst->seqNum, new_inst->readPC()); + + assert(freeEntries != 0); + + instList[new_inst->threadNumber].push_back(new_inst); + + --freeEntries; + + new_inst->setInIQ(); + + // Look through its source registers (physical regs), and mark any + // dependencies. + addToDependents(new_inst); + + // Have this instruction set itself as the producer of its destination + // register(s). + addToProducers(new_inst); + + if (new_inst->isMemRef()) { + memDepUnit[new_inst->threadNumber].insert(new_inst); + } else { + addIfReady(new_inst); + } + + ++iqInstsAdded; + + count[new_inst->threadNumber]++; + + assert(freeEntries == (numEntries - countInsts())); +} + +template +void +InstructionQueue::insertNonSpec(DynInstPtr &new_inst) +{ + // @todo: Clean up this code; can do it by setting inst as unable + // to issue, then calling normal insert on the inst. 
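// For reference, resetEntries() above redistributes the IQ on thread
// activation changes: under Partitioned each active thread gets
// numEntries / active_threads slots, and under Threshold a lone remaining
// thread is given the whole IQ.  A tiny worked example with an assumed
// size (the constructor's @todo notes the uneven-division case):
#if 0
#include <cassert>

int main()
{
    const unsigned numEntries = 64;  // assumed IQ size
    unsigned active_threads = 2;

    assert(numEntries / active_threads == 32);  // Partitioned, 2 threads

    active_threads = 1;                         // one thread remains...
    assert(numEntries / active_threads == 64);  // ...and gets everything
    return 0;
}
#endif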
+ + assert(new_inst); + + nonSpecInsts[new_inst->seqNum] = new_inst; + + DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %#x " + "to the IQ.\n", + new_inst->seqNum, new_inst->readPC()); + + assert(freeEntries != 0); + + instList[new_inst->threadNumber].push_back(new_inst); + + --freeEntries; + + new_inst->setInIQ(); + + // Have this instruction set itself as the producer of its destination + // register(s). + addToProducers(new_inst); + + // If it's a memory instruction, add it to the memory dependency + // unit. + if (new_inst->isMemRef()) { + memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst); + } + + ++iqNonSpecInstsAdded; + + count[new_inst->threadNumber]++; + + assert(freeEntries == (numEntries - countInsts())); +} + +template +void +InstructionQueue::insertBarrier(DynInstPtr &barr_inst) +{ + memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst); + + insertNonSpec(barr_inst); +} + +template +typename Impl::DynInstPtr +InstructionQueue::getInstToExecute() +{ + assert(!instsToExecute.empty()); + DynInstPtr inst = instsToExecute.front(); + instsToExecute.pop_front(); + return inst; +} + +template +void +InstructionQueue::addToOrderList(OpClass op_class) +{ + assert(!readyInsts[op_class].empty()); + + ListOrderEntry queue_entry; + + queue_entry.queueType = op_class; + + queue_entry.oldestInst = readyInsts[op_class].top()->seqNum; + + ListOrderIt list_it = listOrder.begin(); + ListOrderIt list_end_it = listOrder.end(); + + while (list_it != list_end_it) { + if ((*list_it).oldestInst > queue_entry.oldestInst) { + break; + } + + list_it++; + } + + readyIt[op_class] = listOrder.insert(list_it, queue_entry); + queueOnList[op_class] = true; +} + +template +void +InstructionQueue::moveToYoungerInst(ListOrderIt list_order_it) +{ + // Get iterator of next item on the list + // Delete the original iterator + // Determine if the next item is either the end of the list or younger + // than the new instruction. If so, then add in a new iterator right here. + // If not, then move along. + ListOrderEntry queue_entry; + OpClass op_class = (*list_order_it).queueType; + ListOrderIt next_it = list_order_it; + + ++next_it; + + queue_entry.queueType = op_class; + queue_entry.oldestInst = readyInsts[op_class].top()->seqNum; + + while (next_it != listOrder.end() && + (*next_it).oldestInst < queue_entry.oldestInst) { + ++next_it; + } + + readyIt[op_class] = listOrder.insert(next_it, queue_entry); +} + +template +void +InstructionQueue::processFUCompletion(DynInstPtr &inst, int fu_idx) +{ + // The CPU could have been sleeping until this op completed (*extremely* + // long latency op). Wake it if it was. This may be overkill. + if (isSwitchedOut()) { + return; + } + + iewStage->wakeCPU(); + + if (fu_idx > -1) + fuPool->freeUnitNextCycle(fu_idx); + + // @todo: Ensure that these FU Completions happen at the beginning + // of a cycle, otherwise they could add too many instructions to + // the queue. - // @todo: This could break if there's multiple multi-cycle ops - // finishing on this cycle. Maybe implement something like - // instToCommit in iew_impl.hh. + issueToExecuteQueue->access(0)->size++; + instsToExecute.push_back(inst); - // int &size = issueToExecuteQueue->access(0)->size; - - // issueToExecuteQueue->access(0)->insts[size++] = inst; +} + +// @todo: Figure out a better way to remove the squashed items from the +// lists. Checking the top item of each list to see if it's squashed +// wastes time and forces jumps. 
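addToOrderList() and moveToYoungerInst() above keep listOrder sorted by the sequence number of each op class's oldest ready instruction, so the scheduler walks candidates oldest-first across all op classes. A self-contained sketch of the sorted insertion, assuming simplified entry and list types:

    #include <iostream>
    #include <list>

    // Each entry names an op class and the seqnum of its oldest ready
    // instruction; the list stays sorted by seqnum, mirroring
    // addToOrderList() above.
    struct ListOrderEntry {
        int opClass;
        unsigned long long oldestInst;
    };

    using ListOrder = std::list<ListOrderEntry>;

    ListOrder::iterator
    addToOrderList(ListOrder &order, int op_class, unsigned long long oldest)
    {
        // Insert before the first entry with a younger (larger) seqnum.
        auto it = order.begin();
        while (it != order.end() && it->oldestInst <= oldest)
            ++it;
        return order.insert(it, {op_class, oldest});
    }

    int main() {
        ListOrder order;
        addToOrderList(order, /*IntAlu*/ 0, 42);
        addToOrderList(order, /*MemRead*/ 5, 17);  // older seqnum: ends up first
        for (const auto &e : order)
            std::cout << "class " << e.opClass
                      << " oldest " << e.oldestInst << "\n";
        return 0;
    }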
+template +void +InstructionQueue::scheduleReadyInsts() +{ + DPRINTF(IQ, "Attempting to schedule ready instructions from " + "the IQ.\n"); + + IssueStruct *i2e_info = issueToExecuteQueue->access(0); + + // Have iterator to head of the list + // While I haven't exceeded bandwidth or reached the end of the list, + // Try to get a FU that can do what this op needs. + // If successful, change the oldestInst to the new top of the list, put + // the queue in the proper place in the list. + // Increment the iterator. + // This will avoid trying to schedule a certain op class if there are no + // FUs that handle it. + ListOrderIt order_it = listOrder.begin(); + ListOrderIt order_end_it = listOrder.end(); + int total_issued = 0; + + while (total_issued < totalWidth && + order_it != order_end_it) { + OpClass op_class = (*order_it).queueType; + + assert(!readyInsts[op_class].empty()); + + DynInstPtr issuing_inst = readyInsts[op_class].top(); + + assert(issuing_inst->seqNum == (*order_it).oldestInst); + + if (issuing_inst->isSquashed()) { + readyInsts[op_class].pop(); + + if (!readyInsts[op_class].empty()) { + moveToYoungerInst(order_it); + } else { + readyIt[op_class] = listOrder.end(); + queueOnList[op_class] = false; + } + + listOrder.erase(order_it++); + + ++iqSquashedInstsIssued; + + continue; + } + + int idx = -2; + int op_latency = 1; + int tid = issuing_inst->threadNumber; + + if (op_class != No_OpClass) { + idx = fuPool->getUnit(op_class); + + if (idx > -1) { + op_latency = fuPool->getOpLatency(op_class); + } + } + ++ // If we have an instruction that doesn't require a FU, or a ++ // valid FU, then schedule for execution. + if (idx == -2 || idx != -1) { + if (op_latency == 1) { - // i2e_info->insts[exec_queue_slot++] = issuing_inst; + i2e_info->size++; + instsToExecute.push_back(issuing_inst); + + // Add the FU onto the list of FU's to be freed next + // cycle if we used one. + if (idx >= 0) + fuPool->freeUnitNextCycle(idx); + } else { + int issue_latency = fuPool->getIssueLatency(op_class); + // Generate completion event for the FU + FUCompletion *execution = new FUCompletion(issuing_inst, + idx, this); + + execution->schedule(curTick + cpu->cycles(issue_latency - 1)); + + // @todo: Enforce that issue_latency == 1 or op_latency + if (issue_latency > 1) { ++ // If FU isn't pipelined, then it must be freed ++ // upon the execution completing. + execution->setFreeFU(); + } else { - // @todo: Not sure I'm accounting for the - // multi-cycle op in a pipelined FU properly, or - // the number of instructions issued in one cycle. - // i2e_info->insts[exec_queue_slot++] = issuing_inst; - // i2e_info->size++; - + // Add the FU onto the list of FU's to be freed next cycle. + fuPool->freeUnitNextCycle(idx); + } + } + + DPRINTF(IQ, "Thread %i: Issuing instruction PC %#x " + "[sn:%lli]\n", + tid, issuing_inst->readPC(), + issuing_inst->seqNum); + + readyInsts[op_class].pop(); + + if (!readyInsts[op_class].empty()) { + moveToYoungerInst(order_it); + } else { + readyIt[op_class] = listOrder.end(); + queueOnList[op_class] = false; + } + + issuing_inst->setIssued(); + ++total_issued; + + if (!issuing_inst->isMemRef()) { + // Memory instructions can not be freed from the IQ until they + // complete. 
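In the issue loop above, the FU pool yields -1 when every FU of the class is busy, a non-negative index when one is granted, and idx stays -2 when the op needs no FU at all; single-cycle ops go straight to the execute queue while longer ops get a FUCompletion event, and a non-pipelined FU stays held until completion. A sketch of that three-way decision, with hypothetical constants standing in for the pool's return codes:

    #include <iostream>

    // Hypothetical outcome codes matching the hunk above:
    //   -2: the op class needs no FU; -1: no free FU this cycle.
    constexpr int kNoFUNeeded = -2;
    constexpr int kAllBusy = -1;

    enum class IssueAction { Execute, ScheduleCompletion, RecordFuBusy };

    // Decide what the scheduler does for one candidate instruction, given
    // the FU index it got back and the op latency of its class.
    IssueAction decide(int fu_idx, int op_latency)
    {
        if (fu_idx == kAllBusy)
            return IssueAction::RecordFuBusy;    // statFuBusy++; try the next class
        if (fu_idx == kNoFUNeeded || op_latency == 1)
            return IssueAction::Execute;         // to execute queue; FU freed next cycle
        return IssueAction::ScheduleCompletion;  // completion event fires
                                                 // op_latency - 1 cycles later
    }

    int main() {
        std::cout << int(decide(3, 1)) << " "        // Execute
                  << int(decide(kAllBusy, 1)) << " " // RecordFuBusy
                  << int(decide(0, 24)) << "\n";     // ScheduleCompletion (e.g. divide)
        return 0;
    }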
+ ++freeEntries; + count[tid]--; + issuing_inst->removeInIQ(); + } else { + memDepUnit[tid].issue(issuing_inst); + } + + listOrder.erase(order_it++); + statIssuedInstType[tid][op_class]++; + } else { + statFuBusy[op_class]++; + fuBusy[tid]++; + ++order_it; + } + } + + numIssuedDist.sample(total_issued); + iqInstsIssued+= total_issued; + ++ // If we issued any instructions, tell the CPU we had activity. + if (total_issued) { + cpu->activityThisCycle(); + } else { + DPRINTF(IQ, "Not able to schedule any instructions.\n"); + } +} + +template +void +InstructionQueue::scheduleNonSpec(const InstSeqNum &inst) +{ + DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready " + "to execute.\n", inst); + + NonSpecMapIt inst_it = nonSpecInsts.find(inst); + + assert(inst_it != nonSpecInsts.end()); + + unsigned tid = (*inst_it).second->threadNumber; + + (*inst_it).second->setCanIssue(); + + if (!(*inst_it).second->isMemRef()) { + addIfReady((*inst_it).second); + } else { + memDepUnit[tid].nonSpecInstReady((*inst_it).second); + } + + (*inst_it).second = NULL; + + nonSpecInsts.erase(inst_it); +} + +template +void +InstructionQueue::commit(const InstSeqNum &inst, unsigned tid) +{ + DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n", + tid,inst); + + ListIt iq_it = instList[tid].begin(); + + while (iq_it != instList[tid].end() && + (*iq_it)->seqNum <= inst) { + ++iq_it; + instList[tid].pop_front(); + } + + assert(freeEntries == (numEntries - countInsts())); +} + +template +int +InstructionQueue::wakeDependents(DynInstPtr &completed_inst) +{ + int dependents = 0; + + DPRINTF(IQ, "Waking dependents of completed instruction.\n"); + + assert(!completed_inst->isSquashed()); + + // Tell the memory dependence unit to wake any dependents on this + // instruction if it is a memory instruction. Also complete the memory + // instruction at this point since we know it executed without issues. + // @todo: Might want to rename "completeMemInst" to something that + // indicates that it won't need to be replayed, and call this + // earlier. Might not be a big deal. + if (completed_inst->isMemRef()) { + memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst); + completeMemInst(completed_inst); + } else if (completed_inst->isMemBarrier() || + completed_inst->isWriteBarrier()) { + memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst); + } + + for (int dest_reg_idx = 0; + dest_reg_idx < completed_inst->numDestRegs(); + dest_reg_idx++) + { + PhysRegIndex dest_reg = + completed_inst->renamedDestRegIdx(dest_reg_idx); + + // Special case of uniq or control registers. They are not + // handled by the IQ and thus have no dependency graph entry. + // @todo Figure out a cleaner way to handle this. + if (dest_reg >= numPhysRegs) { + continue; + } + + DPRINTF(IQ, "Waking any dependents on register %i.\n", + (int) dest_reg); + + //Go through the dependency chain, marking the registers as + //ready within the waiting instructions. + DynInstPtr dep_inst = dependGraph.pop(dest_reg); + + while (dep_inst) { + DPRINTF(IQ, "Waking up a dependent instruction, PC%#x.\n", + dep_inst->readPC()); + + // Might want to give more information to the instruction + // so that it knows which of its source registers is + // ready. However that would mean that the dependency + // graph entries would need to hold the src_reg_idx. 
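commit() above retires IQ bookkeeping by dropping every list entry whose sequence number is at or below the committed instruction's. A simplified, compilable equivalent of that head-of-list trim:

    #include <iostream>
    #include <list>

    // Pop instructions from the head of a per-thread list while their
    // sequence numbers are <= the committed seqnum, as commit() does above.
    unsigned commitOlderThan(std::list<unsigned long long> &inst_list,
                             unsigned long long committed_sn)
    {
        unsigned removed = 0;
        while (!inst_list.empty() && inst_list.front() <= committed_sn) {
            inst_list.pop_front();
            ++removed;
        }
        return removed;
    }

    int main() {
        std::list<unsigned long long> iq{10, 11, 15, 20};
        std::cout << commitOlderThan(iq, 15) << " removed, "
                  << iq.size() << " left\n";  // 3 removed, 1 left
        return 0;
    }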
+ dep_inst->markSrcRegReady(); + + addIfReady(dep_inst); + + dep_inst = dependGraph.pop(dest_reg); + + ++dependents; + } + + // Reset the head node now that all of its dependents have + // been woken up. + assert(dependGraph.empty(dest_reg)); + dependGraph.clearInst(dest_reg); + + // Mark the scoreboard as having that register ready. + regScoreboard[dest_reg] = true; + } + return dependents; +} + +template +void +InstructionQueue::addReadyMemInst(DynInstPtr &ready_inst) +{ + OpClass op_class = ready_inst->opClass(); + + readyInsts[op_class].push(ready_inst); + + // Will need to reorder the list if either a queue is not on the list, + // or it has an older instruction than last time. + if (!queueOnList[op_class]) { + addToOrderList(op_class); + } else if (readyInsts[op_class].top()->seqNum < + (*readyIt[op_class]).oldestInst) { + listOrder.erase(readyIt[op_class]); + addToOrderList(op_class); + } + + DPRINTF(IQ, "Instruction is ready to issue, putting it onto " + "the ready list, PC %#x opclass:%i [sn:%lli].\n", + ready_inst->readPC(), op_class, ready_inst->seqNum); +} + +template +void +InstructionQueue::rescheduleMemInst(DynInstPtr &resched_inst) +{ + memDepUnit[resched_inst->threadNumber].reschedule(resched_inst); +} + +template +void +InstructionQueue::replayMemInst(DynInstPtr &replay_inst) +{ + memDepUnit[replay_inst->threadNumber].replay(replay_inst); +} + +template +void +InstructionQueue::completeMemInst(DynInstPtr &completed_inst) +{ + int tid = completed_inst->threadNumber; + + DPRINTF(IQ, "Completing mem instruction PC:%#x [sn:%lli]\n", + completed_inst->readPC(), completed_inst->seqNum); + + ++freeEntries; + + completed_inst->memOpDone = true; + + memDepUnit[tid].completed(completed_inst); + + count[tid]--; +} + +template +void +InstructionQueue::violation(DynInstPtr &store, + DynInstPtr &faulting_load) +{ + memDepUnit[store->threadNumber].violation(store, faulting_load); +} + +template +void +InstructionQueue::squash(unsigned tid) +{ + DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in " + "the IQ.\n", tid); + + // Read instruction sequence number of last instruction out of the + // time buffer. + squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum; + + // Call doSquash if there are insts in the IQ + if (count[tid] > 0) { + doSquash(tid); + } + + // Also tell the memory dependence unit to squash. + memDepUnit[tid].squash(squashedSeqNum[tid], tid); +} + +template +void +InstructionQueue::doSquash(unsigned tid) +{ + // Start at the tail. + ListIt squash_it = instList[tid].end(); + --squash_it; + + DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n", + tid, squashedSeqNum[tid]); + + // Squash any instructions younger than the squashed sequence number + // given. + while (squash_it != instList[tid].end() && + (*squash_it)->seqNum > squashedSeqNum[tid]) { + + DynInstPtr squashed_inst = (*squash_it); + + // Only handle the instruction if it actually is in the IQ and + // hasn't already been squashed in the IQ. + if (squashed_inst->threadNumber != tid || + squashed_inst->isSquashedInIQ()) { + --squash_it; + continue; + } + + if (!squashed_inst->isIssued() || + (squashed_inst->isMemRef() && + !squashed_inst->memOpDone)) { + + // Remove the instruction from the dependency list. 
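The wakeDependents() loop above walks a completed instruction's destination registers, pops each chained consumer, marks one of its sources ready, and finally sets the scoreboard bit for the register. A toy version of that wakeup, assuming a plain map-of-deques in place of the templated dependency graph:

    #include <deque>
    #include <iostream>
    #include <map>

    // A toy consumer: ready to issue once every source register is ready.
    struct Inst {
        int srcsOutstanding;
        bool ready() const { return srcsOutstanding == 0; }
    };

    // Pop all consumers chained on dest_reg, mark one source ready in each,
    // and mark the register ready in the scoreboard: the shape of
    // wakeDependents() above, minus the memory-dependence cases.
    int wakeDependents(std::map<int, std::deque<Inst*>> &dependGraph,
                       std::map<int, bool> &scoreboard, int dest_reg)
    {
        int woken = 0;
        auto &chain = dependGraph[dest_reg];
        while (!chain.empty()) {
            Inst *dep = chain.front();
            chain.pop_front();
            --dep->srcsOutstanding;   // markSrcRegReady()
            ++woken;
        }
        scoreboard[dest_reg] = true;  // later consumers see it ready at insert
        return woken;
    }

    int main() {
        Inst a{1}, b{2};
        std::map<int, std::deque<Inst*>> graph{{7, {&a, &b}}};
        std::map<int, bool> scoreboard;
        std::cout << wakeDependents(graph, scoreboard, 7) << " woken; "
                  << "a ready: " << a.ready() << "\n";
        return 0;
    }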
+ if (!squashed_inst->isNonSpeculative() && + !squashed_inst->isStoreConditional() && + !squashed_inst->isMemBarrier() && + !squashed_inst->isWriteBarrier()) { + + for (int src_reg_idx = 0; + src_reg_idx < squashed_inst->numSrcRegs(); + src_reg_idx++) + { + PhysRegIndex src_reg = + squashed_inst->renamedSrcRegIdx(src_reg_idx); + + // Only remove it from the dependency graph if it + // was placed there in the first place. + + // Instead of doing a linked list traversal, we + // can just remove these squashed instructions + // either at issue time, or when the register is + // overwritten. The only downside to this is it + // leaves more room for error. + + if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) && + src_reg < numPhysRegs) { + dependGraph.remove(src_reg, squashed_inst); + } + + + ++iqSquashedOperandsExamined; + } + } else { + NonSpecMapIt ns_inst_it = + nonSpecInsts.find(squashed_inst->seqNum); + assert(ns_inst_it != nonSpecInsts.end()); + + (*ns_inst_it).second = NULL; + + nonSpecInsts.erase(ns_inst_it); + + ++iqSquashedNonSpecRemoved; + } + + // Might want to also clear out the head of the dependency graph. + + // Mark it as squashed within the IQ. + squashed_inst->setSquashedInIQ(); + + // @todo: Remove this hack where several statuses are set so the + // inst will flow through the rest of the pipeline. + squashed_inst->setIssued(); + squashed_inst->setCanCommit(); + squashed_inst->removeInIQ(); + + //Update Thread IQ Count + count[squashed_inst->threadNumber]--; + + ++freeEntries; + + DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %#x " + "squashed.\n", + tid, squashed_inst->seqNum, squashed_inst->readPC()); + } + + instList[tid].erase(squash_it--); + ++iqSquashedInstsExamined; + } +} + +template +bool +InstructionQueue::addToDependents(DynInstPtr &new_inst) +{ + // Loop through the instruction's source registers, adding + // them to the dependency list if they are not ready. + int8_t total_src_regs = new_inst->numSrcRegs(); + bool return_val = false; + + for (int src_reg_idx = 0; + src_reg_idx < total_src_regs; + src_reg_idx++) + { + // Only add it to the dependency graph if it's not ready. + if (!new_inst->isReadySrcRegIdx(src_reg_idx)) { + PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx); + + // Check the IQ's scoreboard to make sure the register + // hasn't become ready while the instruction was in flight + // between stages. Only if it really isn't ready should + // it be added to the dependency graph. + if (src_reg >= numPhysRegs) { + continue; + } else if (regScoreboard[src_reg] == false) { + DPRINTF(IQ, "Instruction PC %#x has src reg %i that " + "is being added to the dependency chain.\n", + new_inst->readPC(), src_reg); + + dependGraph.insert(src_reg, new_inst); + + // Change the return value to indicate that something + // was added to the dependency graph. + return_val = true; + } else { + DPRINTF(IQ, "Instruction PC %#x has src reg %i that " + "became ready before it reached the IQ.\n", + new_inst->readPC(), src_reg); + // Mark a register ready within the instruction. + new_inst->markSrcRegReady(src_reg_idx); + } + } + } + + return return_val; +} + +template +void +InstructionQueue::addToProducers(DynInstPtr &new_inst) +{ + // Nothing really needs to be marked when an instruction becomes + // the producer of a register's value, but for convenience a ptr + // to the producing instruction will be placed in the head node of + // the dependency links. 
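doSquash() above scans from the youngest entry toward the squash point, removing everything younger than squashedSeqNum. A compilable sketch of that tail-first walk, without the dependency-graph and non-spec-map cleanup:

    #include <iostream>
    #include <list>

    // Walk a per-thread instruction list from the tail, erasing entries
    // younger than squash_sn: the traversal pattern of doSquash() above.
    unsigned squashYoungerThan(std::list<unsigned long long> &insts,
                               unsigned long long squash_sn)
    {
        unsigned squashed = 0;
        auto it = insts.end();
        while (it != insts.begin()) {
            --it;
            if (*it <= squash_sn)
                break;                // everything older survives
            it = insts.erase(it);     // erase returns the next element
            ++squashed;
        }
        return squashed;
    }

    int main() {
        std::list<unsigned long long> iq{3, 5, 8, 9};
        std::cout << squashYoungerThan(iq, 5) << " squashed, "
                  << iq.size() << " remain\n";  // 2 squashed, 2 remain
        return 0;
    }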
+ int8_t total_dest_regs = new_inst->numDestRegs(); + + for (int dest_reg_idx = 0; + dest_reg_idx < total_dest_regs; + dest_reg_idx++) + { + PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx); + + // Instructions that use the misc regs will have a reg number + // higher than the normal physical registers. In this case these + // registers are not renamed, and there is no need to track + // dependencies as these instructions must be executed at commit. + if (dest_reg >= numPhysRegs) { + continue; + } + + if (!dependGraph.empty(dest_reg)) { + dependGraph.dump(); + panic("Dependency graph %i not empty!", dest_reg); + } + + dependGraph.setInst(dest_reg, new_inst); + + // Mark the scoreboard to say it's not yet ready. + regScoreboard[dest_reg] = false; + } +} + +template +void +InstructionQueue::addIfReady(DynInstPtr &inst) +{ + // If the instruction now has all of its source registers + // available, then add it to the list of ready instructions. + if (inst->readyToIssue()) { + + //Add the instruction to the proper ready list. + if (inst->isMemRef()) { + + DPRINTF(IQ, "Checking if memory instruction can issue.\n"); + + // Message to the mem dependence unit that this instruction has + // its registers ready. + memDepUnit[inst->threadNumber].regsReady(inst); + + return; + } + + OpClass op_class = inst->opClass(); + + DPRINTF(IQ, "Instruction is ready to issue, putting it onto " + "the ready list, PC %#x opclass:%i [sn:%lli].\n", + inst->readPC(), op_class, inst->seqNum); + + readyInsts[op_class].push(inst); + + // Will need to reorder the list if either a queue is not on the list, + // or it has an older instruction than last time. + if (!queueOnList[op_class]) { + addToOrderList(op_class); + } else if (readyInsts[op_class].top()->seqNum < + (*readyIt[op_class]).oldestInst) { + listOrder.erase(readyIt[op_class]); + addToOrderList(op_class); + } + } +} + +template +int +InstructionQueue::countInsts() +{ + //ksewell:This works but definitely could use a cleaner write + //with a more intuitive way of counting. Right now it's + //just brute force .... + +#if 0 + int total_insts = 0; + + for (int i = 0; i < numThreads; ++i) { + ListIt count_it = instList[i].begin(); + + while (count_it != instList[i].end()) { + if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) { + if (!(*count_it)->isIssued()) { + ++total_insts; + } else if ((*count_it)->isMemRef() && + !(*count_it)->memOpDone) { + // Loads that have not been marked as executed still count + // towards the total instructions. 
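addToDependents() and addToProducers() above cooperate through regScoreboard: dispatching a producer clears its destination register's bit, writeback sets it, and consumers consult it at insert time to decide whether to chain on the producer. A minimal sketch of that scoreboard protocol:

    #include <cassert>
    #include <iostream>
    #include <vector>

    // Minimal scoreboard: ready[r] is true once the value of physical
    // register r has been produced, as in the IQ code above.
    struct Scoreboard {
        std::vector<bool> ready;
        explicit Scoreboard(int num_phys_regs) : ready(num_phys_regs, true) {}

        // A dispatched instruction claims its destination register.
        void produce(int dest_reg) {
            assert(dest_reg < (int)ready.size());
            ready[dest_reg] = false;   // consumers must now wait on the chain
        }
        // Writeback publishes the value.
        void complete(int dest_reg) { ready[dest_reg] = true; }
        // A consumer checks readiness at insert time.
        bool isReady(int src_reg) const { return ready[src_reg]; }
    };

    int main() {
        Scoreboard sb(128);
        sb.produce(12);
        std::cout << "r12 ready after dispatch: " << sb.isReady(12) << "\n";
        sb.complete(12);
        std::cout << "r12 ready after writeback: " << sb.isReady(12) << "\n";
        return 0;
    }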
+ ++total_insts; + } + } + + ++count_it; + } + } + + return total_insts; +#else + return numEntries - freeEntries; +#endif +} + +template +void +InstructionQueue::dumpLists() +{ + for (int i = 0; i < Num_OpClasses; ++i) { + cprintf("Ready list %i size: %i\n", i, readyInsts[i].size()); + + cprintf("\n"); + } + + cprintf("Non speculative list size: %i\n", nonSpecInsts.size()); + + NonSpecMapIt non_spec_it = nonSpecInsts.begin(); + NonSpecMapIt non_spec_end_it = nonSpecInsts.end(); + + cprintf("Non speculative list: "); + + while (non_spec_it != non_spec_end_it) { + cprintf("%#x [sn:%lli]", (*non_spec_it).second->readPC(), + (*non_spec_it).second->seqNum); + ++non_spec_it; + } + + cprintf("\n"); + + ListOrderIt list_order_it = listOrder.begin(); + ListOrderIt list_order_end_it = listOrder.end(); + int i = 1; + + cprintf("List order: "); + + while (list_order_it != list_order_end_it) { + cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType, + (*list_order_it).oldestInst); + + ++list_order_it; + ++i; + } + + cprintf("\n"); +} + + +template +void +InstructionQueue::dumpInsts() +{ + for (int i = 0; i < numThreads; ++i) { + int num = 0; + int valid_num = 0; + ListIt inst_list_it = instList[i].begin(); + + while (inst_list_it != instList[i].end()) + { + cprintf("Instruction:%i\n", + num); + if (!(*inst_list_it)->isSquashed()) { + if (!(*inst_list_it)->isIssued()) { + ++valid_num; + cprintf("Count:%i\n", valid_num); + } else if ((*inst_list_it)->isMemRef() && + !(*inst_list_it)->memOpDone) { + // Loads that have not been marked as executed + // still count towards the total instructions. + ++valid_num; + cprintf("Count:%i\n", valid_num); + } + } + + cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n" + "Issued:%i\nSquashed:%i\n", + (*inst_list_it)->readPC(), + (*inst_list_it)->seqNum, + (*inst_list_it)->threadNumber, + (*inst_list_it)->isIssued(), + (*inst_list_it)->isSquashed()); + + if ((*inst_list_it)->isMemRef()) { + cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone); + } + + cprintf("\n"); + + inst_list_it++; + ++num; + } + } ++ ++ cprintf("Insts to Execute list:\n"); ++ ++ int num = 0; ++ int valid_num = 0; ++ ListIt inst_list_it = instsToExecute.begin(); ++ ++ while (inst_list_it != instsToExecute.end()) ++ { ++ cprintf("Instruction:%i\n", ++ num); ++ if (!(*inst_list_it)->isSquashed()) { ++ if (!(*inst_list_it)->isIssued()) { ++ ++valid_num; ++ cprintf("Count:%i\n", valid_num); ++ } else if ((*inst_list_it)->isMemRef() && ++ !(*inst_list_it)->memOpDone) { ++ // Loads that have not been marked as executed ++ // still count towards the total instructions. ++ ++valid_num; ++ cprintf("Count:%i\n", valid_num); ++ } ++ } ++ ++ cprintf("PC:%#x\n[sn:%lli]\n[tid:%i]\n" ++ "Issued:%i\nSquashed:%i\n", ++ (*inst_list_it)->readPC(), ++ (*inst_list_it)->seqNum, ++ (*inst_list_it)->threadNumber, ++ (*inst_list_it)->isIssued(), ++ (*inst_list_it)->isSquashed()); ++ ++ if ((*inst_list_it)->isMemRef()) { ++ cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone); ++ } ++ ++ cprintf("\n"); ++ ++ inst_list_it++; ++ ++num; ++ } +} diff --cc src/cpu/o3/lsq.hh index 51eb23cd7,000000000..d65510c30 mode 100644,000000..100644 --- a/src/cpu/o3/lsq.hh +++ b/src/cpu/o3/lsq.hh @@@ -1,324 -1,0 +1,323 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CPU_O3_LSQ_HH__ +#define __CPU_O3_LSQ_HH__ + +#include +#include + +#include "config/full_system.hh" +#include "cpu/inst_seq.hh" +//#include "cpu/o3/cpu_policy.hh" +#include "cpu/o3/lsq_unit.hh" +#include "mem/port.hh" +//#include "mem/page_table.hh" +#include "sim/sim_object.hh" + +template +class LSQ { + public: + typedef typename Impl::Params Params; + typedef typename Impl::FullCPU FullCPU; + typedef typename Impl::DynInstPtr DynInstPtr; + typedef typename Impl::CPUPol::IEW IEW; + typedef typename Impl::CPUPol::LSQUnit LSQUnit; + ++ /** SMT policy. */ + enum LSQPolicy { + Dynamic, + Partitioned, + Threshold + }; + + /** Constructs an LSQ with the given parameters. */ + LSQ(Params *params); + + /** Returns the name of the LSQ. */ + std::string name() const; + + /** Sets the pointer to the list of active threads. */ + void setActiveThreads(std::list *at_ptr); + /** Sets the CPU pointer. */ + void setCPU(FullCPU *cpu_ptr); + /** Sets the IEW stage pointer. */ + void setIEW(IEW *iew_ptr); + /** Sets the page table pointer. */ +// void setPageTable(PageTable *pt_ptr); - ++ /** Switches out the LSQ. */ + void switchOut(); ++ /** Takes over execution from another CPU's thread. */ + void takeOverFrom(); + + /** Number of entries needed for the given amount of threads.*/ + int entryAmount(int num_threads); + void removeEntries(unsigned tid); + /** Reset the max entries for each thread. */ + void resetEntries(); + /** Resize the max entries for a thread. */ + void resizeEntries(unsigned size, unsigned tid); + + /** Ticks the LSQ. */ + void tick(); + /** Ticks a specific LSQ Unit. */ + void tick(unsigned tid) + { thread[tid].tick(); } + + /** Inserts a load into the LSQ. */ + void insertLoad(DynInstPtr &load_inst); + /** Inserts a store into the LSQ. */ + void insertStore(DynInstPtr &store_inst); + + /** Executes a load. */ + Fault executeLoad(DynInstPtr &inst); + - Fault executeLoad(int lq_idx, unsigned tid) - { return thread[tid].executeLoad(lq_idx); } - + /** Executes a store. 
*/ + Fault executeStore(DynInstPtr &inst); + + /** + * Commits loads up until the given sequence number for a specific thread. + */ + void commitLoads(InstSeqNum &youngest_inst, unsigned tid) + { thread[tid].commitLoads(youngest_inst); } + + /** + * Commits stores up until the given sequence number for a specific thread. + */ + void commitStores(InstSeqNum &youngest_inst, unsigned tid) + { thread[tid].commitStores(youngest_inst); } + + /** + * Attempts to write back stores until all cache ports are used or the + * interface becomes blocked. + */ + void writebackStores(); + /** Same as above, but only for one thread. */ + void writebackStores(unsigned tid); + + /** + * Squash instructions from a thread until the specified sequence number. + */ + void squash(const InstSeqNum &squashed_num, unsigned tid) + { thread[tid].squash(squashed_num); } + + /** Returns whether or not there was a memory ordering violation. */ + bool violation(); + /** + * Returns whether or not there was a memory ordering violation for a + * specific thread. + */ + bool violation(unsigned tid) + { return thread[tid].violation(); } + + /** Returns if a load is blocked due to the memory system for a specific + * thread. + */ + bool loadBlocked(unsigned tid) + { return thread[tid].loadBlocked(); } + + bool isLoadBlockedHandled(unsigned tid) + { return thread[tid].isLoadBlockedHandled(); } + + void setLoadBlockedHandled(unsigned tid) + { thread[tid].setLoadBlockedHandled(); } + + /** Gets the instruction that caused the memory ordering violation. */ + DynInstPtr getMemDepViolator(unsigned tid) + { return thread[tid].getMemDepViolator(); } + + /** Returns the head index of the load queue for a specific thread. */ + int getLoadHead(unsigned tid) + { return thread[tid].getLoadHead(); } + + /** Returns the sequence number of the head of the load queue. */ + InstSeqNum getLoadHeadSeqNum(unsigned tid) + { + return thread[tid].getLoadHeadSeqNum(); + } + + /** Returns the head index of the store queue. */ + int getStoreHead(unsigned tid) + { return thread[tid].getStoreHead(); } + + /** Returns the sequence number of the head of the store queue. */ + InstSeqNum getStoreHeadSeqNum(unsigned tid) + { + return thread[tid].getStoreHeadSeqNum(); + } + + /** Returns the number of instructions in all of the queues. */ + int getCount(); + /** Returns the number of instructions in the queues of one thread. */ + int getCount(unsigned tid) + { return thread[tid].getCount(); } + + /** Returns the total number of loads in the load queue. */ + int numLoads(); + /** Returns the total number of loads for a single thread. */ + int numLoads(unsigned tid) + { return thread[tid].numLoads(); } + + /** Returns the total number of stores in the store queue. */ + int numStores(); + /** Returns the total number of stores for a single thread. */ + int numStores(unsigned tid) + { return thread[tid].numStores(); } + + /** Returns the total number of loads that are ready. */ + int numLoadsReady(); + /** Returns the number of loads that are ready for a single thread. */ + int numLoadsReady(unsigned tid) + { return thread[tid].numLoadsReady(); } + + /** Returns the number of free entries. */ + unsigned numFreeEntries(); + /** Returns the number of free entries for a specific thread. */ + unsigned numFreeEntries(unsigned tid); + + /** Returns if the LSQ is full (either LQ or SQ is full). */ + bool isFull(); + /** + * Returns if the LSQ is full for a specific thread (either LQ or SQ is + * full). 
+ */ + bool isFull(unsigned tid); + + /** Returns if any of the LQs are full. */ + bool lqFull(); + /** Returns if the LQ of a given thread is full. */ + bool lqFull(unsigned tid); + + /** Returns if any of the SQs are full. */ + bool sqFull(); + /** Returns if the SQ of a given thread is full. */ + bool sqFull(unsigned tid); + + /** + * Returns if the LSQ is stalled due to a memory operation that must be + * replayed. + */ + bool isStalled(); + /** + * Returns if the LSQ of a specific thread is stalled due to a memory + * operation that must be replayed. + */ + bool isStalled(unsigned tid); + + /** Returns whether or not there are any stores to write back to memory. */ + bool hasStoresToWB(); + + /** Returns whether or not a specific thread has any stores to write back + * to memory. + */ + bool hasStoresToWB(unsigned tid) + { return thread[tid].hasStoresToWB(); } + + /** Returns the number of stores a specific thread has to write back. */ + int numStoresToWB(unsigned tid) + { return thread[tid].numStoresToWB(); } + + /** Returns if the LSQ will write back to memory this cycle. */ + bool willWB(); + /** Returns if the LSQ of a specific thread will write back to memory this + * cycle. + */ + bool willWB(unsigned tid) + { return thread[tid].willWB(); } + + /** Debugging function to print out all instructions. */ + void dumpInsts(); + /** Debugging function to print out instructions from a specific thread. */ + void dumpInsts(unsigned tid) + { thread[tid].dumpInsts(); } + + /** Executes a read operation, using the load specified at the load index. */ + template + Fault read(RequestPtr req, T &data, int load_idx); + + /** Executes a store operation, using the store specified at the store + * index. + */ + template + Fault write(RequestPtr req, T &data, int store_idx); + + private: + /** The LSQ policy for SMT mode. */ + LSQPolicy lsqPolicy; + + /** The LSQ units for individual threads. */ + LSQUnit thread[Impl::MaxThreads]; + + /** The CPU pointer. */ + FullCPU *cpu; + + /** The IEW stage pointer. */ + IEW *iewStage; + + /** The pointer to the page table. */ +// PageTable *pTable; + + /** List of Active Threads in System. */ + std::list *activeThreads; + + /** Total Size of LQ Entries. */ + unsigned LQEntries; + /** Total Size of SQ Entries. */ + unsigned SQEntries; + + /** Max LQ Size - Used to Enforce Sharing Policies. */ + unsigned maxLQEntries; + + /** Max SQ Size - Used to Enforce Sharing Policies. */ + unsigned maxSQEntries; + + /** Number of Threads. */ + unsigned numThreads; +}; + +template +template +Fault +LSQ::read(RequestPtr req, T &data, int load_idx) +{ + unsigned tid = req->getThreadNum(); + + return thread[tid].read(req, data, load_idx); +} + +template +template +Fault +LSQ::write(RequestPtr req, T &data, int store_idx) +{ + unsigned tid = req->getThreadNum(); + + return thread[tid].write(req, data, store_idx); +} + +#endif // __CPU_O3_LSQ_HH__ diff --cc src/cpu/o3/lsq_unit.hh index b339cea2c,000000000..393d8947d mode 100644,000000..100644 --- a/src/cpu/o3/lsq_unit.hh +++ b/src/cpu/o3/lsq_unit.hh @@@ -1,629 -1,0 +1,638 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CPU_O3_LSQ_UNIT_HH__ +#define __CPU_O3_LSQ_UNIT_HH__ + +#include +#include +#include + +#include "arch/faults.hh" +#include "config/full_system.hh" +#include "base/hashmap.hh" +#include "cpu/inst_seq.hh" +#include "mem/packet.hh" +#include "mem/port.hh" +//#include "mem/page_table.hh" +//#include "sim/debug.hh" +//#include "sim/sim_object.hh" + +/** + * Class that implements the actual LQ and SQ for each specific + * thread. Both are circular queues; load entries are freed upon + * committing, while store entries are freed once they writeback. The + * LSQUnit tracks if there are memory ordering violations, and also + * detects partial load to store forwarding cases (a store only has + * part of a load's data) that requires the load to wait until the + * store writes back. In the former case it holds onto the instruction + * until the dependence unit looks at it, and in the latter it stalls + * the LSQ until the store writes back. At that point the load is + * replayed. + */ +template +class LSQUnit { + protected: + typedef TheISA::IntReg IntReg; + public: + typedef typename Impl::Params Params; + typedef typename Impl::FullCPU FullCPU; + typedef typename Impl::DynInstPtr DynInstPtr; + typedef typename Impl::CPUPol::IEW IEW; + typedef typename Impl::CPUPol::IssueStruct IssueStruct; + + public: + /** Constructs an LSQ unit. init() must be called prior to use. */ + LSQUnit(); + + /** Initializes the LSQ unit with the specified number of entries. */ + void init(Params *params, unsigned maxLQEntries, + unsigned maxSQEntries, unsigned id); + + /** Returns the name of the LSQ unit. */ + std::string name() const; + + /** Sets the CPU pointer. */ + void setCPU(FullCPU *cpu_ptr); + + /** Sets the IEW stage pointer. */ + void setIEW(IEW *iew_ptr) + { iewStage = iew_ptr; } + + /** Sets the page table pointer. */ +// void setPageTable(PageTable *pt_ptr); + ++ /** Switches out LSQ unit. */ + void switchOut(); + ++ /** Takes over from another CPU's thread. 
*/ + void takeOverFrom(); + ++ /** Returns if the LSQ is switched out. */ + bool isSwitchedOut() { return switchedOut; } + + /** Ticks the LSQ unit, which in this case only resets the number of + * used cache ports. + * @todo: Move the number of used ports up to the LSQ level so it can + * be shared by all LSQ units. + */ + void tick() { usedPorts = 0; } + + /** Inserts an instruction. */ + void insert(DynInstPtr &inst); + /** Inserts a load instruction. */ + void insertLoad(DynInstPtr &load_inst); + /** Inserts a store instruction. */ + void insertStore(DynInstPtr &store_inst); + + /** Executes a load instruction. */ + Fault executeLoad(DynInstPtr &inst); + + Fault executeLoad(int lq_idx) { panic("Not implemented"); return NoFault; } + /** Executes a store instruction. */ + Fault executeStore(DynInstPtr &inst); + + /** Commits the head load. */ + void commitLoad(); + /** Commits loads older than a specific sequence number. */ + void commitLoads(InstSeqNum &youngest_inst); + + /** Commits stores older than a specific sequence number. */ + void commitStores(InstSeqNum &youngest_inst); + + /** Writes back stores. */ + void writebackStores(); + + void completeDataAccess(PacketPtr pkt); + + void completeStoreDataAccess(DynInstPtr &inst); + + // @todo: Include stats in the LSQ unit. + //void regStats(); + + /** Clears all the entries in the LQ. */ + void clearLQ(); + + /** Clears all the entries in the SQ. */ + void clearSQ(); + + /** Resizes the LQ to a given size. */ + void resizeLQ(unsigned size); + + /** Resizes the SQ to a given size. */ + void resizeSQ(unsigned size); + + /** Squashes all instructions younger than a specific sequence number. */ + void squash(const InstSeqNum &squashed_num); + + /** Returns if there is a memory ordering violation. Value is reset upon + * call to getMemDepViolator(). + */ + bool violation() { return memDepViolator; } + + /** Returns the memory ordering violator. */ + DynInstPtr getMemDepViolator(); + + /** Returns if a load became blocked due to the memory system. */ + bool loadBlocked() + { return isLoadBlocked; } + ++ /** Clears the signal that a load became blocked. */ + void clearLoadBlocked() + { isLoadBlocked = false; } + ++ /** Returns if the blocked load was handled. */ + bool isLoadBlockedHandled() + { return loadBlockedHandled; } + ++ /** Records the blocked load as being handled. */ + void setLoadBlockedHandled() + { loadBlockedHandled = true; } + + /** Returns the number of free entries (min of free LQ and SQ entries). */ + unsigned numFreeEntries(); + + /** Returns the number of loads ready to execute. */ + int numLoadsReady(); + + /** Returns the number of loads in the LQ. */ + int numLoads() { return loads; } + + /** Returns the number of stores in the SQ. */ + int numStores() { return stores; } + + /** Returns if either the LQ or SQ is full. */ + bool isFull() { return lqFull() || sqFull(); } + + /** Returns if the LQ is full. */ + bool lqFull() { return loads >= (LQEntries - 1); } + + /** Returns if the SQ is full. */ + bool sqFull() { return stores >= (SQEntries - 1); } + + /** Returns the number of instructions in the LSQ. */ + unsigned getCount() { return loads + stores; } + + /** Returns if there are any stores to writeback. */ + bool hasStoresToWB() { return storesToWB; } + + /** Returns the number of stores to writeback. */ + int numStoresToWB() { return storesToWB; } + + /** Returns if the LSQ unit will writeback on this cycle. 
*/ + bool willWB() { return storeQueue[storeWBIdx].canWB && + !storeQueue[storeWBIdx].completed/* && + !dcacheInterface->isBlocked()*/; } + + private: + /** Completes the store at the specified index. */ + void completeStore(int store_idx); + + /** Increments the given store index (circular queue). */ + inline void incrStIdx(int &store_idx); + /** Decrements the given store index (circular queue). */ + inline void decrStIdx(int &store_idx); + /** Increments the given load index (circular queue). */ + inline void incrLdIdx(int &load_idx); + /** Decrements the given load index (circular queue). */ + inline void decrLdIdx(int &load_idx); + + public: + /** Debugging function to dump instructions in the LSQ. */ + void dumpInsts(); + + private: + /** Pointer to the CPU. */ + FullCPU *cpu; + + /** Pointer to the IEW stage. */ + IEW *iewStage; + + MemObject *mem; + + class DcachePort : public Port + { + protected: + FullCPU *cpu; + LSQUnit *lsq; + + public: + DcachePort(FullCPU *_cpu, LSQUnit *_lsq) + : Port(_lsq->name() + "-dport"), cpu(_cpu), lsq(_lsq) + { } + + protected: + virtual Tick recvAtomic(PacketPtr pkt); + + virtual void recvFunctional(PacketPtr pkt); + + virtual void recvStatusChange(Status status); + + virtual void getDeviceAddressRanges(AddrRangeList &resp, + AddrRangeList &snoop) + { resp.clear(); snoop.clear(); } + + virtual bool recvTiming(PacketPtr pkt); + + virtual void recvRetry(); + }; + + /** Pointer to the D-cache. */ + DcachePort *dcachePort; + + /** Pointer to the page table. */ +// PageTable *pTable; + + public: + struct SQEntry { + /** Constructs an empty store queue entry. */ + SQEntry() + : inst(NULL), req(NULL), size(0), data(0), + canWB(0), committed(0), completed(0) + { } + + /** Constructs a store queue entry for a given instruction. */ + SQEntry(DynInstPtr &_inst) + : inst(_inst), req(NULL), size(0), data(0), + canWB(0), committed(0), completed(0) + { } + + /** The store instruction. */ + DynInstPtr inst; + /** The request for the store. */ + RequestPtr req; + /** The size of the store. */ + int size; + /** The store data. */ + IntReg data; + /** Whether or not the store can writeback. */ + bool canWB; + /** Whether or not the store is committed. */ + bool committed; + /** Whether or not the store is completed. */ + bool completed; + }; + + private: + /** The LSQUnit thread id. */ + unsigned lsqID; + + /** The store queue. */ + std::vector storeQueue; + + /** The load queue. */ + std::vector loadQueue; + + /** The number of LQ entries, plus a sentinel entry (circular queue). + * @todo: Consider having var that records the true number of LQ entries. + */ + unsigned LQEntries; + /** The number of SQ entries, plus a sentinel entry (circular queue). + * @todo: Consider having var that records the true number of SQ entries. + */ + unsigned SQEntries; + + /** The number of load instructions in the LQ. */ + int loads; + /** The number of store instructions in the SQ. */ + int stores; + /** The number of store instructions in the SQ waiting to writeback. */ + int storesToWB; + + /** The index of the head instruction in the LQ. */ + int loadHead; + /** The index of the tail instruction in the LQ. */ + int loadTail; + + /** The index of the head instruction in the SQ. */ + int storeHead; + /** The index of the first instruction that may be ready to be + * written back, and has not yet been written back. + */ + int storeWBIdx; + /** The index of the tail instruction in the SQ. 
*/ + int storeTail; + + /// @todo Consider moving to a more advanced model with write vs read ports + /** The number of cache ports available each cycle. */ + int cachePorts; + + /** The number of used cache ports in this cycle. */ + int usedPorts; + ++ /** Is the LSQ switched out. */ + bool switchedOut; + + //list mshrSeqNums; + + /** Wire to read information from the issue stage time queue. */ + typename TimeBuffer::wire fromIssue; + + /** Whether or not the LSQ is stalled. */ + bool stalled; + /** The store that causes the stall due to partial store to load + * forwarding. + */ + InstSeqNum stallingStoreIsn; + /** The index of the above store. */ + int stallingLoadIdx; + + /** Whether or not a load is blocked due to the memory system. */ + bool isLoadBlocked; + ++ /** Has the blocked load been handled. */ + bool loadBlockedHandled; + ++ /** The sequence number of the blocked load. */ + InstSeqNum blockedLoadSeqNum; + + /** The oldest load that caused a memory ordering violation. */ + DynInstPtr memDepViolator; + + // Will also need how many read/write ports the Dcache has. Or keep track + // of that in stage that is one level up, and only call executeLoad/Store + // the appropriate number of times. +/* + // total number of loads forwaded from LSQ stores + Stats::Vector<> lsq_forw_loads; + + // total number of loads ignored due to invalid addresses + Stats::Vector<> inv_addr_loads; + + // total number of software prefetches ignored due to invalid addresses + Stats::Vector<> inv_addr_swpfs; + + // total non-speculative bogus addresses seen (debug var) + Counter sim_invalid_addrs; + Stats::Vector<> fu_busy; //cumulative fu busy + + // ready loads blocked due to memory disambiguation + Stats::Vector<> lsq_blocked_loads; + + Stats::Scalar<> lsqInversion; +*/ + public: + /** Executes the load at the given index. */ + template + Fault read(Request *req, T &data, int load_idx); + + /** Executes the store at the given index. */ + template + Fault write(Request *req, T &data, int store_idx); + + /** Returns the index of the head load instruction. */ + int getLoadHead() { return loadHead; } + /** Returns the sequence number of the head load instruction. */ + InstSeqNum getLoadHeadSeqNum() + { + if (loadQueue[loadHead]) { + return loadQueue[loadHead]->seqNum; + } else { + return 0; + } + + } + + /** Returns the index of the head store instruction. */ + int getStoreHead() { return storeHead; } + /** Returns the sequence number of the head store instruction. */ + InstSeqNum getStoreHeadSeqNum() + { + if (storeQueue[storeHead].inst) { + return storeQueue[storeHead].inst->seqNum; + } else { + return 0; + } + + } + + /** Returns whether or not the LSQ unit is stalled. */ + bool isStalled() { return stalled; } +}; + +template +template +Fault +LSQUnit::read(Request *req, T &data, int load_idx) +{ + DynInstPtr load_inst = loadQueue[load_idx]; + + assert(load_inst); + + assert(!load_inst->isExecuted()); + + // Make sure this isn't an uncacheable access + // A bit of a hackish way to get uncached accesses to work only if they're + // at the head of the LSQ and are ready to commit (at the head of the ROB + // too). 
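The read() implementation that follows classifies each older store against the load with four bounds tests: full containment lets the store's data be forwarded, while any partial overlap stalls the load until the store writes back. A compilable sketch of the interval math, collapsing the original's three partial-overlap disjuncts into a single intersection test:

    #include <cstdint>
    #include <iostream>

    enum class Overlap { None, Full, Partial };

    // The bounds checks used by LSQUnit::read() below: a store fully
    // covers the load iff [ld, ld+ld_size) lies inside [st, st+st_size);
    // any other intersection is a partial overlap that forces a stall.
    Overlap classify(uint64_t ld_addr, unsigned ld_size,
                     uint64_t st_addr, unsigned st_size)
    {
        bool store_has_lower = ld_addr >= st_addr;
        bool store_has_upper = ld_addr + ld_size <= st_addr + st_size;
        bool lower_overlap   = ld_addr < st_addr + st_size;
        bool upper_overlap   = ld_addr + ld_size > st_addr;

        if (store_has_lower && store_has_upper)
            return Overlap::Full;      // forward store data to the load
        if (lower_overlap && upper_overlap)
            return Overlap::Partial;   // stall load until the store writes back
        return Overlap::None;
    }

    int main() {
        std::cout << int(classify(0x100, 4, 0x100, 8)) << " "   // Full
                  << int(classify(0x104, 8, 0x100, 8)) << " "   // Partial
                  << int(classify(0x200, 4, 0x100, 8)) << "\n"; // None
        return 0;
    }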
+ if (req->getFlags() & UNCACHEABLE && + (load_idx != loadHead || !load_inst->reachedCommit)) { + iewStage->rescheduleMemInst(load_inst); + return TheISA::genMachineCheckFault(); + } + + // Check the SQ for any previous stores that might lead to forwarding + int store_idx = load_inst->sqIdx; + + int store_size = 0; + + DPRINTF(LSQUnit, "Read called, load idx: %i, store idx: %i, " + "storeHead: %i addr: %#x\n", + load_idx, store_idx, storeHead, req->getPaddr()); + +#if 0 + if (req->getFlags() & LOCKED) { + cpu->lockAddr = req->getPaddr(); + cpu->lockFlag = true; + } +#endif + + while (store_idx != -1) { + // End once we've reached the top of the LSQ + if (store_idx == storeWBIdx) { + break; + } + + // Move the index to one younger + if (--store_idx < 0) + store_idx += SQEntries; + + assert(storeQueue[store_idx].inst); + + store_size = storeQueue[store_idx].size; + + if (store_size == 0) + continue; + + // Check if the store data is within the lower and upper bounds of + // addresses that the request needs. + bool store_has_lower_limit = + req->getVaddr() >= storeQueue[store_idx].inst->effAddr; + bool store_has_upper_limit = + (req->getVaddr() + req->getSize()) <= + (storeQueue[store_idx].inst->effAddr + store_size); + bool lower_load_has_store_part = + req->getVaddr() < (storeQueue[store_idx].inst->effAddr + + store_size); + bool upper_load_has_store_part = + (req->getVaddr() + req->getSize()) > + storeQueue[store_idx].inst->effAddr; + + // If the store's data has all of the data needed, we can forward. + if (store_has_lower_limit && store_has_upper_limit) { + // Get shift amount for offset into the store's data. + int shift_amt = req->getVaddr() & (store_size - 1); + // @todo: Magic number, assumes byte addressing + shift_amt = shift_amt << 3; + + // Cast this to type T? + data = storeQueue[store_idx].data >> shift_amt; + + assert(!load_inst->memData); + load_inst->memData = new uint8_t[64]; + + memcpy(load_inst->memData, &data, req->getSize()); + + DPRINTF(LSQUnit, "Forwarding from store idx %i to load to " + "addr %#x, data %#x\n", + store_idx, req->getVaddr(), *(load_inst->memData)); +/* + typename LdWritebackEvent *wb = + new typename LdWritebackEvent(load_inst, + iewStage); + + // We'll say this has a 1 cycle load-store forwarding latency + // for now. + // @todo: Need to make this a parameter. + wb->schedule(curTick); +*/ + // Should keep track of stat for forwarded data + return NoFault; + } else if ((store_has_lower_limit && lower_load_has_store_part) || + (store_has_upper_limit && upper_load_has_store_part) || + (lower_load_has_store_part && upper_load_has_store_part)) { + // This is the partial store-load forwarding case where a store + // has only part of the load's data. + + // If it's already been written back, then don't worry about + // stalling on it. + if (storeQueue[store_idx].completed) { + continue; + } + + // Must stall load and force it to retry, so long as it's the oldest + // load that needs to do so. + if (!stalled || + (stalled && + load_inst->seqNum < + loadQueue[stallingLoadIdx]->seqNum)) { + stalled = true; + stallingStoreIsn = storeQueue[store_idx].inst->seqNum; + stallingLoadIdx = load_idx; + } + + // Tell IQ/mem dep unit that this instruction will need to be + // rescheduled eventually + iewStage->rescheduleMemInst(load_inst); + + // Do not generate a writeback event as this instruction is not + // complete. + DPRINTF(LSQUnit, "Load-store forwarding mis-match. 
" + "Store idx %i to load addr %#x\n", + store_idx, req->getVaddr()); + + return NoFault; + } + } + + // If there's no forwarding case, then go access memory + DPRINTF(LSQUnit, "Doing functional access for inst [sn:%lli] PC %#x\n", + load_inst->seqNum, load_inst->readPC()); + + assert(!load_inst->memData); + load_inst->memData = new uint8_t[64]; + + ++usedPorts; + + DPRINTF(LSQUnit, "Doing timing access for inst PC %#x\n", + load_inst->readPC()); + + PacketPtr data_pkt = new Packet(req, Packet::ReadReq, Packet::Broadcast); + data_pkt->dataStatic(load_inst->memData); + + // if we have a cache, do cache access too + if (!dcachePort->sendTiming(data_pkt)) { + // There's an older load that's already going to squash. + if (isLoadBlocked && blockedLoadSeqNum < load_inst->seqNum) + return NoFault; + + // Record that the load was blocked due to memory. This + // load will squash all instructions after it, be + // refetched, and re-executed. + isLoadBlocked = true; + loadBlockedHandled = false; + blockedLoadSeqNum = load_inst->seqNum; + // No fault occurred, even though the interface is blocked. + return NoFault; + } + + if (data_pkt->result != Packet::Success) { + DPRINTF(LSQUnit, "LSQUnit: D-cache miss!\n"); + DPRINTF(Activity, "Activity: ld accessing mem miss [sn:%lli]\n", + load_inst->seqNum); + } else { + DPRINTF(LSQUnit, "LSQUnit: D-cache hit!\n"); + DPRINTF(Activity, "Activity: ld accessing mem hit [sn:%lli]\n", + load_inst->seqNum); + } + + return NoFault; +} + +template +template +Fault +LSQUnit::write(Request *req, T &data, int store_idx) +{ + assert(storeQueue[store_idx].inst); + + DPRINTF(LSQUnit, "Doing write to store idx %i, addr %#x data %#x" + " | storeHead:%i [sn:%i]\n", + store_idx, req->getPaddr(), data, storeHead, + storeQueue[store_idx].inst->seqNum); + + storeQueue[store_idx].req = req; + storeQueue[store_idx].size = sizeof(T); + storeQueue[store_idx].data = data; + + // This function only writes the data to the store queue, so no fault + // can happen here. + return NoFault; +} + +#endif // __CPU_O3_LSQ_UNIT_HH__ diff --cc src/cpu/o3/lsq_unit_impl.hh index 3f6af3d2c,000000000..1ad561dc0 mode 100644,000000..100644 --- a/src/cpu/o3/lsq_unit_impl.hh +++ b/src/cpu/o3/lsq_unit_impl.hh @@@ -1,866 -1,0 +1,869 @@@ +/* + * Copyright (c) 2004-2005 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "cpu/checker/cpu.hh" +#include "cpu/o3/lsq_unit.hh" +#include "base/str.hh" +#include "mem/request.hh" + +template +void +LSQUnit::completeDataAccess(PacketPtr pkt) +{ +/* + DPRINTF(IEW, "Load writeback event [sn:%lli]\n", inst->seqNum); + DPRINTF(Activity, "Activity: Ld Writeback event [sn:%lli]\n", inst->seqNum); + + //iewStage->ldstQueue.removeMSHR(inst->threadNumber,inst->seqNum); + + if (iewStage->isSwitchedOut()) { + inst = NULL; + return; + } else if (inst->isSquashed()) { + iewStage->wakeCPU(); + inst = NULL; + return; + } + + iewStage->wakeCPU(); + + if (!inst->isExecuted()) { + inst->setExecuted(); + + // Complete access to copy data to proper place. + inst->completeAcc(); + } + + // Need to insert instruction into queue to commit + iewStage->instToCommit(inst); + + iewStage->activityThisCycle(); + + inst = NULL; +*/ +} + +template +void +LSQUnit::completeStoreDataAccess(DynInstPtr &inst) +{ +/* + DPRINTF(LSQ, "Cache miss complete for store idx:%i\n", storeIdx); + DPRINTF(Activity, "Activity: st writeback event idx:%i\n", storeIdx); + + //lsqPtr->removeMSHR(lsqPtr->storeQueue[storeIdx].inst->seqNum); + - if (lsqPtr->isSwitchedOut()) ++ if (lsqPtr->isSwitchedOut()) { ++ if (wbEvent) ++ delete wbEvent; ++ + return; ++ } + + lsqPtr->cpu->wakeCPU(); + + if (wb) + lsqPtr->completeDataAccess(storeIdx); + lsqPtr->completeStore(storeIdx); +*/ +} + +template +Tick +LSQUnit::DcachePort::recvAtomic(PacketPtr pkt) +{ + panic("O3CPU model does not work with atomic mode!"); + return curTick; +} + +template +void +LSQUnit::DcachePort::recvFunctional(PacketPtr pkt) +{ + panic("O3CPU doesn't expect recvFunctional callback!"); +} + +template +void +LSQUnit::DcachePort::recvStatusChange(Status status) +{ + if (status == RangeChange) + return; + + panic("O3CPU doesn't expect recvStatusChange callback!"); +} + +template +bool +LSQUnit::DcachePort::recvTiming(PacketPtr pkt) +{ + lsq->completeDataAccess(pkt); + return true; +} + +template +void +LSQUnit::DcachePort::recvRetry() +{ + panic("Retry unsupported for now!"); + // we shouldn't get a retry unless we have a packet that we're + // waiting to transmit +/* + assert(cpu->dcache_pkt != NULL); + assert(cpu->_status == DcacheRetry); + PacketPtr tmp = cpu->dcache_pkt; + if (sendTiming(tmp)) { + cpu->_status = DcacheWaitResponse; + cpu->dcache_pkt = NULL; + } +*/ +} + +template +LSQUnit::LSQUnit() + : loads(0), stores(0), storesToWB(0), stalled(false), isLoadBlocked(false), + loadBlockedHandled(false) +{ +} + +template +void +LSQUnit::init(Params *params, unsigned maxLQEntries, + unsigned maxSQEntries, unsigned id) +{ + DPRINTF(LSQUnit, "Creating LSQUnit%i object.\n",id); + + switchedOut = false; + + lsqID = id; + + // Add 1 for the sentinel entry (they are circular queues). 
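The init() code below sizes both queues with one extra sentinel slot so that head == tail can mean "empty" while "full" remains detectable one slot earlier; numFreeEntries() correspondingly subtracts the sentinel. A generic sketch of that circular-queue convention:

    #include <iostream>
    #include <vector>

    // Circular queue with one sentinel slot, as in LSQUnit::init() below:
    // capacity N is stored as N+1 slots so "full" ((tail+1) % size == head)
    // and "empty" (head == tail) stay distinguishable.
    template <typename T>
    struct CircularQueue {
        std::vector<T> slots;
        int head = 0, tail = 0;

        explicit CircularQueue(int capacity) : slots(capacity + 1) {}

        bool empty() const { return head == tail; }
        bool full() const { return (tail + 1) % (int)slots.size() == head; }

        bool push(const T &v) {
            if (full()) return false;
            slots[tail] = v;
            tail = (tail + 1) % (int)slots.size();
            return true;
        }
    };

    int main() {
        CircularQueue<int> lq(2);          // 2 usable entries, 3 slots
        std::cout << lq.push(1);           // 1
        std::cout << lq.push(2);           // 1
        std::cout << lq.push(3) << "\n";   // 0: only the sentinel slot is left
        return 0;
    }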
+ LQEntries = maxLQEntries + 1; + SQEntries = maxSQEntries + 1; + + loadQueue.resize(LQEntries); + storeQueue.resize(SQEntries); + + loadHead = loadTail = 0; + + storeHead = storeWBIdx = storeTail = 0; + + usedPorts = 0; + cachePorts = params->cachePorts; + + Port *mem_dport = params->mem->getPort(""); + dcachePort->setPeer(mem_dport); + mem_dport->setPeer(dcachePort); + + memDepViolator = NULL; + + blockedLoadSeqNum = 0; +} + +template +void +LSQUnit::setCPU(FullCPU *cpu_ptr) +{ + cpu = cpu_ptr; + dcachePort = new DcachePort(cpu, this); +} + +template +std::string +LSQUnit::name() const +{ + if (Impl::MaxThreads == 1) { + return iewStage->name() + ".lsq"; + } else { + return iewStage->name() + ".lsq.thread." + to_string(lsqID); + } +} + +template +void +LSQUnit::clearLQ() +{ + loadQueue.clear(); +} + +template +void +LSQUnit::clearSQ() +{ + storeQueue.clear(); +} + +#if 0 +template +void +LSQUnit::setPageTable(PageTable *pt_ptr) +{ + DPRINTF(LSQUnit, "Setting the page table pointer.\n"); + pTable = pt_ptr; +} +#endif + +template +void +LSQUnit::switchOut() +{ + switchedOut = true; + for (int i = 0; i < loadQueue.size(); ++i) + loadQueue[i] = NULL; + + assert(storesToWB == 0); +} + +template +void +LSQUnit::takeOverFrom() +{ + switchedOut = false; + loads = stores = storesToWB = 0; + + loadHead = loadTail = 0; + + storeHead = storeWBIdx = storeTail = 0; + + usedPorts = 0; + + memDepViolator = NULL; + + blockedLoadSeqNum = 0; + + stalled = false; + isLoadBlocked = false; + loadBlockedHandled = false; +} + +template +void +LSQUnit::resizeLQ(unsigned size) +{ + unsigned size_plus_sentinel = size + 1; + assert(size_plus_sentinel >= LQEntries); + + if (size_plus_sentinel > LQEntries) { + while (size_plus_sentinel > loadQueue.size()) { + DynInstPtr dummy; + loadQueue.push_back(dummy); + LQEntries++; + } + } else { + LQEntries = size_plus_sentinel; + } + +} + +template +void +LSQUnit::resizeSQ(unsigned size) +{ + unsigned size_plus_sentinel = size + 1; + if (size_plus_sentinel > SQEntries) { + while (size_plus_sentinel > storeQueue.size()) { + SQEntry dummy; + storeQueue.push_back(dummy); + SQEntries++; + } + } else { + SQEntries = size_plus_sentinel; + } +} + +template +void +LSQUnit::insert(DynInstPtr &inst) +{ + assert(inst->isMemRef()); + + assert(inst->isLoad() || inst->isStore()); + + if (inst->isLoad()) { + insertLoad(inst); + } else { + insertStore(inst); + } + + inst->setInLSQ(); +} + +template +void +LSQUnit::insertLoad(DynInstPtr &load_inst) +{ + assert((loadTail + 1) % LQEntries != loadHead); + assert(loads < LQEntries); + + DPRINTF(LSQUnit, "Inserting load PC %#x, idx:%i [sn:%lli]\n", + load_inst->readPC(), loadTail, load_inst->seqNum); + + load_inst->lqIdx = loadTail; + + if (stores == 0) { + load_inst->sqIdx = -1; + } else { + load_inst->sqIdx = storeTail; + } + + loadQueue[loadTail] = load_inst; + + incrLdIdx(loadTail); + + ++loads; +} + +template +void +LSQUnit::insertStore(DynInstPtr &store_inst) +{ + // Make sure it is not full before inserting an instruction. 
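insertLoad() above tags each load with the store-queue tail at insert time (sqIdx, or -1 when the SQ is empty); read() later scans backwards from that index, so only stores older than the load are candidates for forwarding. A hypothetical, simplified sketch of that bookkeeping:

    #include <iostream>

    // Simplified SQ index tracking: a load remembers the current store
    // tail, and the forwarding scan walks backwards from there toward the
    // writeback pointer. Names here are illustrative, not gem5's.
    struct LSQIndices {
        int storeTail = 0;
        int stores = 0;

        // Returns the sqIdx an incoming load should record.
        int tagLoad() const { return stores == 0 ? -1 : storeTail; }

        void insertStore(int sq_entries) {
            storeTail = (storeTail + 1) % sq_entries;  // incrStIdx()
            ++stores;
        }
    };

    int main() {
        LSQIndices lsq;
        std::cout << "load with empty SQ: sqIdx " << lsq.tagLoad() << "\n";  // -1
        lsq.insertStore(8);
        std::cout << "load after a store: sqIdx " << lsq.tagLoad() << "\n";  // 1
        return 0;
    }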
+ assert((storeTail + 1) % SQEntries != storeHead); + assert(stores < SQEntries); + + DPRINTF(LSQUnit, "Inserting store PC %#x, idx:%i [sn:%lli]\n", + store_inst->readPC(), storeTail, store_inst->seqNum); + + store_inst->sqIdx = storeTail; + store_inst->lqIdx = loadTail; + + storeQueue[storeTail] = SQEntry(store_inst); + + incrStIdx(storeTail); + + ++stores; +} + +template +typename Impl::DynInstPtr +LSQUnit::getMemDepViolator() +{ + DynInstPtr temp = memDepViolator; + + memDepViolator = NULL; + + return temp; +} + +template +unsigned +LSQUnit::numFreeEntries() +{ + unsigned free_lq_entries = LQEntries - loads; + unsigned free_sq_entries = SQEntries - stores; + + // Both the LQ and SQ entries have an extra dummy entry to differentiate + // empty/full conditions. Subtract 1 from the free entries. + if (free_lq_entries < free_sq_entries) { + return free_lq_entries - 1; + } else { + return free_sq_entries - 1; + } +} + +template +int +LSQUnit::numLoadsReady() +{ + int load_idx = loadHead; + int retval = 0; + + while (load_idx != loadTail) { + assert(loadQueue[load_idx]); + + if (loadQueue[load_idx]->readyToIssue()) { + ++retval; + } + } + + return retval; +} + +template +Fault +LSQUnit::executeLoad(DynInstPtr &inst) +{ + // Execute a specific load. + Fault load_fault = NoFault; + + DPRINTF(LSQUnit, "Executing load PC %#x, [sn:%lli]\n", + inst->readPC(),inst->seqNum); + + load_fault = inst->initiateAcc(); + + // If the instruction faulted, then we need to send it along to commit + // without the instruction completing. + if (load_fault != NoFault) { + // Send this instruction to commit, also make sure iew stage + // realizes there is activity. + iewStage->instToCommit(inst); + iewStage->activityThisCycle(); + } + + return load_fault; +} + +template +Fault +LSQUnit::executeStore(DynInstPtr &store_inst) +{ + using namespace TheISA; + // Make sure that a store exists. + assert(stores != 0); + + int store_idx = store_inst->sqIdx; + + DPRINTF(LSQUnit, "Executing store PC %#x [sn:%lli]\n", + store_inst->readPC(), store_inst->seqNum); + + // Check the recently completed loads to see if any match this store's + // address. If so, then we have a memory ordering violation. + int load_idx = store_inst->lqIdx; + + Fault store_fault = store_inst->initiateAcc(); +// Fault store_fault = store_inst->execute(); + + if (storeQueue[store_idx].size == 0) { + DPRINTF(LSQUnit,"Fault on Store PC %#x, [sn:%lli],Size = 0\n", + store_inst->readPC(),store_inst->seqNum); + + return store_fault; + } + + assert(store_fault == NoFault); + + if (store_inst->isStoreConditional()) { + // Store conditionals need to set themselves as able to + // writeback if we haven't had a fault by here. + storeQueue[store_idx].canWB = true; + + ++storesToWB; + } + + if (!memDepViolator) { + while (load_idx != loadTail) { + // Really only need to check loads that have actually executed + // It's safe to check all loads because effAddr is set to + // InvalAddr when the dyn inst is created. + + // @todo: For now this is extra conservative, detecting a + // violation if the addresses match assuming all accesses + // are quad word accesses. + + // @todo: Fix this, magic number being used here + if ((loadQueue[load_idx]->effAddr >> 8) == + (store_inst->effAddr >> 8)) { + // A load incorrectly passed this store. Squash and refetch. + // For now return a fault to show that it was unsuccessful. 
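+                // Note on the comparison above (an observation, not
+                // original commentary): "effAddr >> 8" matches addresses
+                // at 256-byte granularity, which is even coarser than the
+                // quad-word assumption the @todo mentions.  For example,
+                // 0x1008 >> 8 and 0x10F0 >> 8 are both 0x10, so those two
+                // accesses would be flagged as a violation; a true
+                // quad-word check would compare effAddr >> 3 instead.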
+ memDepViolator = loadQueue[load_idx]; + + return genMachineCheckFault(); + } + + incrLdIdx(load_idx); + } + + // If we've reached this point, there was no violation. + memDepViolator = NULL; + } + + return store_fault; +} + +template +void +LSQUnit::commitLoad() +{ + assert(loadQueue[loadHead]); + + DPRINTF(LSQUnit, "Committing head load instruction, PC %#x\n", + loadQueue[loadHead]->readPC()); + - + loadQueue[loadHead] = NULL; + + incrLdIdx(loadHead); + + --loads; +} + +template +void +LSQUnit::commitLoads(InstSeqNum &youngest_inst) +{ + assert(loads == 0 || loadQueue[loadHead]); + + while (loads != 0 && loadQueue[loadHead]->seqNum <= youngest_inst) { + commitLoad(); + } +} + +template +void +LSQUnit::commitStores(InstSeqNum &youngest_inst) +{ + assert(stores == 0 || storeQueue[storeHead].inst); + + int store_idx = storeHead; + + while (store_idx != storeTail) { + assert(storeQueue[store_idx].inst); + // Mark any stores that are now committed and have not yet + // been marked as able to write back. + if (!storeQueue[store_idx].canWB) { + if (storeQueue[store_idx].inst->seqNum > youngest_inst) { + break; + } + DPRINTF(LSQUnit, "Marking store as able to write back, PC " + "%#x [sn:%lli]\n", + storeQueue[store_idx].inst->readPC(), + storeQueue[store_idx].inst->seqNum); + + storeQueue[store_idx].canWB = true; + + ++storesToWB; + } + + incrStIdx(store_idx); + } +} + +template +void +LSQUnit::writebackStores() +{ + while (storesToWB > 0 && + storeWBIdx != storeTail && + storeQueue[storeWBIdx].inst && + storeQueue[storeWBIdx].canWB && + usedPorts < cachePorts) { + + // Store didn't write any data so no need to write it back to + // memory. + if (storeQueue[storeWBIdx].size == 0) { + completeStore(storeWBIdx); + + incrStIdx(storeWBIdx); + + continue; + } +/* + if (dcacheInterface && dcacheInterface->isBlocked()) { + DPRINTF(LSQUnit, "Unable to write back any more stores, cache" + " is blocked!\n"); + break; + } +*/ + ++usedPorts; + + if (storeQueue[storeWBIdx].inst->isDataPrefetch()) { + incrStIdx(storeWBIdx); + + continue; + } + + assert(storeQueue[storeWBIdx].req); + assert(!storeQueue[storeWBIdx].committed); + + DynInstPtr inst = storeQueue[storeWBIdx].inst; + + Request *req = storeQueue[storeWBIdx].req; + storeQueue[storeWBIdx].committed = true; + + assert(!inst->memData); + inst->memData = new uint8_t[64]; + memcpy(inst->memData, (uint8_t *)&storeQueue[storeWBIdx].data, req->getSize()); + + PacketPtr data_pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast); + data_pkt->dataStatic(inst->memData); + + DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x " + "to Addr:%#x, data:%#x [sn:%lli]\n", + storeWBIdx, storeQueue[storeWBIdx].inst->readPC(), + req->getPaddr(), *(inst->memData), + storeQueue[storeWBIdx].inst->seqNum); + + if (!dcachePort->sendTiming(data_pkt)) { + // Need to handle becoming blocked on a store. + } else { + /* + StoreCompletionEvent *store_event = new + StoreCompletionEvent(storeWBIdx, NULL, this); + */ + if (isStalled() && + storeQueue[storeWBIdx].inst->seqNum == stallingStoreIsn) { + DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] " + "load idx:%i\n", + stallingStoreIsn, stallingLoadIdx); + stalled = false; + stallingStoreIsn = 0; + iewStage->replayMemInst(loadQueue[stallingLoadIdx]); + } +/* + typename LdWritebackEvent *wb = NULL; + if (req->flags & LOCKED) { + // Stx_C should not generate a system port transaction + // if it misses in the cache, but that might be hard + // to accomplish without explicit cache support. 
+ wb = new typename + LdWritebackEvent(storeQueue[storeWBIdx].inst, + iewStage); + store_event->wbEvent = wb; + } +*/ + if (data_pkt->result != Packet::Success) { + DPRINTF(LSQUnit,"D-Cache Write Miss on idx:%i!\n", + storeWBIdx); + + DPRINTF(Activity, "Active st accessing mem miss [sn:%lli]\n", + storeQueue[storeWBIdx].inst->seqNum); + + //mshrSeqNums.push_back(storeQueue[storeWBIdx].inst->seqNum); + + //DPRINTF(LSQUnit, "Added MSHR. count = %i\n",mshrSeqNums.size()); + + // @todo: Increment stat here. + } else { + DPRINTF(LSQUnit,"D-Cache: Write Hit on idx:%i !\n", + storeWBIdx); + + DPRINTF(Activity, "Active st accessing mem hit [sn:%lli]\n", + storeQueue[storeWBIdx].inst->seqNum); + } + + incrStIdx(storeWBIdx); + } + } + + // Not sure this should set it to 0. + usedPorts = 0; + + assert(stores >= 0 && storesToWB >= 0); +} + +/*template +void +LSQUnit::removeMSHR(InstSeqNum seqNum) +{ + list::iterator mshr_it = find(mshrSeqNums.begin(), + mshrSeqNums.end(), + seqNum); + + if (mshr_it != mshrSeqNums.end()) { + mshrSeqNums.erase(mshr_it); + DPRINTF(LSQUnit, "Removing MSHR. count = %i\n",mshrSeqNums.size()); + } +}*/ + +template +void +LSQUnit::squash(const InstSeqNum &squashed_num) +{ + DPRINTF(LSQUnit, "Squashing until [sn:%lli]!" + "(Loads:%i Stores:%i)\n", squashed_num, loads, stores); + + int load_idx = loadTail; + decrLdIdx(load_idx); + + while (loads != 0 && loadQueue[load_idx]->seqNum > squashed_num) { + DPRINTF(LSQUnit,"Load Instruction PC %#x squashed, " + "[sn:%lli]\n", + loadQueue[load_idx]->readPC(), + loadQueue[load_idx]->seqNum); + + if (isStalled() && load_idx == stallingLoadIdx) { + stalled = false; + stallingStoreIsn = 0; + stallingLoadIdx = 0; + } + + // Clear the smart pointer to make sure it is decremented. + loadQueue[load_idx]->squashed = true; + loadQueue[load_idx] = NULL; + --loads; + + // Inefficient! + loadTail = load_idx; + + decrLdIdx(load_idx); + } + + if (isLoadBlocked) { + if (squashed_num < blockedLoadSeqNum) { + isLoadBlocked = false; + loadBlockedHandled = false; + blockedLoadSeqNum = 0; + } + } + + int store_idx = storeTail; + decrStIdx(store_idx); + + while (stores != 0 && + storeQueue[store_idx].inst->seqNum > squashed_num) { + // Instructions marked as can WB are already committed. + if (storeQueue[store_idx].canWB) { + break; + } + + DPRINTF(LSQUnit,"Store Instruction PC %#x squashed, " + "idx:%i [sn:%lli]\n", + storeQueue[store_idx].inst->readPC(), + store_idx, storeQueue[store_idx].inst->seqNum); + + // I don't think this can happen. It should have been cleared + // by the stalling load. + if (isStalled() && + storeQueue[store_idx].inst->seqNum == stallingStoreIsn) { + panic("Is stalled should have been cleared by stalling load!\n"); + stalled = false; + stallingStoreIsn = 0; + } + + // Clear the smart pointer to make sure it is decremented. + storeQueue[store_idx].inst->squashed = true; + storeQueue[store_idx].inst = NULL; + storeQueue[store_idx].canWB = 0; + + storeQueue[store_idx].req = NULL; + --stores; + + // Inefficient! + storeTail = store_idx; + + decrStIdx(store_idx); + } +} + +template +void +LSQUnit::completeStore(int store_idx) +{ + assert(storeQueue[store_idx].inst); + storeQueue[store_idx].completed = true; + --storesToWB; + // A bit conservative because a store completion may not free up entries, + // but hopefully avoids two store completions in one cycle from making + // the CPU tick twice. 
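+    // activityThisCycle() feeds the CPU's idle-detection logic: it
+    // records that real work happened so the CPU does not deschedule
+    // itself.  Over-reporting, as the comment above concedes, only costs
+    // a wasted tick; under-reporting could leave the CPU descheduled
+    // with work still pending, so the conservative choice is the safe one.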
+ cpu->activityThisCycle(); + + if (store_idx == storeHead) { + do { + incrStIdx(storeHead); + + --stores; + } while (storeQueue[storeHead].completed && + storeHead != storeTail); + + iewStage->updateLSQNextCycle = true; + } + + DPRINTF(LSQUnit, "Completing store [sn:%lli], idx:%i, store head " + "idx:%i\n", + storeQueue[store_idx].inst->seqNum, store_idx, storeHead); + + if (isStalled() && + storeQueue[store_idx].inst->seqNum == stallingStoreIsn) { + DPRINTF(LSQUnit, "Unstalling, stalling store [sn:%lli] " + "load idx:%i\n", + stallingStoreIsn, stallingLoadIdx); + stalled = false; + stallingStoreIsn = 0; + iewStage->replayMemInst(loadQueue[stallingLoadIdx]); + } + + storeQueue[store_idx].inst->setCompleted(); + + // Tell the checker we've completed this instruction. Some stores + // may get reported twice to the checker, but the checker can + // handle that case. + if (cpu->checker) { + cpu->checker->tick(storeQueue[store_idx].inst); + } +} + +template +inline void +LSQUnit::incrStIdx(int &store_idx) +{ + if (++store_idx >= SQEntries) + store_idx = 0; +} + +template +inline void +LSQUnit::decrStIdx(int &store_idx) +{ + if (--store_idx < 0) + store_idx += SQEntries; +} + +template +inline void +LSQUnit::incrLdIdx(int &load_idx) +{ + if (++load_idx >= LQEntries) + load_idx = 0; +} + +template +inline void +LSQUnit::decrLdIdx(int &load_idx) +{ + if (--load_idx < 0) + load_idx += LQEntries; +} + +template +void +LSQUnit::dumpInsts() +{ + cprintf("Load store queue: Dumping instructions.\n"); + cprintf("Load queue size: %i\n", loads); + cprintf("Load queue: "); + + int load_idx = loadHead; + + while (load_idx != loadTail && loadQueue[load_idx]) { + cprintf("%#x ", loadQueue[load_idx]->readPC()); + + incrLdIdx(load_idx); + } + + cprintf("Store queue size: %i\n", stores); + cprintf("Store queue: "); + + int store_idx = storeHead; + + while (store_idx != storeTail && storeQueue[store_idx].inst) { + cprintf("%#x ", storeQueue[store_idx].inst->readPC()); + + incrStIdx(store_idx); + } + + cprintf("\n"); +} diff --cc src/cpu/o3/mem_dep_unit.hh index 4d763fca2,000000000..e399f0133 mode 100644,000000..100644 --- a/src/cpu/o3/mem_dep_unit.hh +++ b/src/cpu/o3/mem_dep_unit.hh @@@ -1,253 -1,0 +1,264 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Kevin Lim
+ */
+
+#ifndef __CPU_O3_MEM_DEP_UNIT_HH__
+#define __CPU_O3_MEM_DEP_UNIT_HH__
+
+#include <list>
+#include <set>
+
+#include "base/hashmap.hh"
+#include "base/refcnt.hh"
+#include "base/statistics.hh"
+#include "cpu/inst_seq.hh"
+
+struct SNHash {
+    size_t operator() (const InstSeqNum &seq_num) const {
+        unsigned a = (unsigned)seq_num;
+        unsigned hash = (((a >> 14) ^ ((a >> 2) & 0xffff))) & 0x7FFFFFFF;
+
+        return hash;
+    }
+};
+
+template <class Impl>
+class InstructionQueue;
+
+/**
+ * Memory dependency unit class.  This holds the memory dependence predictor.
+ * As memory operations are issued to the IQ, they are also issued to this
+ * unit, which then looks up the prediction as to what they are dependent
+ * upon.  This unit must be checked prior to a memory operation being able
+ * to issue.  Although this is templated, it's somewhat hard to make a
+ * generic memory dependence unit.  This one is mostly for store sets; it
+ * will be quite limited in what other memory dependence predictors it can
+ * utilize.  Thus this class should most likely be rewritten for other
+ * dependence prediction schemes.
+ */
+template <class MemDepPred, class Impl>
+class MemDepUnit {
+  public:
+    typedef typename Impl::Params Params;
+    typedef typename Impl::DynInstPtr DynInstPtr;
+
+    /** Empty constructor. Must call init() prior to using in this case. */
+    MemDepUnit() {}
+
+    /** Constructs a MemDepUnit with given parameters. */
+    MemDepUnit(Params *params);
+
+    /** Frees up any memory allocated. */
+    ~MemDepUnit();
+
+    /** Returns the name of the memory dependence unit. */
+    std::string name() const;
+
+    /** Initializes the unit with parameters and a thread id. */
+    void init(Params *params, int tid);
+
+    /** Registers statistics. */
+    void regStats();
+
++    /** Switches out the memory dependence predictor. */
+    void switchOut();
+
++    /** Takes over from another CPU's thread. */
+    void takeOverFrom();
+
+    /** Sets the pointer to the IQ. */
+    void setIQ(InstructionQueue<Impl> *iq_ptr);
+
+    /** Inserts a memory instruction. */
+    void insert(DynInstPtr &inst);
+
+    /** Inserts a non-speculative memory instruction. */
+    void insertNonSpec(DynInstPtr &inst);
+
+    /** Inserts a barrier instruction. */
+    void insertBarrier(DynInstPtr &barr_inst);
+
+    /** Indicate that an instruction has its registers ready. */
+    void regsReady(DynInstPtr &inst);
+
+    /** Indicate that a non-speculative instruction is ready. */
+    void nonSpecInstReady(DynInstPtr &inst);
+
+    /** Reschedules an instruction to be re-executed. */
+    void reschedule(DynInstPtr &inst);
+
+    /** Replays all instructions that have been rescheduled by moving them to
+     *  the ready list.
+     */
+    void replay(DynInstPtr &inst);
+
+    /** Completes a memory instruction. */
+    void completed(DynInstPtr &inst);
+
+    /** Completes a barrier instruction. */
+    void completeBarrier(DynInstPtr &inst);
+
+    /** Wakes any dependents of a memory instruction.
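+     *  For store sets, this is where a completing store (or barrier)
+     *  hands readiness to the instructions that were predicted to
+     *  depend on it.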
*/ + void wakeDependents(DynInstPtr &inst); + + /** Squashes all instructions up until a given sequence number for a + * specific thread. + */ + void squash(const InstSeqNum &squashed_num, unsigned tid); + + /** Indicates an ordering violation between a store and a younger load. */ + void violation(DynInstPtr &store_inst, DynInstPtr &violating_load); + + /** Issues the given instruction */ + void issue(DynInstPtr &inst); + + /** Debugging function to dump the lists of instructions. */ + void dumpLists(); + + private: + typedef typename std::list::iterator ListIt; + + class MemDepEntry; + + typedef RefCountingPtr MemDepEntryPtr; + + /** Memory dependence entries that track memory operations, marking + * when the instruction is ready to execute and what instructions depend + * upon it. + */ + class MemDepEntry : public RefCounted { + public: + /** Constructs a memory dependence entry. */ + MemDepEntry(DynInstPtr &new_inst) + : inst(new_inst), regsReady(false), memDepReady(false), + completed(false), squashed(false) + { ++#ifdef DEBUG + ++memdep_count; + + DPRINTF(MemDepUnit, "Memory dependency entry created. " + "memdep_count=%i\n", memdep_count); ++#endif + } + + /** Frees any pointers. */ + ~MemDepEntry() + { + for (int i = 0; i < dependInsts.size(); ++i) { + dependInsts[i] = NULL; + } - ++#ifdef DEBUG + --memdep_count; + + DPRINTF(MemDepUnit, "Memory dependency entry deleted. " + "memdep_count=%i\n", memdep_count); ++#endif + } + + /** Returns the name of the memory dependence entry. */ + std::string name() const { return "memdepentry"; } + + /** The instruction being tracked. */ + DynInstPtr inst; + + /** The iterator to the instruction's location inside the list. */ + ListIt listIt; + + /** A vector of any dependent instructions. */ + std::vector dependInsts; + + /** If the registers are ready or not. */ + bool regsReady; + /** If all memory dependencies have been satisfied. */ + bool memDepReady; + /** If the instruction is completed. */ + bool completed; + /** If the instruction is squashed. */ + bool squashed; + + /** For debugging. */ ++#ifdef DEBUG + static int memdep_count; + static int memdep_insert; + static int memdep_erase; ++#endif + }; + + /** Finds the memory dependence entry in the hash map. */ + inline MemDepEntryPtr &findInHash(const DynInstPtr &inst); + + /** Moves an entry to the ready list. */ + inline void moveToReady(MemDepEntryPtr &ready_inst_entry); + + typedef m5::hash_map MemDepHash; + + typedef typename MemDepHash::iterator MemDepHashIt; + + /** A hash map of all memory dependence entries. */ + MemDepHash memDepHash; + + /** A list of all instructions in the memory dependence unit. */ + std::list instList[Impl::MaxThreads]; + + /** A list of all instructions that are going to be replayed. */ + std::list instsToReplay; + + /** The memory dependence predictor. It is accessed upon new + * instructions being added to the IQ, and responds by telling + * this unit what instruction the newly added instruction is dependent + * upon. + */ + MemDepPred depPred; + ++ /** Is there an outstanding load barrier that loads must wait on. */ + bool loadBarrier; ++ /** The sequence number of the load barrier. */ + InstSeqNum loadBarrierSN; ++ /** Is there an outstanding store barrier that loads must wait on. */ + bool storeBarrier; ++ /** The sequence number of the store barrier. */ + InstSeqNum storeBarrierSN; + + /** Pointer to the IQ. */ + InstructionQueue *iqPtr; + + /** The thread id of this memory dependence unit. */ + int id; + + /** Stat for number of inserted loads. 
*/
+    Stats::Scalar<> insertedLoads;
+    /** Stat for number of inserted stores. */
+    Stats::Scalar<> insertedStores;
+    /** Stat for number of conflicting loads that had to wait for a store. */
+    Stats::Scalar<> conflictingLoads;
+    /** Stat for number of conflicting stores that had to wait for a store. */
+    Stats::Scalar<> conflictingStores;
+};
+
+#endif // __CPU_O3_MEM_DEP_UNIT_HH__
diff --cc src/cpu/o3/mem_dep_unit_impl.hh
index a4fed4b0d,000000000..50ad1e2c8
mode 100644,000000..100644
--- a/src/cpu/o3/mem_dep_unit_impl.hh
+++ b/src/cpu/o3/mem_dep_unit_impl.hh
@@@ -1,553 -1,0 +1,557 @@@
+/*
+ * Copyright (c) 2004-2006 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * + * Authors: Kevin Lim + */ + +#include + +#include "cpu/o3/inst_queue.hh" +#include "cpu/o3/mem_dep_unit.hh" + +template +MemDepUnit::MemDepUnit(Params *params) + : depPred(params->SSITSize, params->LFSTSize), loadBarrier(false), + loadBarrierSN(0), storeBarrier(false), storeBarrierSN(0), iqPtr(NULL) +{ + DPRINTF(MemDepUnit, "Creating MemDepUnit object.\n"); +} + +template +MemDepUnit::~MemDepUnit() +{ + for (int tid=0; tid < Impl::MaxThreads; tid++) { + + ListIt inst_list_it = instList[tid].begin(); + + MemDepHashIt hash_it; + + while (!instList[tid].empty()) { + hash_it = memDepHash.find((*inst_list_it)->seqNum); + + assert(hash_it != memDepHash.end()); + + memDepHash.erase(hash_it); + + instList[tid].erase(inst_list_it++); + } + } + + assert(MemDepEntry::memdep_count == 0); +} + +template +std::string +MemDepUnit::name() const +{ + return "memdepunit"; +} + +template +void +MemDepUnit::init(Params *params, int tid) +{ + DPRINTF(MemDepUnit, "Creating MemDepUnit %i object.\n",tid); + + id = tid; + + depPred.init(params->SSITSize, params->LFSTSize); +} + +template +void +MemDepUnit::regStats() +{ + insertedLoads + .name(name() + ".memDep.insertedLoads") + .desc("Number of loads inserted to the mem dependence unit."); + + insertedStores + .name(name() + ".memDep.insertedStores") + .desc("Number of stores inserted to the mem dependence unit."); + + conflictingLoads + .name(name() + ".memDep.conflictingLoads") + .desc("Number of conflicting loads."); + + conflictingStores + .name(name() + ".memDep.conflictingStores") + .desc("Number of conflicting stores."); +} + +template +void +MemDepUnit::switchOut() +{ ++ // Clear any state. + for (int i = 0; i < Impl::MaxThreads; ++i) { + instList[i].clear(); + } + instsToReplay.clear(); + memDepHash.clear(); +} + +template +void +MemDepUnit::takeOverFrom() +{ ++ // Be sure to reset all state. + loadBarrier = storeBarrier = false; + loadBarrierSN = storeBarrierSN = 0; + depPred.clear(); +} + +template +void +MemDepUnit::setIQ(InstructionQueue *iq_ptr) +{ + iqPtr = iq_ptr; +} + +template +void +MemDepUnit::insert(DynInstPtr &inst) +{ + unsigned tid = inst->threadNumber; + + MemDepEntryPtr inst_entry = new MemDepEntry(inst); + + // Add the MemDepEntry to the hash. + memDepHash.insert( + std::pair(inst->seqNum, inst_entry)); + MemDepEntry::memdep_insert++; + + instList[tid].push_back(inst); + + inst_entry->listIt = --(instList[tid].end()); + + // Check any barriers and the dependence predictor for any - // producing stores. ++ // producing memrefs/stores. + InstSeqNum producing_store; + if (inst->isLoad() && loadBarrier) { + producing_store = loadBarrierSN; + } else if (inst->isStore() && storeBarrier) { + producing_store = storeBarrierSN; + } else { + producing_store = depPred.checkInst(inst->readPC()); + } + + MemDepEntryPtr store_entry = NULL; + + // If there is a producing store, try to find the entry. + if (producing_store != 0) { + MemDepHashIt hash_it = memDepHash.find(producing_store); + + if (hash_it != memDepHash.end()) { + store_entry = (*hash_it).second; + } + } + + // If no store entry, then instruction can issue as soon as the registers + // are ready. + if (!store_entry) { + DPRINTF(MemDepUnit, "No dependency for inst PC " + "%#x [sn:%lli].\n", inst->readPC(), inst->seqNum); + + inst_entry->memDepReady = true; + + if (inst->readyToIssue()) { + inst_entry->regsReady = true; + + moveToReady(inst_entry); + } + } else { + // Otherwise make the instruction dependent on the store/barrier. 
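+        // How the chain is built (a summary, not original commentary):
+        // producing_store came either from an outstanding barrier's
+        // sequence number or from the predictor's checkInst() lookup.
+        // The entry for that store is still live in memDepHash, so this
+        // instruction parks itself on the store's dependInsts list and
+        // will not issue until wakeDependents() runs for that store.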
+ DPRINTF(MemDepUnit, "Adding to dependency list; " + "inst PC %#x is dependent on [sn:%lli].\n", + inst->readPC(), producing_store); + + if (inst->readyToIssue()) { + inst_entry->regsReady = true; + } + + // Add this instruction to the list of dependents. + store_entry->dependInsts.push_back(inst_entry); + + if (inst->isLoad()) { + ++conflictingLoads; + } else { + ++conflictingStores; + } + } + + if (inst->isStore()) { + DPRINTF(MemDepUnit, "Inserting store PC %#x [sn:%lli].\n", + inst->readPC(), inst->seqNum); + + depPred.insertStore(inst->readPC(), inst->seqNum, inst->threadNumber); + + ++insertedStores; + } else if (inst->isLoad()) { + ++insertedLoads; + } else { + panic("Unknown type! (most likely a barrier)."); + } +} + +template +void +MemDepUnit::insertNonSpec(DynInstPtr &inst) +{ + unsigned tid = inst->threadNumber; + + MemDepEntryPtr inst_entry = new MemDepEntry(inst); + + // Insert the MemDepEntry into the hash. + memDepHash.insert( + std::pair(inst->seqNum, inst_entry)); + MemDepEntry::memdep_insert++; + + // Add the instruction to the list. + instList[tid].push_back(inst); + + inst_entry->listIt = --(instList[tid].end()); + + // Might want to turn this part into an inline function or something. + // It's shared between both insert functions. + if (inst->isStore()) { + DPRINTF(MemDepUnit, "Inserting store PC %#x [sn:%lli].\n", + inst->readPC(), inst->seqNum); + + depPred.insertStore(inst->readPC(), inst->seqNum, inst->threadNumber); + + ++insertedStores; + } else if (inst->isLoad()) { + ++insertedLoads; + } else { + panic("Unknown type! (most likely a barrier)."); + } +} + +template +void +MemDepUnit::insertBarrier(DynInstPtr &barr_inst) +{ + InstSeqNum barr_sn = barr_inst->seqNum; ++ // Memory barriers block loads and stores, write barriers only stores. + if (barr_inst->isMemBarrier()) { + loadBarrier = true; + loadBarrierSN = barr_sn; + storeBarrier = true; + storeBarrierSN = barr_sn; + DPRINTF(MemDepUnit, "Inserted a memory barrier\n"); + } else if (barr_inst->isWriteBarrier()) { + storeBarrier = true; + storeBarrierSN = barr_sn; + DPRINTF(MemDepUnit, "Inserted a write barrier\n"); + } + + unsigned tid = barr_inst->threadNumber; + + MemDepEntryPtr inst_entry = new MemDepEntry(barr_inst); + + // Add the MemDepEntry to the hash. + memDepHash.insert( + std::pair(barr_sn, inst_entry)); + MemDepEntry::memdep_insert++; + + // Add the instruction to the instruction list. 
+ instList[tid].push_back(barr_inst); + + inst_entry->listIt = --(instList[tid].end()); +} + +template +void +MemDepUnit::regsReady(DynInstPtr &inst) +{ + DPRINTF(MemDepUnit, "Marking registers as ready for " + "instruction PC %#x [sn:%lli].\n", + inst->readPC(), inst->seqNum); + + MemDepEntryPtr inst_entry = findInHash(inst); + + inst_entry->regsReady = true; + + if (inst_entry->memDepReady) { + DPRINTF(MemDepUnit, "Instruction has its memory " + "dependencies resolved, adding it to the ready list.\n"); + + moveToReady(inst_entry); + } else { + DPRINTF(MemDepUnit, "Instruction still waiting on " + "memory dependency.\n"); + } +} + +template +void +MemDepUnit::nonSpecInstReady(DynInstPtr &inst) +{ + DPRINTF(MemDepUnit, "Marking non speculative " + "instruction PC %#x as ready [sn:%lli].\n", + inst->readPC(), inst->seqNum); + + MemDepEntryPtr inst_entry = findInHash(inst); + + moveToReady(inst_entry); +} + +template +void +MemDepUnit::reschedule(DynInstPtr &inst) +{ + instsToReplay.push_back(inst); +} + +template +void +MemDepUnit::replay(DynInstPtr &inst) +{ + DynInstPtr temp_inst; + bool found_inst = false; + ++ // For now this replay function replays all waiting memory ops. + while (!instsToReplay.empty()) { + temp_inst = instsToReplay.front(); + + MemDepEntryPtr inst_entry = findInHash(temp_inst); + + DPRINTF(MemDepUnit, "Replaying mem instruction PC %#x " + "[sn:%lli].\n", + temp_inst->readPC(), temp_inst->seqNum); + + moveToReady(inst_entry); + + if (temp_inst == inst) { + found_inst = true; + } + + instsToReplay.pop_front(); + } + + assert(found_inst); +} + +template +void +MemDepUnit::completed(DynInstPtr &inst) +{ + DPRINTF(MemDepUnit, "Completed mem instruction PC %#x " + "[sn:%lli].\n", + inst->readPC(), inst->seqNum); + + unsigned tid = inst->threadNumber; + + // Remove the instruction from the hash and the list. + MemDepHashIt hash_it = memDepHash.find(inst->seqNum); + + assert(hash_it != memDepHash.end()); + + instList[tid].erase((*hash_it).second->listIt); + + (*hash_it).second = NULL; + + memDepHash.erase(hash_it); + MemDepEntry::memdep_erase++; +} + +template +void +MemDepUnit::completeBarrier(DynInstPtr &inst) +{ + wakeDependents(inst); + completed(inst); + + InstSeqNum barr_sn = inst->seqNum; + + if (inst->isMemBarrier()) { + assert(loadBarrier && storeBarrier); + if (loadBarrierSN == barr_sn) + loadBarrier = false; + if (storeBarrierSN == barr_sn) + storeBarrier = false; + } else if (inst->isWriteBarrier()) { + assert(storeBarrier); + if (storeBarrierSN == barr_sn) + storeBarrier = false; + } +} + +template +void +MemDepUnit::wakeDependents(DynInstPtr &inst) +{ + // Only stores and barriers have dependents. 
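+    // Loads never accumulate dependents in this scheme: store-set
+    // prediction only orders instructions behind stores (or barriers),
+    // so for a completing load there is nothing to wake.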
+    if (!inst->isStore() && !inst->isMemBarrier() && !inst->isWriteBarrier()) {
+        return;
+    }
+
+    MemDepEntryPtr inst_entry = findInHash(inst);
+
+    for (int i = 0; i < inst_entry->dependInsts.size(); ++i) {
+        MemDepEntryPtr woken_inst = inst_entry->dependInsts[i];
+
+        if (!woken_inst->inst) {
+            // Potentially removed mem dep entries could be on this list.
+            continue;
+        }
+
+        DPRINTF(MemDepUnit, "Waking up a dependent inst, "
+                "[sn:%lli].\n",
+                woken_inst->inst->seqNum);
+
+        if (woken_inst->regsReady && !woken_inst->squashed) {
+            moveToReady(woken_inst);
+        } else {
+            woken_inst->memDepReady = true;
+        }
+    }
+
+    inst_entry->dependInsts.clear();
+}
+
+template <class MemDepPred, class Impl>
+void
+MemDepUnit<MemDepPred, Impl>::squash(const InstSeqNum &squashed_num,
+                                     unsigned tid)
+{
+    if (!instsToReplay.empty()) {
+        ListIt replay_it = instsToReplay.begin();
+        while (replay_it != instsToReplay.end()) {
+            if ((*replay_it)->threadNumber == tid &&
+                (*replay_it)->seqNum > squashed_num) {
+                instsToReplay.erase(replay_it++);
+            } else {
+                ++replay_it;
+            }
+        }
+    }
+
+    ListIt squash_it = instList[tid].end();
+    --squash_it;
+
+    MemDepHashIt hash_it;
+
+    while (!instList[tid].empty() &&
+           (*squash_it)->seqNum > squashed_num) {
+
+        DPRINTF(MemDepUnit, "Squashing inst [sn:%lli]\n",
+                (*squash_it)->seqNum);
+
+        hash_it = memDepHash.find((*squash_it)->seqNum);
+
+        assert(hash_it != memDepHash.end());
+
+        (*hash_it).second->squashed = true;
+
+        (*hash_it).second = NULL;
+
+        memDepHash.erase(hash_it);
+        MemDepEntry::memdep_erase++;
+
+        instList[tid].erase(squash_it--);
+    }
+
+    // Tell the dependency predictor to squash as well.
+    depPred.squash(squashed_num, tid);
+}
+
+template <class MemDepPred, class Impl>
+void
+MemDepUnit<MemDepPred, Impl>::violation(DynInstPtr &store_inst,
+                                        DynInstPtr &violating_load)
+{
+    DPRINTF(MemDepUnit, "Passing violating PCs to store sets,"
+            " load: %#x, store: %#x\n", violating_load->readPC(),
+            store_inst->readPC());
+    // Tell the memory dependence predictor of the violation.
+ depPred.violation(violating_load->readPC(), store_inst->readPC()); +} + +template +void +MemDepUnit::issue(DynInstPtr &inst) +{ + DPRINTF(MemDepUnit, "Issuing instruction PC %#x [sn:%lli].\n", + inst->readPC(), inst->seqNum); + + depPred.issued(inst->readPC(), inst->seqNum, inst->isStore()); +} + +template +inline typename MemDepUnit::MemDepEntryPtr & +MemDepUnit::findInHash(const DynInstPtr &inst) +{ + MemDepHashIt hash_it = memDepHash.find(inst->seqNum); + + assert(hash_it != memDepHash.end()); + + return (*hash_it).second; +} + +template +inline void +MemDepUnit::moveToReady(MemDepEntryPtr &woken_inst_entry) +{ + DPRINTF(MemDepUnit, "Adding instruction [sn:%lli] " + "to the ready list.\n", woken_inst_entry->inst->seqNum); + + assert(!woken_inst_entry->squashed); + + iqPtr->addReadyMemInst(woken_inst_entry->inst); +} + + +template +void +MemDepUnit::dumpLists() +{ + for (unsigned tid=0; tid < Impl::MaxThreads; tid++) { + cprintf("Instruction list %i size: %i\n", + tid, instList[tid].size()); + + ListIt inst_list_it = instList[tid].begin(); + int num = 0; + + while (inst_list_it != instList[tid].end()) { + cprintf("Instruction:%i\nPC:%#x\n[sn:%i]\n[tid:%i]\nIssued:%i\n" + "Squashed:%i\n\n", + num, (*inst_list_it)->readPC(), + (*inst_list_it)->seqNum, + (*inst_list_it)->threadNumber, + (*inst_list_it)->isIssued(), + (*inst_list_it)->isSquashed()); + inst_list_it++; + ++num; + } + } + + cprintf("Memory dependence hash size: %i\n", memDepHash.size()); + + cprintf("Memory dependence entries: %i\n", MemDepEntry::memdep_count); +} diff --cc src/cpu/o3/rename.hh index 626d7cc75,000000000..42fdf6bf5 mode 100644,000000..100644 --- a/src/cpu/o3/rename.hh +++ b/src/cpu/o3/rename.hh @@@ -1,464 -1,0 +1,472 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_RENAME_HH__ +#define __CPU_O3_RENAME_HH__ + +#include + +#include "base/statistics.hh" +#include "base/timebuf.hh" + +/** + * DefaultRename handles both single threaded and SMT rename. Its + * width is specified by the parameters; each cycle it tries to rename + * that many instructions. It holds onto the rename history of all + * instructions with destination registers, storing the + * arch. register, the new physical register, and the old physical + * register, to allow for undoing of mappings if squashing happens, or + * freeing up registers upon commit. Rename handles blocking if the + * ROB, IQ, or LSQ is going to be full. Rename also handles barriers, + * and does so by stalling on the instruction until the ROB is empty + * and there are no instructions in flight to the ROB. + */ +template +class DefaultRename +{ + public: + // Typedefs from the Impl. + typedef typename Impl::CPUPol CPUPol; + typedef typename Impl::DynInstPtr DynInstPtr; + typedef typename Impl::FullCPU FullCPU; + typedef typename Impl::Params Params; + + // Typedefs from the CPUPol + typedef typename CPUPol::DecodeStruct DecodeStruct; + typedef typename CPUPol::RenameStruct RenameStruct; + typedef typename CPUPol::TimeStruct TimeStruct; + typedef typename CPUPol::FreeList FreeList; + typedef typename CPUPol::RenameMap RenameMap; + // These are used only for initialization. + typedef typename CPUPol::IEW IEW; + typedef typename CPUPol::Commit Commit; + + // Typedefs from the ISA. + typedef TheISA::RegIndex RegIndex; + + // A list is used to queue the instructions. Barrier insts must + // be added to the front of the list, which is the only reason for + // using a list instead of a queue. (Most other stages use a + // queue) + typedef std::list InstQueue; + + public: + /** Overall rename status. Used to determine if the CPU can + * deschedule itself due to a lack of activity. + */ + enum RenameStatus { + Active, + Inactive + }; + + /** Individual thread status. */ + enum ThreadStatus { + Running, + Idle, + StartSquash, + Squashing, + Blocked, + Unblocking, + SerializeStall + }; + + private: + /** Rename status. */ + RenameStatus _status; + + /** Per-thread status. */ + ThreadStatus renameStatus[Impl::MaxThreads]; + + public: + /** DefaultRename constructor. */ + DefaultRename(Params *params); + + /** Returns the name of rename. */ + std::string name() const; + + /** Registers statistics. */ + void regStats(); + + /** Sets CPU pointer. */ + void setCPU(FullCPU *cpu_ptr); + + /** Sets the main backwards communication time buffer pointer. */ + void setTimeBuffer(TimeBuffer *tb_ptr); + + /** Sets pointer to time buffer used to communicate to the next stage. */ + void setRenameQueue(TimeBuffer *rq_ptr); + + /** Sets pointer to time buffer coming from decode. */ + void setDecodeQueue(TimeBuffer *dq_ptr); + + /** Sets pointer to IEW stage. Used only for initialization. */ + void setIEWStage(IEW *iew_stage) + { iew_ptr = iew_stage; } + + /** Sets pointer to commit stage. Used only for initialization. */ + void setCommitStage(Commit *commit_stage) + { commit_ptr = commit_stage; } + + private: + /** Pointer to IEW stage. Used only for initialization. */ + IEW *iew_ptr; + + /** Pointer to commit stage. Used only for initialization. */ + Commit *commit_ptr; + + public: + /** Initializes variables for the stage. */ + void initStage(); + + /** Sets pointer to list of active threads. 
*/ + void setActiveThreads(std::list *at_ptr); + + /** Sets pointer to rename maps (per-thread structures). */ + void setRenameMap(RenameMap rm_ptr[Impl::MaxThreads]); + + /** Sets pointer to the free list. */ + void setFreeList(FreeList *fl_ptr); + + /** Sets pointer to the scoreboard. */ + void setScoreboard(Scoreboard *_scoreboard); + ++ /** Switches out the rename stage. */ + void switchOut(); + ++ /** Completes the switch out. */ + void doSwitchOut(); + ++ /** Takes over from another CPU's thread. */ + void takeOverFrom(); + + /** Squashes all instructions in a thread. */ + void squash(unsigned tid); + + /** Ticks rename, which processes all input signals and attempts to rename + * as many instructions as possible. + */ + void tick(); + + /** Debugging function used to dump history buffer of renamings. */ + void dumpHistory(); + + private: + /** Determines what to do based on rename's current status. + * @param status_change rename() sets this variable if there was a status + * change (ie switching from blocking to unblocking). + * @param tid Thread id to rename instructions from. + */ + void rename(bool &status_change, unsigned tid); + + /** Renames instructions for the given thread. Also handles serializing + * instructions. + */ + void renameInsts(unsigned tid); + + /** Inserts unused instructions from a given thread into the skid buffer, + * to be renamed once rename unblocks. + */ + void skidInsert(unsigned tid); + + /** Separates instructions from decode into individual lists of instructions + * sorted by thread. + */ + void sortInsts(); + + /** Returns if all of the skid buffers are empty. */ + bool skidsEmpty(); + + /** Updates overall rename status based on all of the threads' statuses. */ + void updateStatus(); + + /** Switches rename to blocking, and signals back that rename has become + * blocked. + * @return Returns true if there is a status change. + */ + bool block(unsigned tid); + + /** Switches rename to unblocking if the skid buffer is empty, and signals + * back that rename has unblocked. + * @return Returns true if there is a status change. + */ + bool unblock(unsigned tid); + + /** Executes actual squash, removing squashed instructions. */ + void doSquash(unsigned tid); + + /** Removes a committed instruction's rename history. */ + void removeFromHistory(InstSeqNum inst_seq_num, unsigned tid); + + /** Renames the source registers of an instruction. */ + inline void renameSrcRegs(DynInstPtr &inst, unsigned tid); + + /** Renames the destination registers of an instruction. */ + inline void renameDestRegs(DynInstPtr &inst, unsigned tid); + + /** Calculates the number of free ROB entries for a specific thread. */ + inline int calcFreeROBEntries(unsigned tid); + + /** Calculates the number of free IQ entries for a specific thread. */ + inline int calcFreeIQEntries(unsigned tid); + + /** Calculates the number of free LSQ entries for a specific thread. */ + inline int calcFreeLSQEntries(unsigned tid); + + /** Returns the number of valid instructions coming from decode. */ + unsigned validInsts(); + + /** Reads signals telling rename to block/unblock. */ + void readStallSignals(unsigned tid); + + /** Checks if any stages are telling rename to block. */ + bool checkStall(unsigned tid); + ++ /** Gets the number of free entries for a specific thread. */ + void readFreeEntries(unsigned tid); + ++ /** Checks the signals and updates the status. 
*/ + bool checkSignalsAndUpdate(unsigned tid); + + /** Either serializes on the next instruction available in the InstQueue, + * or records that it must serialize on the next instruction to enter + * rename. + * @param inst_list The list of younger, unprocessed instructions for the + * thread that has the serializeAfter instruction. + * @param tid The thread id. + */ + void serializeAfter(InstQueue &inst_list, unsigned tid); + + /** Holds the information for each destination register rename. It holds + * the instruction's sequence number, the arch register, the old physical + * register for that arch. register, and the new physical register. + */ + struct RenameHistory { + RenameHistory(InstSeqNum _instSeqNum, RegIndex _archReg, + PhysRegIndex _newPhysReg, PhysRegIndex _prevPhysReg) + : instSeqNum(_instSeqNum), archReg(_archReg), + newPhysReg(_newPhysReg), prevPhysReg(_prevPhysReg) + { + } + + /** The sequence number of the instruction that renamed. */ + InstSeqNum instSeqNum; + /** The architectural register index that was renamed. */ + RegIndex archReg; + /** The new physical register that the arch. register is renamed to. */ + PhysRegIndex newPhysReg; + /** The old physical register that the arch. register was renamed to. */ + PhysRegIndex prevPhysReg; + }; + + /** A per-thread list of all destination register renames, used to either + * undo rename mappings or free old physical registers. + */ + std::list historyBuffer[Impl::MaxThreads]; + + /** Pointer to CPU. */ + FullCPU *cpu; + + /** Pointer to main time buffer used for backwards communication. */ + TimeBuffer *timeBuffer; + + /** Wire to get IEW's output from backwards time buffer. */ + typename TimeBuffer::wire fromIEW; + + /** Wire to get commit's output from backwards time buffer. */ + typename TimeBuffer::wire fromCommit; + + /** Wire to write infromation heading to previous stages. */ + typename TimeBuffer::wire toDecode; + + /** Rename instruction queue. */ + TimeBuffer *renameQueue; + + /** Wire to write any information heading to IEW. */ + typename TimeBuffer::wire toIEW; + + /** Decode instruction queue interface. */ + TimeBuffer *decodeQueue; + + /** Wire to get decode's output from decode queue. */ + typename TimeBuffer::wire fromDecode; + + /** Queue of all instructions coming from decode this cycle. */ + InstQueue insts[Impl::MaxThreads]; + + /** Skid buffer between rename and decode. */ + InstQueue skidBuffer[Impl::MaxThreads]; + + /** Rename map interface. */ + RenameMap *renameMap[Impl::MaxThreads]; + + /** Free list interface. */ + FreeList *freeList; + + /** Pointer to the list of active threads. */ + std::list *activeThreads; + + /** Pointer to the scoreboard. */ + Scoreboard *scoreboard; + + /** Count of instructions in progress that have been sent off to the IQ + * and ROB, but are not yet included in their occupancy counts. + */ + int instsInProgress[Impl::MaxThreads]; + + /** Variable that tracks if decode has written to the time buffer this + * cycle. Used to tell CPU if there is activity this cycle. + */ + bool wroteToTimeBuffer; + + /** Structures whose free entries impact the amount of instructions that + * can be renamed. + */ + struct FreeEntries { + unsigned iqEntries; + unsigned lsqEntries; + unsigned robEntries; + }; + + /** Per-thread tracking of the number of free entries of back-end + * structures. + */ + FreeEntries freeEntries[Impl::MaxThreads]; + + /** Records if the ROB is empty. 
In SMT mode the ROB may be dynamically + * partitioned between threads, so the ROB must tell rename when it is + * empty. + */ + bool emptyROB[Impl::MaxThreads]; + + /** Source of possible stalls. */ + struct Stalls { + bool iew; + bool commit; + }; + + /** Tracks which stages are telling decode to stall. */ + Stalls stalls[Impl::MaxThreads]; + + /** The serialize instruction that rename has stalled on. */ + DynInstPtr serializeInst[Impl::MaxThreads]; + + /** Records if rename needs to serialize on the next instruction for any + * thread. + */ + bool serializeOnNextInst[Impl::MaxThreads]; + + /** Delay between iew and rename, in ticks. */ + int iewToRenameDelay; + + /** Delay between decode and rename, in ticks. */ + int decodeToRenameDelay; + + /** Delay between commit and rename, in ticks. */ + unsigned commitToRenameDelay; + + /** Rename width, in instructions. */ + unsigned renameWidth; + + /** Commit width, in instructions. Used so rename knows how many + * instructions might have freed registers in the previous cycle. + */ + unsigned commitWidth; + + /** The index of the instruction in the time buffer to IEW that rename is + * currently using. + */ + unsigned toIEWIndex; + + /** Whether or not rename needs to block this cycle. */ + bool blockThisCycle; + + /** The number of threads active in rename. */ + unsigned numThreads; + + /** The maximum skid buffer size. */ + unsigned skidBufferMax; + + /** Enum to record the source of a structure full stall. Can come from + * either ROB, IQ, LSQ, and it is priortized in that order. + */ + enum FullSource { + ROB, + IQ, + LSQ, + NONE + }; + + /** Function used to increment the stat that corresponds to the source of + * the stall. + */ + inline void incrFullStat(const FullSource &source); + + /** Stat for total number of cycles spent squashing. */ + Stats::Scalar<> renameSquashCycles; + /** Stat for total number of cycles spent idle. */ + Stats::Scalar<> renameIdleCycles; + /** Stat for total number of cycles spent blocking. */ + Stats::Scalar<> renameBlockCycles; + /** Stat for total number of cycles spent stalling for a serializing inst. */ + Stats::Scalar<> renameSerializeStallCycles; + /** Stat for total number of cycles spent running normally. */ + Stats::Scalar<> renameRunCycles; + /** Stat for total number of cycles spent unblocking. */ + Stats::Scalar<> renameUnblockCycles; + /** Stat for total number of renamed instructions. */ + Stats::Scalar<> renameRenamedInsts; + /** Stat for total number of squashed instructions that rename discards. */ + Stats::Scalar<> renameSquashedInsts; + /** Stat for total number of times that the ROB starts a stall in rename. */ + Stats::Scalar<> renameROBFullEvents; + /** Stat for total number of times that the IQ starts a stall in rename. */ + Stats::Scalar<> renameIQFullEvents; + /** Stat for total number of times that the LSQ starts a stall in rename. */ + Stats::Scalar<> renameLSQFullEvents; + /** Stat for total number of times that rename runs out of free registers + * to use to rename. */ + Stats::Scalar<> renameFullRegistersEvents; + /** Stat for total number of renamed destination registers. */ + Stats::Scalar<> renameRenamedOperands; + /** Stat for total number of source register rename lookups. */ + Stats::Scalar<> renameRenameLookups; + /** Stat for total number of committed renaming mappings. */ + Stats::Scalar<> renameCommittedMaps; + /** Stat for total number of mappings that were undone due to a squash. */ + Stats::Scalar<> renameUndoneMaps; ++ /** Number of serialize instructions handled. 
*/ + Stats::Scalar<> renamedSerializing; ++ /** Number of instructions marked as temporarily serializing. */ + Stats::Scalar<> renamedTempSerializing; ++ /** Number of instructions inserted into skid buffers. */ + Stats::Scalar<> renameSkidInsts; +}; + +#endif // __CPU_O3_RENAME_HH__ diff --cc src/cpu/o3/rename_impl.hh index 9cbe1a770,000000000..8e70c90f7 mode 100644,000000..100644 --- a/src/cpu/o3/rename_impl.hh +++ b/src/cpu/o3/rename_impl.hh @@@ -1,1281 -1,0 +1,1283 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#include + +#include "config/full_system.hh" +#include "cpu/o3/rename.hh" + +using namespace std; + +template +DefaultRename::DefaultRename(Params *params) + : iewToRenameDelay(params->iewToRenameDelay), + decodeToRenameDelay(params->decodeToRenameDelay), + commitToRenameDelay(params->commitToRenameDelay), + renameWidth(params->renameWidth), + commitWidth(params->commitWidth), + numThreads(params->numberOfThreads) +{ + _status = Inactive; + + for (int i=0; i< numThreads; i++) { + renameStatus[i] = Idle; + + freeEntries[i].iqEntries = 0; + freeEntries[i].lsqEntries = 0; + freeEntries[i].robEntries = 0; + + stalls[i].iew = false; + stalls[i].commit = false; + serializeInst[i] = NULL; + + instsInProgress[i] = 0; + + emptyROB[i] = true; + + serializeOnNextInst[i] = false; + } + + // @todo: Make into a parameter. 
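+    // A reading of the formula below (not original commentary): the
+    // skid buffer must absorb everything decode can keep sending while
+    // rename's block signal makes its round trip, hence the factor of 2
+    // on delay * decodeWidth, plus up to renameWidth instructions that
+    // rename itself may bounce this cycle.  With, say, decodeWidth = 8,
+    // iewToRenameDelay = 1, and renameWidth = 8 (example values only),
+    // that is 2 * (1 * 8) + 8 = 24 entries.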
+ skidBufferMax = (2 * (iewToRenameDelay * params->decodeWidth)) + renameWidth; +} + +template +std::string +DefaultRename::name() const +{ + return cpu->name() + ".rename"; +} + +template +void +DefaultRename::regStats() +{ + renameSquashCycles + .name(name() + ".RENAME:SquashCycles") + .desc("Number of cycles rename is squashing") + .prereq(renameSquashCycles); + renameIdleCycles + .name(name() + ".RENAME:IdleCycles") + .desc("Number of cycles rename is idle") + .prereq(renameIdleCycles); + renameBlockCycles + .name(name() + ".RENAME:BlockCycles") + .desc("Number of cycles rename is blocking") + .prereq(renameBlockCycles); + renameSerializeStallCycles + .name(name() + ".RENAME:serializeStallCycles") + .desc("count of cycles rename stalled for serializing inst") + .flags(Stats::total); + renameRunCycles + .name(name() + ".RENAME:RunCycles") + .desc("Number of cycles rename is running") + .prereq(renameIdleCycles); + renameUnblockCycles + .name(name() + ".RENAME:UnblockCycles") + .desc("Number of cycles rename is unblocking") + .prereq(renameUnblockCycles); + renameRenamedInsts + .name(name() + ".RENAME:RenamedInsts") + .desc("Number of instructions processed by rename") + .prereq(renameRenamedInsts); + renameSquashedInsts + .name(name() + ".RENAME:SquashedInsts") + .desc("Number of squashed instructions processed by rename") + .prereq(renameSquashedInsts); + renameROBFullEvents + .name(name() + ".RENAME:ROBFullEvents") + .desc("Number of times rename has blocked due to ROB full") + .prereq(renameROBFullEvents); + renameIQFullEvents + .name(name() + ".RENAME:IQFullEvents") + .desc("Number of times rename has blocked due to IQ full") + .prereq(renameIQFullEvents); + renameLSQFullEvents + .name(name() + ".RENAME:LSQFullEvents") + .desc("Number of times rename has blocked due to LSQ full") + .prereq(renameLSQFullEvents); + renameFullRegistersEvents + .name(name() + ".RENAME:FullRegisterEvents") + .desc("Number of times there has been no free registers") + .prereq(renameFullRegistersEvents); + renameRenamedOperands + .name(name() + ".RENAME:RenamedOperands") + .desc("Number of destination operands rename has renamed") + .prereq(renameRenamedOperands); + renameRenameLookups + .name(name() + ".RENAME:RenameLookups") + .desc("Number of register rename lookups that rename has made") + .prereq(renameRenameLookups); + renameCommittedMaps + .name(name() + ".RENAME:CommittedMaps") + .desc("Number of HB maps that are committed") + .prereq(renameCommittedMaps); + renameUndoneMaps + .name(name() + ".RENAME:UndoneMaps") + .desc("Number of HB maps that are undone due to squashing") + .prereq(renameUndoneMaps); + renamedSerializing + .name(name() + ".RENAME:serializingInsts") + .desc("count of serializing insts renamed") + .flags(Stats::total) + ; + renamedTempSerializing + .name(name() + ".RENAME:tempSerializingInsts") + .desc("count of temporary serializing insts renamed") + .flags(Stats::total) + ; + renameSkidInsts + .name(name() + ".RENAME:skidInsts") + .desc("count of insts added to the skid buffer") + .flags(Stats::total) + ; +} + +template +void +DefaultRename::setCPU(FullCPU *cpu_ptr) +{ + DPRINTF(Rename, "Setting CPU pointer.\n"); + cpu = cpu_ptr; +} + +template +void +DefaultRename::setTimeBuffer(TimeBuffer *tb_ptr) +{ + DPRINTF(Rename, "Setting time buffer pointer.\n"); + timeBuffer = tb_ptr; + + // Setup wire to read information from time buffer, from IEW stage. + fromIEW = timeBuffer->getWire(-iewToRenameDelay); + + // Setup wire to read infromation from time buffer, from commit stage. 
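+    // getWire(-n) reads the time-buffer entry written n cycles ago,
+    // while getWire(0) writes the current cycle's entry; the negative
+    // offsets are how fixed communication latencies between stages are
+    // modeled.  Rename therefore sees commit's state from
+    // commitToRenameDelay cycles in the past, never the same cycle's.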
+ fromCommit = timeBuffer->getWire(-commitToRenameDelay); + + // Setup wire to write information to previous stages. + toDecode = timeBuffer->getWire(0); +} + +template +void +DefaultRename::setRenameQueue(TimeBuffer *rq_ptr) +{ + DPRINTF(Rename, "Setting rename queue pointer.\n"); + renameQueue = rq_ptr; + + // Setup wire to write information to future stages. + toIEW = renameQueue->getWire(0); +} + +template +void +DefaultRename::setDecodeQueue(TimeBuffer *dq_ptr) +{ + DPRINTF(Rename, "Setting decode queue pointer.\n"); + decodeQueue = dq_ptr; + + // Setup wire to get information from decode. + fromDecode = decodeQueue->getWire(-decodeToRenameDelay); +} + +template +void +DefaultRename::initStage() +{ + // Grab the number of free entries directly from the stages. + for (int tid=0; tid < numThreads; tid++) { + freeEntries[tid].iqEntries = iew_ptr->instQueue.numFreeEntries(tid); + freeEntries[tid].lsqEntries = iew_ptr->ldstQueue.numFreeEntries(tid); + freeEntries[tid].robEntries = commit_ptr->numROBFreeEntries(tid); + emptyROB[tid] = true; + } +} + +template +void +DefaultRename::setActiveThreads(list *at_ptr) +{ + DPRINTF(Rename, "Setting active threads list pointer.\n"); + activeThreads = at_ptr; +} + + +template +void +DefaultRename::setRenameMap(RenameMap rm_ptr[]) +{ + DPRINTF(Rename, "Setting rename map pointers.\n"); + + for (int i=0; i +void +DefaultRename::setFreeList(FreeList *fl_ptr) +{ + DPRINTF(Rename, "Setting free list pointer.\n"); + freeList = fl_ptr; +} + +template +void +DefaultRename::setScoreboard(Scoreboard *_scoreboard) +{ + DPRINTF(Rename, "Setting scoreboard pointer.\n"); + scoreboard = _scoreboard; +} + +template +void +DefaultRename::switchOut() +{ ++ // Rename is ready to switch out at any time. + cpu->signalSwitched(); +} + +template +void +DefaultRename::doSwitchOut() +{ ++ // Clear any state, fix up the rename map. + for (int i = 0; i < numThreads; i++) { + typename list::iterator hb_it = historyBuffer[i].begin(); + + while (!historyBuffer[i].empty()) { + assert(hb_it != historyBuffer[i].end()); + + DPRINTF(Rename, "[tid:%u]: Removing history entry with sequence " + "number %i.\n", i, (*hb_it).instSeqNum); + + // Tell the rename map to set the architected register to the + // previous physical register that it was renamed to. + renameMap[i]->setEntry(hb_it->archReg, hb_it->prevPhysReg); + + // Put the renamed physical register back on the free list. + freeList->addReg(hb_it->newPhysReg); + + historyBuffer[i].erase(hb_it++); + } + insts[i].clear(); + skidBuffer[i].clear(); + } +} + +template +void +DefaultRename::takeOverFrom() +{ + _status = Inactive; + initStage(); + + // Reset all state prior to taking over from the other CPU. + for (int i=0; i< numThreads; i++) { + renameStatus[i] = Idle; + + stalls[i].iew = false; + stalls[i].commit = false; + serializeInst[i] = NULL; + + instsInProgress[i] = 0; + + emptyROB[i] = true; + + serializeOnNextInst[i] = false; + } +} + +template +void +DefaultRename::squash(unsigned tid) +{ + DPRINTF(Rename, "[tid:%u]: Squashing instructions.\n",tid); + + // Clear the stall signal if rename was blocked or unblocking before. + // If it still needs to block, the blocking should happen the next + // cycle and there should be space to hold everything due to the squash. + if (renameStatus[tid] == Blocked || + renameStatus[tid] == Unblocking || + renameStatus[tid] == SerializeStall) { +#if 0 + // In syscall emulation, we can have both a block and a squash due + // to a syscall in the same cycle. 
This would cause both signals to + // be high. This shouldn't happen in full system. + if (toDecode->renameBlock[tid]) { + toDecode->renameBlock[tid] = 0; + } else { + toDecode->renameUnblock[tid] = 1; + } +#else + toDecode->renameUnblock[tid] = 1; +#endif + serializeInst[tid] = NULL; + } + + // Set the status to Squashing. + renameStatus[tid] = Squashing; + + // Squash any instructions from decode. + unsigned squashCount = 0; + + for (int i=0; isize; i++) { + if (fromDecode->insts[i]->threadNumber == tid) { + fromDecode->insts[i]->squashed = true; + wroteToTimeBuffer = true; + squashCount++; + } + } + + insts[tid].clear(); + + // Clear the skid buffer in case it has any data in it. + skidBuffer[tid].clear(); + + doSquash(tid); +} + +template +void +DefaultRename::tick() +{ + wroteToTimeBuffer = false; + + blockThisCycle = false; + + bool status_change = false; + + toIEWIndex = 0; + + sortInsts(); + + list::iterator threads = (*activeThreads).begin(); + + // Check stall and squash signals. + while (threads != (*activeThreads).end()) { + unsigned tid = *threads++; + + DPRINTF(Rename, "Processing [tid:%i]\n", tid); + + status_change = checkSignalsAndUpdate(tid) || status_change; + + rename(status_change, tid); + } + + if (status_change) { + updateStatus(); + } + + if (wroteToTimeBuffer) { + DPRINTF(Activity, "Activity this cycle.\n"); + cpu->activityThisCycle(); + } + + threads = (*activeThreads).begin(); + + while (threads != (*activeThreads).end()) { + unsigned tid = *threads++; + + // If we committed this cycle then doneSeqNum will be > 0 + if (fromCommit->commitInfo[tid].doneSeqNum != 0 && + !fromCommit->commitInfo[tid].squash && + renameStatus[tid] != Squashing) { + + removeFromHistory(fromCommit->commitInfo[tid].doneSeqNum, + tid); + } + } + + // @todo: make into updateProgress function + for (int tid=0; tid < numThreads; tid++) { + instsInProgress[tid] -= fromIEW->iewInfo[tid].dispatched; + + assert(instsInProgress[tid] >=0); + } + +} + +template +void +DefaultRename::rename(bool &status_change, unsigned tid) +{ + // If status is Running or idle, + // call renameInsts() + // If status is Unblocking, + // buffer any instructions coming from decode + // continue trying to empty skid buffer + // check if stall conditions have passed + + if (renameStatus[tid] == Blocked) { + ++renameBlockCycles; + } else if (renameStatus[tid] == Squashing) { + ++renameSquashCycles; + } else if (renameStatus[tid] == SerializeStall) { + ++renameSerializeStallCycles; + } + + if (renameStatus[tid] == Running || + renameStatus[tid] == Idle) { + DPRINTF(Rename, "[tid:%u]: Not blocked, so attempting to run " + "stage.\n", tid); + + renameInsts(tid); + } else if (renameStatus[tid] == Unblocking) { + renameInsts(tid); + + if (validInsts()) { + // Add the current inputs to the skid buffer so they can be + // reprocessed when this stage unblocks. + skidInsert(tid); + } + + // If we switched over to blocking, then there's a potential for + // an overall status change. + status_change = unblock(tid) || status_change || blockThisCycle; + } +} + +template +void +DefaultRename::renameInsts(unsigned tid) +{ + // Instructions can be either in the skid buffer or the queue of + // instructions coming from decode, depending on the status. + int insts_available = renameStatus[tid] == Unblocking ? + skidBuffer[tid].size() : insts[tid].size(); + + // Check the decode queue to see if instructions are available. + // If there are no available instructions to rename, then do nothing. 
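// For example, while Unblocking with three instructions parked in the
// skid buffer, insts_available == 3 this cycle even if decode delivered
// nothing new (counts illustrative).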
+ if (insts_available == 0) { + DPRINTF(Rename, "[tid:%u]: Nothing to do, breaking out early.\n", + tid); + // Should I change status to idle? + ++renameIdleCycles; + return; + } else if (renameStatus[tid] == Unblocking) { + ++renameUnblockCycles; + } else if (renameStatus[tid] == Running) { + ++renameRunCycles; + } + + DynInstPtr inst; + + // Will have to do a different calculation for the number of free + // entries. + int free_rob_entries = calcFreeROBEntries(tid); + int free_iq_entries = calcFreeIQEntries(tid); + int free_lsq_entries = calcFreeLSQEntries(tid); + int min_free_entries = free_rob_entries; + + FullSource source = ROB; + + if (free_iq_entries < min_free_entries) { + min_free_entries = free_iq_entries; + source = IQ; + } + + if (free_lsq_entries < min_free_entries) { + min_free_entries = free_lsq_entries; + source = LSQ; + } + + // Check if there's any space left. + if (min_free_entries <= 0) { + DPRINTF(Rename, "[tid:%u]: Blocking due to no free ROB/IQ/LSQ " + "entries.\n" + "ROB has %i free entries.\n" + "IQ has %i free entries.\n" + "LSQ has %i free entries.\n", + tid, + free_rob_entries, + free_iq_entries, + free_lsq_entries); + + blockThisCycle = true; + + block(tid); + + incrFullStat(source); + + return; + } else if (min_free_entries < insts_available) { + DPRINTF(Rename, "[tid:%u]: Will have to block this cycle." + "%i insts available, but only %i insts can be " + "renamed due to ROB/IQ/LSQ limits.\n", + tid, insts_available, min_free_entries); + + insts_available = min_free_entries; + + blockThisCycle = true; + + incrFullStat(source); + } + + InstQueue &insts_to_rename = renameStatus[tid] == Unblocking ? + skidBuffer[tid] : insts[tid]; + + DPRINTF(Rename, "[tid:%u]: %i available instructions to " + "send iew.\n", tid, insts_available); + + DPRINTF(Rename, "[tid:%u]: %i insts pipelining from Rename | %i insts " + "dispatched to IQ last cycle.\n", + tid, instsInProgress[tid], fromIEW->iewInfo[tid].dispatched); + + // Handle serializing the next instruction if necessary. + if (serializeOnNextInst[tid]) { + if (emptyROB[tid] && instsInProgress[tid] == 0) { + // ROB already empty; no need to serialize. + serializeOnNextInst[tid] = false; + } else if (!insts_to_rename.empty()) { + insts_to_rename.front()->setSerializeBefore(); + } + } + + int renamed_insts = 0; + + while (insts_available > 0 && toIEWIndex < renameWidth) { + DPRINTF(Rename, "[tid:%u]: Sending instructions to IEW.\n", tid); + + assert(!insts_to_rename.empty()); + + inst = insts_to_rename.front(); + + insts_to_rename.pop_front(); + + if (renameStatus[tid] == Unblocking) { + DPRINTF(Rename,"[tid:%u]: Removing [sn:%lli] PC:%#x from rename " + "skidBuffer\n", + tid, inst->seqNum, inst->readPC()); + } + + if (inst->isSquashed()) { + DPRINTF(Rename, "[tid:%u]: instruction %i with PC %#x is " + "squashed, skipping.\n", + tid, inst->seqNum, inst->threadNumber,inst->readPC()); + + ++renameSquashedInsts; + + // Decrement how many instructions are available. + --insts_available; + + continue; + } + + DPRINTF(Rename, "[tid:%u]: Processing instruction [sn:%lli] with " + "PC %#x.\n", + tid, inst->seqNum, inst->readPC()); + + // Handle serializeAfter/serializeBefore instructions. + // serializeAfter marks the next instruction as serializeBefore. + // serializeBefore makes the instruction wait in rename until the ROB + // is empty. + + // In this model, IPR accesses are serialize before + // instructions, and store conditionals are serialize after + // instructions. 
This is mainly due to lack of support for + // out-of-order operations of either of those classes of + // instructions. + if ((inst->isIprAccess() || inst->isSerializeBefore()) && + !inst->isSerializeHandled()) { + DPRINTF(Rename, "Serialize before instruction encountered.\n"); + + if (!inst->isTempSerializeBefore()) { + renamedSerializing++; + inst->setSerializeHandled(); + } else { + renamedTempSerializing++; + } + + // Change status over to SerializeStall so that other stages know + // what this is blocked on. + renameStatus[tid] = SerializeStall; + + serializeInst[tid] = inst; + + blockThisCycle = true; + + break; + } else if ((inst->isStoreConditional() || inst->isSerializeAfter()) && + !inst->isSerializeHandled()) { + DPRINTF(Rename, "Serialize after instruction encountered.\n"); + + renamedSerializing++; + + inst->setSerializeHandled(); + + serializeAfter(insts_to_rename, tid); + } + + // Check here to make sure there are enough destination registers + // to rename to. Otherwise block. + if (renameMap[tid]->numFreeEntries() < inst->numDestRegs()) { + DPRINTF(Rename, "Blocking due to lack of free " + "physical registers to rename to.\n"); + blockThisCycle = true; + + ++renameFullRegistersEvents; + + break; + } + + renameSrcRegs(inst, inst->threadNumber); + + renameDestRegs(inst, inst->threadNumber); + + ++renamed_insts; + + // Put instruction in rename queue. + toIEW->insts[toIEWIndex] = inst; + ++(toIEW->size); + + // Increment which instruction we're on. + ++toIEWIndex; + + // Decrement how many instructions are available. + --insts_available; + } + + instsInProgress[tid] += renamed_insts; + renameRenamedInsts += renamed_insts; + + // If we wrote to the time buffer, record this. + if (toIEWIndex) { + wroteToTimeBuffer = true; + } + + // Check if there's any instructions left that haven't yet been renamed. + // If so then block. + if (insts_available) { + blockThisCycle = true; + } + + if (blockThisCycle) { + block(tid); + toDecode->renameUnblock[tid] = false; + } +} + +template +void +DefaultRename::skidInsert(unsigned tid) +{ + DynInstPtr inst = NULL; + + while (!insts[tid].empty()) { + inst = insts[tid].front(); + + insts[tid].pop_front(); + + assert(tid == inst->threadNumber); + + DPRINTF(Rename, "[tid:%u]: Inserting [sn:%lli] PC:%#x into Rename " + "skidBuffer\n", tid, inst->seqNum, inst->readPC()); + + ++renameSkidInsts; + + skidBuffer[tid].push_back(inst); + } + + if (skidBuffer[tid].size() > skidBufferMax) + panic("Skidbuffer Exceeded Max Size"); +} + +template +void +DefaultRename::sortInsts() +{ + int insts_from_decode = fromDecode->size; +#ifdef DEBUG + for (int i=0; i < numThreads; i++) + assert(insts[i].empty()); +#endif + for (int i = 0; i < insts_from_decode; ++i) { + DynInstPtr inst = fromDecode->insts[i]; + insts[inst->threadNumber].push_back(inst); + } +} + +template +bool +DefaultRename::skidsEmpty() +{ + list::iterator threads = (*activeThreads).begin(); + + while (threads != (*activeThreads).end()) { + if (!skidBuffer[*threads++].empty()) + return false; + } + + return true; +} + +template +void +DefaultRename::updateStatus() +{ + bool any_unblocking = false; + + list::iterator threads = (*activeThreads).begin(); + + threads = (*activeThreads).begin(); + + while (threads != (*activeThreads).end()) { + unsigned tid = *threads++; + + if (renameStatus[tid] == Unblocking) { + any_unblocking = true; + break; + } + } + + // Rename will have activity if it's unblocking. 
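// For example, a thread whose IEW stall clears moves from Blocked to
// Unblocking in checkSignalsAndUpdate(); while any thread is Unblocking,
// rename must stay Active so tick() keeps running and drains that
// thread's skid buffer even if decode sends nothing new.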
+ if (any_unblocking) { + if (_status == Inactive) { + _status = Active; + + DPRINTF(Activity, "Activating stage.\n"); + + cpu->activateStage(FullCPU::RenameIdx); + } + } else { + // If it's not unblocking, then rename will not have any internal + // activity. Switch it to inactive. + if (_status == Active) { + _status = Inactive; + DPRINTF(Activity, "Deactivating stage.\n"); + + cpu->deactivateStage(FullCPU::RenameIdx); + } + } +} + +template +bool +DefaultRename::block(unsigned tid) +{ + DPRINTF(Rename, "[tid:%u]: Blocking.\n", tid); + + // Add the current inputs onto the skid buffer, so they can be + // reprocessed when this stage unblocks. + skidInsert(tid); + + // Only signal backwards to block if the previous stages do not think + // rename is already blocked. + if (renameStatus[tid] != Blocked) { + if (renameStatus[tid] != Unblocking) { + toDecode->renameBlock[tid] = true; + toDecode->renameUnblock[tid] = false; + wroteToTimeBuffer = true; + } + + // Rename can not go from SerializeStall to Blocked, otherwise + // it would not know to complete the serialize stall. + if (renameStatus[tid] != SerializeStall) { + // Set status to Blocked. + renameStatus[tid] = Blocked; + return true; + } + } + + return false; +} + +template +bool +DefaultRename::unblock(unsigned tid) +{ + DPRINTF(Rename, "[tid:%u]: Trying to unblock.\n", tid); + + // Rename is done unblocking if the skid buffer is empty. + if (skidBuffer[tid].empty() && renameStatus[tid] != SerializeStall) { + + DPRINTF(Rename, "[tid:%u]: Done unblocking.\n", tid); + + toDecode->renameUnblock[tid] = true; + wroteToTimeBuffer = true; + + renameStatus[tid] = Running; + return true; + } + + return false; +} + +template +void +DefaultRename::doSquash(unsigned tid) +{ + typename list::iterator hb_it = historyBuffer[tid].begin(); + + InstSeqNum squashed_seq_num = fromCommit->commitInfo[tid].doneSeqNum; + + // After a syscall squashes everything, the history buffer may be empty + // but the ROB may still be squashing instructions. + if (historyBuffer[tid].empty()) { + return; + } + + // Go through the most recent instructions, undoing the mappings + // they did and freeing up the registers. + while (!historyBuffer[tid].empty() && + (*hb_it).instSeqNum > squashed_seq_num) { + assert(hb_it != historyBuffer[tid].end()); + + DPRINTF(Rename, "[tid:%u]: Removing history entry with sequence " + "number %i.\n", tid, (*hb_it).instSeqNum); + + // Tell the rename map to set the architected register to the + // previous physical register that it was renamed to. + renameMap[tid]->setEntry(hb_it->archReg, hb_it->prevPhysReg); + + // Put the renamed physical register back on the free list. + freeList->addReg(hb_it->newPhysReg); + + historyBuffer[tid].erase(hb_it++); + + ++renameUndoneMaps; + } +} + +template +void +DefaultRename::removeFromHistory(InstSeqNum inst_seq_num, unsigned tid) +{ + DPRINTF(Rename, "[tid:%u]: Removing a committed instruction from the " + "history buffer %u (size=%i), until [sn:%lli].\n", + tid, tid, historyBuffer[tid].size(), inst_seq_num); + + typename list::iterator hb_it = historyBuffer[tid].end(); + + --hb_it; + + if (historyBuffer[tid].empty()) { + DPRINTF(Rename, "[tid:%u]: History buffer is empty.\n", tid); + return; + } else if (hb_it->instSeqNum > inst_seq_num) { + DPRINTF(Rename, "[tid:%u]: Old sequence number encountered. Ensure " + "that a syscall happened recently.\n", tid); + return; + } + + // Commit all the renames up until (and including) the committed sequence + // number. 
Some or even all of the committed instructions may not have + // rename histories if they did not have destination registers that were + // renamed. + while (!historyBuffer[tid].empty() && + hb_it != historyBuffer[tid].end() && + (*hb_it).instSeqNum <= inst_seq_num) { + + DPRINTF(Rename, "[tid:%u]: Freeing up older rename of reg %i, " + "[sn:%lli].\n", + tid, (*hb_it).prevPhysReg, (*hb_it).instSeqNum); + + freeList->addReg((*hb_it).prevPhysReg); + ++renameCommittedMaps; + + historyBuffer[tid].erase(hb_it--); + } +} + +template +inline void +DefaultRename::renameSrcRegs(DynInstPtr &inst,unsigned tid) +{ + assert(renameMap[tid] != 0); + + unsigned num_src_regs = inst->numSrcRegs(); + + // Get the architectual register numbers from the source and + // destination operands, and redirect them to the right register. + // Will need to mark dependencies though. + for (int src_idx = 0; src_idx < num_src_regs; src_idx++) { + RegIndex src_reg = inst->srcRegIdx(src_idx); + + // Look up the source registers to get the phys. register they've + // been renamed to, and set the sources to those registers. + PhysRegIndex renamed_reg = renameMap[tid]->lookup(src_reg); + + DPRINTF(Rename, "[tid:%u]: Looking up arch reg %i, got " + "physical reg %i.\n", tid, (int)src_reg, + (int)renamed_reg); + + inst->renameSrcReg(src_idx, renamed_reg); + + // See if the register is ready or not. + if (scoreboard->getReg(renamed_reg) == true) { + DPRINTF(Rename, "[tid:%u]: Register is ready.\n", tid); + + inst->markSrcRegReady(src_idx); + } + + ++renameRenameLookups; + } +} + +template +inline void +DefaultRename::renameDestRegs(DynInstPtr &inst,unsigned tid) +{ + typename RenameMap::RenameInfo rename_result; + + unsigned num_dest_regs = inst->numDestRegs(); + + // Rename the destination registers. + for (int dest_idx = 0; dest_idx < num_dest_regs; dest_idx++) { + RegIndex dest_reg = inst->destRegIdx(dest_idx); + + // Get the physical register that the destination will be + // renamed to. + rename_result = renameMap[tid]->rename(dest_reg); + + //Mark Scoreboard entry as not ready + scoreboard->unsetReg(rename_result.first); + + DPRINTF(Rename, "[tid:%u]: Renaming arch reg %i to physical " + "reg %i.\n", tid, (int)dest_reg, + (int)rename_result.first); + + // Record the rename information so that a history can be kept. + RenameHistory hb_entry(inst->seqNum, dest_reg, + rename_result.first, + rename_result.second); + + historyBuffer[tid].push_front(hb_entry); + + DPRINTF(Rename, "[tid:%u]: Adding instruction to history buffer, " + "[sn:%lli].\n",tid, + (*historyBuffer[tid].begin()).instSeqNum); + + // Tell the instruction to rename the appropriate destination + // register (dest_idx) to the new physical register + // (rename_result.first), and record the previous physical + // register that the same logical register was renamed to + // (rename_result.second). 
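// Concrete example (register numbers illustrative): if logical r5
// currently maps to p12 and rename(r5) returns {p31, p12}, the
// instruction will write p31, the history buffer records
// (seqNum, r5, p31, p12) so a squash can restore the r5 -> p12 mapping,
// and commit frees p12 once this rename becomes architectural.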
+ inst->renameDestReg(dest_idx, + rename_result.first, + rename_result.second); + + ++renameRenamedOperands; + } +} + +template +inline int +DefaultRename::calcFreeROBEntries(unsigned tid) +{ + int num_free = freeEntries[tid].robEntries - + (instsInProgress[tid] - fromIEW->iewInfo[tid].dispatched); + + //DPRINTF(Rename,"[tid:%i]: %i rob free\n",tid,num_free); + + return num_free; +} + +template +inline int +DefaultRename::calcFreeIQEntries(unsigned tid) +{ + int num_free = freeEntries[tid].iqEntries - + (instsInProgress[tid] - fromIEW->iewInfo[tid].dispatched); + + //DPRINTF(Rename,"[tid:%i]: %i iq free\n",tid,num_free); + + return num_free; +} + +template +inline int +DefaultRename::calcFreeLSQEntries(unsigned tid) +{ + int num_free = freeEntries[tid].lsqEntries - + (instsInProgress[tid] - fromIEW->iewInfo[tid].dispatchedToLSQ); + + //DPRINTF(Rename,"[tid:%i]: %i lsq free\n",tid,num_free); + + return num_free; +} + +template +unsigned +DefaultRename::validInsts() +{ + unsigned inst_count = 0; + + for (int i=0; isize; i++) { + if (!fromDecode->insts[i]->squashed) + inst_count++; + } + + return inst_count; +} + +template +void +DefaultRename::readStallSignals(unsigned tid) +{ + if (fromIEW->iewBlock[tid]) { + stalls[tid].iew = true; + } + + if (fromIEW->iewUnblock[tid]) { + assert(stalls[tid].iew); + stalls[tid].iew = false; + } + + if (fromCommit->commitBlock[tid]) { + stalls[tid].commit = true; + } + + if (fromCommit->commitUnblock[tid]) { + assert(stalls[tid].commit); + stalls[tid].commit = false; + } +} + +template +bool +DefaultRename::checkStall(unsigned tid) +{ + bool ret_val = false; + + if (stalls[tid].iew) { + DPRINTF(Rename,"[tid:%i]: Stall from IEW stage detected.\n", tid); + ret_val = true; + } else if (stalls[tid].commit) { + DPRINTF(Rename,"[tid:%i]: Stall from Commit stage detected.\n", tid); + ret_val = true; + } else if (calcFreeROBEntries(tid) <= 0) { + DPRINTF(Rename,"[tid:%i]: Stall: ROB has 0 free entries.\n", tid); + ret_val = true; + } else if (calcFreeIQEntries(tid) <= 0) { + DPRINTF(Rename,"[tid:%i]: Stall: IQ has 0 free entries.\n", tid); + ret_val = true; + } else if (calcFreeLSQEntries(tid) <= 0) { + DPRINTF(Rename,"[tid:%i]: Stall: LSQ has 0 free entries.\n", tid); + ret_val = true; + } else if (renameMap[tid]->numFreeEntries() <= 0) { + DPRINTF(Rename,"[tid:%i]: Stall: RenameMap has 0 free entries.\n", tid); + ret_val = true; + } else if (renameStatus[tid] == SerializeStall && + (!emptyROB[tid] || instsInProgress[tid])) { + DPRINTF(Rename,"[tid:%i]: Stall: Serialize stall and ROB is not " + "empty.\n", + tid); + ret_val = true; + } + + return ret_val; +} + +template +void +DefaultRename::readFreeEntries(unsigned tid) +{ + bool updated = false; + if (fromIEW->iewInfo[tid].usedIQ) { + freeEntries[tid].iqEntries = + fromIEW->iewInfo[tid].freeIQEntries; + updated = true; + } + + if (fromIEW->iewInfo[tid].usedLSQ) { + freeEntries[tid].lsqEntries = + fromIEW->iewInfo[tid].freeLSQEntries; + updated = true; + } + + if (fromCommit->commitInfo[tid].usedROB) { + freeEntries[tid].robEntries = + fromCommit->commitInfo[tid].freeROBEntries; + emptyROB[tid] = fromCommit->commitInfo[tid].emptyROB; + updated = true; + } + + DPRINTF(Rename, "[tid:%i]: Free IQ: %i, Free ROB: %i, Free LSQ: %i\n", + tid, + freeEntries[tid].iqEntries, + freeEntries[tid].robEntries, + freeEntries[tid].lsqEntries); + + DPRINTF(Rename, "[tid:%i]: %i instructions not yet in ROB\n", + tid, instsInProgress[tid]); +} + +template +bool +DefaultRename::checkSignalsAndUpdate(unsigned tid) +{ + // Check if 
there's a squash signal, squash if there is + // Check stall signals, block if necessary. + // If status was blocked + // check if stall conditions have passed + // if so then go to unblocking + // If status was Squashing + // check if squashing is not high. Switch to running this cycle. + // If status was serialize stall + // check if ROB is empty and no insts are in flight to the ROB + + readFreeEntries(tid); + readStallSignals(tid); + + if (fromCommit->commitInfo[tid].squash) { + DPRINTF(Rename, "[tid:%u]: Squashing instructions due to squash from " + "commit.\n", tid); + + squash(tid); + + return true; + } + + if (fromCommit->commitInfo[tid].robSquashing) { + DPRINTF(Rename, "[tid:%u]: ROB is still squashing.\n", tid); + + renameStatus[tid] = Squashing; + + return true; + } + + if (checkStall(tid)) { + return block(tid); + } + + if (renameStatus[tid] == Blocked) { + DPRINTF(Rename, "[tid:%u]: Done blocking, switching to unblocking.\n", + tid); + + renameStatus[tid] = Unblocking; + + unblock(tid); + + return true; + } + + if (renameStatus[tid] == Squashing) { + // Switch status to running if rename isn't being told to block or + // squash this cycle. + DPRINTF(Rename, "[tid:%u]: Done squashing, switching to running.\n", + tid); + + renameStatus[tid] = Running; + + return false; + } + + if (renameStatus[tid] == SerializeStall) { + // Stall ends once the ROB is free. + DPRINTF(Rename, "[tid:%u]: Done with serialize stall, switching to " + "unblocking.\n", tid); + + DynInstPtr serial_inst = serializeInst[tid]; + + renameStatus[tid] = Unblocking; + + unblock(tid); + + DPRINTF(Rename, "[tid:%u]: Processing instruction [%lli] with " + "PC %#x.\n", + tid, serial_inst->seqNum, serial_inst->readPC()); + + // Put instruction into queue here. + serial_inst->clearSerializeBefore(); + + if (!skidBuffer[tid].empty()) { + skidBuffer[tid].push_front(serial_inst); + } else { + insts[tid].push_front(serial_inst); + } + + DPRINTF(Rename, "[tid:%u]: Instruction must be processed by rename." + " Adding to front of list.", tid); + + serializeInst[tid] = NULL; + + return true; + } + + // If we've reached this point, we have not gotten any signals that + // cause rename to change its status. Rename remains the same as before. + return false; +} + +template +void +DefaultRename::serializeAfter(InstQueue &inst_list, + unsigned tid) +{ + if (inst_list.empty()) { + // Mark a bit to say that I must serialize on the next instruction. + serializeOnNextInst[tid] = true; + return; + } + + // Set the next instruction as serializing. 
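// Example (sequence numbers illustrative): if a store conditional is
// renamed as [sn:100] with [sn:101] behind it in the same list,
// serializeAfter() marks [sn:101] as serializeBefore, so [sn:101] stalls
// in rename until the ROB has drained past [sn:100].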
+ inst_list.front()->setSerializeBefore(); +} + +template +inline void +DefaultRename::incrFullStat(const FullSource &source) +{ + switch (source) { + case ROB: + ++renameROBFullEvents; + break; + case IQ: + ++renameIQFullEvents; + break; + case LSQ: + ++renameLSQFullEvents; + break; + default: + panic("Rename full stall stat should be incremented for a reason!"); + break; + } +} + +template +void +DefaultRename::dumpHistory() +{ + typename list::iterator buf_it; + + for (int i = 0; i < numThreads; i++) { + + buf_it = historyBuffer[i].begin(); + + while (buf_it != historyBuffer[i].end()) { + cprintf("Seq num: %i\nArch reg: %i New phys reg: %i Old phys " + "reg: %i\n", (*buf_it).instSeqNum, (int)(*buf_it).archReg, + (int)(*buf_it).newPhysReg, (int)(*buf_it).prevPhysReg); + + buf_it++; + } + } +} diff --cc src/cpu/o3/rename_map.hh index 0edb80684,000000000..c4c90c99a mode 100644,000000..100644 --- a/src/cpu/o3/rename_map.hh +++ b/src/cpu/o3/rename_map.hh @@@ -1,167 -1,0 +1,168 @@@ +/* + * Copyright (c) 2004-2005 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +// Todo: Create destructor. +// Have it so that there's a more meaningful name given to the variable +// that marks the beginning of the FP registers. + +#ifndef __CPU_O3_RENAME_MAP_HH__ +#define __CPU_O3_RENAME_MAP_HH__ + +#include +#include +#include + +#include "cpu/o3/free_list.hh" +//For RegIndex +#include "arch/isa_traits.hh" + +class SimpleRenameMap +{ + protected: + typedef TheISA::RegIndex RegIndex; + public: + /** + * Pair of a logical register and a physical register. Tells the + * previous mapping of a logical register to a physical register. + * Used to roll back the rename map to a previous state. + */ + typedef std::pair UnmapInfo; + + /** + * Pair of a physical register and a physical register. Used to + * return the physical register that a logical register has been + * renamed to, and the previous physical register that the same + * logical register was previously mapped to. 
+ */ + typedef std::pair RenameInfo; + + public: - //Constructor - SimpleRenameMap() {}; ++ /** Default constructor. init() must be called prior to use. */ ++ SimpleRenameMap() {}; + + /** Destructor. */ + ~SimpleRenameMap(); + ++ /** Initializes rename map with given parameters. */ + void init(unsigned _numLogicalIntRegs, + unsigned _numPhysicalIntRegs, + PhysRegIndex &_int_reg_start, + + unsigned _numLogicalFloatRegs, + unsigned _numPhysicalFloatRegs, + PhysRegIndex &_float_reg_start, + + unsigned _numMiscRegs, + + RegIndex _intZeroReg, + RegIndex _floatZeroReg, + + int id, + bool bindRegs); + ++ /** Sets the free list used with this rename map. */ + void setFreeList(SimpleFreeList *fl_ptr); + + //Tell rename map to get a free physical register for a given + //architected register. Not sure it should have a return value, + //but perhaps it should have some sort of fault in case there are + //no free registers. + RenameInfo rename(RegIndex arch_reg); + + PhysRegIndex lookup(RegIndex phys_reg); + + /** + * Marks the given register as ready, meaning that its value has been + * calculated and written to the register file. + * @param ready_reg The index of the physical register that is now ready. + */ + void setEntry(RegIndex arch_reg, PhysRegIndex renamed_reg); + + int numFreeEntries(); + + private: + /** Rename Map ID */ + int id; + + /** Number of logical integer registers. */ + int numLogicalIntRegs; + + /** Number of physical integer registers. */ + int numPhysicalIntRegs; + + /** Number of logical floating point registers. */ + int numLogicalFloatRegs; + + /** Number of physical floating point registers. */ + int numPhysicalFloatRegs; + + /** Number of miscellaneous registers. */ + int numMiscRegs; + + /** Number of logical integer + float registers. */ + int numLogicalRegs; + + /** Number of physical integer + float registers. */ + int numPhysicalRegs; + + /** The integer zero register. This implementation assumes it is always + * zero and never can be anything else. + */ + RegIndex intZeroReg; + + /** The floating point zero register. This implementation assumes it is + * always zero and never can be anything else. + */ + RegIndex floatZeroReg; + + class RenameEntry + { + public: + PhysRegIndex physical_reg; + bool valid; + + RenameEntry() + : physical_reg(0), valid(false) + { } + }; + - //Change this to private + private: + /** Integer rename map. */ + std::vector intRenameMap; + + /** Floating point rename map. */ + std::vector floatRenameMap; + + private: + /** Free list interface. */ + SimpleFreeList *freeList; +}; + +#endif //__CPU_O3_RENAME_MAP_HH__ diff --cc src/cpu/o3/rob.hh index 3786a0355,000000000..6d1402531 mode 100644,000000..100644 --- a/src/cpu/o3/rob.hh +++ b/src/cpu/o3/rob.hh @@@ -1,316 -1,0 +1,319 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_ROB_HH__ +#define __CPU_O3_ROB_HH__ + +#include +#include +#include + +/** + * ROB class. The ROB is largely what drives squashing. + */ +template +class ROB +{ + protected: + typedef TheISA::RegIndex RegIndex; + public: + //Typedefs from the Impl. + typedef typename Impl::FullCPU FullCPU; + typedef typename Impl::DynInstPtr DynInstPtr; + + typedef std::pair UnmapInfo; + typedef typename std::list::iterator InstIt; + + /** Possible ROB statuses. */ + enum Status { + Running, + Idle, + ROBSquashing + }; + + /** SMT ROB Sharing Policy */ + enum ROBPolicy{ + Dynamic, + Partitioned, + Threshold + }; + + private: + /** Per-thread ROB status. */ + Status robStatus[Impl::MaxThreads]; + + /** ROB resource sharing policy for SMT mode. */ + ROBPolicy robPolicy; + + public: + /** ROB constructor. + * @param _numEntries Number of entries in ROB. + * @param _squashWidth Number of instructions that can be squashed in a + * single cycle. + * @param _smtROBPolicy ROB Partitioning Scheme for SMT. + * @param _smtROBThreshold Max Resources(by %) a thread can have in the ROB. + * @param _numThreads The number of active threads. + */ + ROB(unsigned _numEntries, unsigned _squashWidth, std::string smtROBPolicy, + unsigned _smtROBThreshold, unsigned _numThreads); + + std::string name() const; + + /** Function to set the CPU pointer, necessary due to which object the ROB + * is created within. + * @param cpu_ptr Pointer to the implementation specific full CPU object. + */ + void setCPU(FullCPU *cpu_ptr); + + /** Sets pointer to the list of active threads. + * @param at_ptr Pointer to the list of active threads. + */ + void setActiveThreads(std::list* at_ptr); + ++ /** Switches out the ROB. */ + void switchOut(); + ++ /** Takes over another CPU's thread. */ + void takeOverFrom(); + + /** Function to insert an instruction into the ROB. Note that whatever + * calls this function must ensure that there is enough space within the + * ROB for the new instruction. 
+ * @param inst The instruction being inserted into the ROB. + */ + void insertInst(DynInstPtr &inst); + + /** Returns pointer to the head instruction within the ROB. There is + * no guarantee as to the return value if the ROB is empty. + * @retval Pointer to the DynInst that is at the head of the ROB. + */ +// DynInstPtr readHeadInst(); + + /** Returns a pointer to the head instruction of a specific thread within + * the ROB. + * @return Pointer to the DynInst that is at the head of the ROB. + */ + DynInstPtr readHeadInst(unsigned tid); + + /** Returns pointer to the tail instruction within the ROB. There is + * no guarantee as to the return value if the ROB is empty. + * @retval Pointer to the DynInst that is at the tail of the ROB. + */ +// DynInstPtr readTailInst(); + + /** Returns a pointer to the tail instruction of a specific thread within + * the ROB. + * @return Pointer to the DynInst that is at the tail of the ROB. + */ + DynInstPtr readTailInst(unsigned tid); + + /** Retires the head instruction, removing it from the ROB. */ +// void retireHead(); + + /** Retires the head instruction of a specific thread, removing it from the + * ROB. + */ + void retireHead(unsigned tid); + + /** Is the oldest instruction across all threads ready. */ +// bool isHeadReady(); + + /** Is the oldest instruction across a particular thread ready. */ + bool isHeadReady(unsigned tid); + + /** Is there any commitable head instruction across all threads ready. */ + bool canCommit(); + + /** Re-adjust ROB partitioning. */ + void resetEntries(); + + /** Number of entries needed For 'num_threads' amount of threads. */ + int entryAmount(int num_threads); + + /** Returns the number of total free entries in the ROB. */ + unsigned numFreeEntries(); + + /** Returns the number of free entries in a specific ROB paritition. */ + unsigned numFreeEntries(unsigned tid); + + /** Returns the maximum number of entries for a specific thread. */ + unsigned getMaxEntries(unsigned tid) + { return maxEntries[tid]; } + + /** Returns the number of entries being used by a specific thread. */ + unsigned getThreadEntries(unsigned tid) + { return threadEntries[tid]; } + + /** Returns if the ROB is full. */ + bool isFull() + { return numInstsInROB == numEntries; } + + /** Returns if a specific thread's partition is full. */ + bool isFull(unsigned tid) + { return threadEntries[tid] == numEntries; } + + /** Returns if the ROB is empty. */ + bool isEmpty() + { return numInstsInROB == 0; } + + /** Returns if a specific thread's partition is empty. */ + bool isEmpty(unsigned tid) + { return threadEntries[tid] == 0; } + + /** Executes the squash, marking squashed instructions. */ + void doSquash(unsigned tid); + + /** Squashes all instructions younger than the given sequence number for + * the specific thread. + */ + void squash(InstSeqNum squash_num, unsigned tid); + + /** Updates the head instruction with the new oldest instruction. */ + void updateHead(); + + /** Updates the tail instruction with the new youngest instruction. */ + void updateTail(); + + /** Reads the PC of the oldest head instruction. */ +// uint64_t readHeadPC(); + + /** Reads the PC of the head instruction of a specific thread. */ +// uint64_t readHeadPC(unsigned tid); + + /** Reads the next PC of the oldest head instruction. */ +// uint64_t readHeadNextPC(); + + /** Reads the next PC of the head instruction of a specific thread. */ +// uint64_t readHeadNextPC(unsigned tid); + + /** Reads the sequence number of the oldest head instruction. 
*/ +// InstSeqNum readHeadSeqNum(); + + /** Reads the sequence number of the head instruction of a specific thread. + */ +// InstSeqNum readHeadSeqNum(unsigned tid); + + /** Reads the PC of the youngest tail instruction. */ +// uint64_t readTailPC(); + + /** Reads the PC of the tail instruction of a specific thread. */ +// uint64_t readTailPC(unsigned tid); + + /** Reads the sequence number of the youngest tail instruction. */ +// InstSeqNum readTailSeqNum(); + + /** Reads the sequence number of tail instruction of a specific thread. */ +// InstSeqNum readTailSeqNum(unsigned tid); + + /** Checks if the ROB is still in the process of squashing instructions. + * @retval Whether or not the ROB is done squashing. + */ + bool isDoneSquashing(unsigned tid) const + { return doneSquashing[tid]; } + + /** Checks if the ROB is still in the process of squashing instructions for + * any thread. + */ + bool isDoneSquashing(); + + /** This is more of a debugging function than anything. Use + * numInstsInROB to get the instructions in the ROB unless you are + * double checking that variable. + */ + int countInsts(); + + /** This is more of a debugging function than anything. Use + * threadEntries to get the instructions in the ROB unless you are + * double checking that variable. + */ + int countInsts(unsigned tid); + + private: + /** Pointer to the CPU. */ + FullCPU *cpu; + + /** Active Threads in CPU */ + std::list* activeThreads; + + /** Number of instructions in the ROB. */ + unsigned numEntries; + + /** Entries Per Thread */ + unsigned threadEntries[Impl::MaxThreads]; + + /** Max Insts a Thread Can Have in the ROB */ + unsigned maxEntries[Impl::MaxThreads]; + + /** ROB List of Instructions */ + std::list instList[Impl::MaxThreads]; + + /** Number of instructions that can be squashed in a single cycle. */ + unsigned squashWidth; + + public: + /** Iterator pointing to the instruction which is the last instruction + * in the ROB. This may at times be invalid (ie when the ROB is empty), + * however it should never be incorrect. + */ + InstIt tail; + + /** Iterator pointing to the instruction which is the first instruction in + * in the ROB*/ + InstIt head; + + private: + /** Iterator used for walking through the list of instructions when + * squashing. Used so that there is persistent state between cycles; + * when squashing, the instructions are marked as squashed but not + * immediately removed, meaning the tail iterator remains the same before + * and after a squash. + * This will always be set to cpu->instList.end() if it is invalid. + */ + InstIt squashIt[Impl::MaxThreads]; + + public: + /** Number of instructions in the ROB. */ + int numInstsInROB; + ++ /** Dummy instruction returned if there are no insts left. */ + DynInstPtr dummyInst; + + private: + /** The sequence number of the squashed instruction. */ + InstSeqNum squashedSeqNum; + + /** Is the ROB done squashing. */ + bool doneSquashing[Impl::MaxThreads]; + + /** Number of active threads. */ + unsigned numThreads; +}; + +#endif //__CPU_O3_ROB_HH__ diff --cc src/cpu/o3/store_set.cc index 720d5da53,000000000..0023cee36 mode 100644,000000..100644 --- a/src/cpu/o3/store_set.cc +++ b/src/cpu/o3/store_set.cc @@@ -1,322 -1,0 +1,347 @@@ +/* + * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Authors: Kevin Lim + */ + ++#include "base/intmath.hh" +#include "base/trace.hh" +#include "cpu/o3/store_set.hh" + +StoreSet::StoreSet(int _SSIT_size, int _LFST_size) + : SSITSize(_SSIT_size), LFSTSize(_LFST_size) +{ + DPRINTF(StoreSet, "StoreSet: Creating store set object.\n"); + DPRINTF(StoreSet, "StoreSet: SSIT size: %i, LFST size: %i.\n", + SSITSize, LFSTSize); + ++ if (!isPowerOf2(SSITSize)) { ++ fatal("Invalid SSIT size!\n"); ++ } ++ + SSIT.resize(SSITSize); + + validSSIT.resize(SSITSize); + + for (int i = 0; i < SSITSize; ++i) + validSSIT[i] = false; + ++ if (!isPowerOf2(LFSTSize)) { ++ fatal("Invalid LFST size!\n"); ++ } ++ + LFST.resize(LFSTSize); + + validLFST.resize(LFSTSize); + + for (int i = 0; i < LFSTSize; ++i) { + validLFST[i] = false; + LFST[i] = 0; + } + + indexMask = SSITSize - 1; + + offsetBits = 2; +} + +StoreSet::~StoreSet() +{ +} + +void +StoreSet::init(int _SSIT_size, int _LFST_size) +{ + SSITSize = _SSIT_size; + LFSTSize = _LFST_size; + + DPRINTF(StoreSet, "StoreSet: Creating store set object.\n"); + DPRINTF(StoreSet, "StoreSet: SSIT size: %i, LFST size: %i.\n", + SSITSize, LFSTSize); + + SSIT.resize(SSITSize); + + validSSIT.resize(SSITSize); + + for (int i = 0; i < SSITSize; ++i) + validSSIT[i] = false; + + LFST.resize(LFSTSize); + + validLFST.resize(LFSTSize); + + for (int i = 0; i < LFSTSize; ++i) { + validLFST[i] = false; + LFST[i] = 0; + } + + indexMask = SSITSize - 1; + + offsetBits = 2; +} + + +void +StoreSet::violation(Addr store_PC, Addr load_PC) +{ + int load_index = calcIndex(load_PC); + int store_index = calcIndex(store_PC); + + assert(load_index < SSITSize && store_index < SSITSize); + + bool valid_load_SSID = validSSIT[load_index]; + bool valid_store_SSID = validSSIT[store_index]; + + if (!valid_load_SSID && !valid_store_SSID) { + // Calculate a new SSID here. 
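// Walkthrough (PCs illustrative): a load at PC 0x1200 violates against a
// store at PC 0x0fc0 and neither SSIT entry is valid yet, so both entries
// receive calcSSID(0x1200); from then on the store's fetches update, and
// the load's fetches consult, the same LFST slot.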
+ SSID new_set = calcSSID(load_PC); + + validSSIT[load_index] = true; + + SSIT[load_index] = new_set; + + validSSIT[store_index] = true; + + SSIT[store_index] = new_set; + + assert(new_set < LFSTSize); + + DPRINTF(StoreSet, "StoreSet: Neither load nor store had a valid " + "storeset, creating a new one: %i for load %#x, store %#x\n", + new_set, load_PC, store_PC); + } else if (valid_load_SSID && !valid_store_SSID) { + SSID load_SSID = SSIT[load_index]; + + validSSIT[store_index] = true; + + SSIT[store_index] = load_SSID; + + assert(load_SSID < LFSTSize); + + DPRINTF(StoreSet, "StoreSet: Load had a valid store set. Adding " + "store to that set: %i for load %#x, store %#x\n", + load_SSID, load_PC, store_PC); + } else if (!valid_load_SSID && valid_store_SSID) { + SSID store_SSID = SSIT[store_index]; + + validSSIT[load_index] = true; + + SSIT[load_index] = store_SSID; + + DPRINTF(StoreSet, "StoreSet: Store had a valid store set: %i for " + "load %#x, store %#x\n", + store_SSID, load_PC, store_PC); + } else { + SSID load_SSID = SSIT[load_index]; + SSID store_SSID = SSIT[store_index]; + + assert(load_SSID < LFSTSize && store_SSID < LFSTSize); + + // The store set with the lower number wins + if (store_SSID > load_SSID) { + SSIT[store_index] = load_SSID; + + DPRINTF(StoreSet, "StoreSet: Load had smaller store set: %i; " + "for load %#x, store %#x\n", + load_SSID, load_PC, store_PC); + } else { + SSIT[load_index] = store_SSID; + + DPRINTF(StoreSet, "StoreSet: Store had smaller store set: %i; " + "for load %#x, store %#x\n", + store_SSID, load_PC, store_PC); + } + } +} + +void +StoreSet::insertLoad(Addr load_PC, InstSeqNum load_seq_num) +{ + // Does nothing. + return; +} + +void +StoreSet::insertStore(Addr store_PC, InstSeqNum store_seq_num, + unsigned tid) +{ + int index = calcIndex(store_PC); + + int store_SSID; + + assert(index < SSITSize); + + if (!validSSIT[index]) { + // Do nothing if there's no valid entry. + return; + } else { + store_SSID = SSIT[index]; + + assert(store_SSID < LFSTSize); + + // Update the last store that was fetched with the current one. + LFST[store_SSID] = store_seq_num; + + validLFST[store_SSID] = 1; + + storeList[store_seq_num] = store_SSID; + + DPRINTF(StoreSet, "Store %#x updated the LFST, SSID: %i\n", + store_PC, store_SSID); + } +} + +InstSeqNum +StoreSet::checkInst(Addr PC) +{ + int index = calcIndex(PC); + + int inst_SSID; + + assert(index < SSITSize); + + if (!validSSIT[index]) { + DPRINTF(StoreSet, "Inst %#x with index %i had no SSID\n", + PC, index); + + // Return 0 if there's no valid entry. + return 0; + } else { + inst_SSID = SSIT[index]; + + assert(inst_SSID < LFSTSize); + + if (!validLFST[inst_SSID]) { + + DPRINTF(StoreSet, "Inst %#x with index %i and SSID %i had no " + "dependency\n", PC, index, inst_SSID); + + return 0; + } else { + DPRINTF(StoreSet, "Inst %#x with index %i and SSID %i had LFST " + "inum of %i\n", PC, index, inst_SSID, LFST[inst_SSID]); + + return LFST[inst_SSID]; + } + } +} + +void +StoreSet::issued(Addr issued_PC, InstSeqNum issued_seq_num, bool is_store) +{ + // This only is updated upon a store being issued. + if (!is_store) { + return; + } + + int index = calcIndex(issued_PC); + + int store_SSID; + + assert(index < SSITSize); + + SeqNumMapIt store_list_it = storeList.find(issued_seq_num); + + if (store_list_it != storeList.end()) { + storeList.erase(store_list_it); + } + + // Make sure the SSIT still has a valid entry for the issued store. 
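// Example (numbers illustrative): if store [sn:42] with SSID 3 was the
// last store fetched in its set, LFST[3] == 42 and validLFST[3] is set;
// once [sn:42] issues, the code below clears validLFST[3] so younger
// loads in set 3 stop waiting on it.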
+ if (!validSSIT[index]) { + return; + } + + store_SSID = SSIT[index]; + + assert(store_SSID < LFSTSize); + + // If the last fetched store in the store set refers to the store that + // was just issued, then invalidate the entry. + if (validLFST[store_SSID] && LFST[store_SSID] == issued_seq_num) { + DPRINTF(StoreSet, "StoreSet: store invalidated itself in LFST.\n"); + validLFST[store_SSID] = false; + } +} + +void +StoreSet::squash(InstSeqNum squashed_num, unsigned tid) +{ + DPRINTF(StoreSet, "StoreSet: Squashing until inum %i\n", + squashed_num); + + int idx; + SeqNumMapIt store_list_it = storeList.begin(); + + //@todo:Fix to only delete from correct thread + while (!storeList.empty()) { + idx = (*store_list_it).second; + + if ((*store_list_it).first <= squashed_num) { + break; + } + + bool younger = LFST[idx] > squashed_num; + + if (validLFST[idx] && younger) { + DPRINTF(StoreSet, "Squashed [sn:%lli]\n", LFST[idx]); + validLFST[idx] = false; + + storeList.erase(store_list_it++); + } else if (!validLFST[idx] && younger) { + storeList.erase(store_list_it++); + } + } +} + +void +StoreSet::clear() +{ + for (int i = 0; i < SSITSize; ++i) { + validSSIT[i] = false; + } + + for (int i = 0; i < LFSTSize; ++i) { + validLFST[i] = false; + } + + storeList.clear(); +} ++ ++void ++StoreSet::dump() ++{ ++ cprintf("storeList.size(): %i\n", storeList.size()); ++ SeqNumMapIt store_list_it = storeList.begin(); ++ ++ int num = 0; ++ ++ while (store_list_it != storeList.end()) { ++ cprintf("%i: [sn:%lli] SSID:%i\n", ++ num, (*store_list_it).first, (*store_list_it).second); ++ num++; ++ store_list_it++; ++ } ++} diff --cc src/cpu/o3/store_set.hh index 64255c51a,000000000..f5a44a1ac mode 100644,000000..100644 --- a/src/cpu/o3/store_set.hh +++ b/src/cpu/o3/store_set.hh @@@ -1,107 -1,0 +1,147 @@@ +/* + * Copyright (c) 2004-2005 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Authors: Kevin Lim + */ + +#ifndef __CPU_O3_STORE_SET_HH__ +#define __CPU_O3_STORE_SET_HH__ + +#include +#include +#include +#include + +#include "arch/isa_traits.hh" +#include "cpu/inst_seq.hh" + +struct ltseqnum { + bool operator()(const InstSeqNum &lhs, const InstSeqNum &rhs) const + { + return lhs > rhs; + } +}; + ++/** ++ * Implements a store set predictor for determining if memory ++ * instructions are dependent upon each other. See paper "Memory ++ * Dependence Prediction using Store Sets" by Chrysos and Emer. SSID ++ * stands for Store Set ID, SSIT stands for Store Set ID Table, and ++ * LFST is Last Fetched Store Table. ++ */ +class StoreSet +{ + public: + typedef unsigned SSID; + + public: ++ /** Default constructor. init() must be called prior to use. */ + StoreSet() { }; + ++ /** Creates store set predictor with given table sizes. */ + StoreSet(int SSIT_size, int LFST_size); + ++ /** Default destructor. */ + ~StoreSet(); + ++ /** Initializes the store set predictor with the given table sizes. */ + void init(int SSIT_size, int LFST_size); + ++ /** Records a memory ordering violation between the younger load ++ * and the older store. */ + void violation(Addr store_PC, Addr load_PC); + ++ /** Inserts a load into the store set predictor. This does nothing but ++ * is included in case other predictors require a similar function. ++ */ + void insertLoad(Addr load_PC, InstSeqNum load_seq_num); + ++ /** Inserts a store into the store set predictor. Updates the ++ * LFST if the store has a valid SSID. */ + void insertStore(Addr store_PC, InstSeqNum store_seq_num, + unsigned tid); + ++ /** Checks if the instruction with the given PC is dependent upon ++ * any store. @return Returns the sequence number of the store ++ * instruction this PC is dependent upon. Returns 0 if none. ++ */ + InstSeqNum checkInst(Addr PC); + ++ /** Records this PC/sequence number as issued. */ + void issued(Addr issued_PC, InstSeqNum issued_seq_num, bool is_store); + ++ /** Squashes for a specific thread until the given sequence number. */ + void squash(InstSeqNum squashed_num, unsigned tid); + ++ /** Resets all tables. */ + void clear(); + ++ /** Debug function to dump the contents of the store list. */ ++ void dump(); ++ + private: ++ /** Calculates the index into the SSIT based on the PC. */ + inline int calcIndex(Addr PC) + { return (PC >> offsetBits) & indexMask; } + ++ /** Calculates a Store Set ID based on the PC. */ + inline SSID calcSSID(Addr PC) + { return ((PC ^ (PC >> 10)) % LFSTSize); } + ++ /** The Store Set ID Table. */ + std::vector SSIT; + ++ /** Bit vector to tell if the SSIT has a valid entry. */ + std::vector validSSIT; + ++ /** Last Fetched Store Table. */ + std::vector LFST; + ++ /** Bit vector to tell if the LFST has a valid entry. */ + std::vector validLFST; + ++ /** Map of stores that have been inserted into the store set, but ++ * not yet issued or squashed. ++ */ + std::map storeList; + + typedef std::map::iterator SeqNumMapIt; + ++ /** Store Set ID Table size, in entries. */ + int SSITSize; + ++ /** Last Fetched Store Table size, in entries. */ + int LFSTSize; + ++ /** Mask to obtain the index. */ + int indexMask; + + // HACK: Hardcoded for now. 
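// With offsetBits == 2 and SSITSize == 1024, hence indexMask == 0x3ff
// (values illustrative), calcIndex(0x1200) == (0x1200 >> 2) & 0x3ff ==
// 0x80: the low-order instruction-offset bits are dropped before
// indexing.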
+ int offsetBits; +}; + +#endif // __CPU_O3_STORE_SET_HH__ diff --cc src/cpu/o3/thread_state.hh index 9101eafb9,000000000..dfb1530d0 mode 100644,000000..100644 --- a/src/cpu/o3/thread_state.hh +++ b/src/cpu/o3/thread_state.hh @@@ -1,112 -1,0 +1,133 @@@ +/* + * Copyright (c) 2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CPU_O3_THREAD_STATE_HH__ +#define __CPU_O3_THREAD_STATE_HH__ + +#include "arch/faults.hh" +#include "arch/isa_traits.hh" +#include "cpu/exec_context.hh" +#include "cpu/thread_state.hh" + +class Event; +class Process; + +#if FULL_SYSTEM +class EndQuiesceEvent; +class FunctionProfile; +class ProfileNode; +#else +class FunctionalMemory; +class Process; +#endif + +/** + * Class that has various thread state, such as the status, the + * current instruction being processed, whether or not the thread has + * a trap pending or is being externally updated, the ExecContext + * proxy pointer, etc. It also handles anything related to a specific + * thread's process, such as syscalls and checking valid addresses. + */ +template +struct O3ThreadState : public ThreadState { + typedef ExecContext::Status Status; + typedef typename Impl::FullCPU FullCPU; + ++ /** Current status of the thread. */ + Status _status; + - // Current instruction ++ /** Current instruction the thread is committing. Only set and ++ * used for DTB faults currently. ++ */ + TheISA::MachInst inst; ++ + private: ++ /** Pointer to the CPU. */ + FullCPU *cpu; + public: - ++ /** Whether or not the thread is currently in syscall mode, and ++ * thus able to be externally updated without squashing. ++ */ + bool inSyscall; + ++ /** Whether or not the thread is currently waiting on a trap, and ++ * thus able to be externally updated without squashing. 
++ */ + bool trapPending; + +#if FULL_SYSTEM + O3ThreadState(FullCPU *_cpu, int _thread_num, FunctionalMemory *_mem) + : ThreadState(-1, _thread_num, _mem), + inSyscall(0), trapPending(0) + { } +#else + O3ThreadState(FullCPU *_cpu, int _thread_num, Process *_process, int _asid) + : ThreadState(-1, _thread_num, NULL, _process, _asid), + cpu(_cpu), inSyscall(0), trapPending(0) + { } + + O3ThreadState(FullCPU *_cpu, int _thread_num, FunctionalMemory *_mem, + int _asid) + : ThreadState(-1, _thread_num, _mem, NULL, _asid), + cpu(_cpu), inSyscall(0), trapPending(0) + { } +#endif + ++ /** Pointer to the ExecContext of this thread. @todo: Don't call ++ this a proxy.*/ + ExecContext *xcProxy; + ++ /** Returns a pointer to the XC of this thread. */ + ExecContext *getXCProxy() { return xcProxy; } + ++ /** Returns the status of this thread. */ + Status status() const { return _status; } + ++ /** Sets the status of this thread. */ + void setStatus(Status new_status) { _status = new_status; } + - bool misspeculating() { return false; } - ++ /** Sets the current instruction being committed. */ + void setInst(TheISA::MachInst _inst) { inst = _inst; } + ++ /** Reads the number of instructions functionally executed and ++ * committed. ++ */ + Counter readFuncExeInst() { return funcExeInst; } + ++ /** Sets the total number of instructions functionally executed ++ * and committed. ++ */ + void setFuncExeInst(Counter new_val) { funcExeInst = new_val; } + +#if !FULL_SYSTEM ++ /** Handles the syscall. */ + void syscall(int64_t callnum) { process->syscall(callnum, xcProxy); } +#endif +}; + +#endif // __CPU_O3_THREAD_STATE_HH__ diff --cc src/cpu/o3/tournament_pred.cc index 361ef4770,000000000..7cf78dcb1 mode 100644,000000..100644 --- a/src/cpu/o3/tournament_pred.cc +++ b/src/cpu/o3/tournament_pred.cc @@@ -1,257 -1,0 +1,293 @@@ +/* - * Copyright (c) 2004-2005 The Regents of The University of Michigan ++ * Copyright (c) 2004-2006 The Regents of The University of Michigan + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
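A note on the inSyscall and trapPending flags in O3ThreadState above: both mark windows in which a thread's architectural state may be written from outside the pipeline. A sketch of the check an external updater presumably performs (the function and its caller are invented for illustration):

    template <class Impl>
    bool canUpdateExternally(const O3ThreadState<Impl> *thread)
    {
        // While a syscall is in progress or a trap is pending, the
        // pipeline holds no speculative work for this thread, so its
        // registers can be rewritten without forcing a squash.
        return thread->inSyscall || thread->trapPending;
    }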
+ * + * Authors: Kevin Lim + */ + ++#include "base/intmath.hh" +#include "cpu/o3/tournament_pred.hh" + +TournamentBP::TournamentBP(unsigned _localPredictorSize, + unsigned _localCtrBits, + unsigned _localHistoryTableSize, + unsigned _localHistoryBits, + unsigned _globalPredictorSize, + unsigned _globalCtrBits, + unsigned _globalHistoryBits, + unsigned _choicePredictorSize, + unsigned _choiceCtrBits, + unsigned _instShiftAmt) + : localPredictorSize(_localPredictorSize), + localCtrBits(_localCtrBits), + localHistoryTableSize(_localHistoryTableSize), + localHistoryBits(_localHistoryBits), + globalPredictorSize(_globalPredictorSize), + globalCtrBits(_globalCtrBits), + globalHistoryBits(_globalHistoryBits), + choicePredictorSize(_globalPredictorSize), + choiceCtrBits(_choiceCtrBits), + instShiftAmt(_instShiftAmt) +{ - //Should do checks here to make sure sizes are correct (powers of 2) ++ if (!isPowerOf2(localPredictorSize)) { ++ fatal("Invalid local predictor size!\n"); ++ } + + //Setup the array of counters for the local predictor + localCtrs.resize(localPredictorSize); + + for (int i = 0; i < localPredictorSize; ++i) + localCtrs[i].setBits(localCtrBits); + ++ if (!isPowerOf2(localHistoryTableSize)) { ++ fatal("Invalid local history table size!\n"); ++ } ++ + //Setup the history table for the local table + localHistoryTable.resize(localHistoryTableSize); + + for (int i = 0; i < localHistoryTableSize; ++i) + localHistoryTable[i] = 0; + + // Setup the local history mask + localHistoryMask = (1 << localHistoryBits) - 1; + ++ if (!isPowerOf2(globalPredictorSize)) { ++ fatal("Invalid global predictor size!\n"); ++ } ++ + //Setup the array of counters for the global predictor + globalCtrs.resize(globalPredictorSize); + + for (int i = 0; i < globalPredictorSize; ++i) + globalCtrs[i].setBits(globalCtrBits); + + //Clear the global history + globalHistory = 0; + // Setup the global history mask + globalHistoryMask = (1 << globalHistoryBits) - 1; + ++ if (!isPowerOf2(choicePredictorSize)) { ++ fatal("Invalid choice predictor size!\n"); ++ } ++ + //Setup the array of counters for the choice predictor + choiceCtrs.resize(choicePredictorSize); + + for (int i = 0; i < choicePredictorSize; ++i) + choiceCtrs[i].setBits(choiceCtrBits); + ++ // @todo: Allow for different thresholds between the predictors. + threshold = (1 << (localCtrBits - 1)) - 1; + threshold = threshold / 2; +} + +inline +unsigned +TournamentBP::calcLocHistIdx(Addr &branch_addr) +{ ++ // Get low order bits after removing instruction offset. 
+ return (branch_addr >> instShiftAmt) & (localHistoryTableSize - 1); +} + +inline +void - TournamentBP::updateHistoriesTaken(unsigned local_history_idx) ++TournamentBP::updateGlobalHistTaken() +{ + globalHistory = (globalHistory << 1) | 1; + globalHistory = globalHistory & globalHistoryMask; - - localHistoryTable[local_history_idx] = - (localHistoryTable[local_history_idx] << 1) | 1; +} + +inline +void - TournamentBP::updateHistoriesNotTaken(unsigned local_history_idx) ++TournamentBP::updateGlobalHistNotTaken() +{ + globalHistory = (globalHistory << 1); + globalHistory = globalHistory & globalHistoryMask; ++} + ++inline ++void ++TournamentBP::updateLocalHistTaken(unsigned local_history_idx) ++{ ++ localHistoryTable[local_history_idx] = ++ (localHistoryTable[local_history_idx] << 1) | 1; ++} ++ ++inline ++void ++TournamentBP::updateLocalHistNotTaken(unsigned local_history_idx) ++{ + localHistoryTable[local_history_idx] = + (localHistoryTable[local_history_idx] << 1); +} + +bool - TournamentBP::lookup(Addr &branch_addr) ++TournamentBP::lookup(Addr &branch_addr, void * &bp_history) +{ - uint8_t local_prediction; ++ bool local_prediction; + unsigned local_history_idx; + unsigned local_predictor_idx; + - uint8_t global_prediction; - uint8_t choice_prediction; ++ bool global_prediction; ++ bool choice_prediction; + + //Lookup in the local predictor to get its branch prediction + local_history_idx = calcLocHistIdx(branch_addr); + local_predictor_idx = localHistoryTable[local_history_idx] + & localHistoryMask; - local_prediction = localCtrs[local_predictor_idx].read(); ++ local_prediction = localCtrs[local_predictor_idx].read() > threshold; + + //Lookup in the global predictor to get its branch prediction - global_prediction = globalCtrs[globalHistory].read(); ++ global_prediction = globalCtrs[globalHistory].read() > threshold; + + //Lookup in the choice predictor to see which one to use - choice_prediction = choiceCtrs[globalHistory].read(); - - //@todo Put a threshold value in for the three predictors that can - // be set through the constructor (so this isn't hard coded). - //Also should put some of this code into functions. - if (choice_prediction > threshold) { - if (global_prediction > threshold) { - updateHistoriesTaken(local_history_idx); - - assert(globalHistory < globalPredictorSize && - local_history_idx < localPredictorSize); - - globalCtrs[globalHistory].increment(); - localCtrs[local_history_idx].increment(); - ++ choice_prediction = choiceCtrs[globalHistory].read() > threshold; ++ ++ // Create BPHistory and pass it back to be recorded. ++ BPHistory *history = new BPHistory; ++ history->globalHistory = globalHistory; ++ history->localPredTaken = local_prediction; ++ history->globalPredTaken = global_prediction; ++ history->globalUsed = choice_prediction; ++ bp_history = (void *)history; ++ ++ assert(globalHistory < globalPredictorSize && ++ local_history_idx < localPredictorSize); ++ ++ // Commented code is for doing speculative update of counters and ++ // all histories. 
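Before the choice logic below, a worked example of the global-history helpers just defined, assuming a hypothetical 4-bit history (so globalHistoryMask == 0xF):

    #include <cassert>

    void globalHistorySketch()
    {
        const unsigned mask = (1 << 4) - 1;  // globalHistoryMask for 4 bits
        unsigned hist = 0xA;                 // 1010, newest outcome in bit 0

        hist = ((hist << 1) | 1) & mask;     // taken: oldest bit shifts out
        assert(hist == 0x5);                 // 0101

        hist = (hist << 1) & mask;           // not taken
        assert(hist == 0xA);                 // 1010 again
    }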
++ if (choice_prediction) { ++ if (global_prediction) { ++// updateHistoriesTaken(local_history_idx); ++// globalCtrs[globalHistory].increment(); ++// localCtrs[local_history_idx].increment(); ++ updateGlobalHistTaken(); + return true; + } else { - updateHistoriesNotTaken(local_history_idx); - - assert(globalHistory < globalPredictorSize && - local_history_idx < localPredictorSize); - - globalCtrs[globalHistory].decrement(); - localCtrs[local_history_idx].decrement(); - ++// updateHistoriesNotTaken(local_history_idx); ++// globalCtrs[globalHistory].decrement(); ++// localCtrs[local_history_idx].decrement(); ++ updateGlobalHistNotTaken(); + return false; + } + } else { - if (local_prediction > threshold) { - updateHistoriesTaken(local_history_idx); - - assert(globalHistory < globalPredictorSize && - local_history_idx < localPredictorSize); - - globalCtrs[globalHistory].increment(); - localCtrs[local_history_idx].increment(); - ++ if (local_prediction) { ++// updateHistoriesTaken(local_history_idx); ++// globalCtrs[globalHistory].increment(); ++// localCtrs[local_history_idx].increment(); ++ updateGlobalHistTaken(); + return true; + } else { - updateHistoriesNotTaken(local_history_idx); - - assert(globalHistory < globalPredictorSize && - local_history_idx < localPredictorSize); - - globalCtrs[globalHistory].decrement(); - localCtrs[local_history_idx].decrement(); - ++// updateHistoriesNotTaken(local_history_idx); ++// globalCtrs[globalHistory].decrement(); ++// localCtrs[local_history_idx].decrement(); ++ updateGlobalHistNotTaken(); + return false; + } + } +} + - // Update the branch predictor if it predicted a branch wrong. +void - TournamentBP::update(Addr &branch_addr, unsigned correct_gh, bool taken) ++TournamentBP::uncondBr(void * &bp_history) +{ ++ // Create BPHistory and pass it back to be recorded. ++ BPHistory *history = new BPHistory; ++ history->globalHistory = globalHistory; ++ history->localPredTaken = true; ++ history->globalPredTaken = true; ++ bp_history = static_cast(history); ++ ++ updateGlobalHistTaken(); ++} + - uint8_t local_prediction; ++void ++TournamentBP::update(Addr &branch_addr, bool taken, void *bp_history) ++{ + unsigned local_history_idx; + unsigned local_predictor_idx; - bool local_pred_taken; ++ unsigned local_predictor_hist; + - uint8_t global_prediction; - bool global_pred_taken; - - // Load the correct global history into the register. - globalHistory = correct_gh; - - // Get the local predictor's current prediction, remove the incorrect - // update, and update the local predictor ++ // Get the local predictor's current prediction + local_history_idx = calcLocHistIdx(branch_addr); - local_predictor_idx = localHistoryTable[local_history_idx]; - local_predictor_idx = (local_predictor_idx >> 1) & localHistoryMask; - - local_prediction = localCtrs[local_predictor_idx].read(); - local_pred_taken = local_prediction > threshold; - - //Get the global predictor's current prediction, and update the - //global predictor - global_prediction = globalCtrs[globalHistory].read(); - global_pred_taken = global_prediction > threshold; - - //Update the choice predictor to tell it which one was correct - if (local_pred_taken != global_pred_taken) { - //If the local prediction matches the actual outcome, decerement - //the counter. Otherwise increment the counter. 
-     if (local_pred_taken == taken) {
-         choiceCtrs[globalHistory].decrement();
-     } else {
-         choiceCtrs[globalHistory].increment();
++    local_predictor_hist = localHistoryTable[local_history_idx];
++    local_predictor_idx = local_predictor_hist & localHistoryMask;
++
++    // Update the choice predictor to tell it which one was correct if
++    // there was a prediction.
++    if (bp_history) {
++        BPHistory *history = static_cast<BPHistory *>(bp_history);
++        if (history->localPredTaken != history->globalPredTaken) {
++            // If the local prediction matches the actual outcome,
++            // decrement the counter.  Otherwise increment the
++            // counter.
++            if (history->localPredTaken == taken) {
++                choiceCtrs[globalHistory].decrement();
++            } else if (history->globalPredTaken == taken) {
++                choiceCtrs[globalHistory].increment();
++            }
+         }
++
++        // We're done with this history, now delete it.
++        delete history;
+     }
+
-     if (taken) {
-         assert(globalHistory < globalPredictorSize &&
-                local_predictor_idx < localPredictorSize);
++    assert(globalHistory < globalPredictorSize &&
++           local_predictor_idx < localPredictorSize);
+
++    // Update the counters and local history with the proper
++    // resolution of the branch.  Global history is updated
++    // speculatively and restored upon squash() calls, so it does not
++    // need to be updated.
++    if (taken) {
+         localCtrs[local_predictor_idx].increment();
+         globalCtrs[globalHistory].increment();
+
-         globalHistory = (globalHistory << 1) | 1;
-         globalHistory = globalHistory & globalHistoryMask;
-
-         localHistoryTable[local_history_idx] |= 1;
++        updateLocalHistTaken(local_history_idx);
+     } else {
-         assert(globalHistory < globalPredictorSize &&
-                local_predictor_idx < localPredictorSize);
-
+         localCtrs[local_predictor_idx].decrement();
+         globalCtrs[globalHistory].decrement();
+
-         globalHistory = (globalHistory << 1);
-         globalHistory = globalHistory & globalHistoryMask;
-
-         localHistoryTable[local_history_idx] &= ~1;
++        updateLocalHistNotTaken(local_history_idx);
+     }
+ }
++
++void
++TournamentBP::squash(void *bp_history)
++{
++    BPHistory *history = static_cast<BPHistory *>(bp_history);
++
++    // Restore global history to state prior to this branch.
++    globalHistory = history->globalHistory;
++
++    // Delete this BPHistory now that we're done with it.
++    delete history;
++}
++
++#ifdef DEBUG
++int
++TournamentBP::BPHistory::newCount = 0;
++#endif
diff --cc src/cpu/o3/tournament_pred.hh
index e16600090,000000000..92402adc6
mode 100644,000000..100644
--- a/src/cpu/o3/tournament_pred.hh
+++ b/src/cpu/o3/tournament_pred.hh
@@@ -1,146 -1,0 +1,218 @@@
+/*
- * Copyright (c) 2004-2005 The Regents of The University of Michigan
++ * Copyright (c) 2004-2006 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
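The ownership rule for the new bp_history pointer deserves spelling out: every lookup() or uncondBr() allocates one BPHistory, and exactly one of update() or squash() must later free it. A sketch of the assumed calling sequence (the sizes are made up but satisfy the constructor's power-of-two checks; the real caller is the branch prediction unit, which is not part of this excerpt):

    void tournamentSketch()
    {
        // local: size, ctr bits, history table size, history bits;
        // global: size, ctr bits, history bits; choice: size, ctr bits;
        // then instShiftAmt.  All values here are hypothetical.
        TournamentBP bp(2048, 2, 2048, 11,
                        8192, 2, 13,
                        8192, 2,
                        2);

        Addr branch_pc = 0x400;   // hypothetical branch PC
        void *bp_history = NULL;

        // Predict: allocates a BPHistory recording the pre-branch state.
        bool pred_taken = bp.lookup(branch_pc, bp_history);
        // pred_taken would steer fetch; not shown here.

        // Had the branch been squashed, the caller would instead call
        // bp.squash(bp_history) to restore globalHistory and free it.
        // Here the branch resolves: train the tables, free the history.
        bp.update(branch_pc, /* actually taken */ true, bp_history);
    }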
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Kevin Lim
+ */
+
+#ifndef __CPU_O3_TOURNAMENT_PRED_HH__
+#define __CPU_O3_TOURNAMENT_PRED_HH__
+
+// For Addr type.
+#include "arch/isa_traits.hh"
+#include "cpu/o3/sat_counter.hh"
+#include <vector>
+
++/**
++ * Implements a tournament branch predictor, hopefully identical to the one
++ * used in the 21264.  It has a local predictor, which uses a local history
++ * table to index into a table of counters, and a global predictor, which
++ * uses a global history to index into a table of counters.  A choice
++ * predictor chooses between the two.  Only the global history register
++ * is speculatively updated; the rest are updated upon branches committing
++ * or misspeculating.
++ */
+class TournamentBP
+{
+  public:
+    /**
+     * Default branch predictor constructor.
+     */
+    TournamentBP(unsigned localPredictorSize,
+                 unsigned localCtrBits,
+                 unsigned localHistoryTableSize,
+                 unsigned localHistoryBits,
+                 unsigned globalPredictorSize,
+                 unsigned globalCtrBits,
+                 unsigned globalHistoryBits,
+                 unsigned choicePredictorSize,
+                 unsigned choiceCtrBits,
+                 unsigned instShiftAmt);
+
+    /**
+     * Looks up the given address in the branch predictor and returns
-     * a true/false value as to whether it is taken.
++     * a true/false value as to whether it is taken.  Also creates a
++     * BPHistory object to store any state it will need on squash/update.
+     * @param branch_addr The address of the branch to look up.
++     * @param bp_history Pointer that will be set to the BPHistory object.
+     * @return Whether or not the branch is taken.
+     */
-     bool lookup(Addr &branch_addr);
++    bool lookup(Addr &branch_addr, void * &bp_history);
++
++    /**
++     * Records that there was an unconditional branch, and modifies
++     * the bp history to point to an object that has the previous
++     * global history stored in it.
++     * @param bp_history Pointer that will be set to the BPHistory object.
++     */
++    void uncondBr(void * &bp_history);
+
+    /**
+     * Updates the branch predictor with the actual result of a branch.
+     * @param branch_addr The address of the branch to update.
+     * @param taken Whether or not the branch was taken.
++     * @param bp_history Pointer to the BPHistory object that was created
++     * when the branch was predicted.
++     */
++    void update(Addr &branch_addr, bool taken, void *bp_history);
++
++    /**
++     * Restores the global branch history on a squash.
++     * @param bp_history Pointer to the BPHistory object that has the
++     * previous global branch history in it.
+     */
-     void update(Addr &branch_addr, unsigned global_history, bool taken);
++    void squash(void *bp_history);
+
++    /** Returns the global history.
*/ + inline unsigned readGlobalHist() { return globalHistory; } + + private: - ++ /** ++ * Returns if the branch should be taken or not, given a counter ++ * value. ++ * @param count The counter value. ++ */ + inline bool getPrediction(uint8_t &count); + ++ /** ++ * Returns the local history index, given a branch address. ++ * @param branch_addr The branch's PC address. ++ */ + inline unsigned calcLocHistIdx(Addr &branch_addr); + - inline void updateHistoriesTaken(unsigned local_history_idx); ++ /** Updates global history as taken. */ ++ inline void updateGlobalHistTaken(); + - inline void updateHistoriesNotTaken(unsigned local_history_idx); ++ /** Updates global history as not taken. */ ++ inline void updateGlobalHistNotTaken(); ++ ++ /** ++ * Updates local histories as taken. ++ * @param local_history_idx The local history table entry that ++ * will be updated. ++ */ ++ inline void updateLocalHistTaken(unsigned local_history_idx); ++ ++ /** ++ * Updates local histories as not taken. ++ * @param local_history_idx The local history table entry that ++ * will be updated. ++ */ ++ inline void updateLocalHistNotTaken(unsigned local_history_idx); ++ ++ /** ++ * The branch history information that is created upon predicting ++ * a branch. It will be passed back upon updating and squashing, ++ * when the BP can use this information to update/restore its ++ * state properly. ++ */ ++ struct BPHistory { ++#ifdef DEBUG ++ BPHistory() ++ { newCount++; } ++ ~BPHistory() ++ { newCount--; } ++ ++ static int newCount; ++#endif ++ unsigned globalHistory; ++ bool localPredTaken; ++ bool globalPredTaken; ++ bool globalUsed; ++ }; + + /** Local counters. */ + std::vector localCtrs; + + /** Size of the local predictor. */ + unsigned localPredictorSize; + + /** Number of bits of the local predictor's counters. */ + unsigned localCtrBits; + + /** Array of local history table entries. */ + std::vector localHistoryTable; + + /** Size of the local history table. */ + unsigned localHistoryTableSize; + + /** Number of bits for each entry of the local history table. + * @todo Doesn't this come from the size of the local predictor? + */ + unsigned localHistoryBits; + + /** Mask to get the proper local history. */ + unsigned localHistoryMask; + - + /** Array of counters that make up the global predictor. */ + std::vector globalCtrs; + + /** Size of the global predictor. */ + unsigned globalPredictorSize; + + /** Number of bits of the global predictor's counters. */ + unsigned globalCtrBits; + + /** Global history register. */ + unsigned globalHistory; + + /** Number of bits for the global history. */ + unsigned globalHistoryBits; + + /** Mask to get the proper global history. */ + unsigned globalHistoryMask; + - + /** Array of counters that make up the choice predictor. */ + std::vector choiceCtrs; + + /** Size of the choice predictor (identical to the global predictor). */ + unsigned choicePredictorSize; + + /** Number of bits of the choice predictor's counters. */ + unsigned choiceCtrBits; + + /** Number of bits to shift the instruction over to get rid of the word + * offset. + */ + unsigned instShiftAmt; + + /** Threshold for the counter value; above the threshold is taken, + * equal to or below the threshold is not taken. 
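For the threshold just described, it is worth noting what the constructor in tournament_pred.cc actually computes for the common two-bit counter case:

    // threshold = (1 << (localCtrBits - 1)) - 1;  // 2-bit counters: 1
    // threshold = threshold / 2;                  // then halved: 0
    //
    // With the halving, any 2-bit count of 1 or more reads as taken.
    // The first line alone gives the conventional midpoint for an n-bit
    // saturating counter, (1 << (n - 1)) - 1, i.e. 1 for two bits, under
    // which only counts of 2 and 3 would predict taken.  Whether the
    // extra halving is intentional is not established by this change.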
+ */ + unsigned threshold; +}; + +#endif // __CPU_O3_TOURNAMENT_PRED_HH__ diff --cc src/cpu/ozone/cpu_builder.cc index 64aa49c71,000000000..1ab7a4c29 mode 100644,000000..100644 --- a/src/cpu/ozone/cpu_builder.cc +++ b/src/cpu/ozone/cpu_builder.cc @@@ -1,830 -1,0 +1,861 @@@ ++/* ++ * Copyright (c) 2006 The Regents of The University of Michigan ++ * All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are ++ * met: redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer; ++ * redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution; ++ * neither the name of the copyright holders nor the names of its ++ * contributors may be used to endorse or promote products derived from ++ * this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
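The builder files that follow all use the same m5 parameter plumbing: each parameter is declared as a typed template, given a description (and optionally a default) in the INIT block, and finally copied onto the params object inside CREATE_SIM_OBJECT. A minimal sketch of that shape, using a toy object name and assumed type arguments such as Param<Tick> and SimObjectParam<BaseCPU *> (the bare declarations below show only the parameter names):

    BEGIN_DECLARE_SIM_OBJECT_PARAMS(ToyCPU)

        Param<Tick> clock;                  // plain typed parameter
        SimObjectParam<BaseCPU *> checker;  // reference to another SimObject

    END_DECLARE_SIM_OBJECT_PARAMS(ToyCPU)

    BEGIN_INIT_SIM_OBJECT_PARAMS(ToyCPU)

        INIT_PARAM(clock, "clock speed"),               // required
        INIT_PARAM_DFLT(checker, "Checker CPU", NULL)   // optional, defaulted

    END_INIT_SIM_OBJECT_PARAMS(ToyCPU)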
++ */ + +#include + +#include "cpu/checker/cpu.hh" +#include "cpu/inst_seq.hh" +#include "cpu/ozone/cpu.hh" +#include "cpu/ozone/ozone_impl.hh" +#include "cpu/ozone/simple_impl.hh" +#include "cpu/ozone/simple_params.hh" +#include "mem/cache/base_cache.hh" +#include "sim/builder.hh" +#include "sim/process.hh" +#include "sim/sim_object.hh" + +class DerivOzoneCPU : public OzoneCPU +{ + public: + DerivOzoneCPU(SimpleParams *p) + : OzoneCPU(p) + { } +}; + +class SimpleOzoneCPU : public OzoneCPU +{ + public: + SimpleOzoneCPU(SimpleParams *p) + : OzoneCPU(p) + { } +}; + + +//////////////////////////////////////////////////////////////////////// +// +// OzoneCPU Simulation Object +// + +BEGIN_DECLARE_SIM_OBJECT_PARAMS(DerivOzoneCPU) + + Param clock; + Param numThreads; + +#if FULL_SYSTEM +SimObjectParam system; +Param cpu_id; +SimObjectParam itb; +SimObjectParam dtb; +#else +SimObjectVectorParam workload; +//SimObjectParam page_table; +#endif // FULL_SYSTEM + +SimObjectParam mem; + +SimObjectParam checker; + +Param max_insts_any_thread; +Param max_insts_all_threads; +Param max_loads_any_thread; +Param max_loads_all_threads; + +SimObjectParam icache; +SimObjectParam dcache; + +Param cachePorts; +Param width; +Param frontEndWidth; +Param backEndWidth; +Param backEndSquashLatency; +Param backEndLatency; +Param maxInstBufferSize; +Param numPhysicalRegs; +Param maxOutstandingMemOps; + +Param decodeToFetchDelay; +Param renameToFetchDelay; +Param iewToFetchDelay; +Param commitToFetchDelay; +Param fetchWidth; + +Param renameToDecodeDelay; +Param iewToDecodeDelay; +Param commitToDecodeDelay; +Param fetchToDecodeDelay; +Param decodeWidth; + +Param iewToRenameDelay; +Param commitToRenameDelay; +Param decodeToRenameDelay; +Param renameWidth; + +Param commitToIEWDelay; +Param renameToIEWDelay; +Param issueToExecuteDelay; +Param issueWidth; +Param executeWidth; +Param executeIntWidth; +Param executeFloatWidth; +Param executeBranchWidth; +Param executeMemoryWidth; + +Param iewToCommitDelay; +Param renameToROBDelay; +Param commitWidth; +Param squashWidth; + ++Param predType; +Param localPredictorSize; +Param localCtrBits; +Param localHistoryTableSize; +Param localHistoryBits; +Param globalPredictorSize; +Param globalCtrBits; +Param globalHistoryBits; +Param choicePredictorSize; +Param choiceCtrBits; + +Param BTBEntries; +Param BTBTagSize; + +Param RASSize; + +Param LQEntries; +Param SQEntries; +Param LFSTSize; +Param SSITSize; + +Param numPhysIntRegs; +Param numPhysFloatRegs; +Param numIQEntries; +Param numROBEntries; + +Param decoupledFrontEnd; +Param dispatchWidth; +Param wbWidth; + +Param smtNumFetchingThreads; +Param smtFetchPolicy; +Param smtLSQPolicy; +Param smtLSQThreshold; +Param smtIQPolicy; +Param smtIQThreshold; +Param smtROBPolicy; +Param smtROBThreshold; +Param smtCommitPolicy; + +Param instShiftAmt; + +Param defer_registration; + +Param function_trace; +Param function_trace_start; + +END_DECLARE_SIM_OBJECT_PARAMS(DerivOzoneCPU) + +BEGIN_INIT_SIM_OBJECT_PARAMS(DerivOzoneCPU) + + INIT_PARAM(clock, "clock speed"), + INIT_PARAM(numThreads, "number of HW thread contexts"), + +#if FULL_SYSTEM + INIT_PARAM(system, "System object"), + INIT_PARAM(cpu_id, "processor ID"), + INIT_PARAM(itb, "Instruction translation buffer"), + INIT_PARAM(dtb, "Data translation buffer"), +#else + INIT_PARAM(workload, "Processes to run"), +// INIT_PARAM(page_table, "Page table"), +#endif // FULL_SYSTEM + + INIT_PARAM_DFLT(mem, "Memory", NULL), + + INIT_PARAM_DFLT(checker, "Checker CPU", NULL), + + 
INIT_PARAM_DFLT(max_insts_any_thread, + "Terminate when any thread reaches this inst count", + 0), + INIT_PARAM_DFLT(max_insts_all_threads, + "Terminate when all threads have reached" + "this inst count", + 0), + INIT_PARAM_DFLT(max_loads_any_thread, + "Terminate when any thread reaches this load count", + 0), + INIT_PARAM_DFLT(max_loads_all_threads, + "Terminate when all threads have reached this load" + "count", + 0), + + INIT_PARAM_DFLT(icache, "L1 instruction cache", NULL), + INIT_PARAM_DFLT(dcache, "L1 data cache", NULL), + + INIT_PARAM_DFLT(cachePorts, "Cache Ports", 200), + INIT_PARAM_DFLT(width, "Width", 1), + INIT_PARAM_DFLT(frontEndWidth, "Front end width", 1), + INIT_PARAM_DFLT(backEndWidth, "Back end width", 1), + INIT_PARAM_DFLT(backEndSquashLatency, "Back end squash latency", 1), + INIT_PARAM_DFLT(backEndLatency, "Back end latency", 1), + INIT_PARAM_DFLT(maxInstBufferSize, "Maximum instruction buffer size", 16), + INIT_PARAM(numPhysicalRegs, "Number of physical registers"), + INIT_PARAM_DFLT(maxOutstandingMemOps, "Maximum outstanding memory operations", 4), + + INIT_PARAM(decodeToFetchDelay, "Decode to fetch delay"), + INIT_PARAM(renameToFetchDelay, "Rename to fetch delay"), + INIT_PARAM(iewToFetchDelay, "Issue/Execute/Writeback to fetch" + "delay"), + INIT_PARAM(commitToFetchDelay, "Commit to fetch delay"), + INIT_PARAM(fetchWidth, "Fetch width"), + INIT_PARAM(renameToDecodeDelay, "Rename to decode delay"), + INIT_PARAM(iewToDecodeDelay, "Issue/Execute/Writeback to decode" + "delay"), + INIT_PARAM(commitToDecodeDelay, "Commit to decode delay"), + INIT_PARAM(fetchToDecodeDelay, "Fetch to decode delay"), + INIT_PARAM(decodeWidth, "Decode width"), + + INIT_PARAM(iewToRenameDelay, "Issue/Execute/Writeback to rename" + "delay"), + INIT_PARAM(commitToRenameDelay, "Commit to rename delay"), + INIT_PARAM(decodeToRenameDelay, "Decode to rename delay"), + INIT_PARAM(renameWidth, "Rename width"), + + INIT_PARAM(commitToIEWDelay, "Commit to " + "Issue/Execute/Writeback delay"), + INIT_PARAM(renameToIEWDelay, "Rename to " + "Issue/Execute/Writeback delay"), + INIT_PARAM(issueToExecuteDelay, "Issue to execute delay (internal" + "to the IEW stage)"), + INIT_PARAM(issueWidth, "Issue width"), + INIT_PARAM(executeWidth, "Execute width"), + INIT_PARAM(executeIntWidth, "Integer execute width"), + INIT_PARAM(executeFloatWidth, "Floating point execute width"), + INIT_PARAM(executeBranchWidth, "Branch execute width"), + INIT_PARAM(executeMemoryWidth, "Memory execute width"), + + INIT_PARAM(iewToCommitDelay, "Issue/Execute/Writeback to commit " + "delay"), + INIT_PARAM(renameToROBDelay, "Rename to reorder buffer delay"), + INIT_PARAM(commitWidth, "Commit width"), + INIT_PARAM(squashWidth, "Squash width"), + ++ INIT_PARAM(predType, "Type of branch predictor ('local', 'tournament')"), + INIT_PARAM(localPredictorSize, "Size of local predictor"), + INIT_PARAM(localCtrBits, "Bits per counter"), + INIT_PARAM(localHistoryTableSize, "Size of local history table"), + INIT_PARAM(localHistoryBits, "Bits for the local history"), + INIT_PARAM(globalPredictorSize, "Size of global predictor"), + INIT_PARAM(globalCtrBits, "Bits per counter"), + INIT_PARAM(globalHistoryBits, "Bits of history"), + INIT_PARAM(choicePredictorSize, "Size of choice predictor"), + INIT_PARAM(choiceCtrBits, "Bits of choice counters"), + + INIT_PARAM(BTBEntries, "Number of BTB entries"), + INIT_PARAM(BTBTagSize, "Size of the BTB tags, in bits"), + + INIT_PARAM(RASSize, "RAS size"), + + INIT_PARAM(LQEntries, "Number of load queue entries"), 
+ INIT_PARAM(SQEntries, "Number of store queue entries"), + INIT_PARAM(LFSTSize, "Last fetched store table size"), + INIT_PARAM(SSITSize, "Store set ID table size"), + + INIT_PARAM(numPhysIntRegs, "Number of physical integer registers"), + INIT_PARAM(numPhysFloatRegs, "Number of physical floating point " + "registers"), + INIT_PARAM(numIQEntries, "Number of instruction queue entries"), + INIT_PARAM(numROBEntries, "Number of reorder buffer entries"), + + INIT_PARAM_DFLT(decoupledFrontEnd, "Decoupled front end", true), + INIT_PARAM_DFLT(dispatchWidth, "Dispatch width", 0), + INIT_PARAM_DFLT(wbWidth, "Writeback width", 0), + + INIT_PARAM_DFLT(smtNumFetchingThreads, "SMT Number of Fetching Threads", 1), + INIT_PARAM_DFLT(smtFetchPolicy, "SMT Fetch Policy", "SingleThread"), + INIT_PARAM_DFLT(smtLSQPolicy, "SMT LSQ Sharing Policy", "Partitioned"), + INIT_PARAM_DFLT(smtLSQThreshold,"SMT LSQ Threshold", 100), + INIT_PARAM_DFLT(smtIQPolicy, "SMT IQ Policy", "Partitioned"), + INIT_PARAM_DFLT(smtIQThreshold, "SMT IQ Threshold", 100), + INIT_PARAM_DFLT(smtROBPolicy, "SMT ROB Sharing Policy", "Partitioned"), + INIT_PARAM_DFLT(smtROBThreshold,"SMT ROB Threshold", 100), + INIT_PARAM_DFLT(smtCommitPolicy,"SMT Commit Fetch Policy", "RoundRobin"), + + INIT_PARAM(instShiftAmt, "Number of bits to shift instructions by"), + INIT_PARAM(defer_registration, "defer system registration (for sampling)"), + + INIT_PARAM(function_trace, "Enable function trace"), + INIT_PARAM(function_trace_start, "Cycle to start function trace") + +END_INIT_SIM_OBJECT_PARAMS(DerivOzoneCPU) + +CREATE_SIM_OBJECT(DerivOzoneCPU) +{ + DerivOzoneCPU *cpu; + +#if FULL_SYSTEM + // Full-system only supports a single thread for the moment. + int actual_num_threads = 1; +#else + // In non-full-system mode, we infer the number of threads from + // the workload if it's not explicitly specified. + int actual_num_threads = + numThreads.isValid() ? numThreads : workload.size(); + + if (workload.size() == 0) { + fatal("Must specify at least one workload!"); + } + +#endif + + SimpleParams *params = new SimpleParams; + + params->clock = clock; + + params->name = getInstanceName(); + params->numberOfThreads = actual_num_threads; + +#if FULL_SYSTEM + params->system = system; + params->cpu_id = cpu_id; + params->itb = itb; + params->dtb = dtb; +#else + params->workload = workload; +// params->pTable = page_table; +#endif // FULL_SYSTEM + + params->mem = mem; + params->checker = checker; + params->max_insts_any_thread = max_insts_any_thread; + params->max_insts_all_threads = max_insts_all_threads; + params->max_loads_any_thread = max_loads_any_thread; + params->max_loads_all_threads = max_loads_all_threads; + + // + // Caches + // + params->icacheInterface = icache ? icache->getInterface() : NULL; + params->dcacheInterface = dcache ? 
dcache->getInterface() : NULL; + params->cachePorts = cachePorts; + + params->width = width; + params->frontEndWidth = frontEndWidth; + params->backEndWidth = backEndWidth; + params->backEndSquashLatency = backEndSquashLatency; + params->backEndLatency = backEndLatency; + params->maxInstBufferSize = maxInstBufferSize; + params->numPhysicalRegs = numPhysIntRegs + numPhysFloatRegs; + params->maxOutstandingMemOps = maxOutstandingMemOps; + + params->decodeToFetchDelay = decodeToFetchDelay; + params->renameToFetchDelay = renameToFetchDelay; + params->iewToFetchDelay = iewToFetchDelay; + params->commitToFetchDelay = commitToFetchDelay; + params->fetchWidth = fetchWidth; + + params->renameToDecodeDelay = renameToDecodeDelay; + params->iewToDecodeDelay = iewToDecodeDelay; + params->commitToDecodeDelay = commitToDecodeDelay; + params->fetchToDecodeDelay = fetchToDecodeDelay; + params->decodeWidth = decodeWidth; + + params->iewToRenameDelay = iewToRenameDelay; + params->commitToRenameDelay = commitToRenameDelay; + params->decodeToRenameDelay = decodeToRenameDelay; + params->renameWidth = renameWidth; + + params->commitToIEWDelay = commitToIEWDelay; + params->renameToIEWDelay = renameToIEWDelay; + params->issueToExecuteDelay = issueToExecuteDelay; + params->issueWidth = issueWidth; + params->executeWidth = executeWidth; + params->executeIntWidth = executeIntWidth; + params->executeFloatWidth = executeFloatWidth; + params->executeBranchWidth = executeBranchWidth; + params->executeMemoryWidth = executeMemoryWidth; + + params->iewToCommitDelay = iewToCommitDelay; + params->renameToROBDelay = renameToROBDelay; + params->commitWidth = commitWidth; + params->squashWidth = squashWidth; + - ++ params->predType = predType; + params->localPredictorSize = localPredictorSize; + params->localCtrBits = localCtrBits; + params->localHistoryTableSize = localHistoryTableSize; + params->localHistoryBits = localHistoryBits; + params->globalPredictorSize = globalPredictorSize; + params->globalCtrBits = globalCtrBits; + params->globalHistoryBits = globalHistoryBits; + params->choicePredictorSize = choicePredictorSize; + params->choiceCtrBits = choiceCtrBits; + + params->BTBEntries = BTBEntries; + params->BTBTagSize = BTBTagSize; + + params->RASSize = RASSize; + + params->LQEntries = LQEntries; + params->SQEntries = SQEntries; + + params->SSITSize = SSITSize; + params->LFSTSize = LFSTSize; + + params->numPhysIntRegs = numPhysIntRegs; + params->numPhysFloatRegs = numPhysFloatRegs; + params->numIQEntries = numIQEntries; + params->numROBEntries = numROBEntries; + + params->decoupledFrontEnd = decoupledFrontEnd; + params->dispatchWidth = dispatchWidth; + params->wbWidth = wbWidth; + + params->smtNumFetchingThreads = smtNumFetchingThreads; + params->smtFetchPolicy = smtFetchPolicy; + params->smtIQPolicy = smtIQPolicy; + params->smtLSQPolicy = smtLSQPolicy; + params->smtLSQThreshold = smtLSQThreshold; + params->smtROBPolicy = smtROBPolicy; + params->smtROBThreshold = smtROBThreshold; + params->smtCommitPolicy = smtCommitPolicy; + + params->instShiftAmt = 2; + + params->deferRegistration = defer_registration; + + params->functionTrace = function_trace; + params->functionTraceStart = function_trace_start; + + cpu = new DerivOzoneCPU(params); + + return cpu; +} + +REGISTER_SIM_OBJECT("DerivOzoneCPU", DerivOzoneCPU) + + + +//////////////////////////////////////////////////////////////////////// +// +// OzoneCPU Simulation Object +// + +BEGIN_DECLARE_SIM_OBJECT_PARAMS(SimpleOzoneCPU) + + Param clock; + Param numThreads; + +#if 
FULL_SYSTEM +SimObjectParam system; +Param cpu_id; +SimObjectParam itb; +SimObjectParam dtb; +#else +SimObjectVectorParam workload; +//SimObjectParam page_table; +#endif // FULL_SYSTEM + +SimObjectParam mem; + +SimObjectParam checker; + +Param max_insts_any_thread; +Param max_insts_all_threads; +Param max_loads_any_thread; +Param max_loads_all_threads; + +SimObjectParam icache; +SimObjectParam dcache; + +Param cachePorts; +Param width; +Param frontEndWidth; +Param backEndWidth; +Param backEndSquashLatency; +Param backEndLatency; +Param maxInstBufferSize; +Param numPhysicalRegs; + +Param decodeToFetchDelay; +Param renameToFetchDelay; +Param iewToFetchDelay; +Param commitToFetchDelay; +Param fetchWidth; + +Param renameToDecodeDelay; +Param iewToDecodeDelay; +Param commitToDecodeDelay; +Param fetchToDecodeDelay; +Param decodeWidth; + +Param iewToRenameDelay; +Param commitToRenameDelay; +Param decodeToRenameDelay; +Param renameWidth; + +Param commitToIEWDelay; +Param renameToIEWDelay; +Param issueToExecuteDelay; +Param issueWidth; +Param executeWidth; +Param executeIntWidth; +Param executeFloatWidth; +Param executeBranchWidth; +Param executeMemoryWidth; + +Param iewToCommitDelay; +Param renameToROBDelay; +Param commitWidth; +Param squashWidth; + ++Param predType; +Param localPredictorSize; +Param localCtrBits; +Param localHistoryTableSize; +Param localHistoryBits; +Param globalPredictorSize; +Param globalCtrBits; +Param globalHistoryBits; +Param choicePredictorSize; +Param choiceCtrBits; + +Param BTBEntries; +Param BTBTagSize; + +Param RASSize; + +Param LQEntries; +Param SQEntries; +Param LFSTSize; +Param SSITSize; + +Param numPhysIntRegs; +Param numPhysFloatRegs; +Param numIQEntries; +Param numROBEntries; + +Param decoupledFrontEnd; +Param dispatchWidth; +Param wbWidth; + +Param smtNumFetchingThreads; +Param smtFetchPolicy; +Param smtLSQPolicy; +Param smtLSQThreshold; +Param smtIQPolicy; +Param smtIQThreshold; +Param smtROBPolicy; +Param smtROBThreshold; +Param smtCommitPolicy; + +Param instShiftAmt; + +Param defer_registration; + +Param function_trace; +Param function_trace_start; + +END_DECLARE_SIM_OBJECT_PARAMS(SimpleOzoneCPU) + +BEGIN_INIT_SIM_OBJECT_PARAMS(SimpleOzoneCPU) + + INIT_PARAM(clock, "clock speed"), + INIT_PARAM(numThreads, "number of HW thread contexts"), + +#if FULL_SYSTEM + INIT_PARAM(system, "System object"), + INIT_PARAM(cpu_id, "processor ID"), + INIT_PARAM(itb, "Instruction translation buffer"), + INIT_PARAM(dtb, "Data translation buffer"), +#else + INIT_PARAM(workload, "Processes to run"), +// INIT_PARAM(page_table, "Page table"), +#endif // FULL_SYSTEM + + INIT_PARAM_DFLT(mem, "Memory", NULL), + + INIT_PARAM_DFLT(checker, "Checker CPU", NULL), + + INIT_PARAM_DFLT(max_insts_any_thread, + "Terminate when any thread reaches this inst count", + 0), + INIT_PARAM_DFLT(max_insts_all_threads, + "Terminate when all threads have reached" + "this inst count", + 0), + INIT_PARAM_DFLT(max_loads_any_thread, + "Terminate when any thread reaches this load count", + 0), + INIT_PARAM_DFLT(max_loads_all_threads, + "Terminate when all threads have reached this load" + "count", + 0), + + INIT_PARAM_DFLT(icache, "L1 instruction cache", NULL), + INIT_PARAM_DFLT(dcache, "L1 data cache", NULL), + + INIT_PARAM_DFLT(cachePorts, "Cache Ports", 200), + INIT_PARAM_DFLT(width, "Width", 1), + INIT_PARAM_DFLT(frontEndWidth, "Front end width", 1), + INIT_PARAM_DFLT(backEndWidth, "Back end width", 1), + INIT_PARAM_DFLT(backEndSquashLatency, "Back end squash latency", 1), + 
INIT_PARAM_DFLT(backEndLatency, "Back end latency", 1), + INIT_PARAM_DFLT(maxInstBufferSize, "Maximum instruction buffer size", 16), + INIT_PARAM(numPhysicalRegs, "Number of physical registers"), + + INIT_PARAM(decodeToFetchDelay, "Decode to fetch delay"), + INIT_PARAM(renameToFetchDelay, "Rename to fetch delay"), + INIT_PARAM(iewToFetchDelay, "Issue/Execute/Writeback to fetch" + "delay"), + INIT_PARAM(commitToFetchDelay, "Commit to fetch delay"), + INIT_PARAM(fetchWidth, "Fetch width"), + INIT_PARAM(renameToDecodeDelay, "Rename to decode delay"), + INIT_PARAM(iewToDecodeDelay, "Issue/Execute/Writeback to decode" + "delay"), + INIT_PARAM(commitToDecodeDelay, "Commit to decode delay"), + INIT_PARAM(fetchToDecodeDelay, "Fetch to decode delay"), + INIT_PARAM(decodeWidth, "Decode width"), + + INIT_PARAM(iewToRenameDelay, "Issue/Execute/Writeback to rename" + "delay"), + INIT_PARAM(commitToRenameDelay, "Commit to rename delay"), + INIT_PARAM(decodeToRenameDelay, "Decode to rename delay"), + INIT_PARAM(renameWidth, "Rename width"), + + INIT_PARAM(commitToIEWDelay, "Commit to " + "Issue/Execute/Writeback delay"), + INIT_PARAM(renameToIEWDelay, "Rename to " + "Issue/Execute/Writeback delay"), + INIT_PARAM(issueToExecuteDelay, "Issue to execute delay (internal" + "to the IEW stage)"), + INIT_PARAM(issueWidth, "Issue width"), + INIT_PARAM(executeWidth, "Execute width"), + INIT_PARAM(executeIntWidth, "Integer execute width"), + INIT_PARAM(executeFloatWidth, "Floating point execute width"), + INIT_PARAM(executeBranchWidth, "Branch execute width"), + INIT_PARAM(executeMemoryWidth, "Memory execute width"), + + INIT_PARAM(iewToCommitDelay, "Issue/Execute/Writeback to commit " + "delay"), + INIT_PARAM(renameToROBDelay, "Rename to reorder buffer delay"), + INIT_PARAM(commitWidth, "Commit width"), + INIT_PARAM(squashWidth, "Squash width"), + ++ INIT_PARAM(predType, "Type of branch predictor ('local', 'tournament')"), + INIT_PARAM(localPredictorSize, "Size of local predictor"), + INIT_PARAM(localCtrBits, "Bits per counter"), + INIT_PARAM(localHistoryTableSize, "Size of local history table"), + INIT_PARAM(localHistoryBits, "Bits for the local history"), + INIT_PARAM(globalPredictorSize, "Size of global predictor"), + INIT_PARAM(globalCtrBits, "Bits per counter"), + INIT_PARAM(globalHistoryBits, "Bits of history"), + INIT_PARAM(choicePredictorSize, "Size of choice predictor"), + INIT_PARAM(choiceCtrBits, "Bits of choice counters"), + + INIT_PARAM(BTBEntries, "Number of BTB entries"), + INIT_PARAM(BTBTagSize, "Size of the BTB tags, in bits"), + + INIT_PARAM(RASSize, "RAS size"), + + INIT_PARAM(LQEntries, "Number of load queue entries"), + INIT_PARAM(SQEntries, "Number of store queue entries"), + INIT_PARAM(LFSTSize, "Last fetched store table size"), + INIT_PARAM(SSITSize, "Store set ID table size"), + + INIT_PARAM(numPhysIntRegs, "Number of physical integer registers"), + INIT_PARAM(numPhysFloatRegs, "Number of physical floating point " + "registers"), + INIT_PARAM(numIQEntries, "Number of instruction queue entries"), + INIT_PARAM(numROBEntries, "Number of reorder buffer entries"), + + INIT_PARAM_DFLT(decoupledFrontEnd, "Decoupled front end", true), + INIT_PARAM_DFLT(dispatchWidth, "Dispatch width", 0), + INIT_PARAM_DFLT(wbWidth, "Writeback width", 0), + + INIT_PARAM_DFLT(smtNumFetchingThreads, "SMT Number of Fetching Threads", 1), + INIT_PARAM_DFLT(smtFetchPolicy, "SMT Fetch Policy", "SingleThread"), + INIT_PARAM_DFLT(smtLSQPolicy, "SMT LSQ Sharing Policy", "Partitioned"), + 
INIT_PARAM_DFLT(smtLSQThreshold,"SMT LSQ Threshold", 100), + INIT_PARAM_DFLT(smtIQPolicy, "SMT IQ Policy", "Partitioned"), + INIT_PARAM_DFLT(smtIQThreshold, "SMT IQ Threshold", 100), + INIT_PARAM_DFLT(smtROBPolicy, "SMT ROB Sharing Policy", "Partitioned"), + INIT_PARAM_DFLT(smtROBThreshold,"SMT ROB Threshold", 100), + INIT_PARAM_DFLT(smtCommitPolicy,"SMT Commit Fetch Policy", "RoundRobin"), + + INIT_PARAM(instShiftAmt, "Number of bits to shift instructions by"), + INIT_PARAM(defer_registration, "defer system registration (for sampling)"), + + INIT_PARAM(function_trace, "Enable function trace"), + INIT_PARAM(function_trace_start, "Cycle to start function trace") + +END_INIT_SIM_OBJECT_PARAMS(SimpleOzoneCPU) + +CREATE_SIM_OBJECT(SimpleOzoneCPU) +{ + SimpleOzoneCPU *cpu; + +#if FULL_SYSTEM + // Full-system only supports a single thread for the moment. + int actual_num_threads = 1; +#else + // In non-full-system mode, we infer the number of threads from + // the workload if it's not explicitly specified. + int actual_num_threads = + numThreads.isValid() ? numThreads : workload.size(); + + if (workload.size() == 0) { + fatal("Must specify at least one workload!"); + } + +#endif + + SimpleParams *params = new SimpleParams; + + params->clock = clock; + + params->name = getInstanceName(); + params->numberOfThreads = actual_num_threads; + +#if FULL_SYSTEM + params->system = system; + params->cpu_id = cpu_id; + params->itb = itb; + params->dtb = dtb; +#else + params->workload = workload; +// params->pTable = page_table; +#endif // FULL_SYSTEM + + params->mem = mem; + params->checker = checker; + params->max_insts_any_thread = max_insts_any_thread; + params->max_insts_all_threads = max_insts_all_threads; + params->max_loads_any_thread = max_loads_any_thread; + params->max_loads_all_threads = max_loads_all_threads; + + // + // Caches + // + params->icacheInterface = icache ? icache->getInterface() : NULL; + params->dcacheInterface = dcache ? 
dcache->getInterface() : NULL; + params->cachePorts = cachePorts; + + params->width = width; + params->frontEndWidth = frontEndWidth; + params->backEndWidth = backEndWidth; + params->backEndSquashLatency = backEndSquashLatency; + params->backEndLatency = backEndLatency; + params->maxInstBufferSize = maxInstBufferSize; + params->numPhysicalRegs = numPhysIntRegs + numPhysFloatRegs; + + params->decodeToFetchDelay = decodeToFetchDelay; + params->renameToFetchDelay = renameToFetchDelay; + params->iewToFetchDelay = iewToFetchDelay; + params->commitToFetchDelay = commitToFetchDelay; + params->fetchWidth = fetchWidth; + + params->renameToDecodeDelay = renameToDecodeDelay; + params->iewToDecodeDelay = iewToDecodeDelay; + params->commitToDecodeDelay = commitToDecodeDelay; + params->fetchToDecodeDelay = fetchToDecodeDelay; + params->decodeWidth = decodeWidth; + + params->iewToRenameDelay = iewToRenameDelay; + params->commitToRenameDelay = commitToRenameDelay; + params->decodeToRenameDelay = decodeToRenameDelay; + params->renameWidth = renameWidth; + + params->commitToIEWDelay = commitToIEWDelay; + params->renameToIEWDelay = renameToIEWDelay; + params->issueToExecuteDelay = issueToExecuteDelay; + params->issueWidth = issueWidth; + params->executeWidth = executeWidth; + params->executeIntWidth = executeIntWidth; + params->executeFloatWidth = executeFloatWidth; + params->executeBranchWidth = executeBranchWidth; + params->executeMemoryWidth = executeMemoryWidth; + + params->iewToCommitDelay = iewToCommitDelay; + params->renameToROBDelay = renameToROBDelay; + params->commitWidth = commitWidth; + params->squashWidth = squashWidth; + - ++ params->predType = predType; + params->localPredictorSize = localPredictorSize; + params->localCtrBits = localCtrBits; + params->localHistoryTableSize = localHistoryTableSize; + params->localHistoryBits = localHistoryBits; + params->globalPredictorSize = globalPredictorSize; + params->globalCtrBits = globalCtrBits; + params->globalHistoryBits = globalHistoryBits; + params->choicePredictorSize = choicePredictorSize; + params->choiceCtrBits = choiceCtrBits; + + params->BTBEntries = BTBEntries; + params->BTBTagSize = BTBTagSize; + + params->RASSize = RASSize; + + params->LQEntries = LQEntries; + params->SQEntries = SQEntries; + + params->SSITSize = SSITSize; + params->LFSTSize = LFSTSize; + + params->numPhysIntRegs = numPhysIntRegs; + params->numPhysFloatRegs = numPhysFloatRegs; + params->numIQEntries = numIQEntries; + params->numROBEntries = numROBEntries; + + params->decoupledFrontEnd = decoupledFrontEnd; + params->dispatchWidth = dispatchWidth; + params->wbWidth = wbWidth; + + params->smtNumFetchingThreads = smtNumFetchingThreads; + params->smtFetchPolicy = smtFetchPolicy; + params->smtIQPolicy = smtIQPolicy; + params->smtLSQPolicy = smtLSQPolicy; + params->smtLSQThreshold = smtLSQThreshold; + params->smtROBPolicy = smtROBPolicy; + params->smtROBThreshold = smtROBThreshold; + params->smtCommitPolicy = smtCommitPolicy; + + params->instShiftAmt = 2; + + params->deferRegistration = defer_registration; + + params->functionTrace = function_trace; + params->functionTraceStart = function_trace_start; + + cpu = new SimpleOzoneCPU(params); + + return cpu; +} + +REGISTER_SIM_OBJECT("SimpleOzoneCPU", SimpleOzoneCPU) + diff --cc src/cpu/ozone/ozone_impl.hh index d8c545977,000000000..4e0dbc0e1 mode 100644,000000..100644 --- a/src/cpu/ozone/ozone_impl.hh +++ b/src/cpu/ozone/ozone_impl.hh @@@ -1,73 -1,0 +1,73 @@@ +/* + * Copyright (c) 2006 The Regents of The University of Michigan + * 
All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CPU_OZONE_OZONE_IMPL_HH__ +#define __CPU_OZONE_OZONE_IMPL_HH__ + +#include "arch/alpha/isa_traits.hh" +#include "cpu/o3/bpred_unit.hh" +#include "cpu/ozone/front_end.hh" +#include "cpu/ozone/inst_queue.hh" +#include "cpu/ozone/lw_lsq.hh" +#include "cpu/ozone/lw_back_end.hh" +#include "cpu/ozone/null_predictor.hh" +#include "cpu/ozone/dyn_inst.hh" +#include "cpu/ozone/simple_params.hh" + +template +class OzoneCPU; + +template +class OzoneDynInst; + +struct OzoneImpl { + typedef SimpleParams Params; + typedef OzoneCPU OzoneCPU; + typedef OzoneCPU FullCPU; + + // Would like to put these into their own area. +// typedef NullPredictor BranchPred; - typedef TwobitBPredUnit BranchPred; ++ typedef BPredUnit BranchPred; + typedef FrontEnd FrontEnd; + // Will need IQ, LSQ eventually + typedef LWBackEnd BackEnd; + + typedef InstQueue InstQueue; + typedef OzoneLWLSQ LdstQueue; + + typedef OzoneDynInst DynInst; + typedef RefCountingPtr DynInstPtr; + + typedef uint64_t IssueStruct; + + enum { + MaxThreads = 1 + }; +}; + +#endif // __CPU_OZONE_OZONE_IMPL_HH__ diff --cc src/cpu/ozone/simple_impl.hh index 961bf2ea9,000000000..26845271a mode 100644,000000..100644 --- a/src/cpu/ozone/simple_impl.hh +++ b/src/cpu/ozone/simple_impl.hh @@@ -1,69 -1,0 +1,69 @@@ +/* + * Copyright (c) 2006 The Regents of The University of Michigan + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CPU_OZONE_SIMPLE_IMPL_HH__ +#define __CPU_OZONE_SIMPLE_IMPL_HH__ + +#include "arch/isa_traits.hh" +#include "cpu/o3/bpred_unit.hh" +#include "cpu/ozone/cpu.hh" +#include "cpu/ozone/front_end.hh" +#include "cpu/ozone/inorder_back_end.hh" +#include "cpu/ozone/null_predictor.hh" +#include "cpu/ozone/dyn_inst.hh" +#include "cpu/ozone/simple_params.hh" + +//template +//class OzoneCPU; + +template +class OzoneDynInst; + +struct SimpleImpl { + typedef SimpleParams Params; + typedef OzoneCPU OzoneCPU; + typedef OzoneCPU FullCPU; + + // Would like to put these into their own area. +// typedef NullPredictor BranchPred; - typedef TwobitBPredUnit BranchPred; ++ typedef BPredUnit BranchPred; + typedef FrontEnd FrontEnd; + // Will need IQ, LSQ eventually + typedef InorderBackEnd BackEnd; + + typedef OzoneDynInst DynInst; + typedef RefCountingPtr DynInstPtr; + + typedef uint64_t IssueStruct; + + enum { + MaxThreads = 1 + }; +}; + +#endif // __CPU_OZONE_SIMPLE_IMPL_HH__ diff --cc src/cpu/ozone/simple_params.hh index 647da1781,000000000..7b5c6f67b mode 100644,000000..100644 --- a/src/cpu/ozone/simple_params.hh +++ b/src/cpu/ozone/simple_params.hh @@@ -1,165 -1,0 +1,191 @@@ - ++/* ++ * Copyright (c) 2006 The Regents of The University of Michigan ++ * All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are ++ * met: redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer; ++ * redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution; ++ * neither the name of the copyright holders nor the names of its ++ * contributors may be used to endorse or promote products derived from ++ * this software without specific prior written permission. 
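OzoneImpl and SimpleImpl above are both instances of the same traits ("Impl") pattern the rest of the CPU model relies on: a single struct of typedefs is threaded through every templated component, so each component can name the others' concrete types without hard-coding them. A self-contained sketch of the idea, with invented names:

    template <class Impl>
    class ToyFrontEnd
    {
      public:
        // The component pulls concrete types out of the shared bundle.
        typedef typename Impl::DynInstPtr DynInstPtr;
        DynInstPtr fetch();
    };

    template <class Impl>
    class ToyBackEnd
    {
      public:
        typedef typename Impl::DynInstPtr DynInstPtr;
        void execute(DynInstPtr inst);
    };

    struct ToyImpl
    {
        typedef int *DynInstPtr;                // stand-in for a ref-counted ptr
        typedef ToyFrontEnd<ToyImpl> FrontEnd;  // components parameterized by
        typedef ToyBackEnd<ToyImpl> BackEnd;    // the bundle itself
    };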
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ + +#ifndef __CPU_OZONE_SIMPLE_PARAMS_HH__ +#define __CPU_OZONE_SIMPLE_PARAMS_HH__ + +#include "cpu/ozone/cpu.hh" + +//Forward declarations +class AlphaDTB; +class AlphaITB; +class FUPool; +class FunctionalMemory; +class MemInterface; +class PageTable; +class Process; +class System; + +/** + * This file defines the parameters that will be used for the OzoneCPU. + * This must be defined externally so that the Impl can have a params class + * defined that it can pass to all of the individual stages. + */ + +class SimpleParams : public BaseCPU::Params +{ + public: + +#if FULL_SYSTEM + AlphaITB *itb; AlphaDTB *dtb; +#else + std::vector workload; - // Process *process; +#endif // FULL_SYSTEM + + //Page Table + PageTable *pTable; + + FunctionalMemory *mem; + + // + // Caches + // + MemInterface *icacheInterface; + MemInterface *dcacheInterface; + + unsigned cachePorts; + unsigned width; + unsigned frontEndWidth; + unsigned backEndWidth; + unsigned backEndSquashLatency; + unsigned backEndLatency; + unsigned maxInstBufferSize; + unsigned numPhysicalRegs; + unsigned maxOutstandingMemOps; + // + // Fetch + // + unsigned decodeToFetchDelay; + unsigned renameToFetchDelay; + unsigned iewToFetchDelay; + unsigned commitToFetchDelay; + unsigned fetchWidth; + + // + // Decode + // + unsigned renameToDecodeDelay; + unsigned iewToDecodeDelay; + unsigned commitToDecodeDelay; + unsigned fetchToDecodeDelay; + unsigned decodeWidth; + + // + // Rename + // + unsigned iewToRenameDelay; + unsigned commitToRenameDelay; + unsigned decodeToRenameDelay; + unsigned renameWidth; + + // + // IEW + // + unsigned commitToIEWDelay; + unsigned renameToIEWDelay; + unsigned issueToExecuteDelay; + unsigned issueWidth; + unsigned executeWidth; + unsigned executeIntWidth; + unsigned executeFloatWidth; + unsigned executeBranchWidth; + unsigned executeMemoryWidth; + FUPool *fuPool; + + // + // Commit + // + unsigned iewToCommitDelay; + unsigned renameToROBDelay; + unsigned commitWidth; + unsigned squashWidth; + + // + // Branch predictor (BP & BTB) + // ++ std::string predType; + unsigned localPredictorSize; + unsigned localCtrBits; + unsigned localHistoryTableSize; + unsigned localHistoryBits; + unsigned globalPredictorSize; + unsigned globalCtrBits; + unsigned globalHistoryBits; + unsigned choicePredictorSize; + unsigned choiceCtrBits; + + unsigned BTBEntries; + unsigned BTBTagSize; + + unsigned RASSize; + + // + // Load store queue + // + unsigned LQEntries; + unsigned SQEntries; + + // + // Memory dependence + // + unsigned SSITSize; + unsigned LFSTSize; + + // + // Miscellaneous + // + unsigned numPhysIntRegs; + unsigned numPhysFloatRegs; + unsigned numIQEntries; + unsigned numROBEntries; + + bool decoupledFrontEnd; + int 
diff --cc src/python/m5/objects/AlphaFullCPU.py
index d719bf783,000000000..043c3c08f
mode 100644,000000..100644
--- a/src/python/m5/objects/AlphaFullCPU.py
+++ b/src/python/m5/objects/AlphaFullCPU.py
@@@ -1,99 -1,0 +1,100 @@@
+from m5 import *
+from BaseCPU import BaseCPU
+
+class DerivAlphaFullCPU(BaseCPU):
+    type = 'DerivAlphaFullCPU'
+    activity = Param.Unsigned("Initial count")
+    numThreads = Param.Unsigned("number of HW thread contexts")
+
+    if not build_env['FULL_SYSTEM']:
+        mem = Param.FunctionalMemory(NULL, "memory")
+
+    checker = Param.BaseCPU(NULL, "checker")
+
+    cachePorts = Param.Unsigned("Cache Ports")
+
+    decodeToFetchDelay = Param.Unsigned("Decode to fetch delay")
+    renameToFetchDelay = Param.Unsigned("Rename to fetch delay")
+    iewToFetchDelay = Param.Unsigned("Issue/Execute/Writeback to fetch "
+                                     "delay")
+    commitToFetchDelay = Param.Unsigned("Commit to fetch delay")
+    fetchWidth = Param.Unsigned("Fetch width")
+
+    renameToDecodeDelay = Param.Unsigned("Rename to decode delay")
+    iewToDecodeDelay = Param.Unsigned("Issue/Execute/Writeback to decode "
+                                      "delay")
+    commitToDecodeDelay = Param.Unsigned("Commit to decode delay")
+    fetchToDecodeDelay = Param.Unsigned("Fetch to decode delay")
+    decodeWidth = Param.Unsigned("Decode width")
+
+    iewToRenameDelay = Param.Unsigned("Issue/Execute/Writeback to rename "
+                                      "delay")
+    commitToRenameDelay = Param.Unsigned("Commit to rename delay")
+    decodeToRenameDelay = Param.Unsigned("Decode to rename delay")
+    renameWidth = Param.Unsigned("Rename width")
+
+    commitToIEWDelay = Param.Unsigned("Commit to "
+                                      "Issue/Execute/Writeback delay")
+    renameToIEWDelay = Param.Unsigned("Rename to "
+                                      "Issue/Execute/Writeback delay")
+    issueToExecuteDelay = Param.Unsigned("Issue to execute delay (internal "
+                                         "to the IEW stage)")
+    issueWidth = Param.Unsigned("Issue width")
+    executeWidth = Param.Unsigned("Execute width")
+    executeIntWidth = Param.Unsigned("Integer execute width")
+    executeFloatWidth = Param.Unsigned("Floating point execute width")
+    executeBranchWidth = Param.Unsigned("Branch execute width")
+    executeMemoryWidth = Param.Unsigned("Memory execute width")
+    fuPool = Param.FUPool(NULL, "Functional Unit pool")
+
+    iewToCommitDelay = Param.Unsigned("Issue/Execute/Writeback to commit "
+                                      "delay")
+    renameToROBDelay = Param.Unsigned("Rename to reorder buffer delay")
+    commitWidth = Param.Unsigned("Commit width")
+    squashWidth = Param.Unsigned("Squash width")
+    trapLatency = Param.Tick("Trap latency")
+    fetchTrapLatency = Param.Tick("Fetch trap latency")
+
++    predType = Param.String("Branch predictor type ('local', 'tournament')")
+    localPredictorSize = Param.Unsigned("Size of local predictor")
+    localCtrBits = Param.Unsigned("Bits per counter")
+    localHistoryTableSize = Param.Unsigned("Size of local history table")
+    localHistoryBits = Param.Unsigned("Bits for the local history")
+    globalPredictorSize = Param.Unsigned("Size of global predictor")
+    globalCtrBits = Param.Unsigned("Bits per counter")
+    globalHistoryBits = Param.Unsigned("Bits of history")
+    choicePredictorSize = Param.Unsigned("Size of choice predictor")
+    choiceCtrBits = Param.Unsigned("Bits of choice counters")
+
+    BTBEntries = Param.Unsigned("Number of BTB entries")
+    BTBTagSize = Param.Unsigned("Size of the BTB tags, in bits")
+
+    RASSize = Param.Unsigned("RAS size")
+
+    LQEntries = Param.Unsigned("Number of load queue entries")
+    SQEntries = Param.Unsigned("Number of store queue entries")
+    LFSTSize = Param.Unsigned("Last fetched store table size")
+    SSITSize = Param.Unsigned("Store set ID table size")
+
+    numRobs = Param.Unsigned("Number of Reorder Buffers")
+
+    numPhysIntRegs = Param.Unsigned("Number of physical integer registers")
+    numPhysFloatRegs = Param.Unsigned("Number of physical floating point "
+                                      "registers")
+    numIQEntries = Param.Unsigned("Number of instruction queue entries")
+    numROBEntries = Param.Unsigned("Number of reorder buffer entries")
+
+    instShiftAmt = Param.Unsigned("Number of bits to shift instructions by")
+
+    function_trace = Param.Bool(False, "Enable function trace")
+    function_trace_start = Param.Tick(0, "Cycle to start function trace")
+
+    smtNumFetchingThreads = Param.Unsigned("SMT Number of Fetching Threads")
+    smtFetchPolicy = Param.String("SMT Fetch policy")
+    smtLSQPolicy = Param.String("SMT LSQ Sharing Policy")
+    smtLSQThreshold = Param.String("SMT LSQ Threshold Sharing Parameter")
+    smtIQPolicy = Param.String("SMT IQ Sharing Policy")
+    smtIQThreshold = Param.String("SMT IQ Threshold Sharing Parameter")
+    smtROBPolicy = Param.String("SMT ROB Sharing Policy")
+    smtROBThreshold = Param.String("SMT ROB Threshold Sharing Parameter")
+    smtCommitPolicy = Param.String("SMT Commit Policy")
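The predType string added above (and to OzoneCPU.py below) selects the branch predictor implementation by name at construction time rather than by a compile-time typedef, which is what lets the merge swap TwobitBPredUnit for the generic BPredUnit in simple_impl.hh. The dispatch presumably happens when the branch prediction unit is built; the sketch below only illustrates that string-keyed selection style and is not the actual m5 code (all names here are hypothetical):

    #include <cstdint>
    #include <memory>
    #include <stdexcept>
    #include <string>

    // Hypothetical predictor interface, illustration only; the real
    // classes live in the branch prediction sources.
    struct ToyPredictor {
        virtual ~ToyPredictor() {}
        virtual bool lookup(std::uint64_t pc) = 0;
    };

    struct ToyLocalBP : ToyPredictor {
        bool lookup(std::uint64_t) { return true; }  // toy: always taken
    };

    struct ToyTournamentBP : ToyPredictor {
        bool lookup(std::uint64_t) { return true; }  // toy: always taken
    };

    // String-keyed selection in the style the predType parameter
    // suggests: the config names the predictor, construction picks it.
    std::unique_ptr<ToyPredictor> makePredictor(const std::string &predType)
    {
        if (predType == "local")
            return std::unique_ptr<ToyPredictor>(new ToyLocalBP);
        if (predType == "tournament")
            return std::unique_ptr<ToyPredictor>(new ToyTournamentBP);
        throw std::invalid_argument("unknown predType: " + predType);
    }

    int main()
    {
        std::unique_ptr<ToyPredictor> bp = makePredictor("tournament");
        return bp->lookup(0x1000) ? 0 : 1;
    }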
diff --cc src/python/m5/objects/OzoneCPU.py
index 3fca61e28,000000000..ea8b6b537
mode 100644,000000..100644
--- a/src/python/m5/objects/OzoneCPU.py
+++ b/src/python/m5/objects/OzoneCPU.py
@@@ -1,89 -1,0 +1,90 @@@
+from m5 import *
+from BaseCPU import BaseCPU
+
+class DerivOzoneCPU(BaseCPU):
+    type = 'DerivOzoneCPU'
+
+    numThreads = Param.Unsigned("number of HW thread contexts")
+
+    if not build_env['FULL_SYSTEM']:
+        mem = Param.FunctionalMemory(NULL, "memory")
+
+    checker = Param.BaseCPU("Checker CPU")
+
+    width = Param.Unsigned("Width")
+    frontEndWidth = Param.Unsigned("Front end width")
+    backEndWidth = Param.Unsigned("Back end width")
+    backEndSquashLatency = Param.Unsigned("Back end squash latency")
+    backEndLatency = Param.Unsigned("Back end latency")
+    maxInstBufferSize = Param.Unsigned("Maximum instruction buffer size")
+    maxOutstandingMemOps = Param.Unsigned("Maximum number of outstanding memory operations")
+    decodeToFetchDelay = Param.Unsigned("Decode to fetch delay")
+    renameToFetchDelay = Param.Unsigned("Rename to fetch delay")
+    iewToFetchDelay = Param.Unsigned("Issue/Execute/Writeback to fetch "
+                                     "delay")
+    commitToFetchDelay = Param.Unsigned("Commit to fetch delay")
+    fetchWidth = Param.Unsigned("Fetch width")
+
+    renameToDecodeDelay = Param.Unsigned("Rename to decode delay")
+    iewToDecodeDelay = Param.Unsigned("Issue/Execute/Writeback to decode "
+                                      "delay")
+    commitToDecodeDelay = Param.Unsigned("Commit to decode delay")
+    fetchToDecodeDelay = Param.Unsigned("Fetch to decode delay")
+    decodeWidth = Param.Unsigned("Decode width")
+
+    iewToRenameDelay = Param.Unsigned("Issue/Execute/Writeback to rename "
+                                      "delay")
+    commitToRenameDelay = Param.Unsigned("Commit to rename delay")
+    decodeToRenameDelay = Param.Unsigned("Decode to rename delay")
+    renameWidth = Param.Unsigned("Rename width")
+
+    commitToIEWDelay = Param.Unsigned("Commit to "
+                                      "Issue/Execute/Writeback delay")
+    renameToIEWDelay = Param.Unsigned("Rename to "
+                                      "Issue/Execute/Writeback delay")
+    issueToExecuteDelay = Param.Unsigned("Issue to execute delay (internal "
+                                         "to the IEW stage)")
+    issueWidth = Param.Unsigned("Issue width")
+    executeWidth = Param.Unsigned("Execute width")
+    executeIntWidth = Param.Unsigned("Integer execute width")
+    executeFloatWidth = Param.Unsigned("Floating point execute width")
+    executeBranchWidth = Param.Unsigned("Branch execute width")
+    executeMemoryWidth = Param.Unsigned("Memory execute width")
+
+    iewToCommitDelay = Param.Unsigned("Issue/Execute/Writeback to commit "
+                                      "delay")
+    renameToROBDelay = Param.Unsigned("Rename to reorder buffer delay")
+    commitWidth = Param.Unsigned("Commit width")
+    squashWidth = Param.Unsigned("Squash width")
+
++    predType = Param.String("Type of branch predictor ('local', 'tournament')")
+    localPredictorSize = Param.Unsigned("Size of local predictor")
+    localCtrBits = Param.Unsigned("Bits per counter")
+    localHistoryTableSize = Param.Unsigned("Size of local history table")
+    localHistoryBits = Param.Unsigned("Bits for the local history")
+    globalPredictorSize = Param.Unsigned("Size of global predictor")
+    globalCtrBits = Param.Unsigned("Bits per counter")
+    globalHistoryBits = Param.Unsigned("Bits of history")
+    choicePredictorSize = Param.Unsigned("Size of choice predictor")
+    choiceCtrBits = Param.Unsigned("Bits of choice counters")
+
+    BTBEntries = Param.Unsigned("Number of BTB entries")
+    BTBTagSize = Param.Unsigned("Size of the BTB tags, in bits")
+
+    RASSize = Param.Unsigned("RAS size")
+
+    LQEntries = Param.Unsigned("Number of load queue entries")
+    SQEntries = Param.Unsigned("Number of store queue entries")
+    LFSTSize = Param.Unsigned("Last fetched store table size")
+    SSITSize = Param.Unsigned("Store set ID table size")
+
+    numPhysIntRegs = Param.Unsigned("Number of physical integer registers")
+    numPhysFloatRegs = Param.Unsigned("Number of physical floating point "
+                                      "registers")
+    numIQEntries = Param.Unsigned("Number of instruction queue entries")
+    numROBEntries = Param.Unsigned("Number of reorder buffer entries")
+
+    instShiftAmt = Param.Unsigned("Number of bits to shift instructions by")
+
+    function_trace = Param.Bool(False, "Enable function trace")
+    function_trace_start = Param.Tick(0, "Cycle to start function trace")
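Note that DerivAlphaFullCPU and DerivOzoneCPU repeat most of SimpleParams field for field: in this era of m5, each Python Param is copied by hand into the C++ params struct by the corresponding cpu_builder.cc, so the Python and C++ lists have to stay in sync. A toy sketch of that hand-marshaling step, with hypothetical names standing in for the real builder machinery:

    #include <map>
    #include <string>

    // Toy stand-in for parameter values handed over from the Python config.
    typedef std::map<std::string, unsigned> ConfigValues;

    // Toy subset of the C++ params struct the builder must fill in.
    struct ToyOzoneParams {
        unsigned frontEndWidth;
        unsigned backEndWidth;
        unsigned instShiftAmt;
    };

    // Hand-marshaling in the style of a cpu_builder: one assignment per
    // parameter, which is why every Python Param needs a matching field.
    ToyOzoneParams buildParams(const ConfigValues &cfg)
    {
        ToyOzoneParams params;
        params.frontEndWidth = cfg.at("frontEndWidth");
        params.backEndWidth = cfg.at("backEndWidth");
        params.instShiftAmt = cfg.at("instShiftAmt");
        return params;
    }

    int main()
    {
        ConfigValues cfg;
        cfg["frontEndWidth"] = 2;
        cfg["backEndWidth"] = 2;
        cfg["instShiftAmt"] = 2;
        return buildParams(cfg).frontEndWidth == 2 ? 0 : 1;
    }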