/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */
#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/timing.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/system.hh"
43 using namespace TheISA
;
46 TimingSimpleCPU::getPort(const std::string
&if_name
, int idx
)
48 if (if_name
== "dcache_port")
50 else if (if_name
== "icache_port")
53 panic("No Such Port\n");
57 TimingSimpleCPU::init()
61 for (int i
= 0; i
< threadContexts
.size(); ++i
) {
62 ThreadContext
*tc
= threadContexts
[i
];
64 // initialize CPU, including PC
65 TheISA::initCPU(tc
, tc
->readCpuId());
71 TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt
)
73 panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
78 TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt
)
80 //No internal storage to update, jusst return
85 TimingSimpleCPU::CpuPort::recvStatusChange(Status status
)
87 if (status
== RangeChange
) {
88 if (!snoopRangeSent
) {
89 snoopRangeSent
= true;
90 sendStatusChange(Port::RangeChange
);
95 panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
100 TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt
, Tick t
)
106 TimingSimpleCPU::TimingSimpleCPU(Params
*p
)
107 : BaseSimpleCPU(p
), icachePort(this, p
->clock
), dcachePort(this, p
->clock
),
112 icachePort
.snoopRangeSent
= false;
113 dcachePort
.snoopRangeSent
= false;
115 ifetch_pkt
= dcache_pkt
= NULL
;
119 changeState(SimObject::Running
);
123 TimingSimpleCPU::~TimingSimpleCPU()
128 TimingSimpleCPU::serialize(ostream
&os
)
130 SimObject::State so_state
= SimObject::getState();
131 SERIALIZE_ENUM(so_state
);
132 BaseSimpleCPU::serialize(os
);
136 TimingSimpleCPU::unserialize(Checkpoint
*cp
, const string
§ion
)
138 SimObject::State so_state
;
139 UNSERIALIZE_ENUM(so_state
);
140 BaseSimpleCPU::unserialize(cp
, section
);
144 TimingSimpleCPU::drain(Event
*drain_event
)
146 // TimingSimpleCPU is ready to drain if it's not waiting for
147 // an access to complete.
148 if (status() == Idle
|| status() == Running
|| status() == SwitchedOut
) {
149 changeState(SimObject::Drained
);
152 changeState(SimObject::Draining
);
153 drainEvent
= drain_event
;
159 TimingSimpleCPU::resume()
161 if (_status
!= SwitchedOut
&& _status
!= Idle
) {
162 assert(system
->getMemoryMode() == Enums::timing
);
164 // Delete the old event if it existed.
166 if (fetchEvent
->scheduled())
167 fetchEvent
->deschedule();
172 fetchEvent
= new FetchEvent(this, nextCycle());
175 changeState(SimObject::Running
);
179 TimingSimpleCPU::switchOut()
181 assert(status() == Running
|| status() == Idle
);
182 _status
= SwitchedOut
;
183 numCycles
+= tickToCycles(curTick
- previousTick
);
185 // If we've been scheduled to resume but are then told to switch out,
186 // we'll need to cancel it.
187 if (fetchEvent
&& fetchEvent
->scheduled())
188 fetchEvent
->deschedule();
193 TimingSimpleCPU::takeOverFrom(BaseCPU
*oldCPU
)
195 BaseCPU::takeOverFrom(oldCPU
, &icachePort
, &dcachePort
);
197 // if any of this CPU's ThreadContexts are active, mark the CPU as
198 // running and schedule its tick event.
199 for (int i
= 0; i
< threadContexts
.size(); ++i
) {
200 ThreadContext
*tc
= threadContexts
[i
];
201 if (tc
->status() == ThreadContext::Active
&& _status
!= Running
) {
207 if (_status
!= Running
) {
210 previousTick
= curTick
;
215 TimingSimpleCPU::activateContext(int thread_num
, int delay
)
217 assert(thread_num
== 0);
220 assert(_status
== Idle
);
225 // kick things off by initiating the fetch of the next instruction
226 fetchEvent
= new FetchEvent(this, nextCycle(curTick
+ ticks(delay
)));
231 TimingSimpleCPU::suspendContext(int thread_num
)
233 assert(thread_num
== 0);
236 assert(_status
== Running
);
238 // just change status to Idle... if status != Running,
239 // completeInst() will not initiate fetch of next instruction.
248 TimingSimpleCPU::read(Addr addr
, T
&data
, unsigned flags
)
251 new Request(/* asid */ 0, addr
, sizeof(T
), flags
, thread
->readPC(),
252 cpu_id
, /* thread ID */ 0);
255 traceData
->setAddr(req
->getVaddr());
258 // translate to physical address
259 Fault fault
= thread
->translateDataReadReq(req
);
261 // Now do the access.
262 if (fault
== NoFault
) {
266 MemCmd::LoadLockedReq
: MemCmd::ReadReq
),
268 pkt
->dataDynamic
<T
>(new T
);
270 if (req
->isMmapedIpr()) {
272 delay
= TheISA::handleIprRead(thread
->getTC(), pkt
);
273 new IprEvent(pkt
, this, nextCycle(curTick
+ delay
));
274 _status
= DcacheWaitResponse
;
276 } else if (!dcachePort
.sendTiming(pkt
)) {
277 _status
= DcacheRetry
;
280 _status
= DcacheWaitResponse
;
281 // memory system takes ownership of packet
285 // This will need a new way to tell if it has a dcache attached.
286 if (req
->isUncacheable())
287 recordEvent("Uncached Read");
295 #ifndef DOXYGEN_SHOULD_SKIP_THIS
299 TimingSimpleCPU::read(Addr addr
, Twin64_t
&data
, unsigned flags
);
303 TimingSimpleCPU::read(Addr addr
, Twin32_t
&data
, unsigned flags
);
307 TimingSimpleCPU::read(Addr addr
, uint64_t &data
, unsigned flags
);
311 TimingSimpleCPU::read(Addr addr
, uint32_t &data
, unsigned flags
);
315 TimingSimpleCPU::read(Addr addr
, uint16_t &data
, unsigned flags
);
319 TimingSimpleCPU::read(Addr addr
, uint8_t &data
, unsigned flags
);
321 #endif //DOXYGEN_SHOULD_SKIP_THIS
325 TimingSimpleCPU::read(Addr addr
, double &data
, unsigned flags
)
327 return read(addr
, *(uint64_t*)&data
, flags
);
332 TimingSimpleCPU::read(Addr addr
, float &data
, unsigned flags
)
334 return read(addr
, *(uint32_t*)&data
, flags
);
340 TimingSimpleCPU::read(Addr addr
, int32_t &data
, unsigned flags
)
342 return read(addr
, (uint32_t&)data
, flags
);
348 TimingSimpleCPU::write(T data
, Addr addr
, unsigned flags
, uint64_t *res
)
351 new Request(/* asid */ 0, addr
, sizeof(T
), flags
, thread
->readPC(),
352 cpu_id
, /* thread ID */ 0);
355 traceData
->setAddr(req
->getVaddr());
358 // translate to physical address
359 Fault fault
= thread
->translateDataWriteReq(req
);
361 // Now do the access.
362 if (fault
== NoFault
) {
363 MemCmd cmd
= MemCmd::WriteReq
; // default
364 bool do_access
= true; // flag to suppress cache access
366 if (req
->isLocked()) {
367 cmd
= MemCmd::StoreCondReq
;
368 do_access
= TheISA::handleLockedWrite(thread
, req
);
369 } else if (req
->isSwap()) {
370 cmd
= MemCmd::SwapReq
;
371 if (req
->isCondSwap()) {
373 req
->setExtraData(*res
);
377 // Note: need to allocate dcache_pkt even if do_access is
378 // false, as it's used unconditionally to call completeAcc().
379 assert(dcache_pkt
== NULL
);
380 dcache_pkt
= new Packet(req
, cmd
, Packet::Broadcast
);
381 dcache_pkt
->allocate();
382 dcache_pkt
->set(data
);
385 if (req
->isMmapedIpr()) {
387 dcache_pkt
->set(htog(data
));
388 delay
= TheISA::handleIprWrite(thread
->getTC(), dcache_pkt
);
389 new IprEvent(dcache_pkt
, this, nextCycle(curTick
+ delay
));
390 _status
= DcacheWaitResponse
;
392 } else if (!dcachePort
.sendTiming(dcache_pkt
)) {
393 _status
= DcacheRetry
;
395 _status
= DcacheWaitResponse
;
396 // memory system takes ownership of packet
400 // This will need a new way to tell if it's hooked up to a cache or not.
401 if (req
->isUncacheable())
402 recordEvent("Uncached Write");
408 // If the write needs to have a fault on the access, consider calling
409 // changeStatus() and changing it to "bad addr write" or something.
414 #ifndef DOXYGEN_SHOULD_SKIP_THIS
417 TimingSimpleCPU::write(Twin32_t data
, Addr addr
,
418 unsigned flags
, uint64_t *res
);
422 TimingSimpleCPU::write(Twin64_t data
, Addr addr
,
423 unsigned flags
, uint64_t *res
);
427 TimingSimpleCPU::write(uint64_t data
, Addr addr
,
428 unsigned flags
, uint64_t *res
);
432 TimingSimpleCPU::write(uint32_t data
, Addr addr
,
433 unsigned flags
, uint64_t *res
);
437 TimingSimpleCPU::write(uint16_t data
, Addr addr
,
438 unsigned flags
, uint64_t *res
);
442 TimingSimpleCPU::write(uint8_t data
, Addr addr
,
443 unsigned flags
, uint64_t *res
);
445 #endif //DOXYGEN_SHOULD_SKIP_THIS
449 TimingSimpleCPU::write(double data
, Addr addr
, unsigned flags
, uint64_t *res
)
451 return write(*(uint64_t*)&data
, addr
, flags
, res
);
456 TimingSimpleCPU::write(float data
, Addr addr
, unsigned flags
, uint64_t *res
)
458 return write(*(uint32_t*)&data
, addr
, flags
, res
);
464 TimingSimpleCPU::write(int32_t data
, Addr addr
, unsigned flags
, uint64_t *res
)
466 return write((uint32_t)data
, addr
, flags
, res
);
471 TimingSimpleCPU::fetch()
473 if (!curStaticInst
|| !curStaticInst
->isDelayedCommit())
474 checkForInterrupts();
476 Request
*ifetch_req
= new Request();
477 ifetch_req
->setThreadContext(cpu_id
, /* thread ID */ 0);
478 Fault fault
= setupFetchRequest(ifetch_req
);
480 ifetch_pkt
= new Packet(ifetch_req
, MemCmd::ReadReq
, Packet::Broadcast
);
481 ifetch_pkt
->dataStatic(&inst
);
483 if (fault
== NoFault
) {
484 if (!icachePort
.sendTiming(ifetch_pkt
)) {
485 // Need to wait for retry
486 _status
= IcacheRetry
;
488 // Need to wait for cache to respond
489 _status
= IcacheWaitResponse
;
490 // ownership of packet transferred to memory system
496 // fetch fault: advance directly to next instruction (fault handler)
500 numCycles
+= tickToCycles(curTick
- previousTick
);
501 previousTick
= curTick
;
506 TimingSimpleCPU::advanceInst(Fault fault
)
510 if (_status
== Running
) {
511 // kick off fetch of next instruction... callback from icache
512 // response will cause that instruction to be executed,
513 // keeping the CPU running.
520 TimingSimpleCPU::completeIfetch(PacketPtr pkt
)
522 // received a response from the icache: execute the received
524 assert(!pkt
->isError());
525 assert(_status
== IcacheWaitResponse
);
529 numCycles
+= tickToCycles(curTick
- previousTick
);
530 previousTick
= curTick
;
532 if (getState() == SimObject::Draining
) {
541 if (curStaticInst
->isMemRef() && !curStaticInst
->isDataPrefetch()) {
542 // load or store: just send to dcache
543 Fault fault
= curStaticInst
->initiateAcc(this, traceData
);
544 if (_status
!= Running
) {
545 // instruction will complete in dcache response callback
546 assert(_status
== DcacheWaitResponse
|| _status
== DcacheRetry
);
547 assert(fault
== NoFault
);
549 if (fault
== NoFault
) {
550 // early fail on store conditional: complete now
551 assert(dcache_pkt
!= NULL
);
552 fault
= curStaticInst
->completeAcc(dcache_pkt
, this,
554 delete dcache_pkt
->req
;
558 // keep an instruction count
559 if (fault
== NoFault
)
561 } else if (traceData
) {
562 // If there was a fault, we shouldn't trace this instruction.
568 // @todo remove me after debugging with legion done
569 if (curStaticInst
&& (!curStaticInst
->isMicroop() ||
570 curStaticInst
->isFirstMicroop()))
575 // non-memory instruction: execute completely now
576 Fault fault
= curStaticInst
->execute(this, traceData
);
578 // keep an instruction count
579 if (fault
== NoFault
)
581 else if (traceData
) {
582 // If there was a fault, we shouldn't trace this instruction.
588 // @todo remove me after debugging with legion done
589 if (curStaticInst
&& (!curStaticInst
->isMicroop() ||
590 curStaticInst
->isFirstMicroop()))
600 TimingSimpleCPU::IcachePort::ITickEvent::process()
602 cpu
->completeIfetch(pkt
);
606 TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt
)
608 if (pkt
->isResponse() && !pkt
->wasNacked()) {
609 // delay processing of returned data until next CPU clock edge
610 Tick next_tick
= cpu
->nextCycle(curTick
);
612 if (next_tick
== curTick
)
613 cpu
->completeIfetch(pkt
);
615 tickEvent
.schedule(pkt
, next_tick
);
619 else if (pkt
->wasNacked()) {
620 assert(cpu
->_status
== IcacheWaitResponse
);
622 if (!sendTiming(pkt
)) {
623 cpu
->_status
= IcacheRetry
;
624 cpu
->ifetch_pkt
= pkt
;
627 //Snooping a Coherence Request, do nothing
632 TimingSimpleCPU::IcachePort::recvRetry()
634 // we shouldn't get a retry unless we have a packet that we're
635 // waiting to transmit
636 assert(cpu
->ifetch_pkt
!= NULL
);
637 assert(cpu
->_status
== IcacheRetry
);
638 PacketPtr tmp
= cpu
->ifetch_pkt
;
639 if (sendTiming(tmp
)) {
640 cpu
->_status
= IcacheWaitResponse
;
641 cpu
->ifetch_pkt
= NULL
;
646 TimingSimpleCPU::completeDataAccess(PacketPtr pkt
)
648 // received a response from the dcache: complete the load or store
650 assert(!pkt
->isError());
651 assert(_status
== DcacheWaitResponse
);
654 numCycles
+= tickToCycles(curTick
- previousTick
);
655 previousTick
= curTick
;
657 Fault fault
= curStaticInst
->completeAcc(pkt
, this, traceData
);
659 // keep an instruction count
660 if (fault
== NoFault
)
662 else if (traceData
) {
663 // If there was a fault, we shouldn't trace this instruction.
668 if (pkt
->isRead() && pkt
->isLocked()) {
669 TheISA::handleLockedRead(thread
, pkt
->req
);
677 if (getState() == SimObject::Draining
) {
689 TimingSimpleCPU::completeDrain()
691 DPRINTF(Config
, "Done draining\n");
692 changeState(SimObject::Drained
);
693 drainEvent
->process();
697 TimingSimpleCPU::DcachePort::setPeer(Port
*port
)
702 // Update the ThreadContext's memory ports (Functional/Virtual
704 cpu
->tcBase()->connectMemPorts();
709 TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt
)
711 if (pkt
->isResponse() && !pkt
->wasNacked()) {
712 // delay processing of returned data until next CPU clock edge
713 Tick next_tick
= cpu
->nextCycle(curTick
);
715 if (next_tick
== curTick
)
716 cpu
->completeDataAccess(pkt
);
718 tickEvent
.schedule(pkt
, next_tick
);
722 else if (pkt
->wasNacked()) {
723 assert(cpu
->_status
== DcacheWaitResponse
);
725 if (!sendTiming(pkt
)) {
726 cpu
->_status
= DcacheRetry
;
727 cpu
->dcache_pkt
= pkt
;
730 //Snooping a Coherence Request, do nothing
735 TimingSimpleCPU::DcachePort::DTickEvent::process()
737 cpu
->completeDataAccess(pkt
);
741 TimingSimpleCPU::DcachePort::recvRetry()
743 // we shouldn't get a retry unless we have a packet that we're
744 // waiting to transmit
745 assert(cpu
->dcache_pkt
!= NULL
);
746 assert(cpu
->_status
== DcacheRetry
);
747 PacketPtr tmp
= cpu
->dcache_pkt
;
748 if (sendTiming(tmp
)) {
749 cpu
->_status
= DcacheWaitResponse
;
750 // memory system takes ownership of packet
751 cpu
->dcache_pkt
= NULL
;
755 TimingSimpleCPU::IprEvent::IprEvent(Packet
*_pkt
, TimingSimpleCPU
*_cpu
, Tick t
)
756 : Event(&mainEventQueue
), pkt(_pkt
), cpu(_cpu
)
762 TimingSimpleCPU::IprEvent::process()
764 cpu
->completeDataAccess(pkt
);
768 TimingSimpleCPU::IprEvent::description()
770 return "Timing Simple CPU Delay IPR event";
////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
////////////////////////////////////////////////////////////////////////
779 TimingSimpleCPUParams::create()
781 TimingSimpleCPU::Params
*params
= new TimingSimpleCPU::Params();
783 params
->numberOfThreads
= 1;
784 params
->max_insts_any_thread
= max_insts_any_thread
;
785 params
->max_insts_all_threads
= max_insts_all_threads
;
786 params
->max_loads_any_thread
= max_loads_any_thread
;
787 params
->max_loads_all_threads
= max_loads_all_threads
;
788 params
->progress_interval
= progress_interval
;
789 params
->deferRegistration
= defer_registration
;
790 params
->clock
= clock
;
791 params
->phase
= phase
;
792 params
->functionTrace
= function_trace
;
793 params
->functionTraceStart
= function_trace_start
;
794 params
->system
= system
;
795 params
->cpu_id
= cpu_id
;
796 params
->tracer
= tracer
;
801 params
->profile
= profile
;
802 params
->do_quiesce
= do_quiesce
;
803 params
->do_checkpoint_insts
= do_checkpoint_insts
;
804 params
->do_statistics_insts
= do_statistics_insts
;
806 if (workload
.size() != 1)
807 panic("only one workload allowed");
808 params
->process
= workload
[0];
811 TimingSimpleCPU
*cpu
= new TimingSimpleCPU(params
);