/*
 * Copyright (c) 2010-2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */
43 #include "arch/locked_mem.hh"
44 #include "arch/mmapped_ipr.hh"
45 #include "arch/utility.hh"
46 #include "base/bigint.hh"
47 #include "config/the_isa.hh"
48 #include "cpu/simple/timing.hh"
49 #include "cpu/exetrace.hh"
50 #include "debug/Config.hh"
51 #include "debug/Drain.hh"
52 #include "debug/ExecFaulting.hh"
53 #include "debug/SimpleCPU.hh"
54 #include "mem/packet.hh"
55 #include "mem/packet_access.hh"
56 #include "params/TimingSimpleCPU.hh"
57 #include "sim/faults.hh"
58 #include "sim/full_system.hh"
59 #include "sim/system.hh"
62 using namespace TheISA
;
65 TimingSimpleCPU::init()
69 if (!params()->switched_out
&&
70 system
->getMemoryMode() != Enums::timing
) {
71 fatal("The timing CPU requires the memory system to be in "
75 // Initialise the ThreadContext's memory proxies
76 tcBase()->initMemProxies(tcBase());
78 if (FullSystem
&& !params()->switched_out
) {
79 for (int i
= 0; i
< threadContexts
.size(); ++i
) {
80 ThreadContext
*tc
= threadContexts
[i
];
81 // initialize CPU, including PC
82 TheISA::initCPU(tc
, _cpuId
);
88 TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt
, Tick t
)
91 cpu
->schedule(this, t
);
94 TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams
*p
)
95 : BaseSimpleCPU(p
), fetchTranslation(this), icachePort(this),
96 dcachePort(this), ifetch_pkt(NULL
), dcache_pkt(NULL
), previousCycle(0),
97 fetchEvent(this), drainManager(NULL
)
101 system
->totalNumInsts
= 0;
105 TimingSimpleCPU::~TimingSimpleCPU()
110 TimingSimpleCPU::drain(DrainManager
*drain_manager
)
112 assert(!drainManager
);
116 if (_status
== Idle
||
117 (_status
== BaseSimpleCPU::Running
&& isDrained())) {
118 assert(!fetchEvent
.scheduled());
119 DPRINTF(Drain
, "No need to drain.\n");
122 drainManager
= drain_manager
;
123 DPRINTF(Drain
, "Requesting drain: %s\n", pcState());
125 // The fetch event can become descheduled if a drain didn't
126 // succeed on the first attempt. We need to reschedule it if
127 // the CPU is waiting for a microcode routine to complete.
128 if (_status
== BaseSimpleCPU::Running
&& !fetchEvent
.scheduled())
129 schedule(fetchEvent
, nextCycle());
136 TimingSimpleCPU::drainResume()
138 assert(!fetchEvent
.scheduled());
139 assert(!drainManager
);
143 DPRINTF(SimpleCPU
, "Resume\n");
144 if (system
->getMemoryMode() != Enums::timing
) {
145 fatal("The timing CPU requires the memory system to be in "
149 assert(!threadContexts
.empty());
150 if (threadContexts
.size() > 1)
151 fatal("The timing CPU only supports one thread.\n");
153 if (thread
->status() == ThreadContext::Active
) {
154 schedule(fetchEvent
, nextCycle());
155 _status
= BaseSimpleCPU::Running
;
157 _status
= BaseSimpleCPU::Idle
;
162 TimingSimpleCPU::tryCompleteDrain()
167 DPRINTF(Drain
, "tryCompleteDrain: %s\n", pcState());
171 DPRINTF(Drain
, "CPU done draining, processing drain event\n");
172 drainManager
->signalDrainDone();
179 TimingSimpleCPU::switchOut()
181 BaseSimpleCPU::switchOut();
183 assert(!fetchEvent
.scheduled());
184 assert(_status
== BaseSimpleCPU::Running
|| _status
== Idle
);
186 assert(microPC() == 0);
188 numCycles
+= curCycle() - previousCycle
;
193 TimingSimpleCPU::takeOverFrom(BaseCPU
*oldCPU
)
195 BaseSimpleCPU::takeOverFrom(oldCPU
);
197 previousCycle
= curCycle();
202 TimingSimpleCPU::activateContext(ThreadID thread_num
, Cycles delay
)
204 DPRINTF(SimpleCPU
, "ActivateContext %d (%d cycles)\n", thread_num
, delay
);
206 assert(thread_num
== 0);
209 assert(_status
== Idle
);
212 _status
= BaseSimpleCPU::Running
;
214 // kick things off by initiating the fetch of the next instruction
215 schedule(fetchEvent
, clockEdge(delay
));
220 TimingSimpleCPU::suspendContext(ThreadID thread_num
)
222 DPRINTF(SimpleCPU
, "SuspendContext %d\n", thread_num
);
224 assert(thread_num
== 0);
230 assert(_status
== BaseSimpleCPU::Running
);
232 // just change status to Idle... if status != Running,
233 // completeInst() will not initiate fetch of next instruction.
240 TimingSimpleCPU::handleReadPacket(PacketPtr pkt
)
242 RequestPtr req
= pkt
->req
;
243 if (req
->isMmappedIpr()) {
244 Cycles delay
= TheISA::handleIprRead(thread
->getTC(), pkt
);
245 new IprEvent(pkt
, this, clockEdge(delay
));
246 _status
= DcacheWaitResponse
;
248 } else if (!dcachePort
.sendTimingReq(pkt
)) {
249 _status
= DcacheRetry
;
252 _status
= DcacheWaitResponse
;
253 // memory system takes ownership of packet
256 return dcache_pkt
== NULL
;
260 TimingSimpleCPU::sendData(RequestPtr req
, uint8_t *data
, uint64_t *res
,
264 buildPacket(pkt
, req
, read
);
265 pkt
->dataDynamicArray
<uint8_t>(data
);
266 if (req
->getFlags().isSet(Request::NO_ACCESS
)) {
269 completeDataAccess(pkt
);
271 handleReadPacket(pkt
);
273 bool do_access
= true; // flag to suppress cache access
276 do_access
= TheISA::handleLockedWrite(thread
, req
);
277 } else if (req
->isCondSwap()) {
279 req
->setExtraData(*res
);
286 _status
= DcacheWaitResponse
;
287 completeDataAccess(pkt
);
293 TimingSimpleCPU::sendSplitData(RequestPtr req1
, RequestPtr req2
,
294 RequestPtr req
, uint8_t *data
, bool read
)
296 PacketPtr pkt1
, pkt2
;
297 buildSplitPacket(pkt1
, pkt2
, req1
, req2
, req
, data
, read
);
298 if (req
->getFlags().isSet(Request::NO_ACCESS
)) {
300 pkt1
->makeResponse();
301 completeDataAccess(pkt1
);
303 SplitFragmentSenderState
* send_state
=
304 dynamic_cast<SplitFragmentSenderState
*>(pkt1
->senderState
);
305 if (handleReadPacket(pkt1
)) {
306 send_state
->clearFromParent();
307 send_state
= dynamic_cast<SplitFragmentSenderState
*>(
309 if (handleReadPacket(pkt2
)) {
310 send_state
->clearFromParent();
315 SplitFragmentSenderState
* send_state
=
316 dynamic_cast<SplitFragmentSenderState
*>(pkt1
->senderState
);
317 if (handleWritePacket()) {
318 send_state
->clearFromParent();
320 send_state
= dynamic_cast<SplitFragmentSenderState
*>(
322 if (handleWritePacket()) {
323 send_state
->clearFromParent();
330 TimingSimpleCPU::translationFault(Fault fault
)
332 // fault may be NoFault in cases where a fault is suppressed,
333 // for instance prefetches.
334 numCycles
+= curCycle() - previousCycle
;
335 previousCycle
= curCycle();
338 // Since there was a fault, we shouldn't trace this instruction.
349 TimingSimpleCPU::buildPacket(PacketPtr
&pkt
, RequestPtr req
, bool read
)
353 cmd
= MemCmd::ReadReq
;
355 cmd
= MemCmd::LoadLockedReq
;
357 cmd
= MemCmd::WriteReq
;
359 cmd
= MemCmd::StoreCondReq
;
360 } else if (req
->isSwap()) {
361 cmd
= MemCmd::SwapReq
;
364 pkt
= new Packet(req
, cmd
);
368 TimingSimpleCPU::buildSplitPacket(PacketPtr
&pkt1
, PacketPtr
&pkt2
,
369 RequestPtr req1
, RequestPtr req2
, RequestPtr req
,
370 uint8_t *data
, bool read
)
374 assert(!req1
->isMmappedIpr() && !req2
->isMmappedIpr());
376 if (req
->getFlags().isSet(Request::NO_ACCESS
)) {
377 buildPacket(pkt1
, req
, read
);
381 buildPacket(pkt1
, req1
, read
);
382 buildPacket(pkt2
, req2
, read
);
384 req
->setPhys(req1
->getPaddr(), req
->getSize(), req1
->getFlags(), dataMasterId());
385 PacketPtr pkt
= new Packet(req
, pkt1
->cmd
.responseCommand());
387 pkt
->dataDynamicArray
<uint8_t>(data
);
388 pkt1
->dataStatic
<uint8_t>(data
);
389 pkt2
->dataStatic
<uint8_t>(data
+ req1
->getSize());
391 SplitMainSenderState
* main_send_state
= new SplitMainSenderState
;
392 pkt
->senderState
= main_send_state
;
393 main_send_state
->fragments
[0] = pkt1
;
394 main_send_state
->fragments
[1] = pkt2
;
395 main_send_state
->outstanding
= 2;
396 pkt1
->senderState
= new SplitFragmentSenderState(pkt
, 0);
397 pkt2
->senderState
= new SplitFragmentSenderState(pkt
, 1);
401 TimingSimpleCPU::readMem(Addr addr
, uint8_t *data
,
402 unsigned size
, unsigned flags
)
406 const ThreadID tid
= 0;
407 const Addr pc
= thread
->instAddr();
408 unsigned block_size
= dcachePort
.peerBlockSize();
409 BaseTLB::Mode mode
= BaseTLB::Read
;
412 traceData
->setAddr(addr
);
415 RequestPtr req
= new Request(asid
, addr
, size
,
416 flags
, dataMasterId(), pc
, _cpuId
, tid
);
418 Addr split_addr
= roundDown(addr
+ size
- 1, block_size
);
419 assert(split_addr
<= addr
|| split_addr
- addr
< block_size
);
421 _status
= DTBWaitResponse
;
422 if (split_addr
> addr
) {
423 RequestPtr req1
, req2
;
424 assert(!req
->isLLSC() && !req
->isSwap());
425 req
->splitOnVaddr(split_addr
, req1
, req2
);
427 WholeTranslationState
*state
=
428 new WholeTranslationState(req
, req1
, req2
, new uint8_t[size
],
430 DataTranslation
<TimingSimpleCPU
*> *trans1
=
431 new DataTranslation
<TimingSimpleCPU
*>(this, state
, 0);
432 DataTranslation
<TimingSimpleCPU
*> *trans2
=
433 new DataTranslation
<TimingSimpleCPU
*>(this, state
, 1);
435 thread
->dtb
->translateTiming(req1
, tc
, trans1
, mode
);
436 thread
->dtb
->translateTiming(req2
, tc
, trans2
, mode
);
438 WholeTranslationState
*state
=
439 new WholeTranslationState(req
, new uint8_t[size
], NULL
, mode
);
440 DataTranslation
<TimingSimpleCPU
*> *translation
441 = new DataTranslation
<TimingSimpleCPU
*>(this, state
);
442 thread
->dtb
->translateTiming(req
, tc
, translation
, mode
);
449 TimingSimpleCPU::handleWritePacket()
451 RequestPtr req
= dcache_pkt
->req
;
452 if (req
->isMmappedIpr()) {
453 Cycles delay
= TheISA::handleIprWrite(thread
->getTC(), dcache_pkt
);
454 new IprEvent(dcache_pkt
, this, clockEdge(delay
));
455 _status
= DcacheWaitResponse
;
457 } else if (!dcachePort
.sendTimingReq(dcache_pkt
)) {
458 _status
= DcacheRetry
;
460 _status
= DcacheWaitResponse
;
461 // memory system takes ownership of packet
464 return dcache_pkt
== NULL
;
468 TimingSimpleCPU::writeMem(uint8_t *data
, unsigned size
,
469 Addr addr
, unsigned flags
, uint64_t *res
)
471 uint8_t *newData
= new uint8_t[size
];
472 memcpy(newData
, data
, size
);
475 const ThreadID tid
= 0;
476 const Addr pc
= thread
->instAddr();
477 unsigned block_size
= dcachePort
.peerBlockSize();
478 BaseTLB::Mode mode
= BaseTLB::Write
;
481 traceData
->setAddr(addr
);
484 RequestPtr req
= new Request(asid
, addr
, size
,
485 flags
, dataMasterId(), pc
, _cpuId
, tid
);
487 Addr split_addr
= roundDown(addr
+ size
- 1, block_size
);
488 assert(split_addr
<= addr
|| split_addr
- addr
< block_size
);
490 _status
= DTBWaitResponse
;
491 if (split_addr
> addr
) {
492 RequestPtr req1
, req2
;
493 assert(!req
->isLLSC() && !req
->isSwap());
494 req
->splitOnVaddr(split_addr
, req1
, req2
);
496 WholeTranslationState
*state
=
497 new WholeTranslationState(req
, req1
, req2
, newData
, res
, mode
);
498 DataTranslation
<TimingSimpleCPU
*> *trans1
=
499 new DataTranslation
<TimingSimpleCPU
*>(this, state
, 0);
500 DataTranslation
<TimingSimpleCPU
*> *trans2
=
501 new DataTranslation
<TimingSimpleCPU
*>(this, state
, 1);
503 thread
->dtb
->translateTiming(req1
, tc
, trans1
, mode
);
504 thread
->dtb
->translateTiming(req2
, tc
, trans2
, mode
);
506 WholeTranslationState
*state
=
507 new WholeTranslationState(req
, newData
, res
, mode
);
508 DataTranslation
<TimingSimpleCPU
*> *translation
=
509 new DataTranslation
<TimingSimpleCPU
*>(this, state
);
510 thread
->dtb
->translateTiming(req
, tc
, translation
, mode
);
513 // Translation faults will be returned via finishTranslation()
519 TimingSimpleCPU::finishTranslation(WholeTranslationState
*state
)
521 _status
= BaseSimpleCPU::Running
;
523 if (state
->getFault() != NoFault
) {
524 if (state
->isPrefetch()) {
527 delete [] state
->data
;
529 translationFault(state
->getFault());
531 if (!state
->isSplit
) {
532 sendData(state
->mainReq
, state
->data
, state
->res
,
533 state
->mode
== BaseTLB::Read
);
535 sendSplitData(state
->sreqLow
, state
->sreqHigh
, state
->mainReq
,
536 state
->data
, state
->mode
== BaseTLB::Read
);
545 TimingSimpleCPU::fetch()
547 DPRINTF(SimpleCPU
, "Fetch\n");
549 if (!curStaticInst
|| !curStaticInst
->isDelayedCommit())
550 checkForInterrupts();
554 // We must have just got suspended by a PC event
558 TheISA::PCState pcState
= thread
->pcState();
559 bool needToFetch
= !isRomMicroPC(pcState
.microPC()) && !curMacroStaticInst
;
562 _status
= BaseSimpleCPU::Running
;
563 Request
*ifetch_req
= new Request();
564 ifetch_req
->setThreadContext(_cpuId
, /* thread ID */ 0);
565 setupFetchRequest(ifetch_req
);
566 DPRINTF(SimpleCPU
, "Translating address %#x\n", ifetch_req
->getVaddr());
567 thread
->itb
->translateTiming(ifetch_req
, tc
, &fetchTranslation
,
570 _status
= IcacheWaitResponse
;
571 completeIfetch(NULL
);
573 numCycles
+= curCycle() - previousCycle
;
574 previousCycle
= curCycle();
580 TimingSimpleCPU::sendFetch(Fault fault
, RequestPtr req
, ThreadContext
*tc
)
582 if (fault
== NoFault
) {
583 DPRINTF(SimpleCPU
, "Sending fetch for addr %#x(pa: %#x)\n",
584 req
->getVaddr(), req
->getPaddr());
585 ifetch_pkt
= new Packet(req
, MemCmd::ReadReq
);
586 ifetch_pkt
->dataStatic(&inst
);
587 DPRINTF(SimpleCPU
, " -- pkt addr: %#x\n", ifetch_pkt
->getAddr());
589 if (!icachePort
.sendTimingReq(ifetch_pkt
)) {
590 // Need to wait for retry
591 _status
= IcacheRetry
;
593 // Need to wait for cache to respond
594 _status
= IcacheWaitResponse
;
595 // ownership of packet transferred to memory system
599 DPRINTF(SimpleCPU
, "Translation of addr %#x faulted\n", req
->getVaddr());
601 // fetch fault: advance directly to next instruction (fault handler)
602 _status
= BaseSimpleCPU::Running
;
606 numCycles
+= curCycle() - previousCycle
;
607 previousCycle
= curCycle();
612 TimingSimpleCPU::advanceInst(Fault fault
)
614 if (_status
== Faulting
)
617 if (fault
!= NoFault
) {
619 DPRINTF(SimpleCPU
, "Fault occured, scheduling fetch event\n");
620 reschedule(fetchEvent
, nextCycle(), true);
629 if (tryCompleteDrain())
632 if (_status
== BaseSimpleCPU::Running
) {
633 // kick off fetch of next instruction... callback from icache
634 // response will cause that instruction to be executed,
635 // keeping the CPU running.
642 TimingSimpleCPU::completeIfetch(PacketPtr pkt
)
644 DPRINTF(SimpleCPU
, "Complete ICache Fetch for addr %#x\n", pkt
?
647 // received a response from the icache: execute the received
650 assert(!pkt
|| !pkt
->isError());
651 assert(_status
== IcacheWaitResponse
);
653 _status
= BaseSimpleCPU::Running
;
655 numCycles
+= curCycle() - previousCycle
;
656 previousCycle
= curCycle();
659 if (curStaticInst
&& curStaticInst
->isMemRef()) {
660 // load or store: just send to dcache
661 Fault fault
= curStaticInst
->initiateAcc(this, traceData
);
663 // If we're not running now the instruction will complete in a dcache
664 // response callback or the instruction faulted and has started an
666 if (_status
== BaseSimpleCPU::Running
) {
667 if (fault
!= NoFault
&& traceData
) {
668 // If there was a fault, we shouldn't trace this instruction.
674 // @todo remove me after debugging with legion done
675 if (curStaticInst
&& (!curStaticInst
->isMicroop() ||
676 curStaticInst
->isFirstMicroop()))
680 } else if (curStaticInst
) {
681 // non-memory instruction: execute completely now
682 Fault fault
= curStaticInst
->execute(this, traceData
);
684 // keep an instruction count
685 if (fault
== NoFault
)
687 else if (traceData
&& !DTRACE(ExecFaulting
)) {
693 // @todo remove me after debugging with legion done
694 if (curStaticInst
&& (!curStaticInst
->isMicroop() ||
695 curStaticInst
->isFirstMicroop()))
699 advanceInst(NoFault
);
709 TimingSimpleCPU::IcachePort::ITickEvent::process()
711 cpu
->completeIfetch(pkt
);
715 TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt
)
717 DPRINTF(SimpleCPU
, "Received timing response %#x\n", pkt
->getAddr());
718 // delay processing of returned data until next CPU clock edge
719 Tick next_tick
= cpu
->nextCycle();
721 if (next_tick
== curTick())
722 cpu
->completeIfetch(pkt
);
724 tickEvent
.schedule(pkt
, next_tick
);
730 TimingSimpleCPU::IcachePort::recvRetry()
732 // we shouldn't get a retry unless we have a packet that we're
733 // waiting to transmit
734 assert(cpu
->ifetch_pkt
!= NULL
);
735 assert(cpu
->_status
== IcacheRetry
);
736 PacketPtr tmp
= cpu
->ifetch_pkt
;
737 if (sendTimingReq(tmp
)) {
738 cpu
->_status
= IcacheWaitResponse
;
739 cpu
->ifetch_pkt
= NULL
;
744 TimingSimpleCPU::completeDataAccess(PacketPtr pkt
)
746 // received a response from the dcache: complete the load or store
748 assert(!pkt
->isError());
749 assert(_status
== DcacheWaitResponse
|| _status
== DTBWaitResponse
||
750 pkt
->req
->getFlags().isSet(Request::NO_ACCESS
));
752 numCycles
+= curCycle() - previousCycle
;
753 previousCycle
= curCycle();
755 if (pkt
->senderState
) {
756 SplitFragmentSenderState
* send_state
=
757 dynamic_cast<SplitFragmentSenderState
*>(pkt
->senderState
);
761 PacketPtr big_pkt
= send_state
->bigPkt
;
764 SplitMainSenderState
* main_send_state
=
765 dynamic_cast<SplitMainSenderState
*>(big_pkt
->senderState
);
766 assert(main_send_state
);
767 // Record the fact that this packet is no longer outstanding.
768 assert(main_send_state
->outstanding
!= 0);
769 main_send_state
->outstanding
--;
771 if (main_send_state
->outstanding
) {
774 delete main_send_state
;
775 big_pkt
->senderState
= NULL
;
780 _status
= BaseSimpleCPU::Running
;
782 Fault fault
= curStaticInst
->completeAcc(pkt
, this, traceData
);
784 // keep an instruction count
785 if (fault
== NoFault
)
787 else if (traceData
) {
788 // If there was a fault, we shouldn't trace this instruction.
793 // the locked flag may be cleared on the response packet, so check
794 // pkt->req and not pkt to see if it was a load-locked
795 if (pkt
->isRead() && pkt
->req
->isLLSC()) {
796 TheISA::handleLockedRead(thread
, pkt
->req
);
808 TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt
)
810 // delay processing of returned data until next CPU clock edge
811 Tick next_tick
= cpu
->nextCycle();
813 if (next_tick
== curTick()) {
814 cpu
->completeDataAccess(pkt
);
816 if (!tickEvent
.scheduled()) {
817 tickEvent
.schedule(pkt
, next_tick
);
819 // In the case of a split transaction and a cache that is
820 // faster than a CPU we could get two responses before
822 if (!retryEvent
.scheduled())
823 cpu
->schedule(retryEvent
, next_tick
);
832 TimingSimpleCPU::DcachePort::DTickEvent::process()
834 cpu
->completeDataAccess(pkt
);
838 TimingSimpleCPU::DcachePort::recvRetry()
840 // we shouldn't get a retry unless we have a packet that we're
841 // waiting to transmit
842 assert(cpu
->dcache_pkt
!= NULL
);
843 assert(cpu
->_status
== DcacheRetry
);
844 PacketPtr tmp
= cpu
->dcache_pkt
;
845 if (tmp
->senderState
) {
846 // This is a packet from a split access.
847 SplitFragmentSenderState
* send_state
=
848 dynamic_cast<SplitFragmentSenderState
*>(tmp
->senderState
);
850 PacketPtr big_pkt
= send_state
->bigPkt
;
852 SplitMainSenderState
* main_send_state
=
853 dynamic_cast<SplitMainSenderState
*>(big_pkt
->senderState
);
854 assert(main_send_state
);
856 if (sendTimingReq(tmp
)) {
857 // If we were able to send without retrying, record that fact
858 // and try sending the other fragment.
859 send_state
->clearFromParent();
860 int other_index
= main_send_state
->getPendingFragment();
861 if (other_index
> 0) {
862 tmp
= main_send_state
->fragments
[other_index
];
863 cpu
->dcache_pkt
= tmp
;
864 if ((big_pkt
->isRead() && cpu
->handleReadPacket(tmp
)) ||
865 (big_pkt
->isWrite() && cpu
->handleWritePacket())) {
866 main_send_state
->fragments
[other_index
] = NULL
;
869 cpu
->_status
= DcacheWaitResponse
;
870 // memory system takes ownership of packet
871 cpu
->dcache_pkt
= NULL
;
874 } else if (sendTimingReq(tmp
)) {
875 cpu
->_status
= DcacheWaitResponse
;
876 // memory system takes ownership of packet
877 cpu
->dcache_pkt
= NULL
;
881 TimingSimpleCPU::IprEvent::IprEvent(Packet
*_pkt
, TimingSimpleCPU
*_cpu
,
883 : pkt(_pkt
), cpu(_cpu
)
885 cpu
->schedule(this, t
);
889 TimingSimpleCPU::IprEvent::process()
891 cpu
->completeDataAccess(pkt
);
895 TimingSimpleCPU::IprEvent::description() const
897 return "Timing Simple CPU Delay IPR event";
902 TimingSimpleCPU::printAddr(Addr a
)
904 dcachePort
.printAddr(a
);
908 ////////////////////////////////////////////////////////////////////////
910 // TimingSimpleCPU Simulation Object
913 TimingSimpleCPUParams::create()
916 if (!FullSystem
&& workload
.size() != 1)
917 panic("only one workload allowed");
918 return new TimingSimpleCPU(this);