/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */
#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "base/output.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"
using namespace std;
using namespace TheISA;
AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}

void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}

void
AtomicSimpleCPU::init()
{
    BaseSimpleCPU::init();

    int cid = threadContexts[0]->contextId();
    ifetch_req.setThreadContext(cid, 0);
    data_read_req.setThreadContext(cid, 0);
    data_write_req.setThreadContext(cid, 0);
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem), dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
}

AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

DrainState
AtomicSimpleCPU::drain()
{
    if (switchedOut())
        return DrainState::Drained;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}
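
// Note: since the atomic CPU executes a whole instruction per tick, it is
// considered drained whenever it is not part-way through a microcode
// sequence, so drain() only needs to stop the tick event; drainResume()
// below restarts it for any threads that are still active.
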
void
AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }

            TheISA::handleLockedSnoop(threadInfo[tid]->thread,
                                      pkt, dcachePort.cacheBlockMask);
        }
    }
}
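
// threadSnoop() is invoked from writeMem() after every store so that the
// other hardware threads on this CPU observe the write: it pokes their
// monitor/mwait address monitors and clears any LL/SC reservations they
// hold on the written cache block.
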
void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->notIdleFraction = 0;
        }
    }
}

bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();
    return true;
}
void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}

void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}

void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}
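
// The memory mode is normally selected in the Python configuration before
// instantiation, e.g. (illustrative config snippet):
//     system.mem_mode = 'atomic'
// verifyMemoryMode() turns a mismatch (such as a timing-mode memory system)
// into a fatal() at resume/switch-over time instead of silent misbehaviour.
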
void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->notIdleFraction = 1;
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    numCycles += delta;
    ppCycles->notify(delta);

    if (!tickEvent.scheduled()) {
        //Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
        == activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }
}
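
// Illustrative example of the accounting above: if the thread's lastSuspend
// stamp is tick 2000 and its lastActivate stamp is tick 5000 with a 500 ps
// clock period, delta = ticksToCycles(3000) = 6 cycles; those idle cycles
// are added to numCycles and reported to the ppCycles probe so cycle
// listeners do not miss the time the thread spent asleep.
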
void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->notIdleFraction = 0;

    if (activeThreads.empty()) {
        // tick event may not be scheduled if this gets called from inside
        // an instruction's execution, e.g. "quiesce"
        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
        _status = Idle;
    }
}

Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);

    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }

    return 0;
}
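
// The address-monitor check above supports x86-style monitor/mwait: a
// thread arms its CpuAddrMonitor with an address range and then waits; any
// snooped access that doMonitor() matches wakes that thread so it can
// re-examine the monitored location.
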
void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "received snoop pkt for addr:%#x %s\n", pkt->getAddr(),
            pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // if snoop invalidates, release any associated locks
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            TheISA::handleLockedSnoop(t_info->thread, pkt, cacheBlockMask);
        }
    }
}

Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
                         unsigned flags)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isMmappedIpr()) {
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            } else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr) {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
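
// Illustrative example of the split-access arithmetic in readMem(): with a
// 64-byte cache line, an 8-byte read at addr 0x3C covers bytes 0x3C-0x43,
// so secondAddr = roundDown(0x3C + 8 - 1, 64) = 0x40 > addr.  The first
// pass reads 4 bytes (0x40 - 0x3C); the loop then advances data by 4,
// sets size = 0x3C + 8 - 0x40 = 4 and addr = 0x40, and the second pass
// reads the remaining 4 bytes from the next line.
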
Fault
AtomicSimpleCPU::initiateMemRead(Addr addr, unsigned size, unsigned flags)
{
    panic("initiateMemRead() is for timing accesses, and should "
          "never be called on AtomicSimpleCPU.\n");
}

Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
                          unsigned flags, uint64_t *res)
{
    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(flags & Request::CACHE_BLOCK_ZERO);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    req->taskId(taskId());
    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
                                                   BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req,
                                                      dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);

                    // Notify other threads on this CPU of write
                    threadSnoop(&pkt, curThread);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr) {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(locked);
                locked = false;
            }

            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
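
// Note on the LL/SC path in writeMem() above: for a store conditional,
// handleLockedWrite() decides whether the reservation is still valid.  If
// it is not, do_access is false, the cache is never touched, and the
// instruction's outcome is returned to the caller through *res via
// req->getExtraData(), so a failed SC is visible without a memory access.
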
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req.setThreadContext(cid, curThread);
        data_read_req.setThreadContext(cid, curThread);
        data_write_req.setThreadContext(cid, curThread);
    }

    SimpleExecContext& t_info = *threadInfo[curThread];
    SimpleThread* thread = t_info.thread;

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        numCycles++;
        ppCycles->notify(1);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        Fault fault = NoFault;

        TheISA::PCState pcState = thread->pcState();

        bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
                           !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req.taskId(taskId());
            setupFetchRequest(&ifetch_req);
            fault = thread->itb->translateAtomic(&ifetch_req, thread->getTC(),
                                                 BaseTLB::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                    icache_access = true;
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                    ifetch_pkt.dataStatic(&inst);

                    if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                        system->getPhysMem().access(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction directly
                    // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                }
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }
        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}
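
// Illustrative example of the stall accounting in tick(): with a 500 ps
// clock period and icache_latency + dcache_latency totalling 1200 ticks,
// divCeil(1200, 500) = 3, so the next tick is rescheduled 3 cycles
// (1500 ticks) later; an instruction with no modelled stalls still costs
// one clock period because of the "latency < clockPeriod()" floor above.
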
void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<pair<SimpleThread*, const StaticInstPtr>>
                                (getProbeManager(), "Commit");
}

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    return new AtomicSimpleCPU(this);
}