Merge vm1.(none):/home/stever/bk/newmem-head
[gem5.git] / src / cpu / ozone / lw_lsq.hh
1 /*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 */
30
31 #ifndef __CPU_OZONE_LW_LSQ_HH__
32 #define __CPU_OZONE_LW_LSQ_HH__
33
34 #include <list>
35 #include <map>
36 #include <queue>
37 #include <algorithm>
38
39 #include "arch/faults.hh"
40 #include "arch/types.hh"
41 #include "config/full_system.hh"
42 #include "base/hashmap.hh"
43 #include "cpu/inst_seq.hh"
44 #include "mem/packet.hh"
45 #include "mem/port.hh"
46 //#include "mem/page_table.hh"
47 #include "sim/debug.hh"
48 #include "sim/sim_object.hh"
49
50 class MemObject;
51
52 /**
53 * Class that implements the actual LQ and SQ for each specific thread.
54 * Both are circular queues; load entries are freed upon committing, while
55 * store entries are freed once they writeback. The LSQUnit tracks if there
56 * are memory ordering violations, and also detects partial load to store
57 * forwarding cases (a store only has part of a load's data) that requires
58 * the load to wait until the store writes back. In the former case it
59 * holds onto the instruction until the dependence unit looks at it, and
60 * in the latter it stalls the LSQ until the store writes back. At that
61 * point the load is replayed.
62 */
template <class Impl>
class OzoneLWLSQ {
  public:
    typedef typename Impl::Params Params;
    typedef typename Impl::OzoneCPU OzoneCPU;
    typedef typename Impl::BackEnd BackEnd;
    typedef typename Impl::DynInstPtr DynInstPtr;
    typedef typename Impl::IssueStruct IssueStruct;

    typedef TheISA::IntReg IntReg;

    typedef typename std::map<InstSeqNum, DynInstPtr>::iterator LdMapIt;

  public:
    /** Constructs an LSQ unit. init() must be called prior to use. */
    OzoneLWLSQ();

    /** Initializes the LSQ unit with the specified number of entries. */
    void init(Params *params, unsigned maxLQEntries,
              unsigned maxSQEntries, unsigned id);

    /** Returns the name of the LSQ unit. */
    std::string name() const;

    /** Registers statistics (e.g. lsqMemOrderViolation). */
    void regStats();

    /** Sets the CPU pointer. */
    void setCPU(OzoneCPU *cpu_ptr);

    /** Sets the back-end stage pointer. */
    void setBE(BackEnd *be_ptr)
    { be = be_ptr; }

    /** Returns a pointer to the D-cache port owned by this unit. */
    Port *getDcachePort() { return &dcachePort; }

    /** Ticks the LSQ unit, which in this case only resets the number of
     * used cache ports.
     * @todo: Move the number of used ports up to the LSQ level so it can
     * be shared by all LSQ units.
     */
    void tick() { usedPorts = 0; }

    /** Inserts an instruction. */
    void insert(DynInstPtr &inst);
    /** Inserts a load instruction. */
    void insertLoad(DynInstPtr &load_inst);
    /** Inserts a store instruction. */
    void insertStore(DynInstPtr &store_inst);

    /** Executes a load instruction. */
    Fault executeLoad(DynInstPtr &inst);

    /** Executes a store instruction. */
    Fault executeStore(DynInstPtr &inst);

    /** Commits the head load. */
    void commitLoad();
    /** Commits loads older than a specific sequence number. */
    void commitLoads(InstSeqNum &youngest_inst);

    /** Commits stores older than a specific sequence number. */
    void commitStores(InstSeqNum &youngest_inst);

    /** Writes back stores. */
    void writebackStores();

    /** Completes the data access that has been returned from the
     * memory system. */
    void completeDataAccess(PacketPtr pkt);

    // @todo: Include stats in the LSQ unit.
    //void regStats();

    /** Clears all the entries in the LQ. */
    void clearLQ();

    /** Clears all the entries in the SQ. */
    void clearSQ();

    /** Resizes the LQ to a given size. */
    void resizeLQ(unsigned size);

    /** Resizes the SQ to a given size. */
    void resizeSQ(unsigned size);

    /** Squashes all instructions younger than a specific sequence number. */
    void squash(const InstSeqNum &squashed_num);

    /** Returns if there is a memory ordering violation. Value is reset upon
     * call to getMemDepViolator().
     */
    bool violation() { return memDepViolator; }

    /** Returns the memory ordering violator. */
    DynInstPtr getMemDepViolator();

    /** Returns if a load became blocked due to the memory system.
     * NOTE(review): this does NOT clear the flag (the original comment
     * claimed it did); use clearLoadBlocked() to reset it.
     */
    bool loadBlocked()
    { return isLoadBlocked; }

    /** Clears the blocked-load flag. */
    void clearLoadBlocked()
    { isLoadBlocked = false; }

    /** Returns whether the blocked load has already been handled. */
    bool isLoadBlockedHandled()
    { return loadBlockedHandled; }

    /** Marks the blocked load as handled. */
    void setLoadBlockedHandled()
    { loadBlockedHandled = true; }

    /** Returns the number of free entries (min of free LQ and SQ entries). */
    unsigned numFreeEntries();

    /** Returns the number of loads ready to execute. */
    int numLoadsReady();

    /** Returns the number of loads in the LQ. */
    int numLoads() { return loads; }

    /** Returns the number of stores in the SQ (including in-flight ones). */
    int numStores() { return stores + storesInFlight; }

    /** Returns if either the LQ or SQ is full. */
    bool isFull() { return lqFull() || sqFull(); }

    /** Returns if the LQ is full (one entry is always kept in reserve). */
    bool lqFull() { return loads >= (LQEntries - 1); }

    /** Returns if the SQ is full (one entry is always kept in reserve). */
    bool sqFull() { return (stores + storesInFlight) >= (SQEntries - 1); }

    /** Debugging function to dump instructions in the LSQ. */
    void dumpInsts();

    /** Returns the number of instructions in the LSQ. */
    unsigned getCount() { return loads + stores; }

    /** Returns if there are any stores to writeback. */
    bool hasStoresToWB() { return storesToWB; }

    /** Returns the number of stores to writeback. */
    int numStoresToWB() { return storesToWB; }

    /** Returns if the LSQ unit will writeback on this cycle.
     * Note: the head (oldest) store is at the back() of the list;
     * callers must ensure the SQ is non-empty (no empty() check here).
     */
    bool willWB() { return storeQueue.back().canWB &&
                        !storeQueue.back().completed &&
                        !isStoreBlocked; }

    /** Switches out the LSQ unit (drain for CPU model switching). */
    void switchOut();

    /** Takes over state when switching back in. */
    void takeOverFrom(ThreadContext *old_tc = NULL);

    /** Returns whether the LSQ unit is switched out. */
    bool isSwitchedOut() { return switchedOut; }

    /** Whether the LSQ unit is currently switched out. */
    bool switchedOut;

  private:
    /** Writes back the instruction, sending it to IEW. */
    void writeback(DynInstPtr &inst, PacketPtr pkt);

    /** Handles completing the send of a store to memory. */
    void storePostSend(PacketPtr pkt, DynInstPtr &inst);

    /** Completes the store at the specified index. */
    void completeStore(DynInstPtr &inst);

    /** Removes the store at the given SQ index from the queue. */
    void removeStore(int store_idx);

    /** Handles doing the retry. */
    void recvRetry();

  private:
    /** Pointer to the CPU. */
    OzoneCPU *cpu;

    /** Pointer to the back-end stage. */
    BackEnd *be;

    /** Port through which this unit sends requests to, and receives
     * responses/retries from, the D-cache. */
    class DcachePort : public Port
    {
      protected:
        /** Back-pointer to the owning LSQ unit. */
        OzoneLWLSQ *lsq;

      public:
        /** Constructs the port with a pointer to its owning LSQ unit. */
        DcachePort(OzoneLWLSQ *_lsq)
            : lsq(_lsq)
        { }

      protected:
        virtual Tick recvAtomic(PacketPtr pkt);

        virtual void recvFunctional(PacketPtr pkt);

        virtual void recvStatusChange(Status status);

        /** This port owns no address ranges but snoops everything. */
        virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                            bool &snoop)
        { resp.clear(); snoop = true; }

        virtual bool recvTiming(PacketPtr pkt);

        virtual void recvRetry();
    };

    /** D-cache port. */
    DcachePort dcachePort;

  public:
    /** Entry in the store queue: the instruction plus its request,
     * data, and writeback/commit/completion status. */
    struct SQEntry {
        /** Constructs an empty store queue entry. */
        SQEntry()
            : inst(NULL), req(NULL), size(0), data(0),
              canWB(0), committed(0), completed(0), lqIt(NULL)
        { }

        /** Constructs a store queue entry for a given instruction. */
        SQEntry(DynInstPtr &_inst)
            : inst(_inst), req(NULL), size(0), data(0),
              canWB(0), committed(0), completed(0), lqIt(NULL)
        { }

        /** The store instruction. */
        DynInstPtr inst;
        /** The memory request for the store. */
        RequestPtr req;
        /** The size of the store. */
        int size;
        /** The store data. */
        IntReg data;
        /** Whether or not the store can writeback. */
        bool canWB;
        /** Whether or not the store is committed. */
        bool committed;
        /** Whether or not the store is completed. */
        bool completed;

        /** Iterator into the load queue associated with this store.
         * NOTE(review): presumably the neighboring load at insert time;
         * confirm against insertStore() in the impl file. */
        typename std::list<DynInstPtr>::iterator lqIt;
    };

    /** Derived class to hold any sender state the LSQ needs. */
    class LSQSenderState : public Packet::SenderState
    {
      public:
        /** Default constructor. */
        LSQSenderState()
            : noWB(false)
        { }

        /** Instruction who initiated the access to memory. */
        DynInstPtr inst;
        /** Whether or not it is a load. */
        bool isLoad;
        /** The LQ/SQ index of the instruction. */
        int idx;
        /** Whether the register writeback should be suppressed.
         * NOTE(review): the name suggests "no writeback"; the original
         * comment said the opposite — confirm in completeDataAccess(). */
        bool noWB;
    };

    /** Writeback event, specifically for when stores forward data to loads. */
    class WritebackEvent : public Event {
      public:
        /** Constructs a writeback event. */
        WritebackEvent(DynInstPtr &_inst, PacketPtr pkt, OzoneLWLSQ *lsq_ptr);

        /** Processes the writeback event. */
        void process();

        /** Returns the description of this event. */
        const char *description();

      private:
        /** Instruction whose results are being written back. */
        DynInstPtr inst;

        /** The packet that would have been sent to memory. */
        PacketPtr pkt;

        /** The pointer to the LSQ unit that issued the store. */
        OzoneLWLSQ<Impl> *lsqPtr;
    };

    /** Possible states of the LSQ unit. */
    enum Status {
        Running,
        Idle,
        DcacheMissStall,
        DcacheMissSwitch
    };

  private:
    /** The OzoneLWLSQ thread id. */
    unsigned lsqID;

    /** The status of the LSQ unit. */
    Status _status;

    /** The store queue. Youngest entries are at the front; the head
     * (oldest) entry is at the back (see willWB()/getStoreHeadSeqNum()). */
    std::list<SQEntry> storeQueue;
    /** The load queue. Youngest entries are at the front; the head
     * (oldest) entry is at the back (see getLoadHeadSeqNum()). */
    std::list<DynInstPtr> loadQueue;

    typedef typename std::list<SQEntry>::iterator SQIt;
    typedef typename std::list<DynInstPtr>::iterator LQIt;


    /** Hash functor used to key the LQ/SQ index-to-iterator maps. */
    struct HashFn {
        size_t operator() (const int a) const
        {
            unsigned hash = (((a >> 14) ^ ((a >> 2) & 0xffff))) & 0x7FFFFFFF;

            return hash;
        }
    };

    /** Maps SQ indices to iterators into the store queue. */
    m5::hash_map<int, SQIt, HashFn> SQItHash;
    /** Queue of SQ indices (presumably recycled free indices —
     * confirm against insertStore()/removeStore() in the impl file). */
    std::queue<int> SQIndices;
    /** Maps LQ indices to iterators into the load queue. */
    m5::hash_map<int, LQIt, HashFn> LQItHash;
    /** Queue of LQ indices (presumably recycled free indices). */
    std::queue<int> LQIndices;

    typedef typename m5::hash_map<int, LQIt, HashFn>::iterator LQHashIt;
    typedef typename m5::hash_map<int, SQIt, HashFn>::iterator SQHashIt;
    // Consider making these 16 bits
    /** The number of LQ entries. */
    unsigned LQEntries;
    /** The number of SQ entries. */
    unsigned SQEntries;

    /** The number of load instructions in the LQ. */
    int loads;
    /** The number of store instructions in the SQ (excludes those waiting to
     * writeback).
     */
    int stores;

    /** The number of stores waiting to write back. */
    int storesToWB;

  public:
    /** Stores in flight to memory; counted toward occupancy in
     * numStores() and sqFull(). */
    int storesInFlight;

  private:
    /// @todo Consider moving to a more advanced model with write vs read ports
    /** The number of cache ports available each cycle. */
    int cachePorts;

    /** The number of used cache ports in this cycle. */
    int usedPorts;

    //list<InstSeqNum> mshrSeqNums;

    /** Total number of memory ordering violations. */
    Stats::Scalar<> lsqMemOrderViolation;

    //Stats::Scalar<> dcacheStallCycles;
    /** Time of the last D-cache stall (NOTE(review): updated in the
     * impl file — confirm). */
    Counter lastDcacheStall;

    // Make these per thread?
    /** Whether or not the LSQ is stalled. */
    bool stalled;
    /** The store that causes the stall due to partial store to load
     * forwarding.
     */
    InstSeqNum stallingStoreIsn;
    /** Iterator to the load stalled by the above store. */
    LQIt stallingLoad;

    /** The packet that needs to be retried. */
    PacketPtr retryPkt;

    /** Whether or not a store is blocked due to the memory system. */
    bool isStoreBlocked;

    /** Whether or not a load is blocked due to the memory system.
     * Cleared via clearLoadBlocked(), not by loadBlocked().
     */
    bool isLoadBlocked;

    /** Whether the blocked load has been handled (squash issued). */
    bool loadBlockedHandled;

    /** Sequence number of the load blocked by the memory system. */
    InstSeqNum blockedLoadSeqNum;

    /** The oldest faulting load instruction. */
    DynInstPtr loadFaultInst;
    /** The oldest faulting store instruction. */
    DynInstPtr storeFaultInst;

    /** The oldest load that caused a memory ordering violation. */
    DynInstPtr memDepViolator;

    // Will also need how many read/write ports the Dcache has. Or keep track
    // of that in stage that is one level up, and only call executeLoad/Store
    // the appropriate number of times.

  public:
    /** Executes the load at the given index. */
    template <class T>
    Fault read(RequestPtr req, T &data, int load_idx);

    /** Executes the store at the given index. */
    template <class T>
    Fault write(RequestPtr req, T &data, int store_idx);

    /** Returns the sequence number of the head load instruction, or 0
     * if the LQ is empty. The head (oldest) entry is at the back().
     */
    InstSeqNum getLoadHeadSeqNum()
    {
        if (!loadQueue.empty()) {
            return loadQueue.back()->seqNum;
        } else {
            return 0;
        }

    }

    /** Returns the sequence number of the head store instruction, or 0
     * if the SQ is empty. The head (oldest) entry is at the back().
     */
    InstSeqNum getStoreHeadSeqNum()
    {
        if (!storeQueue.empty()) {
            return storeQueue.back().inst->seqNum;
        } else {
            return 0;
        }

    }

    /** Returns whether or not the LSQ unit is stalled. */
    bool isStalled() { return stalled; }
};
489
template <class Impl>
template <class T>
Fault
OzoneLWLSQ<Impl>::read(RequestPtr req, T &data, int load_idx)
{
    //Depending on issue2execute delay a squashed load could
    //execute if it is found to be squashed in the same
    //cycle it is scheduled to execute
    // Translate the LQ index into an iterator into the load queue.
    typename m5::hash_map<int, LQIt, HashFn>::iterator
        lq_hash_it = LQItHash.find(load_idx);
    assert(lq_hash_it != LQItHash.end());
    DynInstPtr inst = (*(*lq_hash_it).second);

    // Make sure this isn't an uncacheable access
    // A bit of a hackish way to get uncached accesses to work only if they're
    // at the head of the LSQ and are ready to commit (at the head of the ROB
    // too).
    // @todo: Fix uncached accesses.
    // The head (oldest) load is at back() of the queue.
    if (req->isUncacheable() &&
        (inst != loadQueue.back() || !inst->isAtCommit())) {
        DPRINTF(OzoneLSQ, "[sn:%lli] Uncached load and not head of "
                "commit/LSQ!\n",
                inst->seqNum);
        // Defer: reschedule the load and fault so it is replayed once
        // it reaches the head.
        be->rescheduleMemInst(inst);
        return TheISA::genMachineCheckFault();
    }

    // Check the SQ for any previous stores that might lead to forwarding
    SQIt sq_it = storeQueue.begin();
    int store_size = 0;

    DPRINTF(OzoneLSQ, "Read called, load idx: %i addr: %#x\n",
            load_idx, req->getPaddr());

    // The SQ is ordered youngest-first; skip over stores younger than
    // this load so the scan only covers stores it can forward from.
    while (sq_it != storeQueue.end() && (*sq_it).inst->seqNum > inst->seqNum)
        ++sq_it;

    while (1) {
        // End once we've reached the top of the LSQ
        if (sq_it == storeQueue.end()) {
            break;
        }

        assert((*sq_it).inst);

        store_size = (*sq_it).size;

        // Skip stores whose data has not been written yet (size == 0)
        // or that have already committed.
        if (store_size == 0 || (*sq_it).committed) {
            sq_it++;
            continue;
        }

        // Check if the store data is within the lower and upper bounds of
        // addresses that the request needs.
        bool store_has_lower_limit =
            req->getVaddr() >= (*sq_it).inst->effAddr;
        bool store_has_upper_limit =
            (req->getVaddr() + req->getSize()) <= ((*sq_it).inst->effAddr +
                                                   store_size);
        bool lower_load_has_store_part =
            req->getVaddr() < ((*sq_it).inst->effAddr +
                               store_size);
        bool upper_load_has_store_part =
            (req->getVaddr() + req->getSize()) > (*sq_it).inst->effAddr;

        // If the store's data has all of the data needed, we can forward.
        if (store_has_lower_limit && store_has_upper_limit) {
            // Align the store data to the load's address.
            int shift_amt = req->getVaddr() & (store_size - 1);
            // Assumes byte addressing
            shift_amt = shift_amt << 3;

            // Cast this to type T?
            data = (*sq_it).data >> shift_amt;

            assert(!inst->memData);
            inst->memData = new uint8_t[64];

            memcpy(inst->memData, &data, req->getSize());

            DPRINTF(OzoneLSQ, "Forwarding from store [sn:%lli] to load to "
                    "[sn:%lli] addr %#x, data %#x\n",
                    (*sq_it).inst->seqNum, inst->seqNum, req->getVaddr(),
                    *(inst->memData));

            // Build a response packet locally (never sent to memory) so
            // the normal writeback path can deliver the forwarded data.
            PacketPtr data_pkt = new Packet(req, Packet::ReadReq, Packet::Broadcast);
            data_pkt->dataStatic(inst->memData);

            WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);

            // We'll say this has a 1 cycle load-store forwarding latency
            // for now.
            // @todo: Need to make this a parameter.
            wb->schedule(curTick);

            // Should keep track of stat for forwarded data
            return NoFault;
        } else if ((store_has_lower_limit && lower_load_has_store_part) ||
                   (store_has_upper_limit && upper_load_has_store_part) ||
                   (lower_load_has_store_part && upper_load_has_store_part)) {
            // This is the partial store-load forwarding case where a store
            // has only part of the load's data.

            // If it's already been written back, then don't worry about
            // stalling on it.
            if ((*sq_it).completed) {
                sq_it++;
                // Exit the scan; the load accesses memory below.
                break;
            }

            // Must stall load and force it to retry, so long as it's the oldest
            // load that needs to do so.
            if (!stalled ||
                (stalled &&
                 inst->seqNum <
                 (*stallingLoad)->seqNum)) {
                stalled = true;
                stallingStoreIsn = (*sq_it).inst->seqNum;
                stallingLoad = (*lq_hash_it).second;
            }

            // Tell IQ/mem dep unit that this instruction will need to be
            // rescheduled eventually
            be->rescheduleMemInst(inst);

            DPRINTF(OzoneLSQ, "Load-store forwarding mis-match. "
                    "Store [sn:%lli] to load addr %#x\n",
                    (*sq_it).inst->seqNum, req->getVaddr());

            return NoFault;
        }
        sq_it++;
    }

    // If there's no forwarding case, then go access memory
    DPRINTF(OzoneLSQ, "Doing functional access for inst PC %#x\n",
            inst->readPC());

    assert(!inst->memData);
    inst->memData = new uint8_t[64];

    ++usedPorts;

    DPRINTF(OzoneLSQ, "Doing timing access for inst PC %#x\n",
            inst->readPC());

    PacketPtr data_pkt = new Packet(req, Packet::ReadReq, Packet::Broadcast);
    data_pkt->dataStatic(inst->memData);

    // Attach sender state so completeDataAccess() can identify which
    // load this response belongs to.
    LSQSenderState *state = new LSQSenderState;
    state->isLoad = true;
    state->idx = load_idx;
    state->inst = inst;
    data_pkt->senderState = state;

    // if we have a cache, do cache access too
    if (!dcachePort.sendTiming(data_pkt)) {
        // There's an older load that's already going to squash.
        if (isLoadBlocked && blockedLoadSeqNum < inst->seqNum)
            return NoFault;

        // Record that the load was blocked due to memory. This
        // load will squash all instructions after it, be
        // refetched, and re-executed.
        isLoadBlocked = true;
        loadBlockedHandled = false;
        blockedLoadSeqNum = inst->seqNum;
        // No fault occurred, even though the interface is blocked.
        return NoFault;
    }

    if (req->isLocked()) {
        // Locked (LL) access: set the CPU's lock flag — presumably for
        // LL/SC support; confirm against the store-conditional path.
        cpu->lockFlag = true;
    }

    if (data_pkt->result != Packet::Success) {
        DPRINTF(OzoneLSQ, "OzoneLSQ: D-cache miss!\n");
        DPRINTF(Activity, "Activity: ld accessing mem miss [sn:%lli]\n",
                inst->seqNum);
    } else {
        DPRINTF(OzoneLSQ, "OzoneLSQ: D-cache hit!\n");
        DPRINTF(Activity, "Activity: ld accessing mem hit [sn:%lli]\n",
                inst->seqNum);
    }

    return NoFault;
}
676
677 template <class Impl>
678 template <class T>
679 Fault
680 OzoneLWLSQ<Impl>::write(RequestPtr req, T &data, int store_idx)
681 {
682 SQHashIt sq_hash_it = SQItHash.find(store_idx);
683 assert(sq_hash_it != SQItHash.end());
684
685 SQIt sq_it = (*sq_hash_it).second;
686 assert((*sq_it).inst);
687
688 DPRINTF(OzoneLSQ, "Doing write to store idx %i, addr %#x data %#x"
689 " | [sn:%lli]\n",
690 store_idx, req->getPaddr(), data, (*sq_it).inst->seqNum);
691
692 (*sq_it).req = req;
693 (*sq_it).size = sizeof(T);
694 (*sq_it).data = data;
695 /*
696 assert(!req->data);
697 req->data = new uint8_t[64];
698 memcpy(req->data, (uint8_t *)&(*sq_it).data, req->size);
699 */
700
701 // This function only writes the data to the store queue, so no fault
702 // can happen here.
703 return NoFault;
704 }
705
706 #endif // __CPU_OZONE_LW_LSQ_HH__