/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __CPU_OZONE_LW_LSQ_HH__
#define __CPU_OZONE_LW_LSQ_HH__

#include <algorithm>
#include <list>
#include <map>
#include <queue>

#include "arch/faults.hh"
#include "arch/isa_traits.hh"
#include "base/hashmap.hh"
#include "config/full_system.hh"
#include "cpu/inst_seq.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
//#include "mem/page_table.hh"
#include "sim/debug.hh"
#include "sim/sim_object.hh"

//class PageTable;

/**
 * Class that implements the actual LQ and SQ for each specific thread.
 * Both are circular queues; load entries are freed upon committing, while
 * store entries are freed once they writeback. The LSQUnit tracks if there
 * are memory ordering violations, and also detects partial store-to-load
 * forwarding cases (a store has only part of a load's data) that require
 * the load to wait until the store writes back. In the former case it
 * holds onto the instruction until the dependence unit looks at it, and
 * in the latter it stalls the LSQ until the store writes back. At that
 * point the load is replayed.
 */
template <class Impl>
class OzoneLWLSQ {
  public:
    typedef typename Impl::Params Params;
    typedef typename Impl::FullCPU FullCPU;
    typedef typename Impl::BackEnd BackEnd;
    typedef typename Impl::DynInstPtr DynInstPtr;
    typedef typename Impl::IssueStruct IssueStruct;

    typedef TheISA::IntReg IntReg;

    typedef typename std::map<InstSeqNum, DynInstPtr>::iterator LdMapIt;

  private:
    class StoreCompletionEvent : public Event {
      public:
        /** Constructs a store completion event. */
        StoreCompletionEvent(DynInstPtr &inst, BackEnd *be,
                             Event *wb_event, OzoneLWLSQ *lsq_ptr);

        /** Processes the store completion event. */
        void process();

        /** Returns the description of this event. */
        const char *description();

      private:
        /** The store instruction being written back. */
        DynInstPtr inst;

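        /** Pointer to the back-end stage. */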
        BackEnd *be;

      public:
        /** The writeback event for the store. Needed for store
         *  conditionals.
         */
        Event *wbEvent;
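        /** Whether or not the store missed in the d-cache. */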
        bool miss;
      private:
        /** The pointer to the LSQ unit that issued the store. */
        OzoneLWLSQ<Impl> *lsqPtr;
    };

  public:
    /** Constructs an LSQ unit. init() must be called prior to use. */
    OzoneLWLSQ();

    /** Initializes the LSQ unit with the specified number of entries. */
    void init(Params *params, unsigned maxLQEntries,
              unsigned maxSQEntries, unsigned id);

    /** Returns the name of the LSQ unit. */
    std::string name() const;

    /** Sets the CPU pointer. */
    void setCPU(FullCPU *cpu_ptr)
    { cpu = cpu_ptr; }

    /** Sets the back-end stage pointer. */
    void setBE(BackEnd *be_ptr)
    { be = be_ptr; }

    /** Sets the page table pointer. */
    // void setPageTable(PageTable *pt_ptr);

    /** Ticks the LSQ unit, which in this case only resets the number of
     *  used cache ports.
     *  @todo: Move the number of used ports up to the LSQ level so it can
     *  be shared by all LSQ units.
     */
    void tick() { usedPorts = 0; }

    /** Inserts an instruction. */
    void insert(DynInstPtr &inst);
    /** Inserts a load instruction. */
    void insertLoad(DynInstPtr &load_inst);
    /** Inserts a store instruction. */
    void insertStore(DynInstPtr &store_inst);

    /** Executes a load instruction. */
    Fault executeLoad(DynInstPtr &inst);

    /** Executes a store instruction. */
    Fault executeStore(DynInstPtr &inst);

    /** Commits the head load. */
    void commitLoad();
    /** Commits loads older than a specific sequence number. */
    void commitLoads(InstSeqNum &youngest_inst);

    /** Commits stores older than a specific sequence number. */
    void commitStores(InstSeqNum &youngest_inst);

    /** Writes back stores. */
    void writebackStores();

    // @todo: Include stats in the LSQ unit.
    //void regStats();

    /** Clears all the entries in the LQ. */
    void clearLQ();

    /** Clears all the entries in the SQ. */
    void clearSQ();

    /** Resizes the LQ to a given size. */
    void resizeLQ(unsigned size);

    /** Resizes the SQ to a given size. */
    void resizeSQ(unsigned size);

    /** Squashes all instructions younger than a specific sequence number. */
    void squash(const InstSeqNum &squashed_num);

    /** Returns if there is a memory ordering violation. Value is reset upon
     *  call to getMemDepViolator().
     */
    bool violation() { return memDepViolator; }

    /** Returns the memory ordering violator. */
    DynInstPtr getMemDepViolator();
    /** Returns if a load became blocked due to the memory system. The value
     *  is cleared by a call to clearLoadBlocked().
     */
    bool loadBlocked()
    { return isLoadBlocked; }

    /** Clears the blocked-load flag. */
    void clearLoadBlocked()
    { isLoadBlocked = false; }

    /** Returns if the blocked load has been handled. */
    bool isLoadBlockedHandled()
    { return loadBlockedHandled; }

    /** Marks the blocked load as handled. */
    void setLoadBlockedHandled()
    { loadBlockedHandled = true; }

    /** Returns the number of free entries (min of free LQ and SQ entries). */
    unsigned numFreeEntries();

    /** Returns the number of loads ready to execute. */
    int numLoadsReady();

    /** Returns the number of loads in the LQ. */
    int numLoads() { return loads; }

    /** Returns the number of stores in the SQ. */
    int numStores() { return stores; }

    /** Returns if either the LQ or SQ is full. */
    bool isFull() { return lqFull() || sqFull(); }

    /** Returns if the LQ is full. */
    bool lqFull() { return loads >= (LQEntries - 1); }

    /** Returns if the SQ is full. */
    bool sqFull() { return stores >= (SQEntries - 1); }

    /** Debugging function to dump instructions in the LSQ. */
    void dumpInsts();

    /** Returns the number of instructions in the LSQ. */
    unsigned getCount() { return loads + stores; }

    /** Returns if there are any stores to writeback. */
    bool hasStoresToWB() { return storesToWB; }

    /** Returns the number of stores to writeback. */
    int numStoresToWB() { return storesToWB; }

    /** Returns if the LSQ unit will writeback on this cycle. */
    bool willWB() { return storeQueue.back().canWB &&
                    !storeQueue.back().completed/* &&
                    !dcacheInterface->isBlocked()*/; }

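    /** Switches out the LSQ unit. */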
    void switchOut();

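    /** Takes over the state from another LSQ unit's thread. */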
    void takeOverFrom(ExecContext *old_xc = NULL);

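    /** Returns whether the LSQ unit is switched out. */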
    bool isSwitchedOut() { return switchedOut; }

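    /** Whether or not the LSQ unit is switched out. */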
    bool switchedOut;

  private:
    /** Completes the store at the specified index. */
    void completeStore(int store_idx);

  private:
    /** Pointer to the CPU. */
    FullCPU *cpu;

    /** Pointer to the back-end stage. */
    BackEnd *be;

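    /** Pointer to the memory object. */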
    MemObject *mem;

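    /** Port that handles communication with the d-cache. */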
    class DcachePort : public Port
    {
      protected:
        FullCPU *cpu;

      public:
        DcachePort(const std::string &_name, FullCPU *_cpu)
            : Port(_name), cpu(_cpu)
        { }

      protected:
        virtual Tick recvAtomic(PacketPtr pkt);

        virtual void recvFunctional(PacketPtr pkt);

        virtual void recvStatusChange(Status status);

        virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                            AddrRangeList &snoop)
        { resp.clear(); snoop.clear(); }

        virtual bool recvTiming(PacketPtr pkt);

        virtual void recvRetry();
    };

    /** The port to the d-cache. */
    DcachePort dcachePort;

    /** Pointer to the page table. */
    // PageTable *pTable;

  public:
    struct SQEntry {
        /** Constructs an empty store queue entry. */
        SQEntry()
            : inst(NULL), req(NULL), size(0), data(0),
              canWB(0), committed(0), completed(0), lqIt(NULL)
        { }

        /** Constructs a store queue entry for a given instruction. */
        SQEntry(DynInstPtr &_inst)
            : inst(_inst), req(NULL), size(0), data(0),
              canWB(0), committed(0), completed(0), lqIt(NULL)
        { }

        /** The store instruction. */
        DynInstPtr inst;
        /** The memory request for the store. */
        RequestPtr req;
        /** The size of the store. */
        int size;
        /** The store data. */
        IntReg data;
        /** Whether or not the store can writeback. */
        bool canWB;
        /** Whether or not the store is committed. */
        bool committed;
        /** Whether or not the store is completed. */
        bool completed;

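        /** Iterator into the load queue associated with this store. */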
        typename std::list<DynInstPtr>::iterator lqIt;
    };

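    /** The possible statuses of the LSQ unit. */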
    enum Status {
        Running,
        Idle,
        DcacheMissStall,
        DcacheMissSwitch
    };

  private:
    /** The OzoneLWLSQ thread id. */
    unsigned lsqID;

    /** The status of the LSQ unit. */
    Status _status;

    /** The store queue. */
    std::list<SQEntry> storeQueue;
    /** The load queue. */
    std::list<DynInstPtr> loadQueue;

    typedef typename std::list<SQEntry>::iterator SQIt;
    typedef typename std::list<DynInstPtr>::iterator LQIt;

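    /** Hash function for the LQ/SQ index hash maps; mixes the upper and
     *  lower bits of the index.
     */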
    struct HashFn {
        size_t operator() (const int a) const
        {
            unsigned hash = (((a >> 14) ^ ((a >> 2) & 0xffff))) & 0x7FFFFFFF;

            return hash;
        }
    };

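    /** Hash maps from LQ/SQ indices to iterators into the queues, along
     *  with queues of indices that are free for reuse.
     */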
    m5::hash_map<int, SQIt, HashFn> SQItHash;
    std::queue<int> SQIndices;
    m5::hash_map<int, LQIt, HashFn> LQItHash;
    std::queue<int> LQIndices;

    typedef typename m5::hash_map<int, LQIt, HashFn>::iterator LQHashIt;
    typedef typename m5::hash_map<int, SQIt, HashFn>::iterator SQHashIt;

    // Consider making these 16 bits
    /** The number of LQ entries. */
    unsigned LQEntries;
    /** The number of SQ entries. */
    unsigned SQEntries;

    /** The number of load instructions in the LQ. */
    int loads;
    /** The number of store instructions in the SQ (excludes those waiting to
     *  writeback).
     */
    int stores;

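    /** The number of stores in the SQ waiting to writeback. */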
    int storesToWB;

    /// @todo Consider moving to a more advanced model with write vs read ports
    /** The number of cache ports available each cycle. */
    int cachePorts;

    /** The number of used cache ports in this cycle. */
    int usedPorts;

    //list<InstSeqNum> mshrSeqNums;

    //Stats::Scalar<> dcacheStallCycles;
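    /** Records the cycle of the most recent d-cache stall. */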
    Counter lastDcacheStall;

    // Make these per thread?
    /** Whether or not the LSQ is stalled. */
    bool stalled;
    /** The store that causes the stall due to partial store-to-load
     *  forwarding.
     */
    InstSeqNum stallingStoreIsn;
    /** Iterator pointing to the load that is stalled on the above store. */
    LQIt stallingLoad;

    /** Whether or not a load is blocked due to the memory system. It is
     *  cleared via a call to clearLoadBlocked().
     */
    bool isLoadBlocked;

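    /** Whether or not the blocked load has been handled. */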
    bool loadBlockedHandled;

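    /** The sequence number of the blocked load. */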
    InstSeqNum blockedLoadSeqNum;

    /** The oldest faulting load instruction. */
    DynInstPtr loadFaultInst;
    /** The oldest faulting store instruction. */
    DynInstPtr storeFaultInst;

    /** The oldest load that caused a memory ordering violation. */
    DynInstPtr memDepViolator;

    // Will also need how many read/write ports the Dcache has. Or keep track
    // of that in stage that is one level up, and only call executeLoad/Store
    // the appropriate number of times.

  public:
    /** Executes the load at the given index. */
    template <class T>
    Fault read(RequestPtr req, T &data, int load_idx);

    /** Executes the store at the given index. */
    template <class T>
    Fault write(RequestPtr req, T &data, int store_idx);

    /** Returns the sequence number of the head load instruction. */
    InstSeqNum getLoadHeadSeqNum()
    {
        if (!loadQueue.empty()) {
            return loadQueue.back()->seqNum;
        } else {
            return 0;
        }
    }

    /** Returns the sequence number of the head store instruction. */
    InstSeqNum getStoreHeadSeqNum()
    {
        if (!storeQueue.empty()) {
            return storeQueue.back().inst->seqNum;
        } else {
            return 0;
        }
    }

    /** Returns whether or not the LSQ unit is stalled. */
    bool isStalled() { return stalled; }
};

template <class Impl>
template <class T>
Fault
OzoneLWLSQ<Impl>::read(RequestPtr req, T &data, int load_idx)
{
    // Depending on the issue-to-execute delay, a squashed load could
    // execute if it is found to be squashed in the same cycle it is
    // scheduled to execute.
    typename m5::hash_map<int, LQIt, HashFn>::iterator
        lq_hash_it = LQItHash.find(load_idx);
    assert(lq_hash_it != LQItHash.end());
    DynInstPtr inst = (*(*lq_hash_it).second);

    // Make sure this isn't an uncacheable access
    // A bit of a hackish way to get uncached accesses to work only if they're
    // at the head of the LSQ and are ready to commit (at the head of the ROB
    // too).
    // @todo: Fix uncached accesses.
    if (req->getFlags() & UNCACHEABLE &&
        (inst != loadQueue.back() || !inst->reachedCommit)) {
        DPRINTF(OzoneLSQ, "[sn:%lli] Uncached load and not head of "
                "commit/LSQ!\n",
                inst->seqNum);
        be->rescheduleMemInst(inst);
        return TheISA::genMachineCheckFault();
    }

    // Check the SQ for any previous stores that might lead to forwarding
    SQIt sq_it = storeQueue.begin();
    int store_size = 0;

    DPRINTF(OzoneLSQ, "Read called, load idx: %i addr: %#x\n",
            load_idx, req->getPaddr());

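    // Skip stores younger than the load; only older stores can forward data.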
    while (sq_it != storeQueue.end() && (*sq_it).inst->seqNum > inst->seqNum)
        ++sq_it;

    while (1) {
        // End once we've reached the top of the LSQ
        if (sq_it == storeQueue.end()) {
            break;
        }

        assert((*sq_it).inst);

        store_size = (*sq_it).size;

        if (store_size == 0) {
            sq_it++;
            continue;
        }

        // Check if the store data is within the lower and upper bounds of
        // addresses that the request needs.
        bool store_has_lower_limit =
            req->getVaddr() >= (*sq_it).inst->effAddr;
        bool store_has_upper_limit =
            (req->getVaddr() + req->getSize()) <= ((*sq_it).inst->effAddr +
                                                   store_size);
        bool lower_load_has_store_part =
            req->getVaddr() < ((*sq_it).inst->effAddr +
                               store_size);
        bool upper_load_has_store_part =
            (req->getVaddr() + req->getSize()) > (*sq_it).inst->effAddr;

        // If the store's data has all of the data needed, we can forward.
        if (store_has_lower_limit && store_has_upper_limit) {
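            // The load falls entirely within the store's data; compute the
            // load's byte offset within the store (assumes store_size is a
            // power of two).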
            int shift_amt = req->getVaddr() & (store_size - 1);
            // Assumes byte addressing
            shift_amt = shift_amt << 3;

            // Cast this to type T?
            data = (*sq_it).data >> shift_amt;

            assert(!inst->memData);
            inst->memData = new uint8_t[64];

            memcpy(inst->memData, &data, req->getSize());

            DPRINTF(OzoneLSQ, "Forwarding from store [sn:%lli] to load to "
                    "[sn:%lli] addr %#x, data %#x\n",
                    (*sq_it).inst->seqNum, inst->seqNum, req->getVaddr(),
                    *(inst->memData));
            /*
            typename BackEnd::LdWritebackEvent *wb =
                new typename BackEnd::LdWritebackEvent(inst,
                                                       be);

            // We'll say this has a 1 cycle load-store forwarding latency
            // for now.
            // FIXME - Need to make this a parameter.
            wb->schedule(curTick);
            */
            // Should keep track of stat for forwarded data
            return NoFault;
        } else if ((store_has_lower_limit && lower_load_has_store_part) ||
                   (store_has_upper_limit && upper_load_has_store_part) ||
                   (lower_load_has_store_part && upper_load_has_store_part)) {
            // This is the partial store-to-load forwarding case where a
            // store has only part of the load's data.

            // If it's already been written back, then don't worry about
            // stalling on it.
            if ((*sq_it).completed) {
                sq_it++;
                break;
            }

            // Must stall load and force it to retry, so long as it's the
            // oldest load that needs to do so.
            if (!stalled ||
                (stalled &&
                 inst->seqNum <
                 (*stallingLoad)->seqNum)) {
                stalled = true;
                stallingStoreIsn = (*sq_it).inst->seqNum;
                stallingLoad = (*lq_hash_it).second;
            }

            // Tell IQ/mem dep unit that this instruction will need to be
            // rescheduled eventually
            be->rescheduleMemInst(inst);

            DPRINTF(OzoneLSQ, "Load-store forwarding mis-match. "
                    "Store [sn:%lli] to load addr %#x\n",
                    (*sq_it).inst->seqNum, req->getVaddr());

            return NoFault;
        }
        sq_it++;
    }

    // If there's no forwarding case, then go access memory
    DPRINTF(OzoneLSQ, "Doing timing access for inst PC %#x\n",
            inst->readPC());

    assert(!inst->memData);
    inst->memData = new uint8_t[64];

    ++usedPorts;

    PacketPtr data_pkt = new Packet(req, Packet::ReadReq, Packet::Broadcast);
    data_pkt->dataStatic(inst->memData);

    // Attempt to send the access to the d-cache.
    if (!dcachePort.sendTiming(data_pkt)) {
        // There's an older load that's already going to squash.
        if (isLoadBlocked && blockedLoadSeqNum < inst->seqNum)
            return NoFault;

        // Record that the load was blocked due to memory. This
        // load will squash all instructions after it, be
        // refetched, and re-executed.
        isLoadBlocked = true;
        loadBlockedHandled = false;
        blockedLoadSeqNum = inst->seqNum;
        // No fault occurred, even though the interface is blocked.
        return NoFault;
    }

    if (data_pkt->result != Packet::Success) {
        DPRINTF(OzoneLSQ, "OzoneLSQ: D-cache miss!\n");
        DPRINTF(Activity, "Activity: ld accessing mem miss [sn:%lli]\n",
                inst->seqNum);
    } else {
        DPRINTF(OzoneLSQ, "OzoneLSQ: D-cache hit!\n");
        DPRINTF(Activity, "Activity: ld accessing mem hit [sn:%lli]\n",
                inst->seqNum);
    }

    return NoFault;
}

template <class Impl>
template <class T>
Fault
OzoneLWLSQ<Impl>::write(RequestPtr req, T &data, int store_idx)
{
    SQHashIt sq_hash_it = SQItHash.find(store_idx);
    assert(sq_hash_it != SQItHash.end());

    SQIt sq_it = (*sq_hash_it).second;
    assert((*sq_it).inst);

    DPRINTF(OzoneLSQ, "Doing write to store idx %i, addr %#x data %#x"
            " | [sn:%lli]\n",
            store_idx, req->getPaddr(), data, (*sq_it).inst->seqNum);

    (*sq_it).req = req;
    (*sq_it).size = sizeof(T);
    (*sq_it).data = data;
    /*
    assert(!req->data);
    req->data = new uint8_t[64];
    memcpy(req->data, (uint8_t *)&(*sq_it).data, req->size);
    */

    // This function only writes the data to the store queue, so no fault
    // can happen here.
    return NoFault;
}

#endif // __CPU_OZONE_LW_LSQ_HH__