/*
 * Copyright (c) 2004-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 */

#ifndef __CPU_OZONE_LSQ_UNIT_HH__
#define __CPU_OZONE_LSQ_UNIT_HH__

#include <algorithm>
#include <map>
#include <queue>

#include "arch/faults.hh"
#include "arch/types.hh"
#include "base/hashmap.hh"
#include "config/full_system.hh"
#include "cpu/inst_seq.hh"
#include "mem/mem_interface.hh"
//#include "mem/page_table.hh"
#include "sim/sim_object.hh"

class PageTable;

/**
 * Class that implements the actual LQ and SQ for each specific thread.
 * Both are circular queues; load entries are freed upon committing, while
 * store entries are freed once they writeback. The LSQUnit tracks whether
 * there are memory ordering violations, and also detects partial
 * store-to-load forwarding cases (a store has only part of a load's data)
 * that require the load to wait until the store writes back. In the former
 * case it holds onto the instruction until the dependence unit looks at
 * it, and in the latter it stalls the LSQ until the store writes back. At
 * that point the load is replayed.
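 *
 * For example, if a store writes 8 bytes at address 0x100 and a younger
 * load then reads 4 bytes at 0x104, the store covers the whole load and
 * its data can be forwarded. If the load instead reads 8 bytes starting at
 * 0x104, the store holds only part of the load's data, so the load stalls
 * until that store writes back and is then replayed.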
 */
template <class Impl>
class OzoneLSQ {
  public:
    typedef typename Impl::Params Params;
    typedef typename Impl::FullCPU FullCPU;
    typedef typename Impl::BackEnd BackEnd;
    typedef typename Impl::DynInstPtr DynInstPtr;
    typedef typename Impl::IssueStruct IssueStruct;

    typedef TheISA::IntReg IntReg;

    typedef typename std::map<InstSeqNum, DynInstPtr>::iterator LdMapIt;

  private:
    class StoreCompletionEvent : public Event {
      public:
        /** Constructs a store completion event. */
        StoreCompletionEvent(int store_idx, Event *wb_event, OzoneLSQ *lsq_ptr);

        /** Processes the store completion event. */
        void process();

        /** Returns the description of this event. */
        const char *description() const;

      private:
        /** The store index of the store being written back. */
        int storeIdx;
        /** The writeback event for the store. Needed for store
         * conditionals.
         */
        Event *wbEvent;
        /** The pointer to the LSQ unit that issued the store. */
        OzoneLSQ<Impl> *lsqPtr;
    };
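
    // Note: process() is defined out of line in the accompanying
    // implementation file; presumably it marks the store at storeIdx as
    // completed and, if wbEvent is set (store conditionals), schedules that
    // writeback event as well.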

    friend class StoreCompletionEvent;

  public:
    /** Constructs an LSQ unit. init() must be called prior to use. */
    OzoneLSQ();

    /** Initializes the LSQ unit with the specified number of entries. */
    void init(Params *params, unsigned maxLQEntries,
              unsigned maxSQEntries, unsigned id);

    /** Returns the name of the LSQ unit. */
    std::string name() const;

    /** Sets the CPU pointer. */
    void setCPU(FullCPU *cpu_ptr)
    { cpu = cpu_ptr; }

    /** Sets the back-end stage pointer. */
    void setBE(BackEnd *be_ptr)
    { be = be_ptr; }

    /** Ticks the LSQ unit, which in this case only resets the number of
     * used cache ports.
     * @todo: Move the number of used ports up to the LSQ level so it can
     * be shared by all LSQ units.
     */
    void tick() { usedPorts = 0; }

    /** Inserts an instruction. */
    void insert(DynInstPtr &inst);
    /** Inserts a load instruction. */
    void insertLoad(DynInstPtr &load_inst);
    /** Inserts a store instruction. */
    void insertStore(DynInstPtr &store_inst);

    /** Executes a load instruction. */
    Fault executeLoad(DynInstPtr &inst);

    Fault executeLoad(int lq_idx);
    /** Executes a store instruction. */
    Fault executeStore(DynInstPtr &inst);

    /** Commits the head load. */
    void commitLoad();
    /** Commits a specific load, given by the sequence number. */
    void commitLoad(InstSeqNum &inst);
    /** Commits loads older than a specific sequence number. */
    void commitLoads(InstSeqNum &youngest_inst);

    /** Commits stores older than a specific sequence number. */
    void commitStores(InstSeqNum &youngest_inst);

    /** Writes back stores. */
    void writebackStores();

    // @todo: Include stats in the LSQ unit.
    //void regStats();

    /** Clears all the entries in the LQ. */
    void clearLQ();

    /** Clears all the entries in the SQ. */
    void clearSQ();

    /** Resizes the LQ to a given size. */
    void resizeLQ(unsigned size);

    /** Resizes the SQ to a given size. */
    void resizeSQ(unsigned size);

    /** Squashes all instructions younger than a specific sequence number. */
    void squash(const InstSeqNum &squashed_num);

    /** Returns whether there is a memory ordering violation. The value is
     * reset upon a call to getMemDepViolator().
     */
    bool violation() { return memDepViolator; }

    /** Returns the memory ordering violator. */
    DynInstPtr getMemDepViolator();

    /** Returns whether a load became blocked due to the memory system. The
     * flag is cleared when this is called.
     */
    inline bool loadBlocked();

    /** Returns the number of free entries (min of free LQ and SQ entries). */
    unsigned numFreeEntries();

    /** Returns the number of loads ready to execute. */
    int numLoadsReady();

    /** Returns the number of loads in the LQ. */
    int numLoads() { return loads; }

    /** Returns the number of stores in the SQ. */
    int numStores() { return stores; }

    /** Returns whether either the LQ or SQ is full. */
    bool isFull() { return lqFull() || sqFull(); }

    /** Returns whether the LQ is full. */
    bool lqFull() { return loads >= (LQEntries - 1); }

    /** Returns whether the SQ is full. */
    bool sqFull() { return stores >= (SQEntries - 1); }
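    // Note: both full checks above leave one slot unused (>= Entries - 1);
    // presumably this reserves a sentinel entry so a full circular queue can
    // be distinguished from an empty one.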

    /** Debugging function to dump instructions in the LSQ. */
    void dumpInsts();

    /** Returns the number of instructions in the LSQ. */
    unsigned getCount() { return loads + stores; }

    /** Returns whether there are any stores to write back. */
    bool hasStoresToWB() { return storesToWB; }

    /** Returns the number of stores to write back. */
    int numStoresToWB() { return storesToWB; }

    /** Returns whether the LSQ unit will write back on this cycle. */
    bool willWB() { return storeQueue[storeWBIdx].canWB &&
                        !storeQueue[storeWBIdx].completed &&
                        !dcacheInterface->isBlocked(); }

  private:
    /** Completes the store at the specified index. */
    void completeStore(int store_idx);

    /** Increments the given store index (circular queue). */
    inline void incrStIdx(int &store_idx);
    /** Decrements the given store index (circular queue). */
    inline void decrStIdx(int &store_idx);
    /** Increments the given load index (circular queue). */
    inline void incrLdIdx(int &load_idx);
    /** Decrements the given load index (circular queue). */
    inline void decrLdIdx(int &load_idx);
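    // These helpers are defined out of line; conceptually the wrap-around is
    // equivalent to this sketch (illustrative only):
    //   incrStIdx: if (++store_idx >= SQEntries) store_idx = 0;
    //   decrStIdx: if (--store_idx < 0)          store_idx += SQEntries;
    // and likewise for the load-queue indices with LQEntries.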

  private:
    /** Pointer to the CPU. */
    FullCPU *cpu;

    /** Pointer to the back-end stage. */
    BackEnd *be;

    /** Pointer to the D-cache. */
    MemInterface *dcacheInterface;

    /** Pointer to the page table. */
    PageTable *pTable;

  public:
    struct SQEntry {
        /** Constructs an empty store queue entry. */
        SQEntry()
            : inst(NULL), req(NULL), size(0), data(0),
              canWB(0), committed(0), completed(0)
        { }

        /** Constructs a store queue entry for a given instruction. */
        SQEntry(DynInstPtr &_inst)
            : inst(_inst), req(NULL), size(0), data(0),
              canWB(0), committed(0), completed(0)
        { }

        /** The store instruction. */
        DynInstPtr inst;
        /** The memory request for the store. */
        MemReqPtr req;
        /** The size of the store. */
        int size;
        /** The store data. */
        IntReg data;
        /** Whether or not the store can writeback. */
        bool canWB;
        /** Whether or not the store is committed. */
        bool committed;
        /** Whether or not the store is completed. */
        bool completed;
    };

    enum Status {
        Running,
        Idle,
        DcacheMissStall,
        DcacheMissSwitch
    };

  private:
    /** The OzoneLSQ thread id. */
    unsigned lsqID;

    /** The status of the LSQ unit. */
    Status _status;

    /** The store queue. */
    std::vector<SQEntry> storeQueue;

    /** The load queue. */
    std::vector<DynInstPtr> loadQueue;

    // Consider making these 16 bits
    /** The number of LQ entries. */
    unsigned LQEntries;
    /** The number of SQ entries. */
    unsigned SQEntries;

    /** The number of load instructions in the LQ. */
    int loads;
    /** The number of store instructions in the SQ (excludes those waiting to
     * writeback).
     */
    int stores;
    /** The number of store instructions in the SQ waiting to writeback. */
    int storesToWB;

    /** The index of the head instruction in the LQ. */
    int loadHead;
    /** The index of the tail instruction in the LQ. */
    int loadTail;

    /** The index of the head instruction in the SQ. */
    int storeHead;
    /** The index of the first instruction that is ready to be written back,
     * and has not yet been written back.
     */
    int storeWBIdx;
    /** The index of the tail instruction in the SQ. */
    int storeTail;

    /// @todo Consider moving to a more advanced model with write vs read ports
    /** The number of cache ports available each cycle. */
    int cachePorts;

    /** The number of cache ports used in this cycle. */
    int usedPorts;

    //list<InstSeqNum> mshrSeqNums;

    //Stats::Scalar<> dcacheStallCycles;
    Counter lastDcacheStall;

    /** Wire to read information from the issue stage time queue. */
    typename TimeBuffer<IssueStruct>::wire fromIssue;

    // Make these per thread?
    /** Whether or not the LSQ is stalled. */
    bool stalled;
    /** The store that causes the stall due to partial store-to-load
     * forwarding.
     */
    InstSeqNum stallingStoreIsn;
    /** The index of the load stalled by the above store. */
    int stallingLoadIdx;

    /** Whether or not a load is blocked due to the memory system. The flag
     * is cleared when it is checked via loadBlocked().
     */
    bool isLoadBlocked;

    /** The oldest faulting load instruction. */
    DynInstPtr loadFaultInst;
    /** The oldest faulting store instruction. */
    DynInstPtr storeFaultInst;

    /** The oldest load that caused a memory ordering violation. */
    DynInstPtr memDepViolator;

    // Will also need how many read/write ports the Dcache has. Or keep track
    // of that in the stage that is one level up, and only call
    // executeLoad/Store the appropriate number of times.

  public:
    /** Performs the memory access (or store forwarding) for the load at the
     * given index. */
    template <class T>
    Fault read(MemReqPtr &req, T &data, int load_idx);

    /** Writes the data for the store at the given index into the SQ. */
    template <class T>
    Fault write(MemReqPtr &req, T &data, int store_idx);

    /** Returns the index of the head load instruction. */
    int getLoadHead() { return loadHead; }
    /** Returns the sequence number of the head load instruction. */
    InstSeqNum getLoadHeadSeqNum()
    {
        if (loadQueue[loadHead]) {
            return loadQueue[loadHead]->seqNum;
        } else {
            return 0;
        }
    }

    /** Returns the index of the head store instruction. */
    int getStoreHead() { return storeHead; }
    /** Returns the sequence number of the head store instruction. */
    InstSeqNum getStoreHeadSeqNum()
    {
        if (storeQueue[storeHead].inst) {
            return storeQueue[storeHead].inst->seqNum;
        } else {
            return 0;
        }
    }

    /** Returns whether or not the LSQ unit is stalled. */
    bool isStalled() { return stalled; }
};

template <class Impl>
template <class T>
Fault
OzoneLSQ<Impl>::read(MemReqPtr &req, T &data, int load_idx)
{
    // Depending on the issue-to-execute delay, a squashed load could be
    // scheduled to execute in the same cycle it is found to be squashed.
    assert(loadQueue[load_idx]);

    if (loadQueue[load_idx]->isExecuted()) {
        panic("Should not reach this point with split ops!");

        memcpy(&data, req->data, req->size);

        return NoFault;
    }

    // Make sure this isn't an uncacheable access
    // A bit of a hackish way to get uncached accesses to work only if they're
    // at the head of the LSQ and are ready to commit (at the head of the ROB
    // too).
    // @todo: Fix uncached accesses.
    if (req->isUncacheable() &&
        (load_idx != loadHead || !loadQueue[load_idx]->readyToCommit())) {

        return TheISA::genMachineCheckFault();
    }

    // Check the SQ for any previous stores that might lead to forwarding
    int store_idx = loadQueue[load_idx]->sqIdx;

    int store_size = 0;

    DPRINTF(OzoneLSQ, "Read called, load idx: %i, store idx: %i, "
            "storeHead: %i addr: %#x\n",
            load_idx, store_idx, storeHead, req->paddr);

    while (store_idx != -1) {
        // End once we've scanned back to the oldest store yet to write back
        if (store_idx == storeWBIdx) {
            break;
        }

        // Move the index to the next older store
        if (--store_idx < 0)
            store_idx += SQEntries;

        assert(storeQueue[store_idx].inst);

        store_size = storeQueue[store_idx].size;

        if (store_size == 0)
            continue;

        // Check if the store data is within the lower and upper bounds of
        // addresses that the request needs.
        bool store_has_lower_limit =
            req->vaddr >= storeQueue[store_idx].inst->effAddr;
        bool store_has_upper_limit =
            (req->vaddr + req->size) <= (storeQueue[store_idx].inst->effAddr +
                                         store_size);
        bool lower_load_has_store_part =
            req->vaddr < (storeQueue[store_idx].inst->effAddr +
                          store_size);
        bool upper_load_has_store_part =
            (req->vaddr + req->size) > storeQueue[store_idx].inst->effAddr;
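
        // Example (illustrative): an 8-byte store at effAddr 0x100 with a
        // 4-byte load at vaddr 0x104 satisfies both limit checks, so the
        // store fully covers the load; an 8-byte load at 0x104 satisfies
        // only the lower limit and overlaps partially, which is the
        // stall-and-replay case handled further below.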

        // If the store's data has all of the data needed, we can forward.
        if (store_has_lower_limit && store_has_upper_limit) {

            int shift_amt = req->vaddr & (store_size - 1);
            // Assumes byte addressing
            shift_amt = shift_amt << 3;
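            // Continuing the example above: a covered 4-byte load at 0x104
            // against an aligned 8-byte store at 0x100 gives shift_amt = 4
            // bytes = 32 bits, so the shift below pulls the load's bytes out
            // of the store's data (this assumes a little-endian data layout
            // and a power-of-two, naturally aligned store).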

            // Cast this to type T?
            data = storeQueue[store_idx].data >> shift_amt;

            req->cmd = Read;
            assert(!req->completionEvent);
            req->completionEvent = NULL;
            req->time = curTick;
            assert(!req->data);
            req->data = new uint8_t[64];

            memcpy(req->data, &data, req->size);

            DPRINTF(OzoneLSQ, "Forwarding from store idx %i to load to "
                    "addr %#x, data %#x\n",
                    store_idx, req->vaddr, *(req->data));

            typename BackEnd::LdWritebackEvent *wb =
                new typename BackEnd::LdWritebackEvent(loadQueue[load_idx],
                                                       be);

            // We'll say this has a 1 cycle load-store forwarding latency
            // for now.
            // FIXME - Need to make this a parameter.
            wb->schedule(curTick);

            // Should keep track of stat for forwarded data
            return NoFault;
        } else if ((store_has_lower_limit && lower_load_has_store_part) ||
                   (store_has_upper_limit && upper_load_has_store_part) ||
                   (lower_load_has_store_part && upper_load_has_store_part)) {
            // This is the partial store-load forwarding case where a store
            // has only part of the load's data.

            // If it's already been written back, then don't worry about
            // stalling on it.
            if (storeQueue[store_idx].completed) {
                continue;
            }

            // Must stall load and force it to retry, so long as it's the
            // oldest load that needs to do so.
            if (!stalled ||
                (stalled &&
                 loadQueue[load_idx]->seqNum <
                 loadQueue[stallingLoadIdx]->seqNum)) {
                stalled = true;
                stallingStoreIsn = storeQueue[store_idx].inst->seqNum;
                stallingLoadIdx = load_idx;
            }

            // Tell IQ/mem dep unit that this instruction will need to be
            // rescheduled eventually
            be->rescheduleMemInst(loadQueue[load_idx]);

            DPRINTF(OzoneLSQ, "Load-store forwarding mis-match. "
                    "Store idx %i to load addr %#x\n",
                    store_idx, req->vaddr);

            return NoFault;
        }
    }

    // If there's no forwarding case, then go access memory
    DynInstPtr inst = loadQueue[load_idx];

    ++usedPorts;

    // If we have a cache, do the cache access
    if (dcacheInterface) {
        if (dcacheInterface->isBlocked()) {
            isLoadBlocked = true;
            // No fault occurred, even though the interface is blocked.
            return NoFault;
        }

        DPRINTF(OzoneLSQ, "D-cache: PC:%#x reading from paddr:%#x "
                "vaddr:%#x flags:%i\n",
                inst->readPC(), req->paddr, req->vaddr, req->flags);

        // Setup MemReq pointer
        req->cmd = Read;
        req->completionEvent = NULL;
        req->time = curTick;
        assert(!req->data);
        req->data = new uint8_t[64];

        assert(!req->completionEvent);
        typedef typename BackEnd::LdWritebackEvent LdWritebackEvent;

        LdWritebackEvent *wb = new LdWritebackEvent(loadQueue[load_idx], be);

        req->completionEvent = wb;

        // Do Cache Access
        MemAccessResult result = dcacheInterface->access(req);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss. We really should add first-class support for this
        // at some point.
        // @todo: Probably should support having no events
        if (result != MA_HIT) {
            DPRINTF(OzoneLSQ, "D-cache miss!\n");
            DPRINTF(Activity, "Activity: ld accessing mem miss [sn:%lli]\n",
                    inst->seqNum);

            lastDcacheStall = curTick;

            _status = DcacheMissStall;

            wb->setDcacheMiss();

        } else {
            // DPRINTF(Activity, "Activity: ld accessing mem hit [sn:%lli]\n",
            //         inst->seqNum);

            DPRINTF(OzoneLSQ, "D-cache hit!\n");
        }
    } else {
        fatal("Must use D-cache with new memory system");
    }

    return NoFault;
}

template <class Impl>
template <class T>
Fault
OzoneLSQ<Impl>::write(MemReqPtr &req, T &data, int store_idx)
{
    assert(storeQueue[store_idx].inst);

    DPRINTF(OzoneLSQ, "Doing write to store idx %i, addr %#x data %#x"
            " | storeHead:%i [sn:%i]\n",
            store_idx, req->paddr, data, storeHead,
            storeQueue[store_idx].inst->seqNum);

    storeQueue[store_idx].req = req;
    storeQueue[store_idx].size = sizeof(T);
    storeQueue[store_idx].data = data;

    // This function only writes the data to the store queue, so no fault
    // can happen here.
    return NoFault;
}

template <class Impl>
inline bool
OzoneLSQ<Impl>::loadBlocked()
{
    bool ret_val = isLoadBlocked;
    isLoadBlocked = false;
    return ret_val;
}

#endif // __CPU_OZONE_LSQ_UNIT_HH__