/*
 * Copyright (c) 2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __CPU_OOO_CPU_OOO_CPU_HH__
#define __CPU_OOO_CPU_OOO_CPU_HH__

#include "base/statistics.hh"
#include "config/full_system.hh"
#include "cpu/base.hh"
#include "cpu/exec_context.hh"
#include "encumbered/cpu/full/fu_pool.hh"
#include "cpu/ooo_cpu/ea_list.hh"
#include "cpu/pc_event.hh"
#include "cpu/static_inst.hh"
#include "mem/mem_interface.hh"
#include "sim/eventq.hh"

// forward declarations
#if FULL_SYSTEM
class Processor;
class AlphaITB;
class AlphaDTB;
class PhysicalMemory;

class RemoteGDB;
class GDBListener;

#else

class Process;

#endif // FULL_SYSTEM

class Checkpoint;
class MemInterface;

namespace Trace {
    class InstRecord;
}

/**
 * Declaration of the out-of-order CPU class. It is essentially a SimpleCPU
 * with simple out-of-order capabilities added: it remains (roughly) a 1 CPI
 * machine, but it is capable of handling cache misses. It approximates a
 * ROB/IQ by allowing only a limited number of instructions to execute while
 * a cache miss is outstanding.
 */

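// Illustrative sketch of the intended behavior (not a precise statement of
// the policy implemented in tick()): on a data-cache miss the CPU keeps
// executing younger instructions until the number in flight reaches the
// ROB size (robSize/robInsts below), then stalls in DcacheMissStall until
// the corresponding DCacheCompletionEvent fires.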
template <class Impl>
class OoOCPU : public BaseCPU
{
  private:
    typedef typename Impl::DynInst DynInst;
    typedef typename Impl::DynInstPtr DynInstPtr;

  public:
    // main simulation loop (one cycle)
    void tick();

  private:
    struct TickEvent : public Event
    {
        OoOCPU *cpu;
        int width;

        TickEvent(OoOCPU *c, int w);
        void process();
        const char *description();
    };

    TickEvent tickEvent;

    /// Schedule tick event, regardless of its current state.
    void scheduleTickEvent(int delay)
    {
        if (tickEvent.squashed())
            tickEvent.reschedule(curTick + delay);
        else if (!tickEvent.scheduled())
            tickEvent.schedule(curTick + delay);
    }

    /// Unschedule tick event, regardless of its current state.
    void unscheduleTickEvent()
    {
        if (tickEvent.scheduled())
            tickEvent.squash();
    }

  private:
    Trace::InstRecord *traceData;

    template<typename T>
    void trace_data(T data);

  public:
    //
    enum Status {
        Running,
        Idle,
        IcacheMiss,
        IcacheMissComplete,
        DcacheMissStall,
        SwitchedOut
    };

  private:
    Status _status;

  public:
    void post_interrupt(int int_num, int index);

    void zero_fill_64(Addr addr) {
        static int warned = 0;
        if (!warned) {
            warn("WH64 is not implemented");
            warned = 1;
        }
    }

    struct Params : public BaseCPU::Params
    {
        MemInterface *icache_interface;
        MemInterface *dcache_interface;
        int width;
#if FULL_SYSTEM
        AlphaITB *itb;
        AlphaDTB *dtb;
        FunctionalMemory *mem;
#else
        Process *process;
#endif
        int issueWidth;
    };

    OoOCPU(Params *params);

    virtual ~OoOCPU();

    void init();

  private:
    void copyFromXC();

  public:
    // execution context
    ExecContext *xc;

    void switchOut();
    void takeOverFrom(BaseCPU *oldCPU);

#if FULL_SYSTEM
    Addr dbg_vtophys(Addr addr);

    bool interval_stats;
#endif

    // L1 instruction cache
    MemInterface *icacheInterface;

    // L1 data cache
    MemInterface *dcacheInterface;

    FuncUnitPool *fuPool;

    // Refcounted pointer to the one memory request.
    MemReqPtr cacheMemReq;

    class ICacheCompletionEvent : public Event
    {
      private:
        OoOCPU *cpu;

      public:
        ICacheCompletionEvent(OoOCPU *_cpu);

        virtual void process();
        virtual const char *description();
    };

    // Will need to create a cache completion event upon any memory miss.
    ICacheCompletionEvent iCacheCompletionEvent;

    class DCacheCompletionEvent;

    typedef typename
        std::list<DCacheCompletionEvent>::iterator DCacheCompEventIt;

    class DCacheCompletionEvent : public Event
    {
      private:
        OoOCPU *cpu;
        DynInstPtr inst;
        DCacheCompEventIt dcceIt;

      public:
        DCacheCompletionEvent(OoOCPU *_cpu, DynInstPtr &_inst,
                              DCacheCompEventIt &_dcceIt);

        virtual void process();
        virtual const char *description();
    };

    friend class DCacheCompletionEvent;

  protected:
    std::list<DCacheCompletionEvent> dCacheCompList;
    DCacheCompEventIt dcceIt;

  private:
    Status status() const { return _status; }

    virtual void activateContext(int thread_num, int delay);
    virtual void suspendContext(int thread_num);
    virtual void deallocateContext(int thread_num);
    virtual void haltContext(int thread_num);

    // statistics
    virtual void regStats();
    virtual void resetStats();

    // number of simulated instructions
    Counter numInst;
    Counter startNumInst;
    Stats::Scalar<> numInsts;

    virtual Counter totalInstructions() const
    {
        return numInst - startNumInst;
    }

    // number of simulated memory references
    Stats::Scalar<> numMemRefs;

    // number of simulated loads
    Counter numLoad;
    Counter startNumLoad;

    // number of idle cycles
    Stats::Average<> notIdleFraction;
    Stats::Formula idleFraction;

    // number of cycles stalled for I-cache misses
    Stats::Scalar<> icacheStallCycles;
    Counter lastIcacheStall;

    // number of cycles stalled for D-cache misses
    Stats::Scalar<> dcacheStallCycles;
    Counter lastDcacheStall;

    void processICacheCompletion();

  public:

    virtual void serialize(std::ostream &os);
    virtual void unserialize(Checkpoint *cp, const std::string &section);

#if FULL_SYSTEM
    bool validInstAddr(Addr addr) { return true; }
    bool validDataAddr(Addr addr) { return true; }
    int getInstAsid() { return xc->regs.instAsid(); }
    int getDataAsid() { return xc->regs.dataAsid(); }

    Fault translateInstReq(MemReqPtr &req)
    {
        return itb->translate(req);
    }

    Fault translateDataReadReq(MemReqPtr &req)
    {
        return dtb->translate(req, false);
    }

    Fault translateDataWriteReq(MemReqPtr &req)
    {
        return dtb->translate(req, true);
    }

#else
    bool validInstAddr(Addr addr)
    { return xc->validInstAddr(addr); }

    bool validDataAddr(Addr addr)
    { return xc->validDataAddr(addr); }

    int getInstAsid() { return xc->asid; }
    int getDataAsid() { return xc->asid; }

    Fault dummyTranslation(MemReqPtr &req)
    {
#if 0
        assert((req->vaddr >> 48 & 0xffff) == 0);
#endif

        // put the asid in the upper 16 bits of the paddr
        req->paddr = req->vaddr & ~((Addr)0xffff << (sizeof(Addr) * 8 - 16));
        req->paddr = req->paddr | ((Addr)req->asid << (sizeof(Addr) * 8 - 16));
        return NoFault;
    }
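
    // Worked example (assuming a 64-bit Addr, so the shift amount is 48):
    // for vaddr = 0x0000123456789abc and asid = 0x7, the mask clears
    // bits 63:48 of the vaddr and the OR deposits the asid there, giving
    // paddr = 0x0007123456789abc. This keeps different address spaces
    // distinct in the memory system even though no real TLB is modeled
    // in syscall-emulation mode.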
    Fault translateInstReq(MemReqPtr &req)
    {
        return dummyTranslation(req);
    }
    Fault translateDataReadReq(MemReqPtr &req)
    {
        return dummyTranslation(req);
    }
    Fault translateDataWriteReq(MemReqPtr &req)
    {
        return dummyTranslation(req);
    }

#endif

    template <class T>
    Fault read(Addr addr, T &data, unsigned flags, DynInstPtr inst);

    template <class T>
    Fault write(T data, Addr addr, unsigned flags,
                uint64_t *res, DynInstPtr inst);

    void prefetch(Addr addr, unsigned flags)
    {
        // need to do this...
    }

    void writeHint(Addr addr, int size, unsigned flags)
    {
        // need to do this...
    }

    Fault copySrcTranslate(Addr src);

    Fault copy(Addr dest);

  private:
    bool executeInst(DynInstPtr &inst);

    void renameInst(DynInstPtr &inst);

    void addInst(DynInstPtr &inst);

    void commitHeadInst();

    bool getOneInst();

    Fault fetchCacheLine();

    InstSeqNum getAndIncrementInstSeq();

    bool ambigMemAddr;

  private:
    InstSeqNum globalSeqNum;

    DynInstPtr renameTable[TheISA::TotalNumRegs];
    DynInstPtr commitTable[TheISA::TotalNumRegs];

    // Might need a table of the shadow registers as well.
#if FULL_SYSTEM
    DynInstPtr palShadowTable[TheISA::NumIntRegs];
#endif

  public:
    // The register accessor methods provide the index of the
    // instruction's operand (e.g., 0 or 1), not the architectural
    // register index, to simplify the implementation of register
    // renaming. We find the architectural register index by indexing
    // into the instruction's own operand index table. Note that a
    // raw pointer to the StaticInst is provided instead of a
    // ref-counted StaticInstPtr to reduce overhead. This is fine as
    // long as these methods don't copy the pointer into any long-term
    // storage (which is pretty hard to imagine they would have reason
    // to do).

    // In the OoO case these shouldn't read from the XC but rather from the
    // rename table of DynInsts. Also these likely shouldn't be called very
    // often, other than when adding things into the xc during, say, a syscall.

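    // Illustrative example (a hypothetical Alpha instruction, shown only to
    // make the indexing concrete): for "addq r1, r2, r3", source operand
    // index 0 maps through si->srcRegIdx(0) to architectural register 1 and
    // index 1 maps to register 2, while si->destRegIdx(0) yields register 3.
    // So readIntReg(si, 0) below returns the current value of r1, not of
    // architectural register 0.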
    uint64_t readIntReg(StaticInst *si, int idx)
    {
        return xc->readIntReg(si->srcRegIdx(idx));
    }

    float readFloatRegSingle(StaticInst *si, int idx)
    {
        int reg_idx = si->srcRegIdx(idx) - TheISA::FP_Base_DepTag;
        return xc->readFloatRegSingle(reg_idx);
    }

    double readFloatRegDouble(StaticInst *si, int idx)
    {
        int reg_idx = si->srcRegIdx(idx) - TheISA::FP_Base_DepTag;
        return xc->readFloatRegDouble(reg_idx);
    }

    uint64_t readFloatRegInt(StaticInst *si, int idx)
    {
        int reg_idx = si->srcRegIdx(idx) - TheISA::FP_Base_DepTag;
        return xc->readFloatRegInt(reg_idx);
    }

    void setIntReg(StaticInst *si, int idx, uint64_t val)
    {
        xc->setIntReg(si->destRegIdx(idx), val);
    }

    void setFloatRegSingle(StaticInst *si, int idx, float val)
    {
        int reg_idx = si->destRegIdx(idx) - TheISA::FP_Base_DepTag;
        xc->setFloatRegSingle(reg_idx, val);
    }

    void setFloatRegDouble(StaticInst *si, int idx, double val)
    {
        int reg_idx = si->destRegIdx(idx) - TheISA::FP_Base_DepTag;
        xc->setFloatRegDouble(reg_idx, val);
    }

    void setFloatRegInt(StaticInst *si, int idx, uint64_t val)
    {
        int reg_idx = si->destRegIdx(idx) - TheISA::FP_Base_DepTag;
        xc->setFloatRegInt(reg_idx, val);
    }

    uint64_t readPC() { return PC; }
    void setNextPC(Addr val) { nextPC = val; }

  private:
    Addr PC;
    Addr nextPC;

    unsigned issueWidth;

    bool fetchRedirExcp;
    bool fetchRedirBranch;

    /** Mask to get a cache block's address. */
    Addr cacheBlkMask;

    unsigned cacheBlkSize;

    Addr cacheBlkPC;

    /** The cache line being fetched. */
    uint8_t *cacheData;

  protected:
    bool cacheBlkValid;

  private:

    // Align an address (typically a PC) to the start of an I-cache block.
    // We fold in the PISA 64- to 32-bit conversion here as well.
    Addr icacheBlockAlignPC(Addr addr)
    {
        addr = TheISA::realPCToFetchPC(addr);
        return (addr & ~(cacheBlkMask));
    }
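
    // Worked example (assuming a 64-byte block, i.e. cacheBlkMask == 0x3f):
    // a fetch PC of 0x1204c is masked down to 0x12040, the address of the
    // block that fetchCacheLine() will request from the I-cache.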

    unsigned instSize;

    // ROB tracking stuff.
    DynInstPtr robHeadPtr;
    DynInstPtr robTailPtr;
    unsigned robSize;
    unsigned robInsts;

    // List of outstanding EA instructions.
  protected:
    EAList eaList;

  public:
    void branchToTarget(Addr val)
    {
        if (!fetchRedirExcp) {
            fetchRedirBranch = true;
            PC = val;
        }
    }

    // ISA stuff:
    uint64_t readUniq() { return xc->readUniq(); }
    void setUniq(uint64_t val) { xc->setUniq(val); }

    uint64_t readFpcr() { return xc->readFpcr(); }
    void setFpcr(uint64_t val) { xc->setFpcr(val); }

#if FULL_SYSTEM
    uint64_t readIpr(int idx, Fault &fault) { return xc->readIpr(idx, fault); }
    Fault setIpr(int idx, uint64_t val) { return xc->setIpr(idx, val); }
    Fault hwrei() { return xc->hwrei(); }
    int readIntrFlag() { return xc->readIntrFlag(); }
    void setIntrFlag(int val) { xc->setIntrFlag(val); }
    bool inPalMode() { return xc->inPalMode(); }
    void ev5_trap(Fault fault) { xc->ev5_trap(fault); }
    bool simPalCheck(int palFunc) { return xc->simPalCheck(palFunc); }
#else
    void syscall() { xc->syscall(); }
#endif

    ExecContext *xcBase() { return xc; }
};



// precise architected memory state accessor methods
template <class Impl>
template <class T>
Fault
OoOCPU<Impl>::read(Addr addr, T &data, unsigned flags, DynInstPtr inst)
{
    MemReqPtr readReq = new MemReq();
    readReq->xc = xc;
    readReq->asid = 0;
    readReq->data = new uint8_t[64];

    readReq->reset(addr, sizeof(T), flags);

    // translate to physical address - this might be an ISA impl call
    Fault fault = translateDataReadReq(readReq);

    // do functional access
    if (fault == NoFault)
        fault = xc->mem->read(readReq, data);
#if 0
    if (traceData) {
        traceData->setAddr(addr);
        if (fault == NoFault)
            traceData->setData(data);
    }
#endif

    // if we have a cache, do cache access too
    if (fault == NoFault && dcacheInterface) {
        readReq->cmd = Read;
        readReq->completionEvent = NULL;
        readReq->time = curTick;
        /*MemAccessResult result = */dcacheInterface->access(readReq);

        if (dcacheInterface->doEvents()) {
            readReq->completionEvent = new DCacheCompletionEvent(this, inst,
                                                                 dcceIt);
        }
    }

    if (!dcacheInterface && (readReq->flags & UNCACHEABLE))
        recordEvent("Uncached Read");

    return fault;
}
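
// Example call site (a sketch only; the actual callers are the memory
// instructions' ISA-generated execute() methods, which pass in their own
// DynInst):
//
//     uint64_t memData;
//     Fault fault = cpu->read(effAddr, memData, 0, inst);
//
// The functional access fills memData immediately; if a D-cache model is
// attached, the timing side completes later via the DCacheCompletionEvent.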

template <class Impl>
template <class T>
Fault
OoOCPU<Impl>::write(T data, Addr addr, unsigned flags,
                    uint64_t *res, DynInstPtr inst)
{
    MemReqPtr writeReq = new MemReq();
    writeReq->xc = xc;
    writeReq->asid = 0;
    writeReq->data = new uint8_t[64];

#if 0
    if (traceData) {
        traceData->setAddr(addr);
        traceData->setData(data);
    }
#endif

    writeReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    Fault fault = translateDataWriteReq(writeReq);

    // do functional access
    if (fault == NoFault)
        fault = xc->write(writeReq, data);

    if (fault == NoFault && dcacheInterface) {
        writeReq->cmd = Write;
        memcpy(writeReq->data, (uint8_t *)&data, writeReq->size);
        writeReq->completionEvent = NULL;
        writeReq->time = curTick;
        /*MemAccessResult result = */dcacheInterface->access(writeReq);

        if (dcacheInterface->doEvents()) {
            writeReq->completionEvent = new DCacheCompletionEvent(this, inst,
                                                                  dcceIt);
        }
    }

    if (res && (fault == NoFault))
        *res = writeReq->result;

    if (!dcacheInterface && (writeReq->flags & UNCACHEABLE))
        recordEvent("Uncached Write");

    return fault;
}


#endif // __CPU_OOO_CPU_OOO_CPU_HH__