Merge zizzer:/bk/newmem
[gem5.git] / src / cpu / ozone / inorder_back_end.hh
1 /*
2 * Copyright (c) 2006 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Kevin Lim
29 */
30
31 #ifndef __CPU_OZONE_INORDER_BACK_END_HH__
32 #define __CPU_OZONE_INORDER_BACK_END_HH__
33
34 #include <list>
35
36 #include "arch/faults.hh"
37 #include "base/timebuf.hh"
38 #include "cpu/thread_context.hh"
39 #include "cpu/inst_seq.hh"
40 #include "cpu/ozone/rename_table.hh"
41 #include "cpu/ozone/thread_state.hh"
42 #include "mem/request.hh"
43 #include "sim/eventq.hh"
44
/// In-order back end for the Ozone CPU model.  Receives instructions from
/// the front end, executes them in program order, and stalls the pipeline
/// on D-cache misses until the DCacheCompletionEvent fires.
template <class Impl>
class InorderBackEnd
{
  public:
    typedef typename Impl::Params Params;
    typedef typename Impl::DynInstPtr DynInstPtr;
    typedef typename Impl::FullCPU FullCPU;
    typedef typename Impl::FrontEnd FrontEnd;

    typedef typename FullCPU::OzoneTC OzoneTC;
    typedef typename Impl::FullCPU::CommStruct CommStruct;

    InorderBackEnd(Params *params);

    std::string name() const;

    // Back-pointer setters, wired up during CPU construction.
    void setCPU(FullCPU *cpu_ptr)
    { cpu = cpu_ptr; }

    void setFrontEnd(FrontEnd *front_end_ptr)
    { frontEnd = front_end_ptr; }

    void setCommBuffer(TimeBuffer<CommStruct> *_comm)
    { comm = _comm; }

    void setTC(ThreadContext *tc_ptr);

    void setThreadState(OzoneThreadState<Impl> *thread_ptr);

    // The in-order back end registers no statistics of its own
    // (dcacheStallCycles below is declared but not registered here).
    void regStats() { }

#if FULL_SYSTEM
    void checkInterrupts();
#endif

    void tick();
    void executeInsts();
    // Squash all instructions younger than squash_num and redirect
    // execution to next_PC.
    void squash(const InstSeqNum &squash_num, const Addr &next_PC);

    void squashFromXC();
    void generateXCEvent() { }

    // The in-order model's instruction list doubles as its "ROB".
    bool robEmpty() { return instList.empty(); }

    bool isFull() { return false; }
    // Blocked while stalled on a D-cache miss (load or store) or while an
    // interrupt is pending.
    bool isBlocked() { return status == DcacheMissStoreStall ||
                           status == DcacheMissLoadStall ||
                           interruptBlocked; }

    void fetchFault(Fault &fault);

    void dumpInsts();

  private:
    void handleFault();

    void setSquashInfoFromTC();

    bool squashPending;
    InstSeqNum squashSeqNum;
    Addr squashNextPC;

    // Fault delivered by the front end via fetchFault().
    Fault faultFromFetch;

    bool interruptBlocked;

  public:
    // Memory access interface used by executing instructions.  The
    // (Addr, flags) overloads drive the member request `memReq`; the
    // (RequestPtr, idx) overloads operate on a caller-supplied request.
    template <class T>
    Fault read(Addr addr, T &data, unsigned flags);

    template <class T>
    Fault read(RequestPtr req, T &data, int load_idx);

    template <class T>
    Fault write(T data, Addr addr, unsigned flags, uint64_t *res);

    template <class T>
    Fault write(RequestPtr req, T &data, int store_idx);

    Addr readCommitPC() { return commitPC; }

    Addr commitPC;

    // Draining/switch-out is not supported by this back end.
    void switchOut() { panic("Not implemented!"); }
    void doSwitchOut() { panic("Not implemented!"); }
    void takeOverFrom(ThreadContext *old_tc = NULL) { panic("Not implemented!"); }

  public:
    FullCPU *cpu;

    FrontEnd *frontEnd;

    ThreadContext *tc;

    OzoneThreadState<Impl> *thread;

    RenameTable<Impl> renameTable;

  protected:
    enum Status {
        Running,
        Idle,
        DcacheMissLoadStall,
        DcacheMissStoreStall,
        DcacheMissComplete,
        Blocked
    };

    Status status;

    // Event scheduled when a D-cache access misses; wakes the back end
    // once the access completes.
    class DCacheCompletionEvent : public Event
    {
      private:
        InorderBackEnd *be;

      public:
        DCacheCompletionEvent(InorderBackEnd *_be);

        virtual void process();
        virtual const char *description();

        DynInstPtr inst;
    };

    friend class DCacheCompletionEvent;

    DCacheCompletionEvent cacheCompletionEvent;

    // NOTE(review): dcacheInterface is commented out here but is still
    // referenced by the template member definitions below — those
    // definitions appear bit-rotted; confirm before reviving this model.
    // MemInterface *dcacheInterface;

    RequestPtr memReq;

  private:
    typedef typename std::list<DynInstPtr>::iterator InstListIt;

    std::list<DynInstPtr> instList;

    // General back end width. Used if the more specific isn't given.
    int width;

    int latency;

    int squashLatency;

    TimeBuffer<int> numInstsToWB;
    TimeBuffer<int>::wire instsAdded;
    TimeBuffer<int>::wire instsToExecute;

    TimeBuffer<CommStruct> *comm;
    // number of cycles stalled for D-cache misses
    Stats::Scalar<> dcacheStallCycles;
    Counter lastDcacheStall;
};
198
/// Translate and issue a load of sizeof(T) bytes at `addr` through the
/// member request `memReq`.  On a D-cache miss the back end stalls
/// (DcacheMissLoadStall) and schedules cacheCompletionEvent.
///
/// NOTE(review): `data` is never written by this function — the hit path
/// only logs "Dcache hit!" and the commented "do functional access" was
/// never implemented.  Confirm where the loaded value is supposed to be
/// produced before using this path.
///
/// @return the translation fault, or NoFault.
template <class Impl>
template <class T>
Fault
InorderBackEnd<Impl>::read(Addr addr, T &data, unsigned flags)
{
    memReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    Fault fault = cpu->translateDataReadReq(memReq);

    // if we have a cache, do cache access too
    if (fault == NoFault && dcacheInterface) {
        memReq->cmd = Read;
        memReq->completionEvent = NULL;
        memReq->time = curTick;
        // Mark this as a data (not instruction) access.
        memReq->flags &= ~INST_READ;
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss. We really should add first-class support for this
        // at some point.
        if (result != MA_HIT) {
            // Fix this hack for keeping funcExeInst correct with loads that
            // are executed twice.
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick;
            // unscheduleTickEvent();
            status = DcacheMissLoadStall;
            DPRINTF(IBE, "Dcache miss stall!\n");
        } else {
            // do functional access
            DPRINTF(IBE, "Dcache hit!\n");
        }
    }
    /*
    if (!dcacheInterface && (memReq->flags & UNCACHEABLE))
        recordEvent("Uncached Read");
    */
    return fault;
}
// Disabled legacy implementation that read through the old MemReqPtr
// memory interface; kept for reference only.
#if 0
template <class Impl>
template <class T>
Fault
InorderBackEnd<Impl>::read(MemReqPtr &req, T &data)
{
#if FULL_SYSTEM && defined(TARGET_ALPHA)
    // Load-locked: record the locked address and set the lock flag so a
    // later store-conditional can be validated against them.
    if (req->flags & LOCKED) {
        req->xc->setMiscReg(TheISA::Lock_Addr_DepTag, req->paddr);
        req->xc->setMiscReg(TheISA::Lock_Flag_DepTag, true);
    }
#endif

    Fault error;
    error = thread->mem->read(req, data);
    // Convert the loaded value from guest (little-endian) to host order.
    data = LittleEndianGuest::gtoh(data);
    return error;
}
#endif
258
/// Translate and issue a store of sizeof(T) bytes to `addr` through the
/// member request `memReq`.  On a D-cache miss the back end stalls
/// (DcacheMissStoreStall) and schedules cacheCompletionEvent.
///
/// NOTE(review): the store payload is never copied into the request —
/// the memcpy into memReq->data is commented out below.  Confirm where
/// the data is supposed to reach memory before using this path.
///
/// @param res if non-null and translation succeeded, receives
///            memReq->result (e.g. store-conditional outcome).
/// @return the translation fault, or NoFault.
template <class Impl>
template <class T>
Fault
InorderBackEnd<Impl>::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    memReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    Fault fault = cpu->translateDataWriteReq(memReq);

    if (fault == NoFault && dcacheInterface) {
        memReq->cmd = Write;
        // memcpy(memReq->data,(uint8_t *)&data,memReq->size);
        memReq->completionEvent = NULL;
        memReq->time = curTick;
        // Mark this as a data (not instruction) access.
        memReq->flags &= ~INST_READ;
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss. We really should add first-class support for this
        // at some point.
        if (result != MA_HIT) {
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick;
            // unscheduleTickEvent();
            status = DcacheMissStoreStall;
            DPRINTF(IBE, "Dcache miss stall!\n");
        } else {
            DPRINTF(IBE, "Dcache hit!\n");
        }
    }

    if (res && (fault == NoFault))
        *res = memReq->result;
    /*
    if (!dcacheInterface && (memReq->flags & UNCACHEABLE))
        recordEvent("Uncached Write");
    */
    return fault;
}
// Disabled legacy implementation that wrote through the old MemReqPtr
// memory interface, including Alpha store-conditional handling; kept for
// reference only.
#if 0
template <class Impl>
template <class T>
Fault
InorderBackEnd<Impl>::write(MemReqPtr &req, T &data)
{
#if FULL_SYSTEM && defined(TARGET_ALPHA)
    ExecContext *xc;

    // If this is a store conditional, act appropriately
    if (req->flags & LOCKED) {
        xc = req->xc;

        if (req->flags & UNCACHEABLE) {
            // Don't update result register (see stq_c in isa_desc)
            req->result = 2;
            xc->setStCondFailures(0);//Needed? [RGD]
        } else {
            // The SC succeeds only if the lock flag is still set and the
            // locked address matches this store's cache block.
            bool lock_flag = xc->readMiscReg(TheISA::Lock_Flag_DepTag);
            Addr lock_addr = xc->readMiscReg(TheISA::Lock_Addr_DepTag);
            req->result = lock_flag;
            if (!lock_flag ||
                ((lock_addr & ~0xf) != (req->paddr & ~0xf))) {
                xc->setMiscReg(TheISA::Lock_Flag_DepTag, false);
                xc->setStCondFailures(xc->readStCondFailures() + 1);
                if (((xc->readStCondFailures()) % 100000) == 0) {
                    std::cerr << "Warning: "
                              << xc->readStCondFailures()
                              << " consecutive store conditional failures "
                              << "on cpu " << req->xc->readCpuId()
                              << std::endl;
                }
                return NoFault;
            }
            else xc->setStCondFailures(0);
        }
    }

    // Need to clear any locked flags on other proccessors for
    // this address. Only do this for succsful Store Conditionals
    // and all other stores (WH64?). Unsuccessful Store
    // Conditionals would have returned above, and wouldn't fall
    // through.
    for (int i = 0; i < cpu->system->execContexts.size(); i++){
        xc = cpu->system->execContexts[i];
        if ((xc->readMiscReg(TheISA::Lock_Addr_DepTag) & ~0xf) ==
            (req->paddr & ~0xf)) {
            xc->setMiscReg(TheISA::Lock_Flag_DepTag, false);
        }
    }

#endif
    // Convert to guest (little-endian) byte order before writing.
    return thread->mem->write(req, (T)LittleEndianGuest::htog(data));
}
#endif
354
355 template <class Impl>
356 template <class T>
357 Fault
358 InorderBackEnd<Impl>::read(MemReqPtr &req, T &data, int load_idx)
359 {
360 // panic("Unimplemented!");
361 // memReq->reset(addr, sizeof(T), flags);
362
363 // translate to physical address
364 // Fault fault = cpu->translateDataReadReq(req);
365 req->cmd = Read;
366 req->completionEvent = NULL;
367 req->time = curTick;
368 assert(!req->data);
369 req->data = new uint8_t[64];
370 req->flags &= ~INST_READ;
371 Fault fault = cpu->read(req, data);
372 memcpy(req->data, &data, sizeof(T));
373
374 // if we have a cache, do cache access too
375 if (dcacheInterface) {
376 MemAccessResult result = dcacheInterface->access(req);
377
378 // Ugly hack to get an event scheduled *only* if the access is
379 // a miss. We really should add first-class support for this
380 // at some point.
381 if (result != MA_HIT) {
382 req->completionEvent = &cacheCompletionEvent;
383 lastDcacheStall = curTick;
384 // unscheduleTickEvent();
385 status = DcacheMissLoadStall;
386 DPRINTF(IBE, "Dcache miss load stall!\n");
387 } else {
388 DPRINTF(IBE, "Dcache hit!\n");
389
390 }
391 }
392
393 /*
394 if (!dcacheInterface && (req->flags & UNCACHEABLE))
395 recordEvent("Uncached Read");
396 */
397 return NoFault;
398 }
399
400 template <class Impl>
401 template <class T>
402 Fault
403 InorderBackEnd<Impl>::write(MemReqPtr &req, T &data, int store_idx)
404 {
405 // req->reset(addr, sizeof(T), flags);
406
407 // translate to physical address
408 // Fault fault = cpu->translateDataWriteReq(req);
409
410 req->cmd = Write;
411 req->completionEvent = NULL;
412 req->time = curTick;
413 assert(!req->data);
414 req->data = new uint8_t[64];
415 memcpy(req->data, (uint8_t *)&data, req->size);
416
417 switch(req->size) {
418 case 1:
419 cpu->write(req, (uint8_t &)data);
420 break;
421 case 2:
422 cpu->write(req, (uint16_t &)data);
423 break;
424 case 4:
425 cpu->write(req, (uint32_t &)data);
426 break;
427 case 8:
428 cpu->write(req, (uint64_t &)data);
429 break;
430 default:
431 panic("Unexpected store size!\n");
432 }
433
434 if (dcacheInterface) {
435 req->cmd = Write;
436 req->data = new uint8_t[64];
437 memcpy(req->data,(uint8_t *)&data,req->size);
438 req->completionEvent = NULL;
439 req->time = curTick;
440 req->flags &= ~INST_READ;
441 MemAccessResult result = dcacheInterface->access(req);
442
443 // Ugly hack to get an event scheduled *only* if the access is
444 // a miss. We really should add first-class support for this
445 // at some point.
446 if (result != MA_HIT) {
447 req->completionEvent = &cacheCompletionEvent;
448 lastDcacheStall = curTick;
449 // unscheduleTickEvent();
450 status = DcacheMissStoreStall;
451 DPRINTF(IBE, "Dcache miss store stall!\n");
452 } else {
453 DPRINTF(IBE, "Dcache hit!\n");
454
455 }
456 }
457 /*
458 if (req->flags & LOCKED) {
459 if (req->flags & UNCACHEABLE) {
460 // Don't update result register (see stq_c in isa_desc)
461 req->result = 2;
462 } else {
463 req->result = 1;
464 }
465 }
466 */
467 /*
468 if (res && (fault == NoFault))
469 *res = req->result;
470 */
471 /*
472 if (!dcacheInterface && (req->flags & UNCACHEABLE))
473 recordEvent("Uncached Write");
474 */
475 return NoFault;
476 }
477
478 #endif // __CPU_OZONE_INORDER_BACK_END_HH__