inorder cpu: add missing DPRINTF argument
[gem5.git] / src / cpu / ozone / inorder_back_end.hh
/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Kevin Lim
 */

#ifndef __CPU_OZONE_INORDER_BACK_END_HH__
#define __CPU_OZONE_INORDER_BACK_END_HH__

#include <list>

#include "cpu/inst_seq.hh"
#include "cpu/ozone/rename_table.hh"
#include "cpu/ozone/thread_state.hh"
#include "cpu/thread_context.hh"
#include "cpu/timebuf.hh"
#include "mem/request.hh"
#include "sim/eventq.hh"
#include "sim/faults.hh"

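/**
 * InorderBackEnd is a simple back end for the Ozone CPU model that
 * executes and commits instructions in program order.  It receives
 * instructions from the front end, runs them through a pipeline of
 * configurable width and latency, and stalls on data cache misses
 * until the outstanding access completes.
 */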
template <class Impl>
class InorderBackEnd
{
  public:
    typedef typename Impl::Params Params;
    typedef typename Impl::DynInstPtr DynInstPtr;
    typedef typename Impl::FullCPU FullCPU;
    typedef typename Impl::FrontEnd FrontEnd;

    typedef typename FullCPU::OzoneTC OzoneTC;
    typedef typename Impl::FullCPU::CommStruct CommStruct;

    InorderBackEnd(Params *params);

    std::string name() const;

    void setCPU(FullCPU *cpu_ptr)
    { cpu = cpu_ptr; }

    void setFrontEnd(FrontEnd *front_end_ptr)
    { frontEnd = front_end_ptr; }

    void setCommBuffer(TimeBuffer<CommStruct> *_comm)
    { comm = _comm; }

    void setTC(ThreadContext *tc_ptr);

    void setThreadState(OzoneThreadState<Impl> *thread_ptr);

    void regStats() { }

    void checkInterrupts();

    void tick();
    void executeInsts();
    void squash(const InstSeqNum &squash_num, const Addr &next_PC);

    void squashFromXC();
    void generateXCEvent() { }

    bool robEmpty() { return instList.empty(); }

    bool isFull() { return false; }
    bool isBlocked()
    {
        return status == DcacheMissStoreStall ||
            status == DcacheMissLoadStall ||
            interruptBlocked;
    }

    void fetchFault(Fault &fault);

    void dumpInsts();

  private:
    void handleFault();

    void setSquashInfoFromTC();

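    // Squash information recorded from the thread context; squashPending
    // marks that a squash still needs to be carried out.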
    bool squashPending;
    InstSeqNum squashSeqNum;
    Addr squashNextPC;

    Fault faultFromFetch;

    bool interruptBlocked;

  public:
    template <class T>
    Fault read(Addr addr, T &data, unsigned flags);

    template <class T>
    Fault read(RequestPtr req, T &data, int load_idx);

    template <class T>
    Fault write(T data, Addr addr, unsigned flags, uint64_t *res);

    template <class T>
    Fault write(RequestPtr req, T &data, int store_idx);

    Addr readCommitPC() { return commitPC; }

    Addr commitPC;

    void switchOut() { panic("Not implemented!"); }
    void doSwitchOut() { panic("Not implemented!"); }
    void takeOverFrom(ThreadContext *old_tc = NULL)
    { panic("Not implemented!"); }

  public:
    FullCPU *cpu;

    FrontEnd *frontEnd;

    ThreadContext *tc;

    OzoneThreadState<Impl> *thread;

    RenameTable<Impl> renameTable;

  protected:
    enum Status {
        Running,
        Idle,
        DcacheMissLoadStall,
        DcacheMissStoreStall,
        DcacheMissComplete,
        Blocked
    };

    Status status;

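    // Event used as the completion event for outstanding data cache
    // accesses; it fires when a miss completes so the back end can leave
    // its DcacheMiss*Stall state and resume execution.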
    class DCacheCompletionEvent : public Event
    {
      private:
        InorderBackEnd *be;

      public:
        DCacheCompletionEvent(InorderBackEnd *_be);

        virtual void process();
        virtual const char *description() const;

        DynInstPtr inst;
    };

    friend class DCacheCompletionEvent;

    DCacheCompletionEvent cacheCompletionEvent;

    // MemInterface *dcacheInterface;

    RequestPtr memReq;

  private:
    typedef typename std::list<DynInstPtr>::iterator InstListIt;

    std::list<DynInstPtr> instList;

    // General back end width. Used if the more specific isn't given.
    int width;

    int latency;

    int squashLatency;

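    // Time buffer used to model the back end's latency: instsAdded is the
    // write end and instsToExecute the read end, so instructions become
    // eligible to execute only after aging through the buffer.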
    TimeBuffer<int> numInstsToWB;
    TimeBuffer<int>::wire instsAdded;
    TimeBuffer<int>::wire instsToExecute;

    TimeBuffer<CommStruct> *comm;

    // Number of cycles stalled for D-cache misses.
    Stats::Scalar dcacheStallCycles;
    Counter lastDcacheStall;
};

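// Atomic-mode load helper: builds a request for the given address,
// translates it, and (when a data cache model is attached) issues the
// access, stalling the back end if the access misses.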
template <class Impl>
template <class T>
Fault
InorderBackEnd<Impl>::read(Addr addr, T &data, unsigned flags)
{
    memReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    Fault fault = cpu->dtb->translateAtomic(memReq, thread->getTC(), false);

    // if we have a cache, do cache access too
    if (fault == NoFault && dcacheInterface) {
        memReq->cmd = Read;
        memReq->completionEvent = NULL;
        memReq->time = curTick();
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss.  We really should add first-class support for this
        // at some point.
        if (result != MA_HIT) {
            // Fix this hack for keeping funcExeInst correct with loads that
            // are executed twice.
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick();
            // unscheduleTickEvent();
            status = DcacheMissLoadStall;
            DPRINTF(IBE, "Dcache miss stall!\n");
        } else {
            // do functional access
            DPRINTF(IBE, "Dcache hit!\n");
        }
    }
    return fault;
}

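// Atomic-mode store helper: mirrors read() above, but marks the request as
// a write, stalls on a store miss, and optionally returns the request's
// result through res.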
template <class Impl>
template <class T>
Fault
InorderBackEnd<Impl>::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    memReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    Fault fault = cpu->dtb->translateAtomic(memReq, thread->getTC(), true);

    if (fault == NoFault && dcacheInterface) {
        memReq->cmd = Write;
        // memcpy(memReq->data,(uint8_t *)&data,memReq->size);
        memReq->completionEvent = NULL;
        memReq->time = curTick();
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss.  We really should add first-class support for this
        // at some point.
        if (result != MA_HIT) {
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick();
            // unscheduleTickEvent();
            status = DcacheMissStoreStall;
            DPRINTF(IBE, "Dcache miss stall!\n");
        } else {
            DPRINTF(IBE, "Dcache hit!\n");
        }
    }

    if (res && (fault == NoFault))
        *res = memReq->result;
    return fault;
}

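// LSQ-style load: the request has already been set up by the caller
// (load_idx is currently unused), so perform the CPU read and then the
// optional data cache access.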
template <class Impl>
template <class T>
Fault
InorderBackEnd<Impl>::read(RequestPtr req, T &data, int load_idx)
{
    // panic("Unimplemented!");
    // memReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    // Fault fault = cpu->translateDataReadReq(req);
    req->cmd = Read;
    req->completionEvent = NULL;
    req->time = curTick();
    assert(!req->data);
    req->data = new uint8_t[64];
    Fault fault = cpu->read(req, data);
    memcpy(req->data, &data, sizeof(T));

    // if we have a cache, do cache access too
    if (dcacheInterface) {
        MemAccessResult result = dcacheInterface->access(req);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss.  We really should add first-class support for this
        // at some point.
        if (result != MA_HIT) {
            req->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick();
            // unscheduleTickEvent();
            status = DcacheMissLoadStall;
            DPRINTF(IBE, "Dcache miss load stall!\n");
        } else {
            DPRINTF(IBE, "Dcache hit!\n");
        }
    }

    return NoFault;
}

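// LSQ-style store: copies the data into the request, forwards the store to
// the CPU at the proper access size (store_idx is currently unused), and
// then performs the optional data cache access.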
template <class Impl>
template <class T>
Fault
InorderBackEnd<Impl>::write(RequestPtr req, T &data, int store_idx)
{
    // req->reset(addr, sizeof(T), flags);

    // translate to physical address
    // Fault fault = cpu->translateDataWriteReq(req);

    req->cmd = Write;
    req->completionEvent = NULL;
    req->time = curTick();
    assert(!req->data);
    req->data = new uint8_t[64];
    memcpy(req->data, (uint8_t *)&data, req->size);

    switch (req->size) {
      case 1:
        cpu->write(req, (uint8_t &)data);
        break;
      case 2:
        cpu->write(req, (uint16_t &)data);
        break;
      case 4:
        cpu->write(req, (uint32_t &)data);
        break;
      case 8:
        cpu->write(req, (uint64_t &)data);
        break;
      default:
        panic("Unexpected store size!\n");
    }

    if (dcacheInterface) {
        // The request's command, timing, and data were already set up
        // above, so just issue the cache access.
        MemAccessResult result = dcacheInterface->access(req);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss.  We really should add first-class support for this
        // at some point.
        if (result != MA_HIT) {
            req->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick();
            // unscheduleTickEvent();
            status = DcacheMissStoreStall;
            DPRINTF(IBE, "Dcache miss store stall!\n");
        } else {
            DPRINTF(IBE, "Dcache hit!\n");
        }
    }
/*
    if (req->isLLSC()) {
        if (req->isUncacheable()) {
            // Don't update result register (see stq_c in isa_desc)
            req->result = 2;
        } else {
            req->result = 1;
        }
    }
*/
/*
    if (res && (fault == NoFault))
        *res = req->result;
*/
    return NoFault;
}

#endif // __CPU_OZONE_INORDER_BACK_END_HH__