src/cpu/inorder/resource_pool.cc
/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2007 MIPS Technologies, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 *
 */

#include <list>
#include <vector>

#include "cpu/inorder/resources/resource_list.hh"
#include "cpu/inorder/resource_pool.hh"
#include "debug/Resource.hh"

using namespace std;
using namespace ThePipeline;

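/**
 * Instantiate all pipeline resources (fetch sequencer, instruction and data
 * cache units, decode, branch predictor, fetch buffers, register manager,
 * AGEN, execute, multiply/divide, and graduation units) and register them
 * with the pool.
 */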
ResourcePool::ResourcePool(InOrderCPU *_cpu, ThePipeline::Params *params)
    : cpu(_cpu), instUnit(NULL), dataUnit(NULL)
{
    // @todo: use this function to instantiate the resources in the resource
    //        pool. This will help in the auto-generation of this pipeline
    //        model.
    // ThePipeline::addResources(resources, memObjects);

    int stage_width = cpu->stageWidth;

    // Declare Resource Objects
    // name - id - bandwidth - latency - CPU - Parameters
    // --------------------------------------------------
    resources.push_back(new FetchSeqUnit("fetch_seq_unit", FetchSeq,
                                         stage_width * 2, 0, _cpu, params));

    // Keep track of the instruction fetch unit so we can easily
    // provide a pointer to it in the CPU.
    instUnit = new FetchUnit("icache_port", ICache,
                             stage_width * 2 + MaxThreads, 0, _cpu, params);
    resources.push_back(instUnit);

    resources.push_back(new DecodeUnit("decode_unit", Decode,
                                       stage_width, 0, _cpu, params));

    resources.push_back(new BranchPredictor("branch_predictor", BPred,
                                            stage_width, 0, _cpu, params));

    resources.push_back(new InstBuffer("fetch_buffer_t0", FetchBuff, 4,
                                       0, _cpu, params));

    resources.push_back(new UseDefUnit("regfile_manager", RegManager,
                                       stage_width * 3, 0, _cpu, params));

    resources.push_back(new AGENUnit("agen_unit", AGEN,
                                     stage_width, 0, _cpu, params));

    resources.push_back(new ExecutionUnit("execution_unit", ExecUnit,
                                          stage_width, 0, _cpu, params));

    resources.push_back(new MultDivUnit("mult_div_unit", MDU,
                                        stage_width * 2, 0, _cpu, params));

    // Keep track of the data load/store unit so we can easily provide
    // a pointer to it in the CPU.
    dataUnit = new CacheUnit("dcache_port", DCache,
                             stage_width * 2 + MaxThreads, 0, _cpu, params);
    resources.push_back(dataUnit);

    gradObjects.push_back(BPred);
    resources.push_back(new GraduationUnit("graduation_unit", Grad,
                                           stage_width, 0, _cpu, params));

    resources.push_back(new InstBuffer("fetch_buffer_t1", FetchBuff2, 4,
                                       0, _cpu, params));
}

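/** Free every resource allocated by the constructor. */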
ResourcePool::~ResourcePool()
{
    cout << "Deleting resources ..." << endl;

    for (int i = 0; i < resources.size(); i++) {
        DPRINTF(Resource, "Deleting resource: %s.\n", resources[i]->name());

        delete resources[i];
    }
}

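/** Initialize each resource in the pool after construction. */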
void
ResourcePool::init()
{
    for (int i = 0; i < resources.size(); i++) {
        DPRINTF(Resource, "Initializing resource: %s.\n",
                resources[i]->name());

        resources[i]->init();
    }
}

string
ResourcePool::name()
{
    return cpu->name() + ".ResourcePool";
}

void
ResourcePool::print()
{
    for (int i = 0; i < resources.size(); i++) {
        DPRINTF(InOrderDynInst, "Res:%i %s\n",
                i, resources[i]->name());
    }
}


void
ResourcePool::regStats()
{
    DPRINTF(Resource, "Registering Stats Throughout Resource Pool.\n");

    int num_resources = resources.size();

    for (int idx = 0; idx < num_resources; idx++) {
        resources[idx]->regStats();
    }
}

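/**
 * Look up the index of a resource in the pool by its resource ID.
 * Panics if no resource with the given ID exists.
 */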
unsigned
ResourcePool::getResIdx(const ThePipeline::ResourceId &res_id)
{
    int num_resources = resources.size();

    for (int idx = 0; idx < num_resources; idx++) {
        if (resources[idx]->getId() == res_id)
            return idx;
    }

    // @todo: change the return type to int (or an enumerated type) and
    //        return -1 here instead of panicking.
    panic("Can't find resource idx for: %i\n", res_id);

    return 0;
}

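/** Forward an instruction's request to the resource at res_idx. */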
ResReqPtr
ResourcePool::request(int res_idx, DynInstPtr inst)
{
    // Make sure this is a valid resource ID
    assert(res_idx >= 0 && res_idx < resources.size());

    return resources[res_idx]->request(inst);
}

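/**
 * Squash the given instruction in a single resource, using the last
 * pipeline stage as the squashing stage.
 */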
void
ResourcePool::squash(DynInstPtr inst, int res_idx, InstSeqNum done_seq_num,
                     ThreadID tid)
{
    resources[res_idx]->squash(inst, ThePipeline::NumStages-1, done_seq_num,
                               tid);
}

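/** Broadcast a trap for the given thread to every resource in the pool. */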
void
ResourcePool::trap(Fault fault, ThreadID tid, DynInstPtr inst)
{
    DPRINTF(Resource, "[tid:%i] Broadcasting Trap to all "
            "resources.\n", tid);

    int num_resources = resources.size();

    for (int idx = 0; idx < num_resources; idx++)
        resources[idx]->trap(fault, tid, inst);
}

int
ResourcePool::slotsAvail(int res_idx)
{
    return resources[res_idx]->slotsAvail();
}

int
ResourcePool::slotsInUse(int res_idx)
{
    return resources[res_idx]->slotsInUse();
}

// @todo: split this function, rename this version schedulePoolEvent, and
//        use scheduleEvent for scheduling a specific event on a resource
// @todo: for arguments that aren't being used in a ResPoolEvent, a dummy
//        parameter or a typedef could be used to signify what information
//        is important to the event construction
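/**
 * Schedule a pool-wide CPU event (instruction graduation, squash-all, or
 * post-context-switch update) 'delay' cycles from now. Unrecognized event
 * types are ignored.
 */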
void
ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
                            int delay, int res_idx, ThreadID tid)
{
    assert(delay >= 0);

    Tick when = cpu->nextCycle(curTick() + cpu->ticks(delay));

    switch ((int)e_type)
    {
      case ResourcePool::InstGraduated:
        {
            DPRINTF(Resource, "Scheduling Inst-Graduated Resource Pool "
                    "Event for tick %i.\n", curTick() + delay);
            ResPoolEventPri grad_pri = ResGrad_Pri;
            ResPoolEvent *res_pool_event =
                new ResPoolEvent(this, e_type, inst, inst->squashingStage,
                                 inst->seqNum, inst->readTid(), grad_pri);
            cpu->schedule(res_pool_event, when);
        }
        break;

      case ResourcePool::SquashAll:
        {
            DPRINTF(Resource, "Scheduling Squash Resource Pool Event for "
                    "tick %i.\n", curTick() + delay);
            ResPoolEventPri squash_pri = ResSquash_Pri;
            ResPoolEvent *res_pool_event =
                new ResPoolEvent(this, e_type, inst, inst->squashingStage,
                                 inst->squashSeqNum, inst->readTid(),
                                 squash_pri);
            cpu->schedule(res_pool_event, when);
        }
        break;

      case ResourcePool::UpdateAfterContextSwitch:
        {
            DPRINTF(Resource, "Scheduling UpdatePC Resource Pool Event "
                    "for tick %i.\n", curTick() + delay);
            ResPoolEvent *res_pool_event =
                new ResPoolEvent(this, e_type, inst, inst->squashingStage,
                                 inst->seqNum, inst->readTid());
            cpu->schedule(res_pool_event, when);
        }
        break;

      default:
        DPRINTF(Resource, "Ignoring Unrecognized CPU Event (%s).\n",
                InOrderCPU::eventNames[e_type]);
    }
}

void
ResourcePool::unscheduleEvent(int res_idx, DynInstPtr inst)
{
    resources[res_idx]->unscheduleEvent(inst);
}

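/**
 * Broadcast a squash to every resource: all instructions above done_seq_num,
 * starting at stage_num, are squashed for the given thread.
 */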
void
ResourcePool::squashAll(DynInstPtr inst, int stage_num,
                        InstSeqNum done_seq_num, ThreadID tid)
{
    DPRINTF(Resource, "[tid:%i] Broadcasting Squash All Event "
            "starting w/stage %i for all instructions above [sn:%i].\n",
            tid, stage_num, done_seq_num);

    int num_resources = resources.size();

    for (int idx = 0; idx < num_resources; idx++) {
        resources[idx]->squash(inst, stage_num, done_seq_num, tid);
    }
}

void
ResourcePool::squashDueToMemStall(DynInstPtr inst, int stage_num,
                                  InstSeqNum done_seq_num, ThreadID tid)
{
    DPRINTF(Resource, "[tid:%i] Broadcasting SquashDueToMemStall Event "
            "starting w/stage %i for all instructions above [sn:%i].\n",
            tid, stage_num, done_seq_num);

    int num_resources = resources.size();

    for (int idx = 0; idx < num_resources; idx++) {
        resources[idx]->squashDueToMemStall(inst, stage_num, done_seq_num,
                                            tid);
    }
}

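/**
 * Broadcast thread activation to every resource. Under the
 * SwitchOnCacheMiss thread model, the activation is ignored unless no
 * thread is currently active or the thread is already the active thread.
 */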
void
ResourcePool::activateThread(ThreadID tid)
{
    bool do_activate = cpu->threadModel != InOrderCPU::SwitchOnCacheMiss ||
        cpu->numActiveThreads() < 1 ||
        cpu->activeThreadId() == tid;

    if (do_activate) {
        DPRINTF(Resource, "[tid:%i] Broadcasting Thread Activation to all "
                "resources.\n", tid);

        int num_resources = resources.size();

        for (int idx = 0; idx < num_resources; idx++) {
            resources[idx]->activateThread(tid);
        }
    } else {
        DPRINTF(Resource, "[tid:%i] Ignoring Thread Activation to all "
                "resources.\n", tid);
    }
}

void
ResourcePool::deactivateThread(ThreadID tid)
{
    DPRINTF(Resource, "[tid:%i] Broadcasting Thread Deactivation to all "
            "resources.\n", tid);

    int num_resources = resources.size();

    for (int idx = 0; idx < num_resources; idx++) {
        resources[idx]->deactivateThread(tid);
    }
}

void
ResourcePool::suspendThread(ThreadID tid)
{
    DPRINTF(Resource, "[tid:%i] Broadcasting Thread Suspension to all "
            "resources.\n", tid);

    int num_resources = resources.size();

    for (int idx = 0; idx < num_resources; idx++) {
        resources[idx]->suspendThread(tid);
    }
}

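/**
 * Notify only the resources registered in gradObjects (e.g. the branch
 * predictor) that the instruction with seq_num has graduated.
 */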
void
ResourcePool::instGraduated(InstSeqNum seq_num, ThreadID tid)
{
    DPRINTF(Resource, "[tid:%i] Broadcasting [sn:%i] graduation to "
            "appropriate resources.\n", tid, seq_num);

    int num_resources = gradObjects.size();

    for (int idx = 0; idx < num_resources; idx++) {
        resources[gradObjects[idx]]->instGraduated(seq_num, tid);
    }
}

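/** Broadcast a post-context-switch PC update to every resource. */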
void
ResourcePool::updateAfterContextSwitch(DynInstPtr inst, ThreadID tid)
{
    DPRINTF(Resource, "[tid:%i] Broadcasting Update PC to all resources.\n",
            tid);

    int num_resources = resources.size();

    for (int idx = 0; idx < num_resources; idx++) {
        resources[idx]->updateAfterContextSwitch(inst, tid);
    }
}

ResourcePool::ResPoolEvent::ResPoolEvent(ResourcePool *_resPool,
                                         InOrderCPU::CPUEventType e_type,
                                         DynInstPtr _inst,
                                         int stage_num,
                                         InstSeqNum seq_num,
                                         ThreadID _tid,
                                         ResPoolEventPri res_pri)
    : Event(res_pri), resPool(_resPool),
      eventType(e_type), inst(_inst), seqNum(seq_num),
      stageNum(stage_num), tid(_tid)
{ }

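/**
 * Dispatch a pool event to the matching broadcast routine, then queue the
 * event on the CPU's removal list.
 */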
void
ResourcePool::ResPoolEvent::process()
{
    switch ((int)eventType)
    {
      case ResourcePool::InstGraduated:
        resPool->instGraduated(seqNum, tid);
        break;

      case ResourcePool::SquashAll:
        resPool->squashAll(inst, stageNum, seqNum, tid);
        break;

      case ResourcePool::UpdateAfterContextSwitch:
        resPool->updateAfterContextSwitch(inst, tid);
        break;

      default:
        fatal("Unrecognized Event Type");
    }

    resPool->cpu->cpuEventRemoveList.push(this);
}


const char *
ResourcePool::ResPoolEvent::description() const
{
    return "Resource Pool event";
}

/** Schedule resource event, regardless of its current state. */
void
ResourcePool::ResPoolEvent::scheduleEvent(int delay)
{
    InOrderCPU *cpu = resPool->cpu;
    assert(!scheduled() || squashed());
    cpu->reschedule(this, cpu->nextCycle(curTick() + cpu->ticks(delay)), true);
}

/** Unschedule resource event, regardless of its current state. */
void
ResourcePool::ResPoolEvent::unscheduleEvent()
{
    if (scheduled())
        squash();
}