src/gpu-compute/schedule_stage.cc
/*
 * Copyright (c) 2014-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: John Kalamatianos,
 *          Sooraj Puthoor,
 *          Mark Wyse
 */

#include "gpu-compute/schedule_stage.hh"

#include "gpu-compute/compute_unit.hh"
#include "gpu-compute/gpu_static_inst.hh"
#include "gpu-compute/vector_register_file.hh"
#include "gpu-compute/wavefront.hh"

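// The schedule stage instantiates one Scheduler per execution resource:
// one for each SIMD unit plus one for each memory pipeline (global and
// shared memory pipes combined).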
ScheduleStage::ScheduleStage(const ComputeUnitParams *p)
    : numSIMDs(p->num_SIMDs),
      numMemUnits(p->num_global_mem_pipes + p->num_shared_mem_pipes)
{
    for (int j = 0; j < numSIMDs + numMemUnits; ++j) {
        scheduler.emplace_back(p);
    }
}

ScheduleStage::~ScheduleStage()
{
    scheduler.clear();
    waveStatusList.clear();
}

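// Bind each scheduler to the compute unit's ready list for its execution
// resource, and cache pointers to the CU's per-SIMD wave status lists and
// to the shared dispatch list.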
void
ScheduleStage::init(ComputeUnit *cu)
{
    computeUnit = cu;
    _name = computeUnit->name() + ".ScheduleStage";

    for (int j = 0; j < numSIMDs + numMemUnits; ++j) {
        scheduler[j].bindList(&computeUnit->readyList[j]);
    }

    for (int j = 0; j < numSIMDs; ++j) {
        waveStatusList.push_back(&computeUnit->waveStatusList[j]);
    }

    dispatchList = &computeUnit->dispatchList;
}

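// Resolve VRF read-port conflicts between a wavefront selected for a memory
// pipeline and a wavefront selected for any other execution pipeline on the
// same SIMD unit. On a conflict the other instruction is dropped: its
// dispatch slot is cleared and its wavefront is marked READY again.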
void
ScheduleStage::arbitrate()
{
    // iterate over all Memory pipelines
    for (int j = numSIMDs; j < numSIMDs + numMemUnits; ++j) {
        if (dispatchList->at(j).first) {
            Wavefront *waveToMemPipe = dispatchList->at(j).first;
            // iterate over all execution pipelines
            for (int i = 0; i < numSIMDs + numMemUnits; ++i) {
                if ((i != j) && (dispatchList->at(i).first)) {
                    Wavefront *waveToExePipe = dispatchList->at(i).first;
                    // if the two selected wavefronts are mapped to the same
                    // SIMD unit then they share the VRF
                    if (waveToMemPipe->simdId == waveToExePipe->simdId) {
                        int simdId = waveToMemPipe->simdId;
                        // Read VRF port arbitration:
                        // If there is a read VRF port conflict between a
                        // memory instruction and another instruction, we
                        // drop the other instruction. We don't need to
                        // check for write VRF port conflicts because the
                        // memory instruction either does not need to write
                        // to the VRF (store) or will write to the VRF when
                        // the data comes back (load), in which case the
                        // arbiter of the memory pipes will resolve any
                        // conflicts.
                        if (computeUnit->vrf[simdId]->
                            isReadConflict(waveToMemPipe->wfSlotId,
                                           waveToExePipe->wfSlotId)) {
                            // FIXME: The "second" member variable is never
                            // used in the model. I am setting it to READY
                            // simply to follow the protocol of setting it
                            // when the WF has an instruction ready to issue.
                            waveStatusList[simdId]->at(waveToExePipe->wfSlotId)
                                .second = READY;

                            dispatchList->at(i).first = nullptr;
                            dispatchList->at(i).second = EMPTY;
                            break;
                        }
                    }
                }
            }
        }
    }
}

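// One scheduling pass: for every execution resource with a non-empty ready
// list, choose a wavefront, place it in the dispatch list, update its
// resource usage, and mark it BLOCKED until it issues. Conflicts over
// shared resources are then resolved by arbitrate().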
void
ScheduleStage::exec()
{
    for (int j = 0; j < numSIMDs + numMemUnits; ++j) {
        uint32_t readyListSize = computeUnit->readyList[j].size();

        // If no wave is ready to be scheduled on the execution resource
        // then skip scheduling for this execution resource
        if (!readyListSize) {
            continue;
        }

        Wavefront *waveToBeDispatched = scheduler[j].chooseWave();
        dispatchList->at(j).first = waveToBeDispatched;
        waveToBeDispatched->updateResources();
        dispatchList->at(j).second = FILLED;

        waveStatusList[waveToBeDispatched->simdId]->at(
            waveToBeDispatched->wfSlotId).second = BLOCKED;

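        // chooseWave() should have removed exactly one wavefront (the one
        // just dispatched) from this resource's ready list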
        assert(computeUnit->readyList[j].size() == readyListSize - 1);
    }
    // arbitrate over all shared resources among instructions being issued
    // simultaneously
    arbitrate();
}

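// No statistics are registered for this stage.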
void
ScheduleStage::regStats()
{
}