gpu-compute: Wavefront refactoring
src/gpu-compute/local_memory_pipeline.cc
/*
 * Copyright (c) 2014-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Sooraj Puthoor
 */

#include "gpu-compute/local_memory_pipeline.hh"

#include "debug/GPUPort.hh"
#include "gpu-compute/compute_unit.hh"
#include "gpu-compute/gpu_dyn_inst.hh"
#include "gpu-compute/shader.hh"
#include "gpu-compute/vector_register_file.hh"
#include "gpu-compute/wavefront.hh"

LocalMemPipeline::LocalMemPipeline(const ComputeUnitParams* p) :
    computeUnit(nullptr), lmQueueSize(p->local_mem_queue_size)
{
}

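// The compute unit back-pointer is left null by the constructor and is
// filled in later via init(), once the parent ComputeUnit has been
// constructed.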
void
LocalMemPipeline::init(ComputeUnit *cu)
{
    computeUnit = cu;
    _name = computeUnit->name() + ".LocalMemPipeline";
}

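// exec() advances the pipeline by one cycle: it first tries to complete
// the oldest returned LDS request, writing load/atomic results back to
// the VRF, and then issues the oldest pending request to the LDS if the
// return queue has room.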
void
LocalMemPipeline::exec()
{
    // apply any returned shared (LDS) memory operations
    GPUDynInstPtr m = !lmReturnedRequests.empty() ?
        lmReturnedRequests.front() : nullptr;

    bool accessVrf = true;
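    // Loads and atomics return data to the register file, so a completed
    // request can only be retired once the VRF is ready to accept the
    // writeback for this instruction.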
    if (m && (m->m_op == Enums::MO_LD || MO_A(m->m_op))) {
        Wavefront *w = computeUnit->wfList[m->simdId][m->wfSlotId];

        accessVrf =
            w->computeUnit->vrf[m->simdId]->
                vrfOperandAccessReady(m->seqNum(), w, m,
                                      VrfAccessType::WRITE);
    }

    if (!lmReturnedRequests.empty() && m->latency.rdy() && accessVrf &&
        computeUnit->locMemToVrfBus.rdy() &&
        (computeUnit->shader->coissue_return ||
         computeUnit->wfWait.at(m->pipeId).rdy())) {
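        // Dispatch the templated return handler on the destination VGPR
        // width (v_type) and the LDS data type (m_type); e.g., a signed
        // 16-bit load into a 32-bit VGPR resolves to
        // doSmReturn<int32_t, int16_t>.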
        if (m->v_type == VT_32 && m->m_type == Enums::M_U8)
            doSmReturn<uint32_t, uint8_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_U16)
            doSmReturn<uint32_t, uint16_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_U32)
            doSmReturn<uint32_t, uint32_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_S8)
            doSmReturn<int32_t, int8_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_S16)
            doSmReturn<int32_t, int16_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_S32)
            doSmReturn<int32_t, int32_t>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_F16)
            doSmReturn<float, Float16>(m);
        else if (m->v_type == VT_32 && m->m_type == Enums::M_F32)
            doSmReturn<float, float>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_U8)
            doSmReturn<uint64_t, uint8_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_U16)
            doSmReturn<uint64_t, uint16_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_U32)
            doSmReturn<uint64_t, uint32_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_U64)
            doSmReturn<uint64_t, uint64_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_S8)
            doSmReturn<int64_t, int8_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_S16)
            doSmReturn<int64_t, int16_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_S32)
            doSmReturn<int64_t, int32_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_S64)
            doSmReturn<int64_t, int64_t>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_F16)
            doSmReturn<double, Float16>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_F32)
            doSmReturn<double, float>(m);
        else if (m->v_type == VT_64 && m->m_type == Enums::M_F64)
            doSmReturn<double, double>(m);
    }

    // If an issued local memory instruction is pending and the return
    // queue has room (it is bounded by lmQueueSize), execute its packet
    // and send it to the LDS.
    if (!lmIssuedRequests.empty() && lmReturnedRequests.size() < lmQueueSize) {
        GPUDynInstPtr m = lmIssuedRequests.front();

        bool returnVal = computeUnit->sendToLds(m);
        if (!returnVal) {
            DPRINTF(GPUPort, "packet was nack'd and put in retry queue\n");
        }
        lmIssuedRequests.pop();
    }
}

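// Complete a returned LDS request. The template parameter c0 is the
// destination VGPR component type (selected by v_type) and c1 is the
// type of the data as it is stored in LDS (selected by m_type); each
// lane's value is converted from c1 to c0 on writeback.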
template<typename c0, typename c1>
void
LocalMemPipeline::doSmReturn(GPUDynInstPtr m)
{
    lmReturnedRequests.pop();
    Wavefront *w = computeUnit->wfList[m->simdId][m->wfSlotId];

    // Return data to registers
    if (m->m_op == Enums::MO_LD || MO_A(m->m_op)) {
        std::vector<uint32_t> regVec;
        for (int k = 0; k < m->n_reg; ++k) {
            int dst = m->dst_reg + k;

            if (m->n_reg > MAX_REGS_FOR_NON_VEC_MEM_INST)
                dst = m->dst_reg_vec[k];
            // virtual->physical VGPR mapping
            int physVgpr = w->remap(dst, sizeof(c0), 1);
            // save the physical VGPR index
            regVec.push_back(physVgpr);
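            // d_data holds the returned values as n_reg consecutive
            // slices of wfSize() elements of type c1; slice k feeds
            // destination register k.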
            c1 *p1 = &((c1 *)m->d_data)[k * w->computeUnit->wfSize()];

            for (int i = 0; i < w->computeUnit->wfSize(); ++i) {
                if (m->exec_mask[i]) {
                    // write the value into the physical VGPR. This is a
                    // purely functional operation. No timing is modeled.
                    w->computeUnit->vrf[w->simdId]->write<c0>(physVgpr,
                                                              *p1, i);
                }
                ++p1;
            }
        }

        // Schedule the write operation of the load data on the VRF. This
        // simply models the timing aspect of the VRF write operation. It
        // does not modify the physical VGPR.
        loadVrfBankConflictCycles +=
            w->computeUnit->vrf[w->simdId]->exec(m->seqNum(), w,
                                                 regVec, sizeof(c0), m->time);
    }

    // Decrement the wavefront's outstanding request count
    computeUnit->shader->ScheduleAdd(&w->outstandingReqs, m->time, -1);

    if (m->m_op == Enums::MO_ST || MO_A(m->m_op) || MO_ANR(m->m_op) ||
        MO_H(m->m_op)) {
        computeUnit->shader->ScheduleAdd(&w->outstandingReqsWrLm,
                                         m->time, -1);
    }

    if (m->m_op == Enums::MO_LD || MO_A(m->m_op) || MO_ANR(m->m_op)) {
        computeUnit->shader->ScheduleAdd(&w->outstandingReqsRdLm,
                                         m->time, -1);
    }

    // Mark the local-memory-to-VRF write bus busy for the appropriate
    // amount of time
    computeUnit->locMemToVrfBus.set(m->time);
    if (computeUnit->shader->coissue_return == 0)
        w->computeUnit->wfWait.at(m->pipeId).set(m->time);
}

void
LocalMemPipeline::regStats()
{
    loadVrfBankConflictCycles
        .name(name() + ".load_vrf_bank_conflict_cycles")
        .desc("total number of cycles LDS data are delayed before updating "
              "the VRF")
        ;
}