2 * Copyright (c) 2014-2015 Advanced Micro Devices, Inc.
5 * For use for simulation and test purposes only
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
17 * 3. Neither the name of the copyright holder nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
33 * Author: Sooraj Puthoor
36 #include "gpu-compute/local_memory_pipeline.hh"
38 #include "debug/GPUPort.hh"
39 #include "gpu-compute/compute_unit.hh"
40 #include "gpu-compute/gpu_dyn_inst.hh"
41 #include "gpu-compute/shader.hh"
42 #include "gpu-compute/vector_register_file.hh"
43 #include "gpu-compute/wavefront.hh"
45 LocalMemPipeline::LocalMemPipeline(const ComputeUnitParams
* p
) :
46 computeUnit(nullptr), lmQueueSize(p
->local_mem_queue_size
)
51 LocalMemPipeline::init(ComputeUnit
*cu
)
54 _name
= computeUnit
->name() + ".LocalMemPipeline";
// exec(): advance the LDS pipeline by one cycle, in two phases:
// (1) retire the oldest returned LDS request — write results back to the
//     vector register file (VRF) and decrement outstanding-request
//     counters; (2) issue the oldest pending LDS request to the LDS if
//     the returned-request queue has room.
// NOTE(review): this excerpt is extraction-mangled — original line
// numbers are fused into the code text, statements wrap mid-expression,
// and several interior lines appear to be missing (the assignment of w
// from m, a completion call, the trailing "m->time, -1" ScheduleAdd
// arguments, closing braces, and an if (!returnVal) guard around the
// DPRINTF). Code is left byte-identical; comments only.
58 LocalMemPipeline::exec()
60 // apply any returned shared (LDS) memory operations
// Peek at the oldest returned LDS request, if any; nullptr when the
// returned-request queue is empty.
61 GPUDynInstPtr m
= !lmReturnedRequests
.empty() ?
62 lmReturnedRequests
.front() : nullptr;
// Assume the VRF writeback port is available unless the readiness
// check below says otherwise.
64 bool accessVrf
= true;
65 Wavefront
*w
= nullptr;
// Loads and value-returning atomics must write a result back into the
// VRF; query the wavefront's SIMD-local VRF for write-port readiness.
// NOTE(review): w is still nullptr at this use in the visible text —
// the assignment of w (presumably from m's wavefront) is one of the
// lines missing from this excerpt.
67 if ((m
) && (m
->isLoad() || m
->isAtomicRet())) {
71 w
->computeUnit
->vrf
[w
->simdId
]->
72 vrfOperandAccessReady(m
->seqNum(), w
, m
,
73 VrfAccessType::WRITE
);
// Phase 1: retire the head returned request only when every resource
// it needs this cycle is ready — its own latency timer, the VRF write
// access checked above, the LDS-to-VRF bus, and (unless coissue of
// returns is enabled) the wavefront's pipe-stage wait slot.
76 if (!lmReturnedRequests
.empty() && m
->latency
.rdy() && accessVrf
&&
77 computeUnit
->locMemToVrfBus
.rdy() && (computeUnit
->shader
->coissue_return
78 || computeUnit
->wfWait
.at(m
->pipeId
).rdy())) {
// Remove the request from the returned queue now that it retires.
80 lmReturnedRequests
.pop();
85 // Decrement outstanding request count
86 computeUnit
->shader
->ScheduleAdd(&w
->outstandingReqs
, m
->time
, -1);
// Stores and atomics also count against the LM-write counter.
// NOTE(review): the trailing "m->time, -1);" arguments and closing
// brace of this if are missing from the visible text.
88 if (m
->isStore() || m
->isAtomic()) {
89 computeUnit
->shader
->ScheduleAdd(&w
->outstandingReqsWrLm
,
// Loads and atomics also count against the LM-read counter; same
// missing-tail caveat as above.
93 if (m
->isLoad() || m
->isAtomic()) {
94 computeUnit
->shader
->ScheduleAdd(&w
->outstandingReqsRdLm
,
98 // Mark write bus busy for appropriate amount of time
99 computeUnit
->locMemToVrfBus
.set(m
->time
);
// Without coissued returns, the wavefront's pipe-stage slot is also
// held busy for the writeback duration.
100 if (computeUnit
->shader
->coissue_return
== 0)
101 w
->computeUnit
->wfWait
.at(m
->pipeId
).set(m
->time
);
104 // If pipeline has executed a local memory instruction
105 // execute local memory packet and issue the packets
// Phase 2: issue the head pending request to the LDS, but only while
// the returned-request queue has space (lmQueueSize bounds it).
107 if (!lmIssuedRequests
.empty() && lmReturnedRequests
.size() < lmQueueSize
) {
109 GPUDynInstPtr m
= lmIssuedRequests
.front();
// sendToLds reports whether the LDS accepted the packet.
111 bool returnVal
= computeUnit
->sendToLds(m
);
// NOTE(review): upstream this DPRINTF sits under an if (!returnVal)
// guard that is not visible in this excerpt — confirm.
113 DPRINTF(GPUPort
, "packet was nack'd and put in retry queue");
// The request leaves the issued queue regardless.
115 lmIssuedRequests
.pop();
120 LocalMemPipeline::regStats()
122 loadVrfBankConflictCycles
123 .name(name() + ".load_vrf_bank_conflict_cycles")
124 .desc("total number of cycles LDS data are delayed before updating "