gpu-compute, arch-gcn3: refactor barriers
[gem5.git] / src / gpu-compute / fetch_stage.cc
/*
 * Copyright (c) 2014-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "gpu-compute/fetch_stage.hh"

#include "gpu-compute/compute_unit.hh"
#include "gpu-compute/wavefront.hh"

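// Create one FetchUnit per SIMD unit (vector ALU); each wavefront's fetch
// traffic is handled by the FetchUnit that matches its simdId.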
FetchStage::FetchStage(const ComputeUnitParams* p) :
    numVectorALUs(p->num_SIMDs), computeUnit(nullptr)
{
    for (int j = 0; j < numVectorALUs; ++j) {
        FetchUnit newFetchUnit(p);
        _fetchUnit.push_back(newFetchUnit);
    }
}

FetchStage::~FetchStage()
{
    _fetchUnit.clear();
}

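// Bind each per-SIMD FetchUnit to its SIMD's wavefront list and to the
// owning compute unit, and derive this stage's name from the CU's name.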
void
FetchStage::init(ComputeUnit *cu)
{
    computeUnit = cu;
    _name = computeUnit->name() + ".FetchStage";

    for (int j = 0; j < numVectorALUs; ++j) {
        _fetchUnit[j].bindWaveList(&computeUnit->wfList[j]);
        _fetchUnit[j].init(computeUnit);
    }
}

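// Run every per-SIMD FetchUnit.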
void
FetchStage::exec()
{
    for (int j = 0; j < numVectorALUs; ++j) {
        _fetchUnit[j].exec();
    }
}

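// Handle an instruction fetch response from the SQC port: recover the
// requesting wavefront from the packet's SenderState, record how many
// instructions the response carries, then hand the packet to that
// wavefront's per-SIMD FetchUnit.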
void
FetchStage::processFetchReturn(PacketPtr pkt)
{
    ComputeUnit::SQCPort::SenderState *sender_state =
        safe_cast<ComputeUnit::SQCPort::SenderState*>(pkt->senderState);

    Wavefront *wavefront = sender_state->wavefront;

    const unsigned num_instructions = pkt->req->getSize() /
        sizeof(TheGpuISA::RawMachInst);

    instFetchInstReturned.sample(num_instructions);
    uint32_t simdId = wavefront->simdId;
    _fetchUnit[simdId].processFetchReturn(pkt);
}

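// Forward a fetch request to the FetchUnit of the requesting wavefront's SIMD.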
void
FetchStage::fetch(PacketPtr pkt, Wavefront *wavefront)
{
    _fetchUnit[wavefront->simdId].fetch(pkt, wavefront);
}

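// Register stats: a distribution of how many instructions each fetch
// request returned.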
void
FetchStage::regStats()
{
    instFetchInstReturned
        .init(1, 32, 1)
        .name(name() + ".inst_fetch_instr_returned")
        .desc("For each instruction fetch request received, record how many "
              "instructions it returned")
        ;
}