/*
 * Copyright (c) 2013-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andrew Bardsley
 */

/**
 * @file
 *
 * Fetch2 receives lines of data from Fetch1, separates them into
 * instructions and passes them to Decode
 */
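
/* Informal overview, inferred from the ports declared below:
 *
 *   Fetch1  --(fetched lines)----------------------------------> Fetch2
 *   Execute --(resolved branches, a snoop of the data that is
 *              also sent to Fetch1)---------------------------> Fetch2
 *   Fetch2  --(branch predictions)-----------------------------> Fetch1
 *   Fetch2  --(extracted instructions)-------------------------> Decode
 */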

#ifndef __CPU_MINOR_FETCH2_HH__
#define __CPU_MINOR_FETCH2_HH__

#include "cpu/minor/buffers.hh"
#include "cpu/minor/cpu.hh"
#include "cpu/minor/pipe_data.hh"
#include "cpu/pred/bpred_unit.hh"
#include "params/MinorCPU.hh"

namespace Minor
{

/** This stage receives lines of data from Fetch1, separates them into
 *  instructions and passes them to Decode */
class Fetch2 : public Named
{
  protected:
    /** Pointer back to the containing CPU */
    MinorCPU &cpu;

    /** Input port carrying lines from Fetch1 */
    Latch<ForwardLineData>::Output inp;

    /** Input port carrying branches from Execute. This is a snoop of the
     *  data provided to Fetch1. */
    Latch<BranchData>::Output branchInp;

    /** Output port carrying predictions back to Fetch1 */
    Latch<BranchData>::Input predictionOut;

    /** Output port carrying instructions into Decode */
    Latch<ForwardInstData>::Input out;

    /** Interface to reserve space in the next stage */
    Reservable &nextStageReserve;

    /** Width of output of this stage/input of next in instructions */
    unsigned int outputWidth;

    /** If true, more than one input word can be processed each cycle if
     *  there is room in the output to contain its processed data */
    bool processMoreThanOneInput;

    /** Branch predictor passed from Python configuration */
    BPredUnit &branchPredictor;

  public:
    /* Public so that Pipeline can pass it to Fetch1 */
    InputBuffer<ForwardLineData> inputBuffer;

  protected:
    /** Data members after this line are cycle-to-cycle state */

    /** Index into an incompletely processed input line that instructions
     *  are to be extracted from */
    unsigned int inputIndex;

    /** Remembered program counter value. Between contiguous lines, this
     *  is just updated with advancePC. For lines following changes of
     *  stream, a new PC must be loaded and havePC set.
     *  havePC is needed to accommodate instructions which span across
     *  lines, meaning that Fetch2 and the decoder need to remember a PC
     *  value and a partially-offered instruction from the previous line */
    TheISA::PCState pc;

    /** PC is currently valid. Initially false, gets set to true when a
     *  change-of-stream line is received and false again when lines are
     *  discarded for any reason */
    bool havePC;

    /** Stream sequence number of the last seen line, used to identify
     *  changes of instruction stream */
    InstSeqNum lastStreamSeqNum;

    /** Fetch2 is the source of fetch sequence numbers. These represent the
     *  order in which instructions were extracted from fetched lines. */
    InstSeqNum fetchSeqNum;

    /** Stream sequence number remembered from last time the predictionSeqNum
     *  changed. Lines should only be discarded when their predictionSeqNums
     *  disagree with Fetch2::predictionSeqNum *and* they are from the same
     *  stream that bore that prediction number */
    InstSeqNum expectedStreamSeqNum;
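
    /* A minimal sketch of the discard test described above. It is
     * illustrative only; the `line` pointer and its `id` sequence-number
     * fields are assumed to come from ForwardLineData (see pipe_data.hh)
     * and this is not a copy of the code in fetch2.cc:
     *
     *   bool stale =
     *       line->id.streamSeqNum == expectedStreamSeqNum &&
     *       line->id.predictionSeqNum != predictionSeqNum;
     */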

    /** Fetch2 is the source of prediction sequence numbers. These represent
     *  predicted changes of control flow arising from branch prediction in
     *  Fetch2. */
    InstSeqNum predictionSeqNum;

    /** Blocked indication for report */
    bool blocked;

  protected:
    /** Get a piece of data to work on from the inputBuffer, or 0 if there
     *  is no data. */
    const ForwardLineData *getInput();

    /** Pop an element off the input buffer, if there are any */
    void popInput();

    /** Dump the whole contents of the input buffer. Useful after a
     *  prediction changes control flow */
    void dumpAllInput();

    /** Update local branch prediction structures from feedback from
     *  Execute. */
    void updateBranchPrediction(const BranchData &branch);

    /** Predicts branches for the given instruction. Updates the
     *  instruction's predicted... fields and also the branch which
     *  carries the prediction to Fetch1 */
    void predictBranch(MinorDynInstPtr inst, BranchData &branch);
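
    /* Read together with predictionSeqNum above: when predictBranch decides
     * an instruction is a taken branch, the BranchData sent on predictionOut
     * is what tells Fetch1 to redirect its fetch address, and the new
     * prediction sequence number lets lines fetched under the superseded
     * prediction be recognised and discarded. This summary is inferred from
     * the comments in this header. */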

  public:
    Fetch2(const std::string &name,
        MinorCPU &cpu_,
        MinorCPUParams &params,
        Latch<ForwardLineData>::Output inp_,
        Latch<BranchData>::Output branchInp_,
        Latch<BranchData>::Input predictionOut_,
        Latch<ForwardInstData>::Input out_,
        Reservable &next_stage_input_buffer);
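
    /* Construction sketch: the CPU's Pipeline owns the inter-stage latches
     * and wires each stage's input to the previous stage's output. The latch
     * names below (f1ToF2, eToF1, f2ToF1, f2ToD) and the decode stage object
     * are hypothetical placeholders, not the identifiers used in pipeline.cc:
     *
     *   Fetch2 fetch2(name + ".fetch2", cpu, params,
     *       f1ToF2.output(), eToF1.output(), f2ToF1.input(), f2ToD.input(),
     *       decode.inputBuffer);
     */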

  public:
    /** Pass on input/buffer data to the output if you can */
    void evaluate();

    void minorTrace() const;

    /** Is this stage drained? For Fetch2, draining is initiated by
     *  Execute halting Fetch1 causing Fetch2 to naturally drain.
     *  Branch predictions are ignored by Fetch1 during halt */
    bool isDrained();
};

}

#endif /* __CPU_MINOR_FETCH2_HH__ */