inorder: don't stall after stores
src/cpu/inorder/resources/cache_unit.hh
/*
 * Copyright (c) 2007 MIPS Technologies, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 *
 */

#ifndef __CPU_INORDER_CACHE_UNIT_HH__
#define __CPU_INORDER_CACHE_UNIT_HH__

#include <list>
#include <string>
#include <vector>

#include "arch/predecoder.hh"
#include "arch/tlb.hh"
#include "base/hashmap.hh"
#include "config/the_isa.hh"
#include "cpu/inorder/inorder_dyn_inst.hh"
#include "cpu/inorder/pipeline_traits.hh"
#include "cpu/inorder/resource.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/port.hh"
#include "params/InOrderCPU.hh"
#include "sim/sim_object.hh"

class CacheRequest;
typedef CacheRequest* CacheReqPtr;

class CacheReqPacket;
typedef CacheReqPacket* CacheReqPktPtr;

class CacheUnit : public Resource
{
  public:
    typedef ThePipeline::DynInstPtr DynInstPtr;

  public:
    CacheUnit(std::string res_name, int res_id, int res_width,
              int res_latency, InOrderCPU *_cpu,
              ThePipeline::Params *params);

    enum Command {
        InitiateReadData,
        CompleteReadData,
        InitiateWriteData,
        CompleteWriteData,
        InitSecondSplitRead,
        InitSecondSplitWrite,
        CompleteSecondSplitRead,
        CompleteSecondSplitWrite
    };
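
    // Rough sketch (not authoritative) of how these commands are sequenced
    // by the pipeline for a load; stores and accesses that straddle a cache
    // block follow the same initiate/complete pattern using the Write and
    // *SecondSplit* variants:
    //
    //     execute(slot)  with cmd == InitiateReadData   // translate + send
    //     ... cache responds, processCacheCompletion(pkt) marks it done ...
    //     execute(slot)  with cmd == CompleteReadData   // retire the load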

  public:
    /** CachePort class for the Cache Unit. Handles the
     *  communication with the cache/memory.
     */
    class CachePort : public Port
    {
      protected:
        /** Pointer to cache port unit */
        CacheUnit *cachePortUnit;

      public:
        /** Default constructor. */
        CachePort(CacheUnit *_cachePortUnit)
            : Port(_cachePortUnit->name() + "-cache-port",
                   (MemObject*)_cachePortUnit->cpu),
              cachePortUnit(_cachePortUnit)
        { }

        bool snoopRangeSent;

      protected:
        /** Atomic version of receive. Panics. */
        Tick recvAtomic(PacketPtr pkt);

        /** Functional version of receive. Panics. */
        void recvFunctional(PacketPtr pkt);

        /** Receives status change. Other than range changing, panics. */
        void recvStatusChange(Status status);

        /** Returns the address ranges of this device. */
        void getDeviceAddressRanges(AddrRangeList &resp,
                                    AddrRangeList &snoop)
        { resp.clear(); snoop.clear(); }

        /** Timing version of receive. Handles responses coming back
         *  from the memory system. */
        bool recvTiming(PacketPtr pkt);

        /** Handles doing a retry of a previously failed send. */
        void recvRetry();
    };
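
    // In the timing protocol the port is expected to act as a thin
    // forwarding layer: responses received by recvTiming() are handed to
    // processCacheCompletion() below, and recvRetry() re-sends a packet
    // that was previously rejected by the cache.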

    void init();

    ResourceRequest* getRequest(DynInstPtr _inst, int stage_num,
                                int res_idx, int slot_num,
                                unsigned cmd);

    ResReqPtr findRequest(DynInstPtr inst);
    ResReqPtr findRequest(DynInstPtr inst, int idx);

    void requestAgain(DynInstPtr inst, bool &try_request);

    virtual int getSlot(DynInstPtr inst);

    /** Executes one of the commands from the "Command" enum */
    virtual void execute(int slot_num);

    virtual void squash(DynInstPtr inst, int stage_num,
                        InstSeqNum squash_seq_num, ThreadID tid);

    void squashDueToMemStall(DynInstPtr inst, int stage_num,
                             InstSeqNum squash_seq_num, ThreadID tid);

    virtual void squashCacheRequest(CacheReqPtr req_ptr);

    /** After the memory request is completed in the cache, do the final
     *  processing to complete the request in the CPU.
     */
    virtual void processCacheCompletion(PacketPtr pkt);

    /** Create a request that will interface with the TLB and memory
     *  objects */
    virtual void setupMemRequest(DynInstPtr inst, CacheReqPtr cache_req,
                                 int acc_size, int flags);

    void finishCacheUnitReq(DynInstPtr inst, CacheRequest *cache_req);

    void buildDataPacket(CacheRequest *cache_req);

    bool processSquash(CacheReqPacket *cache_pkt);

    void recvRetry();

    /** Returns a specific port. */
    Port *getPort(const std::string &if_name, int idx);

    Fault read(DynInstPtr inst, Addr addr,
               uint8_t *data, unsigned size, unsigned flags);

    Fault write(DynInstPtr inst, uint8_t *data, unsigned size,
                Addr addr, unsigned flags, uint64_t *res);

    void doTLBAccess(DynInstPtr inst, CacheReqPtr cache_req, int acc_size,
                     int flags, TheISA::TLB::Mode tlb_mode);

    /** Read/Write on behalf of an instruction.
     *  curResSlot needs to be set to a valid slot in the instruction.
     */
    void doCacheAccess(DynInstPtr inst, uint64_t *write_result=NULL,
                       CacheReqPtr split_req=NULL);

    uint64_t getMemData(Packet *packet);

    void setAddrDependency(DynInstPtr inst);
    virtual void removeAddrDependency(DynInstPtr inst);
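
    // setAddrDependency()/removeAddrDependency() appear to maintain the
    // per-thread addrList/addrMap structures below, recording the effective
    // address of an in-flight memory instruction so that later accesses to
    // the same address can be ordered behind it. A hypothetical use, for a
    // store followed by a dependent load (store_inst is illustrative only):
    //
    //     setAddrDependency(store_inst);     // store's address now tracked
    //     ... a load to the same address finds it in addrMap and waits ...
    //     removeAddrDependency(store_inst);  // store done; load may proceed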

  protected:
    /** Cache interface. */
    CachePort *cachePort;

    bool cachePortBlocked;

    std::list<Addr> addrList[ThePipeline::MaxThreads];

    m5::hash_map<Addr, InstSeqNum> addrMap[ThePipeline::MaxThreads];

  public:
    int cacheBlkSize;

    int cacheBlkMask;

    /** Align an address to the start of the cache block. */
    Addr cacheBlockAlign(Addr addr)
    {
        return (addr & ~(cacheBlkMask));
    }
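
    // Example: with 64-byte cache blocks (cacheBlkMask == 0x3f, assuming
    // the mask is block_size - 1), cacheBlockAlign(0x1007) yields 0x1000
    // and cacheBlockAlign(0x1040) yields 0x1040.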

    bool tlbBlocked[ThePipeline::MaxThreads];

    TheISA::TLB* tlb();

    TheISA::TLB *_tlb;
};

class CacheUnitEvent : public ResourceEvent {
  public:
    const std::string name() const
    {
        return "CacheUnitEvent";
    }

    /** Constructs a resource event. */
    CacheUnitEvent();
    virtual ~CacheUnitEvent() {}

    /** Processes a resource event. */
    void process();
};

//@todo: Move into the CacheUnit class for private access to the "valid" field
class CacheRequest : public ResourceRequest
{
  public:
    CacheRequest(CacheUnit *cres)
        : ResourceRequest(cres), memReq(NULL), reqData(NULL),
          dataPkt(NULL), memAccComplete(false),
          memAccPending(false), tlbStall(false), splitAccess(false),
          splitAccessNum(-1), split2ndAccess(false),
          fetchBufferFill(false)
    { }

    virtual ~CacheRequest()
    {
        if (reqData && !splitAccess)
            delete [] reqData;
    }

    void setRequest(DynInstPtr _inst, int stage_num, int res_idx,
                    int slot_num, unsigned _cmd, MemCmd::Command pkt_cmd,
                    int idx)
    {
        pktCmd = pkt_cmd;
        instIdx = idx;

        ResourceRequest::setRequest(_inst, stage_num, res_idx, slot_num,
                                    _cmd);
    }

    void clearRequest();

    virtual PacketDataPtr getData()
    { return reqData; }

    void
    setMemAccCompleted(bool completed = true)
    {
        memAccComplete = completed;
    }

    bool is2ndSplit()
    {
        return split2ndAccess;
    }

    bool isMemAccComplete() { return memAccComplete; }

    void setMemAccPending(bool pending = true) { memAccPending = pending; }
    bool isMemAccPending() { return memAccPending; }

    //Make this data private/protected!
    MemCmd::Command pktCmd;
    RequestPtr memReq;
    PacketDataPtr reqData;
    CacheReqPacket *dataPkt;

    bool memAccComplete;
    bool memAccPending;
    bool tlbStall;

    bool splitAccess;
    int splitAccessNum;
    bool split2ndAccess;
    int instIdx;

    /** Should we expect the block from a cache access or from the fetch
     *  buffer? */
    bool fetchBufferFill;
};
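
// Rough lifecycle of a CacheRequest (a sketch inferred from the interface
// above, not a normative description): the CacheUnit hands one out through
// getRequest(), setRequest() stamps it with the instruction, pipeline stage,
// command, and packet command, buildDataPacket() turns memReq into dataPkt,
// and once the cache responds processCacheCompletion() flags it via
// setMemAccCompleted() so the matching Complete* command can finish the
// instruction.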

class CacheReqPacket : public Packet
{
  public:
    CacheReqPacket(CacheRequest *_req,
                   Command _cmd, short _dest, int _idx = 0)
        : Packet(&(*_req->memReq), _cmd, _dest), cacheReq(_req),
          instIdx(_idx), hasSlot(false), reqData(NULL), memReq(NULL)
    { }

    CacheRequest *cacheReq;
    int instIdx;
    bool hasSlot;
    PacketDataPtr reqData;
    RequestPtr memReq;
};


#endif //__CPU_INORDER_CACHE_UNIT_HH__