[gem5.git] src/cpu/inorder/resources/cache_unit.hh
/*
 * Copyright (c) 2007 MIPS Technologies, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 *
 */

#ifndef __CPU_INORDER_CACHE_UNIT_HH__
#define __CPU_INORDER_CACHE_UNIT_HH__

#include <list>
#include <string>
#include <vector>

#include "arch/predecoder.hh"
#include "arch/tlb.hh"
#include "base/hashmap.hh"
#include "config/the_isa.hh"
#include "cpu/inorder/inorder_dyn_inst.hh"
#include "cpu/inorder/pipeline_traits.hh"
#include "cpu/inorder/resource.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/port.hh"
#include "params/InOrderCPU.hh"
#include "sim/sim_object.hh"

class CacheRequest;
typedef CacheRequest* CacheReqPtr;

class CacheReqPacket;
typedef CacheReqPacket* CacheReqPktPtr;

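/** Resource that performs the cache/memory accesses (and the TLB
 *  translations they require) on behalf of instructions in the inorder
 *  pipeline.
 */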
class CacheUnit : public Resource
{
  public:
    typedef ThePipeline::DynInstPtr DynInstPtr;

  public:
    CacheUnit(std::string res_name, int res_id, int res_width,
              int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);

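    /** Commands this unit can be asked to perform for a memory
     *  instruction (see execute()).
     */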
    enum Command {
        InitiateReadData,
        CompleteReadData,
        InitiateWriteData,
        CompleteWriteData,
        InitSecondSplitRead,
        InitSecondSplitWrite,
        CompleteSecondSplitRead,
        CompleteSecondSplitWrite
    };

  public:
    /** CachePort class for the Cache Unit. Handles doing the
     *  communication with the cache/memory.
     */
    class CachePort : public Port
    {
      protected:
        /** Pointer to cache port unit */
        CacheUnit *cachePortUnit;

      public:
        /** Default constructor. */
        CachePort(CacheUnit *_cachePortUnit)
            : Port(_cachePortUnit->name() + "-cache-port",
                   (MemObject*)_cachePortUnit->cpu),
              cachePortUnit(_cachePortUnit), snoopRangeSent(false)
        { }

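        /** Has the snoop address range been sent to the peer yet? */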
        bool snoopRangeSent;

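        /** Sets this port's peer. */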
        void setPeer(Port *port);

      protected:
        /** Atomic version of receive. Panics. */
        Tick recvAtomic(PacketPtr pkt);

        /** Functional version of receive. */
        void recvFunctional(PacketPtr pkt);

        /** Receives status change. Other than range changing, panics. */
        void recvStatusChange(Status status);

        /** Returns the address ranges of this device. */
        void getDeviceAddressRanges(AddrRangeList &resp,
                                    bool &snoop)
        { resp.clear(); snoop = true; }

        /** Timing version of receive. */
        bool recvTiming(PacketPtr pkt);

        /** Handles doing a retry of a failed fetch. */
        void recvRetry();
    };

    void init();

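    /** Builds and returns a resource request for the given instruction,
     *  stage, slot, and command.
     */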
    ResourceRequest* getRequest(DynInstPtr _inst, int stage_num,
                                int res_idx, int slot_num,
                                unsigned cmd);

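    /** Finds the outstanding request belonging to an instruction
     *  (optionally selecting among split accesses by index).
     */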
    ResReqPtr findRequest(DynInstPtr inst);
    ResReqPtr findRequest(DynInstPtr inst, int idx);

    void requestAgain(DynInstPtr inst, bool &try_request);

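    /** Returns a free slot in this resource for the given instruction. */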
    virtual int getSlot(DynInstPtr inst);

    /** Executes one of the commands from the "Command" enum. */
    virtual void execute(int slot_num);

    virtual void squash(DynInstPtr inst, int stage_num,
                        InstSeqNum squash_seq_num, ThreadID tid);

    void squashDueToMemStall(DynInstPtr inst, int stage_num,
                             InstSeqNum squash_seq_num, ThreadID tid);

    virtual void squashCacheRequest(CacheReqPtr req_ptr);

    /** After the memory request is completed in the cache, do the final
     *  processing to complete the request in the CPU.
     */
    virtual void processCacheCompletion(PacketPtr pkt);

    /** Create a request that will interface with the TLB and memory objects. */
    virtual void setupMemRequest(DynInstPtr inst, CacheReqPtr cache_req,
                                 int acc_size, int flags);

    void finishCacheUnitReq(DynInstPtr inst, CacheRequest *cache_req);

    void buildDataPacket(CacheRequest *cache_req);

    bool processSquash(CacheReqPacket *cache_pkt);

    void trap(Fault fault, ThreadID tid, DynInstPtr inst);

    void recvRetry();

    /** Returns a specific port. */
    Port *getPort(const std::string &if_name, int idx);

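    /** Initiates a read of 'size' bytes at 'addr' on behalf of an
     *  instruction.
     */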
    Fault read(DynInstPtr inst, Addr addr,
               uint8_t *data, unsigned size, unsigned flags);

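    /** Initiates a write of 'size' bytes to 'addr' on behalf of an
     *  instruction; 'res' returns the result of the access (e.g., for
     *  store conditionals).
     */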
    Fault write(DynInstPtr inst, uint8_t *data, unsigned size,
                Addr addr, unsigned flags, uint64_t *res);

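    /** Performs the TLB translation for a memory request in the given
     *  access mode.
     */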
    void doTLBAccess(DynInstPtr inst, CacheReqPtr cache_req, int acc_size,
                     int flags, TheISA::TLB::Mode tlb_mode);

    /** Read/write on behalf of an instruction.
     *  curResSlot must hold a valid slot number in the instruction.
     */
    void doCacheAccess(DynInstPtr inst, uint64_t *write_result=NULL,
                       CacheReqPtr split_req=NULL);

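    /** Extracts the (up to 64-bit) data value from a returned packet. */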
    uint64_t getMemData(Packet *packet);

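    /** Record (and later clear) an instruction's outstanding access
     *  address, used to track dependences between accesses to the same
     *  address.
     */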
    void setAddrDependency(DynInstPtr inst);
    virtual void removeAddrDependency(DynInstPtr inst);

  protected:
    /** Cache interface. */
    CachePort *cachePort;

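    /** Is the cache port currently blocked, waiting for a retry? */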
    bool cachePortBlocked;

    std::list<Addr> addrList[ThePipeline::MaxThreads];

    m5::hash_map<Addr, InstSeqNum> addrMap[ThePipeline::MaxThreads];

  public:
    int cacheBlkSize;

    int cacheBlkMask;

    /** Aligns an address to the start of its cache block. */
    Addr cacheBlockAlign(Addr addr)
    {
        return (addr & ~(cacheBlkMask));
    }

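    /** Per-thread flag (and blocking sequence number) set while a thread
     *  is stalled behind the TLB.
     */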
    bool tlbBlocked[ThePipeline::MaxThreads];
    InstSeqNum tlbBlockSeqNum[ThePipeline::MaxThreads];

    TheISA::TLB* tlb();
    TheISA::TLB *_tlb;
};

class CacheUnitEvent : public ResourceEvent {
  public:
    const std::string name() const
    {
        return "CacheUnitEvent";
    }

    /** Constructs a resource event. */
    CacheUnitEvent();
    virtual ~CacheUnitEvent() {}

    /** Processes a resource event. */
    void process();
};

//@todo: Move into CacheUnit class for private access to "valid" field
class CacheRequest : public ResourceRequest
{
  public:
    CacheRequest(CacheUnit *cres)
        : ResourceRequest(cres), memReq(NULL), reqData(NULL),
          dataPkt(NULL), memAccComplete(false),
          memAccPending(false), tlbStall(false), splitAccess(false),
          splitAccessNum(-1), split2ndAccess(false),
          fetchBufferFill(false)
    { }

    virtual ~CacheRequest()
    {
        if (reqData && !splitAccess)
            delete [] reqData;
    }

    void setRequest(DynInstPtr _inst, int stage_num, int res_idx,
                    int slot_num, unsigned _cmd, MemCmd::Command pkt_cmd,
                    int idx)
    {
        pktCmd = pkt_cmd;
        instIdx = idx;

        ResourceRequest::setRequest(_inst, stage_num, res_idx, slot_num, _cmd);
    }

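    /** Resets the request's state so the entry can be reused. */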
    void clearRequest();

    virtual PacketDataPtr getData()
    { return reqData; }

    void
    setMemAccCompleted(bool completed = true)
    {
        memAccComplete = completed;
    }

    bool is2ndSplit()
    {
        return split2ndAccess;
    }

    bool isMemAccComplete() { return memAccComplete; }

    void setMemAccPending(bool pending = true) { memAccPending = pending; }
    bool isMemAccPending() { return memAccPending; }

    //Make this data private/protected!
    MemCmd::Command pktCmd;
    RequestPtr memReq;
    PacketDataPtr reqData;
    CacheReqPacket *dataPkt;

    bool memAccComplete;
    bool memAccPending;
    bool tlbStall;

    bool splitAccess;
    int splitAccessNum;
    bool split2ndAccess;
    int instIdx;

    /** Should we expect block from cache access or fetch buffer? */
    bool fetchBufferFill;
};

class CacheReqPacket : public Packet
{
  public:
    CacheReqPacket(CacheRequest *_req,
                   Command _cmd, short _dest, int _idx = 0)
        : Packet(&(*_req->memReq), _cmd, _dest), cacheReq(_req),
          instIdx(_idx), hasSlot(false), reqData(NULL), memReq(NULL)
    { }

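    /** The cache request this packet was created for. */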
    CacheRequest *cacheReq;
    int instIdx;
    bool hasSlot;
    PacketDataPtr reqData;
    RequestPtr memReq;
};

#endif // __CPU_INORDER_CACHE_UNIT_HH__