src/mem/protocol/MI_example-cache.sm
/*
 * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

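// Overview: L1 cache controller for the MI example protocol. M is the
// only stable valid state, so every demand miss issues a GETX to the
// directory and every eviction of a valid block writes the data back
// with a PUTX. All other states below are transient.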
machine(MachineType:L1Cache, "MI Example L1 Cache")
    : Sequencer * sequencer;
      CacheMemory * cacheMemory;
      Cycles cache_response_latency := 12;
      Cycles issue_latency := 2;
      bool send_evictions;

      // NETWORK BUFFERS
      MessageBuffer * requestFromCache, network="To", virtual_network="2",
            vnet_type="request";
      MessageBuffer * responseFromCache, network="To", virtual_network="4",
            vnet_type="response";

      MessageBuffer * forwardToCache, network="From", virtual_network="3",
            vnet_type="forward";
      MessageBuffer * responseToCache, network="From", virtual_network="4",
            vnet_type="response";

      MessageBuffer * mandatoryQueue;
{
  // STATES
  state_declaration(State, desc="Cache states") {
    I, AccessPermission:Invalid, desc="Not Present/Invalid";
    II, AccessPermission:Busy, desc="Not Present/Invalid, issued PUT";
    M, AccessPermission:Read_Write, desc="Modified";
    MI, AccessPermission:Busy, desc="Modified, issued PUT";
    MII, AccessPermission:Busy, desc="Modified, issued PUTX, received nack";

    IS, AccessPermission:Busy, desc="Issued request for LOAD/IFETCH";
    IM, AccessPermission:Busy, desc="Issued request for STORE/ATOMIC";
  }
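
  // Naming convention: IS and IM are misses in flight (Invalid, waiting
  // for data so a load or store can complete); MI, MII, and II track a
  // writeback that is waiting for the directory's ack/nack and may race
  // with a forwarded GETX (see the transitions at the bottom of the file).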

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // From processor

    Load, desc="Load request from processor";
    Ifetch, desc="Ifetch request from processor";
    Store, desc="Store request from processor";

    Data, desc="Data from network";
    Fwd_GETX, desc="Forward from network";

    Inv, desc="Invalidate request from dir";

    Replacement, desc="Replace a block";
    Writeback_Ack, desc="Ack from the directory for a writeback";
    Writeback_Nack, desc="Nack from the directory for a writeback";
  }

  // STRUCTURE DEFINITIONS
  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="Data in the block";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }
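
  // A TBE (Transaction Buffer Entry) is allocated for each in-flight miss
  // or writeback. Keeping a copy of the data in the TBE lets the controller
  // answer a forwarded GETX even after the cache block itself has been
  // deallocated (see ee_sendDataFromTBE below).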

  // STRUCTURES
  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  // PROTOTYPES
  Tick clockEdge();
  Cycles ticksToCycles(Tick t);
  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void profileMsgDelay(int virtualNetworkType, Cycles b);
  MachineID mapAddressToMachine(Addr addr, MachineType mtype);

  Entry getCacheEntry(Addr address), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", cacheMemory.lookup(address));
  }

  // FUNCTIONS
  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    }
    else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    else {
      return State:I;
    }
  }
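
  // The TBE state takes precedence over the cache entry state: while a
  // transaction is in flight the TBE holds the authoritative (transient)
  // state. A block with neither a TBE nor a cache entry is Invalid.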

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
          testAndWrite(addr, tbe.DataBlk, pkt);
      return num_functional_writes;
    }

    num_functional_writes := num_functional_writes +
        testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
    return num_functional_writes;
  }
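
  // Functional reads and writes bypass the timing model. Like getState,
  // they check the TBE first so that an in-flight transaction's copy of
  // the data is the one accessed.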

  // NETWORK PORTS

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);

  in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
    if (forwardRequestNetwork_in.isReady(clockEdge())) {
      peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.addr, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.addr, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToCache) {
    if (responseNetwork_in.isReady(clockEdge())) {
      peek(responseNetwork_in, ResponseMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.addr, cache_entry, tbe);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  // Mandatory Queue
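  // Processor requests arrive here. If the target block is not resident
  // and its set is full, a Replacement is triggered for the victim chosen
  // by cacheProbe(); the demand request stays queued and is retried once
  // the victim has been evicted.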
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        Entry cache_entry := getCacheEntry(in_msg.LineAddress);
        if (is_invalid(cache_entry) &&
            cacheMemory.cacheAvail(in_msg.LineAddress) == false) {
          // make room for the block
          trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
                  getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
                  TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
        }
        else {
          trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                  cache_entry, TBEs[in_msg.LineAddress]);
        }
      }
    }
  }

  // ACTIONS

  action(a_issueRequest, "a", desc="Issue a request") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.MessageSize := MessageSizeType:Control;
    }
  }

  action(b_issuePUT, "b", desc="Issue a PUT request") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }
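
  // Since M is the only valid state, a replaced block is always treated
  // as dirty: the PUTX carries the data block back to the directory.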

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }
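
  // Used when a Fwd_GETX arrives after the cache block has already been
  // deallocated for a writeback (states MI and MII): the TBE still holds
  // the data copied in by x_copyDataFromCacheToTBE.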

  action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(cacheMemory.allocate(address, new Entry));
    }
  }

  action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
    if (is_valid(cache_entry)) {
      cacheMemory.deallocate(address);
      unset_cache_entry();
    }
  }

  action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

  action(n_popResponseQueue, "n", desc="Pop the response queue") {
    Tick delay := responseNetwork_in.dequeue(clockEdge());
    profileMsgDelay(1, ticksToCycles(delay));
  }

  action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
    Tick delay := forwardRequestNetwork_in.dequeue(clockEdge());
    profileMsgDelay(2, ticksToCycles(delay));
  }

  action(p_profileMiss, "pi", desc="Profile cache miss") {
    ++cacheMemory.demand_misses;
  }

  action(p_profileHit, "ph", desc="Profile cache hit") {
    ++cacheMemory.demand_hits;
  }

  action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    cacheMemory.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, false);
  }

  action(rx_load_hit, "rx", desc="External load completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
      cacheMemory.setMRU(cache_entry);
      sequencer.readCallback(address, cache_entry.DataBlk, true,
                             machineIDToMachineType(in_msg.Sender));
    }
  }

  action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    cacheMemory.setMRU(cache_entry);
    sequencer.writeCallback(address, cache_entry.DataBlk, false);
  }

  action(sx_store_hit, "sx", desc="External store completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
      cacheMemory.setMRU(cache_entry);
      sequencer.writeCallback(address, cache_entry.DataBlk, true,
                              machineIDToMachineType(in_msg.Sender));
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to the cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    tbe.DataBlk := cache_entry.DataBlk;
  }

  action(z_stall, "z", desc="stall") {
    // do nothing
  }

  // TRANSITIONS

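  // Each transition names the current state, the triggering event, an
  // optional next state, and the actions to run in order. The first two
  // transitions stall: the triggering message is not popped, so it is
  // retried on a later cycle once the in-flight transaction completes.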
  transition({IS, IM, MI, II, MII}, {Load, Ifetch, Store, Replacement}) {
    z_stall;
  }

  transition({IS, IM}, {Fwd_GETX, Inv}) {
    z_stall;
  }

  transition(MI, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(M, Store) {
    s_store_hit;
    p_profileHit;
    m_popMandatoryQueue;
  }

  transition(M, {Load, Ifetch}) {
    r_load_hit;
    p_profileHit;
    m_popMandatoryQueue;
  }

  transition(I, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(I, Store, IM) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(I, {Load, Ifetch}, IS) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(IS, Data, M) {
    u_writeDataToCache;
    rx_load_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IM, Data, M) {
    u_writeDataToCache;
    sx_store_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(M, Fwd_GETX, I) {
    e_sendData;
    forward_eviction_to_cpu;
    o_popForwardedRequestQueue;
  }

  transition(I, Replacement) {
    h_deallocateL1CacheBlock;
  }

  transition(M, {Replacement, Inv}, MI) {
    v_allocateTBE;
    b_issuePUT;
    x_copyDataFromCacheToTBE;
    forward_eviction_to_cpu;
    h_deallocateL1CacheBlock;
  }

  transition(MI, Writeback_Ack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(MI, Fwd_GETX, II) {
    ee_sendDataFromTBE;
    o_popForwardedRequestQueue;
  }
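
  // A Fwd_GETX in MI means the directory granted ownership to another
  // requester before seeing our PUTX. The data is supplied from the TBE
  // and the writeback will be nacked, which is why II (and MII after a
  // nack-then-forward) resolve back to I.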

  transition(MI, Writeback_Nack, MII) {
    o_popForwardedRequestQueue;
  }

  transition(MII, Fwd_GETX, I) {
    ee_sendDataFromTBE;
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(II, Writeback_Nack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }
}