ruby: slicc: have a static MachineType
[gem5.git] / src / mem / protocol / MI_example-cache.sm
/*
 * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:L1Cache, "MI Example L1 Cache")
    : Sequencer * sequencer;
      CacheMemory * cacheMemory;
      Cycles cache_response_latency := 12;
      Cycles issue_latency := 2;
      bool send_evictions;

      // NETWORK BUFFERS
      MessageBuffer * requestFromCache, network="To", virtual_network="2",
            vnet_type="request";
      MessageBuffer * responseFromCache, network="To", virtual_network="4",
            vnet_type="response";

      MessageBuffer * forwardToCache, network="From", virtual_network="3",
            vnet_type="forward";
      MessageBuffer * responseToCache, network="From", virtual_network="4",
            vnet_type="response";

      MessageBuffer * mandatoryQueue;
{
  // STATES
  state_declaration(State, desc="Cache states") {
    I, AccessPermission:Invalid, desc="Not Present/Invalid";
    II, AccessPermission:Busy, desc="Not Present/Invalid, issued PUT";
    M, AccessPermission:Read_Write, desc="Modified";
    MI, AccessPermission:Busy, desc="Modified, issued PUT";
    MII, AccessPermission:Busy, desc="Modified, issued PUTX, received nack";

    IS, AccessPermission:Busy, desc="Issued request for LOAD/IFETCH";
    IM, AccessPermission:Busy, desc="Issued request for STORE/ATOMIC";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // From processor

    Load, desc="Load request from processor";
    Ifetch, desc="Ifetch request from processor";
    Store, desc="Store request from processor";

    Data, desc="Data from network";
    Fwd_GETX, desc="Forward from network";

    Inv, desc="Invalidate request from dir";

    Replacement, desc="Replace a block";
    Writeback_Ack, desc="Ack from the directory for a writeback";
    Writeback_Nack, desc="Nack from the directory for a writeback";
  }

  // STRUCTURE DEFINITIONS
  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="Data in the block";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }


  // STRUCTURES
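  // The TBE table tracks blocks with a transaction in flight (an outstanding
  // GETX or PUTX). It is indexed by block address and sized by
  // m_number_of_TBEs, the controller's TBE-count parameter.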
  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  // PROTOTYPES
  Tick clockEdge();
  Cycles ticksToCycles(Tick t);
  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void profileMsgDelay(int virtualNetworkType, Cycles b);

  Entry getCacheEntry(Addr address), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", cacheMemory.lookup(address));
  }

  // FUNCTIONS
  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

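  // While a transaction is in flight, the block's authoritative state lives
  // in the TBE, so the TBE state takes precedence over the cache entry's.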
  State getState(TBE tbe, Entry cache_entry, Addr addr) {

    if (is_valid(tbe)) {
      return tbe.TBEState;
    }
    else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    else {
      return State:I;
    }
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

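  // Functional reads/writes bypass the timing model: they use the TBE's copy
  // of the data if a transaction is in flight, otherwise the cache block.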
  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, tbe.DataBlk, pkt);
      return num_functional_writes;
    }

    num_functional_writes := num_functional_writes +
      testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
    return num_functional_writes;
  }

  // NETWORK PORTS

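  // out_ports name the message buffers this controller enqueues into;
  // in_ports inspect incoming messages and trigger the matching events.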
  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);

  in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
    if (forwardRequestNetwork_in.isReady(clockEdge())) {
      peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.addr, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.addr, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToCache) {
    if (responseNetwork_in.isReady(clockEdge())) {
      peek(responseNetwork_in, ResponseMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.addr, cache_entry, tbe);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  // Mandatory Queue
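  // CPU requests arrive from the Sequencer on the mandatory queue. If the
  // target block is absent and its set is full, trigger a Replacement on the
  // victim chosen by cacheProbe(); the request itself stays queued until the
  // victim has been written back.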
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        Entry cache_entry := getCacheEntry(in_msg.LineAddress);
        if (is_invalid(cache_entry) &&
            cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
          // make room for the block
          trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
                  getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
                  TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
        }
        else {
          trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                  cache_entry, TBEs[in_msg.LineAddress]);
        }
      }
    }
  }

  // ACTIONS

  action(a_issueRequest, "a", desc="Issue a request") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Control;
    }
  }

  action(b_issuePUT, "b", desc="Issue a PUT request") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(cacheMemory.allocate(address, new Entry));
    }
  }

  action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
    if (is_valid(cache_entry)) {
      cacheMemory.deallocate(address);
      unset_cache_entry();
    }
  }

  action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

  action(n_popResponseQueue, "n", desc="Pop the response queue") {
    Tick delay := responseNetwork_in.dequeue(clockEdge());
    profileMsgDelay(1, ticksToCycles(delay));
  }

  action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
    Tick delay := forwardRequestNetwork_in.dequeue(clockEdge());
    profileMsgDelay(2, ticksToCycles(delay));
  }

  action(p_profileMiss, "pi", desc="Profile cache miss") {
    ++cacheMemory.demand_misses;
  }

  action(p_profileHit, "ph", desc="Profile cache hit") {
    ++cacheMemory.demand_hits;
  }

  action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    cacheMemory.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, false);
  }

  action(rx_load_hit, "rx", desc="External load completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
      cacheMemory.setMRU(cache_entry);
      sequencer.readCallback(address, cache_entry.DataBlk, true,
                             machineIDToMachineType(in_msg.Sender));
    }
  }

  action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    cacheMemory.setMRU(cache_entry);
    sequencer.writeCallback(address, cache_entry.DataBlk, false);
  }

  action(sx_store_hit, "sx", desc="External store completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
      cacheMemory.setMRU(cache_entry);
      sequencer.writeCallback(address, cache_entry.DataBlk, true,
                              machineIDToMachineType(in_msg.Sender));
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to the cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    tbe.DataBlk := cache_entry.DataBlk;
  }

  action(z_stall, "z", desc="stall") {
    // do nothing
  }

  // TRANSITIONS

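  // A transition that only runs z_stall does not pop the triggering queue;
  // the message stays at the head and is retried until the block leaves its
  // transient state.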
  transition({IS, IM, MI, II, MII}, {Load, Ifetch, Store, Replacement}) {
    z_stall;
  }

  transition({IS, IM}, {Fwd_GETX, Inv}) {
    z_stall;
  }

  transition(MI, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(M, Store) {
    s_store_hit;
    p_profileHit;
    m_popMandatoryQueue;
  }

  transition(M, {Load, Ifetch}) {
    r_load_hit;
    p_profileHit;
    m_popMandatoryQueue;
  }

  transition(I, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(I, Store, IM) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(I, {Load, Ifetch}, IS) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(IS, Data, M) {
    u_writeDataToCache;
    rx_load_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IM, Data, M) {
    u_writeDataToCache;
    sx_store_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

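  // Another cache wants the block: the directory forwarded its GETX here.
  // Supply the data directly to the requestor and invalidate the local copy.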
  transition(M, Fwd_GETX, I) {
    e_sendData;
    forward_eviction_to_cpu;
    o_popForwardedRequestQueue;
  }

  transition(I, Replacement) {
    h_deallocateL1CacheBlock;
  }

  transition(M, {Replacement,Inv}, MI) {
    v_allocateTBE;
    b_issuePUT;
    x_copyDataFromCacheToTBE;
    forward_eviction_to_cpu;
    h_deallocateL1CacheBlock;
  }

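  // Writeback handshake: the PUTX issued in MI is either acknowledged by the
  // directory (Writeback_Ack) or nacked (Writeback_Nack) when it lost a race
  // with another requestor's GETX; in the nack case the data is supplied
  // from the TBE once the forwarded GETX arrives.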
  transition(MI, Writeback_Ack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(MI, Fwd_GETX, II) {
    ee_sendDataFromTBE;
    o_popForwardedRequestQueue;
  }

  transition(MI, Writeback_Nack, MII) {
    o_popForwardedRequestQueue;
  }

  transition(MII, Fwd_GETX, I) {
    ee_sendDataFromTBE;
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(II, Writeback_Nack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }
}