ruby: replace Address by Addr
[gem5.git] src/mem/protocol/MI_example-cache.sm
/*
 * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(L1Cache, "MI Example L1 Cache")
 : Sequencer * sequencer;
   CacheMemory * cacheMemory;
   Cycles cache_response_latency := 12;
   Cycles issue_latency := 2;
   bool send_evictions;

   // NETWORK BUFFERS
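   // Requests (vnet 2), forwards (vnet 3), and responses (vnet 4) travel on
   // separate virtual networks so that dependent message classes cannot
   // block one another and cause protocol-level deadlock.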
   MessageBuffer * requestFromCache, network="To", virtual_network="2",
        vnet_type="request";
   MessageBuffer * responseFromCache, network="To", virtual_network="4",
        vnet_type="response";

   MessageBuffer * forwardToCache, network="From", virtual_network="3",
        vnet_type="forward";
   MessageBuffer * responseToCache, network="From", virtual_network="4",
        vnet_type="response";
{
  // STATES
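  // I and M are the only stable states; every other state is a transient
  // state held while a request or writeback is outstanding at the directory.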
  state_declaration(State, desc="Cache states") {
    I, AccessPermission:Invalid, desc="Not Present/Invalid";
    II, AccessPermission:Busy, desc="Not Present/Invalid, issued PUT";
    M, AccessPermission:Read_Write, desc="Modified";
    MI, AccessPermission:Busy, desc="Modified, issued PUT";
    MII, AccessPermission:Busy, desc="Modified, issued PUTX, received nack";

    IS, AccessPermission:Busy, desc="Issued request for LOAD/IFETCH";
    IM, AccessPermission:Busy, desc="Issued request for STORE/ATOMIC";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // From processor

    Load, desc="Load request from processor";
    Ifetch, desc="Ifetch request from processor";
    Store, desc="Store request from processor";

    Data, desc="Data from network";
    Fwd_GETX, desc="Forward from network";

    Inv, desc="Invalidate request from dir";

    Replacement, desc="Replace a block";
    Writeback_Ack, desc="Ack from the directory for a writeback";
    Writeback_Nack, desc="Nack from the directory for a writeback";
  }

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue;

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="Data in the block";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }


  // STRUCTURES
  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  // PROTOTYPES
  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void profileMsgDelay(int virtualNetworkType, Cycles b);

  Entry getCacheEntry(Addr address), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", cacheMemory.lookup(address));
  }

  // FUNCTIONS
  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

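  // State resolution: a valid TBE (transient state) takes precedence over the
  // state stored in the cache entry; a block with neither is Invalid.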
  State getState(TBE tbe, Entry cache_entry, Addr addr) {

    if (is_valid(tbe)) {
      return tbe.TBEState;
    }
    else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    else {
      return State:I;
    }
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

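  // Functional accesses follow the same precedence: service the access from
  // the TBE copy if one exists, otherwise from the cache entry itself.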
  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, tbe.DataBlk, pkt);
      return num_functional_writes;
    }

    num_functional_writes := num_functional_writes +
      testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
    return num_functional_writes;
  }

  // NETWORK PORTS

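  // out_ports are the handles used by enqueue() in the actions below; each
  // in_port peeks at the message at the head of its buffer and converts it
  // into a protocol event via trigger().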
  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);

  in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
    if (forwardRequestNetwork_in.isReady()) {
      peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.addr, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.addr, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToCache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.addr, cache_entry, tbe);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  // Mandatory Queue
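  // Demand requests (loads, ifetches, stores) from the attached sequencer.
  // If the target block is not resident and its set is full, a Replacement
  // is triggered for the victim chosen by cacheProbe() before the demand
  // request itself can be handled.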
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        Entry cache_entry := getCacheEntry(in_msg.LineAddress);
        if (is_invalid(cache_entry) &&
            cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
          // make room for the block
          trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
                  getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
                  TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
        }
        else {
          trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                  cache_entry, TBEs[in_msg.LineAddress]);
        }
      }
    }
  }

  // ACTIONS
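  // Within an action body, 'address', 'cache_entry', and 'tbe' refer to the
  // arguments that the triggering in_port passed to trigger().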

  action(a_issueRequest, "a", desc="Issue a request") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Control;
    }
  }

  action(b_issuePUT, "b", desc="Issue a PUT request") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
    if (is_valid(cache_entry)) {
    } else {
      set_cache_entry(cacheMemory.allocate(address, new Entry));
    }
  }

  action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
    if (is_valid(cache_entry)) {
      cacheMemory.deallocate(address);
      unset_cache_entry();
    }
  }

  action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
    mandatoryQueue_in.dequeue();
  }

  action(n_popResponseQueue, "n", desc="Pop the response queue") {
    profileMsgDelay(1, responseNetwork_in.dequeue());
  }

  action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
    profileMsgDelay(2, forwardRequestNetwork_in.dequeue());
  }

  action(p_profileMiss, "pi", desc="Profile cache miss") {
    ++cacheMemory.demand_misses;
  }

  action(p_profileHit, "ph", desc="Profile cache hit") {
    ++cacheMemory.demand_hits;
  }

  action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address, cache_entry.DataBlk, false);
  }

  action(rx_load_hit, "rx", desc="External load completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
      sequencer.readCallback(address, cache_entry.DataBlk, true,
                             machineIDToMachineType(in_msg.Sender));
    }
  }

  action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.writeCallback(address, cache_entry.DataBlk, false);
  }

  action(sx_store_hit, "sx", desc="External store completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
      sequencer.writeCallback(address, cache_entry.DataBlk, true,
                              machineIDToMachineType(in_msg.Sender));
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to the cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    tbe.DataBlk := cache_entry.DataBlk;
  }

  action(z_stall, "z", desc="stall") {
    // do nothing
  }

  // TRANSITIONS
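  // transition(current_state(s), triggering_event(s) [, next_state]) {
  //   actions, executed in the order listed
  // }
  // If the next state is omitted, the block remains in its current state.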

  transition({IS, IM, MI, II, MII}, {Load, Ifetch, Store, Replacement}) {
    z_stall;
  }

  transition({IS, IM}, {Fwd_GETX, Inv}) {
    z_stall;
  }

  transition(MI, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(M, Store) {
    s_store_hit;
    p_profileHit;
    m_popMandatoryQueue;
  }

  transition(M, {Load, Ifetch}) {
    r_load_hit;
    p_profileHit;
    m_popMandatoryQueue;
  }

  transition(I, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(I, Store, IM) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(I, {Load, Ifetch}, IS) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(IS, Data, M) {
    u_writeDataToCache;
    rx_load_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IM, Data, M) {
    u_writeDataToCache;
    sx_store_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(M, Fwd_GETX, I) {
    e_sendData;
    forward_eviction_to_cpu;
    o_popForwardedRequestQueue;
  }

  transition(I, Replacement) {
    h_deallocateL1CacheBlock;
  }

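  // Writeback path: the dirty data is copied into a TBE before the block is
  // deallocated, so it can still be supplied from the TBE (ee_sendDataFromTBE)
  // if a Fwd_GETX races with the outstanding PUTX.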
  transition(M, {Replacement,Inv}, MI) {
    v_allocateTBE;
    b_issuePUT;
    x_copyDataFromCacheToTBE;
    forward_eviction_to_cpu;
    h_deallocateL1CacheBlock;
  }

  transition(MI, Writeback_Ack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(MI, Fwd_GETX, II) {
    ee_sendDataFromTBE;
    o_popForwardedRequestQueue;
  }

  transition(MI, Writeback_Nack, MII) {
    o_popForwardedRequestQueue;
  }

  transition(MII, Fwd_GETX, I) {
    ee_sendDataFromTBE;
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(II, Writeback_Nack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }
}