mem: Fix guest corruption when caches handle uncacheable accesses
src/mem/protocol/MI_example-cache.sm
/*
 * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(L1Cache, "MI Example L1 Cache")
    : Sequencer * sequencer,
      CacheMemory * cacheMemory,
      int cache_response_latency = 12,
      int issue_latency = 2,
      bool send_evictions
{
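  // Protocol overview: MI_example keeps blocks in only two stable states,
  // Invalid (I) and Modified (M).  Every demand miss issues a GETX to the
  // directory and fills in M, so even read-only data is held exclusively.
  // All remaining states are transient and exist only to cover the latency
  // of outstanding GETX and PUTX transactions.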

  // NETWORK BUFFERS
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="true", vnet_type="request";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="true", vnet_type="response";

  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="true", vnet_type="forward";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="true", vnet_type="response";

  // STATES
  state_declaration(State, desc="Cache states") {
    I,   AccessPermission:Invalid,    desc="Not Present/Invalid";
    II,  AccessPermission:Busy,       desc="Not Present/Invalid, issued PUT";
    M,   AccessPermission:Read_Write, desc="Modified";
    MI,  AccessPermission:Busy,       desc="Modified, issued PUT";
    MII, AccessPermission:Busy,       desc="Modified, issued PUTX, received nack";

    IS,  AccessPermission:Busy,       desc="Issued request for LOAD/IFETCH";
    IM,  AccessPermission:Busy,       desc="Issued request for STORE/ATOMIC";
  }
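  // I and M are the only stable states.  IS and IM track an outstanding
  // GETX for a load or a store, while MI, MII and II track a writeback
  // (PUTX) that may race with a GETX the directory forwarded before it
  // saw the PUTX.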

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // From processor
    Load,           desc="Load request from processor";
    Ifetch,         desc="Ifetch request from processor";
    Store,          desc="Store request from processor";

    Data,           desc="Data from network";
    Fwd_GETX,       desc="Forward from network";

    Inv,            desc="Invalidate request from dir";

    Replacement,    desc="Replace a block";
    Writeback_Ack,  desc="Ack from the directory for a writeback";
    Writeback_Nack, desc="Nack from the directory for a writeback";
  }
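  // Data arrives on the response virtual network; Fwd_GETX, Inv,
  // Writeback_Ack and Writeback_Nack all arrive from the directory on the
  // forward virtual network (see the in_ports below).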

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState,  desc="cache state";
    bool Dirty,        desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="Data in the block";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState,    desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // STRUCTURES
  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  // PROTOTYPES
  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();

  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", cacheMemory.lookup(address));
  }

  // FUNCTIONS
  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

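  // A valid TBE means a transaction is in flight for this address, so the
  // TBE's transient state takes precedence over whatever the cache entry
  // (or its absence) would imply.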
  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    }
    else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    else {
      return State:I;
    }
  }

  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Address addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return tbe.DataBlk;
    }

    return getCacheEntry(addr).DataBlk;
  }

  GenericMachineType getNondirectHitMachType(MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      //
      // NOTE direct local hits should not call this
      //
      return GenericMachineType:L1Cache_wCC;
    } else {
      return ConvertMachToGenericMach(machineIDToMachineType(sender));
    }
  }

  // NETWORK PORTS

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);

  in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
    if (forwardRequestNetwork_in.isReady()) {
      peek(forwardRequestNetwork_in, RequestMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Fwd_GETX, in_msg.Address, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.Address, cache_entry, tbe);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToCache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address, cache_entry, tbe);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  // Mandatory Queue
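  // Demand requests from the Sequencer.  If the requested line is absent
  // and its set has no free way, a Replacement is triggered on the victim
  // chosen by cacheProbe(); the demand request stays at the head of the
  // mandatory queue and is retried once the victim has been evicted.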
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        Entry cache_entry := getCacheEntry(in_msg.LineAddress);
        if (is_invalid(cache_entry) &&
            cacheMemory.cacheAvail(in_msg.LineAddress) == false) {
          // make room for the block
          trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
                  getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
                  TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
        }
        else {
          trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                  cache_entry, TBEs[in_msg.LineAddress]);
        }
      }
    }
  }

  // ACTIONS

  action(a_issueRequest, "a", desc="Issue a request") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Control;
    }
  }

  action(b_issuePUT, "b", desc="Issue a PUT request") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(cache_entry));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(cacheMemory.allocate(address, new Entry));
    }
  }

  action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
    if (is_valid(cache_entry)) {
      cacheMemory.deallocate(address);
      unset_cache_entry();
    }
  }

  action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
    mandatoryQueue_in.dequeue();
  }

  action(n_popResponseQueue, "n", desc="Pop the response queue") {
    profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
  }

  action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
    profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
  }

  action(p_profileMiss, "p", desc="Profile cache miss") {
    peek(mandatoryQueue_in, RubyRequest) {
      cacheMemory.profileMiss(in_msg);
    }
  }

  action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address,
                           GenericMachineType:L1Cache,
                           cache_entry.DataBlk);
  }

  action(rx_load_hit, "rx", desc="External load completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
      sequencer.readCallback(address,
                             getNondirectHitMachType(in_msg.Sender),
                             cache_entry.DataBlk);
    }
  }

  action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.writeCallback(address,
                            GenericMachineType:L1Cache,
                            cache_entry.DataBlk);
  }

  action(sx_store_hit, "sx", desc="External store completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
      sequencer.writeCallback(address,
                              getNondirectHitMachType(in_msg.Sender),
                              cache_entry.DataBlk);
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to the cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    tbe.DataBlk := cache_entry.DataBlk;
  }

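  // z_stall performs no work, and the stalling transitions below never pop
  // the offending queue, so the blocked message stays at the head and is
  // retried until the transient state resolves.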
  action(z_stall, "z", desc="stall") {
    // do nothing
  }

  // TRANSITIONS

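  // Demand requests and external events that arrive while a transaction
  // is outstanding are stalled until the line reaches a stable state.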
  transition({IS, IM, MI, II}, {Load, Ifetch, Store, Replacement}) {
    z_stall;
  }

  transition({IS, IM}, {Fwd_GETX, Inv}) {
    z_stall;
  }

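  // An Inv that arrives in MI is stale: the modified data is already on
  // its way to the directory in the PUTX, so the invalidate is dropped.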
  transition(MI, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(M, Store) {
    s_store_hit;
    m_popMandatoryQueue;
  }

  transition(M, {Load, Ifetch}) {
    r_load_hit;
    m_popMandatoryQueue;
  }

  transition(I, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(I, Store, IM) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(I, {Load, Ifetch}, IS) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

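  // On a fill, the incoming data is written to the cache entry before the
  // sequencer callback runs, so the load or store operates on the freshly
  // filled block.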
  transition(IS, Data, M) {
    u_writeDataToCache;
    rx_load_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IM, Data, M) {
    u_writeDataToCache;
    sx_store_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(M, Fwd_GETX, I) {
    e_sendData;
    forward_eviction_to_cpu;
    o_popForwardedRequestQueue;
  }

  transition(I, Replacement) {
    h_deallocateL1CacheBlock;
  }

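  // On a writeback, the data is copied into the TBE before the cache block
  // is deallocated so that a Fwd_GETX racing with the PUTX can still be
  // serviced from the TBE (see the MI/MII/II transitions below).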
  transition(M, {Replacement, Inv}, MI) {
    v_allocateTBE;
    b_issuePUT;
    x_copyDataFromCacheToTBE;
    forward_eviction_to_cpu;
    h_deallocateL1CacheBlock;
  }

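  // Writeback races: if the directory forwarded a GETX before it saw our
  // PUTX, the data is supplied from the TBE and the PUTX will eventually
  // be nacked; the TBE is freed once both sides of the race have resolved.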
  transition(MI, Writeback_Ack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(MI, Fwd_GETX, II) {
    ee_sendDataFromTBE;
    o_popForwardedRequestQueue;
  }

  transition(MI, Writeback_Nack, MII) {
    o_popForwardedRequestQueue;
  }

  transition(MII, Fwd_GETX, I) {
    ee_sendDataFromTBE;
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(II, Writeback_Nack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }
}