ruby: add stats to .sm files, remove cache profiler
src/mem/protocol/MI_example-cache.sm
/*
 * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(L1Cache, "MI Example L1 Cache")
    : Sequencer * sequencer,
      CacheMemory * cacheMemory,
      Cycles cache_response_latency = 12,
      Cycles issue_latency = 2,
      bool send_evictions
{

  // NETWORK BUFFERS
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="true", vnet_type="request";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="true", vnet_type="response";

  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="true", vnet_type="forward";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="true", vnet_type="response";
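
  // Requests, forwards, and responses travel on separate virtual
  // networks (2, 3, and 4 here) so one message class can never queue up
  // behind another; keeping the classes apart is the standard way to
  // avoid protocol deadlock, and the numbers must match those used by
  // the directory controller.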

  // STATES
  state_declaration(State, desc="Cache states") {
    I, AccessPermission:Invalid, desc="Not Present/Invalid";
    II, AccessPermission:Busy, desc="Not Present/Invalid, issued PUT";
    M, AccessPermission:Read_Write, desc="Modified";
    MI, AccessPermission:Busy, desc="Modified, issued PUT";
    MII, AccessPermission:Busy, desc="Modified, issued PUTX, received nack";

    IS, AccessPermission:Busy, desc="Issued request for LOAD/IFETCH";
    IM, AccessPermission:Busy, desc="Issued request for STORE/ATOMIC";
  }
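
  // Only I and M are stable states. This example protocol has no Shared
  // state, so a valid block is always held exclusively by one cache;
  // the remaining states (II, MI, MII, IS, IM) are transients used
  // while a request or writeback is in flight.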

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // From processor

    Load, desc="Load request from processor";
    Ifetch, desc="Ifetch request from processor";
    Store, desc="Store request from processor";

    Data, desc="Data from network";
    Fwd_GETX, desc="Forward from network";

    Inv, desc="Invalidate request from dir";

    Replacement, desc="Replace a block";
    Writeback_Ack, desc="Ack from the directory for a writeback";
    Writeback_Nack, desc="Nack from the directory for a writeback";
  }

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="Data in the block";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }
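
  // A TBE (transaction buffer entry, analogous to an MSHR) tracks each
  // outstanding transaction. It carries its own copy of the data block
  // because on a writeback the cache entry is deallocated before the
  // directory acknowledges the PUT.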

  // STRUCTURES
  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  // PROTOTYPES
  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();

  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", cacheMemory.lookup(address));
  }

  // FUNCTIONS
  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {

    if (is_valid(tbe)) {
      return tbe.TBEState;
    }
    else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    else {
      return State:I;
    }
  }
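
  // The TBE state takes precedence over the cache entry's state: while
  // a transaction is in flight the TBE holds the authoritative
  // (transient) state, and a block with neither a TBE nor a cache entry
  // is by definition in state I.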

  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Address addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return tbe.DataBlk;
    }

    return getCacheEntry(addr).DataBlk;
  }

  GenericMachineType getNondirectHitMachType(MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      //
      // NOTE direct local hits should not call this
      //
      return GenericMachineType:L1Cache_wCC;
    } else {
      return ConvertMachToGenericMach(machineIDToMachineType(sender));
    }
  }


  // NETWORK PORTS

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);

  in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
    if (forwardRequestNetwork_in.isReady()) {
      peek(forwardRequestNetwork_in, RequestMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Fwd_GETX, in_msg.Address, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.Address, cache_entry, tbe);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToCache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address, cache_entry, tbe);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  // Mandatory Queue
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        Entry cache_entry := getCacheEntry(in_msg.LineAddress);
        if (is_invalid(cache_entry) &&
            cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
          // make room for the block
          trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
                  getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
                  TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
        }
        else {
          trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                  cache_entry, TBEs[in_msg.LineAddress]);
        }
      }
    }
  }
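
  // On a miss to a full set, the Replacement event is triggered on the
  // victim address returned by cacheProbe(), not on the requested
  // address. The original request is left on the mandatory queue and
  // retried once the victim has been evicted.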

  // ACTIONS

  action(a_issueRequest, "a", desc="Issue a request") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Control;
    }
  }
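
  // Every miss issues a GETX, even for loads and ifetches: with no
  // Shared state in this protocol, the only way to get a readable copy
  // is to acquire the exclusive (Modified) copy.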

  action(b_issuePUT, "b", desc="Issue a PUT request") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(cache_entry));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }
  action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
    // Allocate only if the block is not already present.
    if (is_invalid(cache_entry)) {
      set_cache_entry(cacheMemory.allocate(address, new Entry));
    }
  }

  action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
    if (is_valid(cache_entry)) {
      cacheMemory.deallocate(address);
      unset_cache_entry();
    }
  }

  action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
    mandatoryQueue_in.dequeue();
  }

  action(n_popResponseQueue, "n", desc="Pop the response queue") {
    profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
  }

  action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
    profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
  }

  action(p_profileMiss, "pi", desc="Profile cache miss") {
    ++cacheMemory.demand_misses;
  }

  action(p_profileHit, "ph", desc="Profile cache hit") {
    ++cacheMemory.demand_hits;
  }
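
  // demand_hits and demand_misses are statistics kept by the CacheMemory
  // object itself; bumping them here is what replaces the old standalone
  // cache profiler referred to in the commit message.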

  action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address,
                           GenericMachineType:L1Cache,
                           cache_entry.DataBlk);
  }

  action(rx_load_hit, "rx", desc="External load completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
      sequencer.readCallback(address,
                             getNondirectHitMachType(in_msg.Sender),
                             cache_entry.DataBlk);
    }
  }

  action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
    sequencer.writeCallback(address,
                            GenericMachineType:L1Cache,
                            cache_entry.DataBlk);
  }

  action(sx_store_hit, "sx", desc="External store completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
      sequencer.writeCallback(address,
                              getNondirectHitMachType(in_msg.Sender),
                              cache_entry.DataBlk);
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to the cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    tbe.DataBlk := cache_entry.DataBlk;
  }

  action(z_stall, "z", desc="stall") {
    // do nothing
  }
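
  // z_stall performs no action and, in particular, pops no queue, so
  // the triggering message stays at the head of its buffer and the
  // event fires again until the block leaves its transient state.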

  // TRANSITIONS

  transition({IS, IM, MI, II, MII}, {Load, Ifetch, Store, Replacement}) {
    z_stall;
  }

  transition({IS, IM}, {Fwd_GETX, Inv}) {
    z_stall;
  }

  transition(MI, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(M, Store) {
    s_store_hit;
    p_profileHit;
    m_popMandatoryQueue;
  }

  transition(M, {Load, Ifetch}) {
    r_load_hit;
    p_profileHit;
    m_popMandatoryQueue;
  }

  transition(I, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(I, Store, IM) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(I, {Load, Ifetch}, IS) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(IS, Data, M) {
    u_writeDataToCache;
    rx_load_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IM, Data, M) {
    u_writeDataToCache;
    sx_store_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(M, Fwd_GETX, I) {
    e_sendData;
    forward_eviction_to_cpu;
    o_popForwardedRequestQueue;
  }

  transition(I, Replacement) {
    h_deallocateL1CacheBlock;
  }

  transition(M, {Replacement,Inv}, MI) {
    v_allocateTBE;
    b_issuePUT;
    x_copyDataFromCacheToTBE;
    forward_eviction_to_cpu;
    h_deallocateL1CacheBlock;
  }
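
  // The dirty data must survive in the TBE (x_copyDataFromCacheToTBE)
  // because the cache block is freed immediately, yet the data may
  // still be needed if the PUT is nacked or a forwarded GETX arrives
  // before the directory's answer.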

  transition(MI, Writeback_Ack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(MI, Fwd_GETX, II) {
    ee_sendDataFromTBE;
    o_popForwardedRequestQueue;
  }
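
  // The PUT raced with another cache's GETX that the directory had
  // already forwarded here: supply the data from the TBE and wait in II
  // for the directory to nack the now-stale PUT.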

  transition(MI, Writeback_Nack, MII) {
    o_popForwardedRequestQueue;
  }
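
  // The same race seen in the other order: the nack arrives before the
  // forwarded GETX, so keep the data in the TBE (state MII) until that
  // GETX shows up and can be answered.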

  transition(MII, Fwd_GETX, I) {
    ee_sendDataFromTBE;
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(II, Writeback_Nack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }
}