ruby: cleaning up RubyQueue and RubyNetwork dprintfs
src/mem/protocol/MESI_SCMP_bankdirectory-L1cache.sm
/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id: MSI_MOSI_CMP_directory-L1cache.sm 1.10 05/01/19 15:55:40-06:00 beckmann@s0-28.cs.wisc.edu $
 *
 */

machine(L1Cache, "MESI Directory L1 Cache CMP") {

  // NODE L1 CACHE
  // From this node's L1 cache TO the network

  // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
  MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false";
  // a local L1 -> this L2 bank
  MessageBuffer responseFromL1Cache, network="To", virtual_network="3", ordered="false";
  MessageBuffer unblockFromL1Cache, network="To", virtual_network="4", ordered="false";

  // To this node's L1 cache FROM the network
  // an L2 bank -> this L1
  MessageBuffer requestToL1Cache, network="From", virtual_network="1", ordered="false";
  // an L2 bank -> this L1
  MessageBuffer responseToL1Cache, network="From", virtual_network="3", ordered="false";

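  // Virtual network usage, per the declarations above: requests leave on
  // vnet 0, forwarded requests arrive on vnet 1, responses travel both ways
  // on vnet 3, and unblocks leave on vnet 4 (vnet 2 is presumably used by
  // the L2 and directory controllers elsewhere in this protocol).
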
  // STATES
  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    NP, desc="Not present in either cache";
    I, desc="an L1 cache entry Idle";
    S, desc="an L1 cache entry Shared";
    E, desc="an L1 cache entry Exclusive";
    M, desc="an L1 cache entry Modified", format="!b";

    // Transient States
    IS, desc="L1 idle, issued GETS, have not seen response yet";
    IM, desc="L1 idle, issued GETX, have not seen response yet";
    SM, desc="L1 shared, issued GETX, have not seen response yet";
    IS_I, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";

    M_I, desc="L1 replacing, waiting for ACK";
    E_I, desc="L1 replacing, waiting for ACK";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // L1 events
    Load, desc="Load request from the home processor";
    Ifetch, desc="I-fetch request from the home processor";
    Store, desc="Store request from the home processor";

    Inv, desc="Invalidate request from L2 bank";

    // internally generated request
    L1_Replacement, desc="L1 Replacement", format="!r";

    // other requests
    Fwd_GETX, desc="GETX from other processor";
    Fwd_GETS, desc="GETS from other processor";
    Fwd_GET_INSTR, desc="GET_INSTR from other processor";

    Data, desc="Data for processor";
    Data_Exclusive, desc="Exclusive data for processor";
    DataS_fromL1, desc="data for GETS request, need to unblock directory";
    Data_all_Acks, desc="Data for processor, all acks";

    Ack, desc="Ack for processor";
    Ack_all, desc="Last ack for processor";

    WB_Ack, desc="Ack for replacement";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    DataBlock DataBlk, desc="data for the block";
    bool Dirty, default="false", desc="data is dirty";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Address Address, desc="Physical address for this TBE";
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="Buffer for the data block";
    bool Dirty, default="false", desc="data is dirty";
    bool isPrefetch, desc="Set if this was caused by a prefetch";
    int pendingAcks, default="0", desc="number of pending acks";
  }

  external_type(CacheMemory) {
    bool cacheAvail(Address);
    Address cacheProbe(Address);
    void allocate(Address);
    void deallocate(Address);
    Entry lookup(Address);
    void changePermission(Address, AccessPermission);
    bool isTagPresent(Address);
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable L1_TBEs, template_hack="<L1Cache_TBE>";

  CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
  CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";

  MessageBuffer mandatoryQueue, ordered="false", rank="100", abstract_chip_ptr="true";

  Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";

  int cache_state_to_int(State state);

  // inclusive cache returns L1 entries only
  Entry getL1CacheEntry(Address addr), return_by_ref="yes" {
    if (L1DcacheMemory.isTagPresent(addr)) {
      return L1DcacheMemory[addr];
    } else {
      return L1IcacheMemory[addr];
    }
  }

  void changeL1Permission(Address addr, AccessPermission permission) {
    if (L1DcacheMemory.isTagPresent(addr)) {
      return L1DcacheMemory.changePermission(addr, permission);
    } else if (L1IcacheMemory.isTagPresent(addr)) {
      return L1IcacheMemory.changePermission(addr, permission);
    } else {
      error("cannot change permission, L1 block not present");
    }
  }

  bool isL1CacheTagPresent(Address addr) {
    return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
  }

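  // State lookup order: a pending TBE (transient state) wins over the cache
  // entry's state, and a block present in neither structure is NP.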
  State getState(Address addr) {
    if ((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == true) {
      DEBUG_EXPR(id);
      DEBUG_EXPR(addr);
    }
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);

    if (L1_TBEs.isPresent(addr)) {
      return L1_TBEs[addr].TBEState;
    } else if (isL1CacheTagPresent(addr)) {
      return getL1CacheEntry(addr).CacheState;
    }
    return State:NP;
  }

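  // Access permissions follow the stable states set below: I maps to Invalid,
  // S and E to Read_Only, M to Read_Write, and every transient state to Busy.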
  void setState(Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);

    // MUST CHANGE
    if (L1_TBEs.isPresent(addr)) {
      L1_TBEs[addr].TBEState := state;
    }

    if (isL1CacheTagPresent(addr)) {
      getL1CacheEntry(addr).CacheState := state;

      // Set permission
      if (state == State:I) {
        changeL1Permission(addr, AccessPermission:Invalid);
      } else if (state == State:S || state == State:E) {
        changeL1Permission(addr, AccessPermission:Read_Only);
      } else if (state == State:M) {
        changeL1Permission(addr, AccessPermission:Read_Write);
      } else {
        changeL1Permission(addr, AccessPermission:Busy);
      }
    }
  }

  Event mandatory_request_type_to_event(CacheRequestType type) {
    if (type == CacheRequestType:LD) {
      return Event:Load;
    } else if (type == CacheRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid CacheRequestType");
    }
  }

  GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      return GenericMachineType:L1Cache_wCC; // NOTE direct L1 hits should not call this
    } else if (machineIDToMachineType(sender) == MachineType:L2Cache) {
      return GenericMachineType:L2Cache;
    } else {
      return ConvertMachToGenericMach(machineIDToMachineType(sender));
    }
  }

  out_port(requestIntraChipL1Network_out, RequestMsg, requestFromL1Cache);
  out_port(responseIntraChipL1Network_out, ResponseMsg, responseFromL1Cache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);

  // Response IntraChip L1 Network - response msg to this L1 cache
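  // A DATA message from another L1 while this cache is in IS/IS_I carries
  // data for a GETS and still requires a directory unblock (DataS_fromL1);
  // otherwise the pending-ack count in the TBE decides between Data/Ack and
  // the terminal Data_all_Acks/Ack_all events.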
  in_port(responseIntraChipL1Network_in, ResponseMsg, responseToL1Cache) {
    if (responseIntraChipL1Network_in.isReady()) {
      peek(responseIntraChipL1Network_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Data_Exclusive, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          if ((getState(in_msg.Address) == State:IS || getState(in_msg.Address) == State:IS_I) &&
              machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
            trigger(Event:DataS_fromL1, in_msg.Address);
          } else if ((L1_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0) {
            trigger(Event:Data_all_Acks, in_msg.Address);
          } else {
            trigger(Event:Data, in_msg.Address);
          }
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          if ((L1_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0) {
            trigger(Event:Ack_all, in_msg.Address);
          } else {
            trigger(Event:Ack, in_msg.Address);
          }
        } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
          trigger(Event:WB_Ack, in_msg.Address);
        } else {
          error("Invalid L1 response type");
        }
      }
    }
  }

  // Request IntraChip L1 Network - requests forwarded to this L1 cache from the shared L2
  in_port(requestIntraChipL1Network_in, RequestMsg, requestToL1Cache) {
    if (requestIntraChipL1Network_in.isReady()) {
      peek(requestIntraChipL1Network_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:UPGRADE) {
          // upgrade transforms to GETX due to race
          trigger(Event:Fwd_GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:Fwd_GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
          trigger(Event:Fwd_GET_INSTR, in_msg.Address);
        } else {
          error("Invalid forwarded request type");
        }
      }
    }
  }

  // Mandatory queue between the node's CPU and its L1 caches
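  // A request whose block sits in the wrong L1, or that misses with no way
  // available, first triggers L1_Replacement; the mandatory queue entry is
  // not popped and is retried once the victim has been evicted.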
  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, CacheMsg) {

        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache

        if (in_msg.Type == CacheRequestType:IFETCH) {
          // *** INSTRUCTION ACCESS ***

          // Check to see if it is in the OTHER L1
          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
            // The block is in the wrong L1, put the request on the queue to the shared L2
            trigger(Event:L1_Replacement, in_msg.Address);
          }
          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
            // The tag matches for the L1, so the L1 asks the L2 for it.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
          } else {
            if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
              // The L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
            } else {
              // No room in the L1, so we need to make room in the L1
              trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
            }
          }
        } else {
          // *** DATA ACCESS ***

          // Check to see if it is in the OTHER L1
          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
            // The block is in the wrong L1, put the request on the queue to the shared L2
            trigger(Event:L1_Replacement, in_msg.Address);
          }
          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
            // The tag matches for the L1, so the L1 asks the L2 for it
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
          } else {
            if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
              // The L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
            } else {
              // No room in the L1, so we need to make room in the L1
              trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
            }
          }
        }
      }
    }
  }

  // ACTIONS
  action(a_issueGETS, "a", desc="Issue GETS") {
    peek(mandatoryQueue_in, CacheMsg) {
      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
        DEBUG_EXPR(address);
        DEBUG_EXPR(out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
    peek(mandatoryQueue_in, CacheMsg) {
      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GET_INSTR;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
        DEBUG_EXPR(address);
        DEBUG_EXPR(out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    peek(mandatoryQueue_in, CacheMsg) {
      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        DEBUG_EXPR(machineID);
        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
        DEBUG_EXPR(address);
        DEBUG_EXPR(out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
    peek(mandatoryQueue_in, CacheMsg) {
      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:UPGRADE;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
        DEBUG_EXPR(address);
        DEBUG_EXPR(out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(d_sendDataToRequestor, "d", desc="send data to requestor") {
    peek(requestIntraChipL1Network_in, RequestMsg) {
      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
        out_msg.Dirty := getL1CacheEntry(address).Dirty;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
      out_msg.Dirty := getL1CacheEntry(address).Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
    peek(requestIntraChipL1Network_in, RequestMsg) {
      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := L1_TBEs[address].DataBlk;
        out_msg.Dirty := L1_TBEs[address].Dirty;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := L1_TBEs[address].DataBlk;
      out_msg.Dirty := L1_TBEs[address].Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
    peek(requestIntraChipL1Network_in, RequestMsg) {
      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
      out_msg.Dirty := getL1CacheEntry(address).Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := L1_TBEs[address].DataBlk;
      out_msg.Dirty := L1_TBEs[address].Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(fi_sendInvAck, "fi", desc="send invalidate ack to requestor") {
    peek(requestIntraChipL1Network_in, RequestMsg) {
      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.AckCount := 1;
      }
    }
  }

  action(g_issuePUTX, "g", desc="issue PUTX to the L2 cache") {
    enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
      out_msg.Dirty := getL1CacheEntry(address).Dirty;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
      if (getL1CacheEntry(address).Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
    enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
      out_msg.MessageSize := MessageSizeType:Response_Control;
    }
  }

  action(jj_sendExclusiveUnblock, "\j", desc="send exclusive unblock to the L2 cache") {
    enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
      out_msg.MessageSize := MessageSizeType:Response_Control;
    }
  }
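
  // Both unblock messages above exist because the L2 bank blocks on an
  // address while servicing a request: UNBLOCK clears the block after shared
  // data arrives, while EXCLUSIVE_UNBLOCK is sent when the line arrives in
  // E or M (see the IS/IM/SM transitions below).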

  action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
    DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
    sequencer.readCallback(address, getL1CacheEntry(address).DataBlk);
  }

  action(x_external_load_hit, "x", desc="Notify sequencer the load completed.") {
    peek(responseIntraChipL1Network_in, ResponseMsg) {
      sequencer.readCallback(address, getL1CacheEntry(address).DataBlk, getNondirectHitMachType(in_msg.Address, in_msg.Sender), PrefetchBit:No);
    }
  }

  action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
    sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk);
    getL1CacheEntry(address).Dirty := true;
  }

  action(xx_external_store_hit, "\x", desc="Notify sequencer that store completed.") {
    peek(responseIntraChipL1Network_in, ResponseMsg) {
      sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk, getNondirectHitMachType(in_msg.Address, in_msg.Sender), PrefetchBit:No);
    }
    getL1CacheEntry(address).Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
    check_allocate(L1_TBEs);
    L1_TBEs.allocate(address);
    L1_TBEs[address].isPrefetch := false;
    L1_TBEs[address].Dirty := getL1CacheEntry(address).Dirty;
    L1_TBEs[address].DataBlk := getL1CacheEntry(address).DataBlk;
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
    profileMsgDelay(2, requestIntraChipL1Network_in.dequeue_getDelayCycles());
  }

  action(o_popIncomingResponseQueue, "o", desc="Pop incoming response queue and profile the delay within this virtual network") {
    profileMsgDelay(3, responseIntraChipL1Network_in.dequeue_getDelayCycles());
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    L1_TBEs.deallocate(address);
  }

  action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
    peek(responseIntraChipL1Network_in, ResponseMsg) {
      getL1CacheEntry(address).DataBlk := in_msg.DataBlk;
      getL1CacheEntry(address).Dirty := in_msg.Dirty;
    }
  }

  action(q_updateAckCount, "q", desc="Update ack count") {
    peek(responseIntraChipL1Network_in, ResponseMsg) {
      L1_TBEs[address].pendingAcks := L1_TBEs[address].pendingAcks - in_msg.AckCount;
      APPEND_TRANSITION_COMMENT(in_msg.AckCount);
      APPEND_TRANSITION_COMMENT(" p: ");
      APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
    }
  }

  action(z_stall, "z", desc="Stall") {
  }

  action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
  }

  action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (L1DcacheMemory.isTagPresent(address) == false) {
      L1DcacheMemory.allocate(address);
    }
  }

  action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (L1IcacheMemory.isTagPresent(address) == false) {
      L1IcacheMemory.allocate(address);
    }
  }

  action(zz_recycleRequestQueue, "zz", desc="recycle L1 request queue") {
    requestIntraChipL1Network_in.recycle();
  }

  action(z_recycleMandatoryQueue, "\z", desc="recycle L1 mandatory queue") {
    mandatoryQueue_in.recycle();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/Replacement/WriteBack from transient states
  transition({IS, IM, IS_I, M_I, E_I, SM}, {Load, Ifetch, Store, L1_Replacement}) {
    z_recycleMandatoryQueue;
  }
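  // Recycling (rather than stalling on) the mandatory queue lets requests for
  // other blocks make progress while this address finishes its transient state.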

  // Transitions from Idle
  transition({NP,I}, L1_Replacement) {
    ff_deallocateL1CacheBlock;
  }

  transition({NP,I}, Load, IS) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    k_popMandatoryQueue;
  }

  transition({NP,I}, Ifetch, IS) {
    pp_allocateL1ICacheBlock;
    i_allocateTBE;
    ai_issueGETINSTR;
    k_popMandatoryQueue;
  }

  transition({NP,I}, Store, IM) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    k_popMandatoryQueue;
  }

  transition({NP, I}, Inv) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  // Transitions from Shared
  transition(S, {Load,Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    c_issueUPGRADE;
    k_popMandatoryQueue;
  }

  transition(S, L1_Replacement, I) {
    ff_deallocateL1CacheBlock;
  }

  transition(S, Inv, I) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  // Transitions from Exclusive

  transition(E, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(E, Store, M) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(E, L1_Replacement, M_I) {
    // silent E replacement??
    i_allocateTBE;
    g_issuePUTX; // send data, but hold in case forwarded request
    ff_deallocateL1CacheBlock;
  }

  transition(E, Inv, I) {
    // don't send data
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(E, Fwd_GETX, I) {
    d_sendDataToRequestor;
    l_popRequestQueue;
  }

  transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    d_sendDataToRequestor;
    d2_sendDataToL2;
    l_popRequestQueue;
  }

  // Transitions from Modified
  transition(M, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(M, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M, L1_Replacement, M_I) {
    i_allocateTBE;
    g_issuePUTX; // send data, but hold in case forwarded request
    ff_deallocateL1CacheBlock;
  }

  transition(M_I, WB_Ack, I) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition(M, Inv, I) {
    f_sendDataToL2;
    l_popRequestQueue;
  }

  transition(M_I, Inv, I) {
    ft_sendDataToL2_fromTBE;
    s_deallocateTBE;
    l_popRequestQueue;
  }

  transition(M, Fwd_GETX, I) {
    d_sendDataToRequestor;
    l_popRequestQueue;
  }

  transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    d_sendDataToRequestor;
    d2_sendDataToL2;
    l_popRequestQueue;
  }

  transition(M_I, Fwd_GETX, I) {
    dt_sendDataToRequestor_fromTBE;
    s_deallocateTBE;
    l_popRequestQueue;
  }

  transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, I) {
    dt_sendDataToRequestor_fromTBE;
    d2t_sendDataToL2_fromTBE;
    s_deallocateTBE;
    l_popRequestQueue;
  }

  // Transitions from IS
  transition({IS, IS_I}, Inv, IS_I) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(IS, Data_all_Acks, S) {
    u_writeDataToL1Cache;
    x_external_load_hit;
    s_deallocateTBE;
    j_sendUnblock;
    o_popIncomingResponseQueue;
  }

  transition(IS_I, Data_all_Acks, I) {
    u_writeDataToL1Cache;
    x_external_load_hit;
    s_deallocateTBE;
    j_sendUnblock;
    o_popIncomingResponseQueue;
  }

  transition(IS, DataS_fromL1, S) {
    u_writeDataToL1Cache;
    j_sendUnblock;
    x_external_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition(IS_I, DataS_fromL1, I) {
    u_writeDataToL1Cache;
    j_sendUnblock;
    x_external_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  // directory is blocked when sending exclusive data
  transition(IS_I, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    x_external_load_hit;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition(IS, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    x_external_load_hit;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  // Transitions from IM
  transition({IM, SM}, Inv, IM) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(IM, Data, SM) {
    u_writeDataToL1Cache;
    q_updateAckCount;
    o_popIncomingResponseQueue;
  }

  transition(IM, Data_all_Acks, M) {
    u_writeDataToL1Cache;
    xx_external_store_hit;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  // Transitions from SM
  transition({SM, IM}, Ack) {
    q_updateAckCount;
    o_popIncomingResponseQueue;
  }

  transition(SM, Ack_all, M) {
    jj_sendExclusiveUnblock;
    xx_external_store_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }
}