src/mem/protocol/MESI_CMP_directory-L1cache.sm

/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(L1Cache, "MESI Directory L1 Cache CMP")
 : Sequencer * sequencer,
   CacheMemory * L1IcacheMemory,
   CacheMemory * L1DcacheMemory,
   int l2_select_num_bits,
   int l1_request_latency = 2,
   int l1_response_latency = 2,
   int to_l2_latency = 1
{

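  // Requests to the shared L2 are routed by address: mapAddressToRange() in the
  // actions below selects an L2 bank using l2_select_num_bits address bits starting
  // at l2_select_low_bit (which defaults to the block-size bits), so consecutive
  // cache lines presumably interleave across the L2 banks.
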
  // NODE L1 CACHE
  // From this node's L1 cache TO the network

  // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
  MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false";
  // a local L1 -> this L2 bank
  MessageBuffer responseFromL1Cache, network="To", virtual_network="1", ordered="false";
  MessageBuffer unblockFromL1Cache, network="To", virtual_network="2", ordered="false";

  // To this node's L1 cache FROM the network
  // an L2 bank -> this L1
  MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false";
  // an L2 bank -> this L1
  MessageBuffer responseToL1Cache, network="From", virtual_network="1", ordered="false";

  // STATES
  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    NP, desc="Not present in either cache";
    I, desc="an L1 cache entry Idle";
    S, desc="an L1 cache entry Shared";
    E, desc="an L1 cache entry Exclusive";
    M, desc="an L1 cache entry Modified", format="!b";

    // Transient States
    IS, desc="L1 idle, issued GETS, have not seen response yet";
    IM, desc="L1 idle, issued GETX, have not seen response yet";
    SM, desc="L1 shared, issued GETX, have not seen response yet";
    IS_I, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";

    M_I, desc="L1 replacing, waiting for ACK";
    E_I, desc="L1 replacing, waiting for ACK";
  }
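  // Note on naming: the transient states follow the usual Ruby/SLICC convention of
  // <current state><target state> -- IS is an Idle line with a GETS outstanding,
  // SM is a Shared line upgrading toward M, and M_I / E_I are lines being written
  // back while they wait for the L2's WB_Ack.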

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // L1 events
    Load, desc="Load request from the home processor";
    Ifetch, desc="I-fetch request from the home processor";
    Store, desc="Store request from the home processor";

    Inv, desc="Invalidate request from L2 bank";

    // internally generated request
    L1_Replacement, desc="L1 Replacement", format="!r";

    // other requests
    Fwd_GETX, desc="GETX from other processor";
    Fwd_GETS, desc="GETS from other processor";
    Fwd_GET_INSTR, desc="GET_INSTR from other processor";

    Data, desc="Data for processor";
    Data_Exclusive, desc="Exclusive data for processor";
    DataS_fromL1, desc="data for GETS request, need to unblock directory";
    Data_all_Acks, desc="Data for processor, all acks";

    Ack, desc="Ack for processor";
    Ack_all, desc="Last ack for processor";

    WB_Ack, desc="Ack for replacement";
  }
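  // The data events above are distinguished in the response in_port below:
  // Data_Exclusive carries DATA_EXCLUSIVE from the L2, DataS_fromL1 is DATA supplied
  // by another L1 while this cache is still in IS/IS_I (and needs an explicit UNBLOCK
  // to the directory), and Data vs. Data_all_Acks depends on whether invalidation
  // acks are still outstanding for the line.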

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    DataBlock DataBlk, desc="data for the block";
    bool Dirty, default="false", desc="data is dirty";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Address Address, desc="Physical address for this TBE";
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="Buffer for the data block";
    bool Dirty, default="false", desc="data is dirty";
    bool isPrefetch, desc="Set if this was caused by a prefetch";
    int pendingAcks, default="0", desc="number of pending acks";
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
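  // One TBE is allocated per outstanding line (i_allocateTBE / s_deallocateTBE below);
  // it buffers the line's data and Dirty bit across the transient states and counts
  // the invalidation acks still pending -- roughly the role an MSHR plays in a
  // conventional cache.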

  MessageBuffer mandatoryQueue, ordered="false";

  int cache_state_to_int(State state);
  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

  // inclusive cache returns L1 entries only
  Entry getL1CacheEntry(Address addr), return_by_ref="yes" {
    if (L1DcacheMemory.isTagPresent(addr)) {
      return static_cast(Entry, L1DcacheMemory[addr]);
    } else {
      return static_cast(Entry, L1IcacheMemory[addr]);
    }
  }

  void changeL1Permission(Address addr, AccessPermission permission) {
    if (L1DcacheMemory.isTagPresent(addr)) {
      return L1DcacheMemory.changePermission(addr, permission);
    } else if (L1IcacheMemory.isTagPresent(addr)) {
      return L1IcacheMemory.changePermission(addr, permission);
    } else {
      error("cannot change permission, L1 block not present");
    }
  }

  bool isL1CacheTagPresent(Address addr) {
    return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
  }

  State getState(Address addr) {
    // if ((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == true) {
    //   DEBUG_EXPR(id);
    //   DEBUG_EXPR(addr);
    // }
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);

    if (L1_TBEs.isPresent(addr)) {
      return L1_TBEs[addr].TBEState;
    } else if (isL1CacheTagPresent(addr)) {
      return getL1CacheEntry(addr).CacheState;
    }
    return State:NP;
  }

  void setState(Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);

    // MUST CHANGE
    if (L1_TBEs.isPresent(addr)) {
      L1_TBEs[addr].TBEState := state;
    }

    if (isL1CacheTagPresent(addr)) {
      getL1CacheEntry(addr).CacheState := state;

      // Set permission
      if (state == State:I) {
        changeL1Permission(addr, AccessPermission:Invalid);
      } else if (state == State:S || state == State:E) {
        changeL1Permission(addr, AccessPermission:Read_Only);
      } else if (state == State:M) {
        changeL1Permission(addr, AccessPermission:Read_Write);
      } else {
        changeL1Permission(addr, AccessPermission:Busy);
      }
    }
  }

  Event mandatory_request_type_to_event(CacheRequestType type) {
    if (type == CacheRequestType:LD) {
      return Event:Load;
    } else if (type == CacheRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid CacheRequestType");
    }
  }

  out_port(requestIntraChipL1Network_out, RequestMsg, requestFromL1Cache);
  out_port(responseIntraChipL1Network_out, ResponseMsg, responseFromL1Cache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);

  // Response IntraChip L1 Network - response msg to this L1 cache
  in_port(responseIntraChipL1Network_in, ResponseMsg, responseToL1Cache) {
    if (responseIntraChipL1Network_in.isReady()) {
      peek(responseIntraChipL1Network_in, ResponseMsg, block_on="Address") {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Data_Exclusive, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          if ((getState(in_msg.Address) == State:IS || getState(in_msg.Address) == State:IS_I) &&
              machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {

            trigger(Event:DataS_fromL1, in_msg.Address);

          } else if ((L1_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0) {
            trigger(Event:Data_all_Acks, in_msg.Address);
          } else {
            trigger(Event:Data, in_msg.Address);
          }
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          if ((L1_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0) {
            trigger(Event:Ack_all, in_msg.Address);
          } else {
            trigger(Event:Ack, in_msg.Address);
          }
        } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
          trigger(Event:WB_Ack, in_msg.Address);
        } else {
          error("Invalid L1 response type");
        }
      }
    }
  }
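  // Ack counting: the AckCount carried on the L2's data response and the per-sharer
  // invalidation ACKs (each sent with AckCount := 1 by fi_sendInvAck below) are netted
  // against the TBE's pendingAcks via q_updateAckCount; Data_all_Acks / Ack_all fire
  // only once the balance reaches zero, so a store does not complete while stale
  // copies may remain.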

  // Request IntraChip L1 Network - requests forwarded to this L1 cache from the shared L2
  in_port(requestIntraChipL1Network_in, RequestMsg, requestToL1Cache) {
    if (requestIntraChipL1Network_in.isReady()) {
      peek(requestIntraChipL1Network_in, RequestMsg, block_on="Address") {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:UPGRADE) {
          // upgrade transforms to GETX due to race
          trigger(Event:Fwd_GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:Fwd_GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
          trigger(Event:Fwd_GET_INSTR, in_msg.Address);
        } else {
          error("Invalid forwarded request type");
        }
      }
    }
  }

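  // A line may live in at most one of the two L1s (getState/setState assert this), so a
  // request that finds its block in the other L1 first triggers an L1_Replacement of that
  // copy; the request stays on the mandatory queue and is retried once the replacement
  // completes.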
  // Mandatory Queue between the node's CPU and its L1 caches
  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {

        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache

        if (in_msg.Type == CacheRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          // Check to see if it is in the OTHER L1
          if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The block is in the wrong L1, put the request on the queue to the shared L2
            trigger(Event:L1_Replacement, in_msg.LineAddress);
          }
          if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The tag matches for the L1, so the L1 asks the L2 for it.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
          } else {
            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // The L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
            } else {
              // No room in the L1, so we need to make room in the L1
              trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.LineAddress));
            }
          }
        } else {
          // *** DATA ACCESS ***

          // Check to see if it is in the OTHER L1
          if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The block is in the wrong L1, put the request on the queue to the shared L2
            trigger(Event:L1_Replacement, in_msg.LineAddress);
          }
          if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The tag matches for the L1, so the L1 asks the L2 for it
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
          } else {
            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // The L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
            } else {
              // No room in the L1, so we need to make room in the L1
              trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.LineAddress));
            }
          }
        }
      }
    }
  }

  // ACTIONS
  action(a_issueGETS, "a", desc="Issue GETS") {
    peek(mandatoryQueue_in, CacheMsg) {
      enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits));
        DPRINTF(RubySlicc, "address: %s, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
    peek(mandatoryQueue_in, CacheMsg) {
      enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GET_INSTR;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits));
        DPRINTF(RubySlicc, "address: %s, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    peek(mandatoryQueue_in, CacheMsg) {
      enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        DPRINTF(RubySlicc, "%s\n", machineID);
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits));
        DPRINTF(RubySlicc, "address: %s, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
    peek(mandatoryQueue_in, CacheMsg) {
      enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:UPGRADE;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits));
        DPRINTF(RubySlicc, "address: %s, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(d_sendDataToRequestor, "d", desc="send data to requestor") {
    peek(requestIntraChipL1Network_in, RequestMsg) {
      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
        out_msg.Dirty := getL1CacheEntry(address).Dirty;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
      out_msg.Dirty := getL1CacheEntry(address).Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
    peek(requestIntraChipL1Network_in, RequestMsg) {
      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := L1_TBEs[address].DataBlk;
        out_msg.Dirty := L1_TBEs[address].Dirty;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := L1_TBEs[address].DataBlk;
      out_msg.Dirty := L1_TBEs[address].Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
    peek(requestIntraChipL1Network_in, RequestMsg) {
      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
      out_msg.Dirty := getL1CacheEntry(address).Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := L1_TBEs[address].DataBlk;
      out_msg.Dirty := L1_TBEs[address].Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(fi_sendInvAck, "fi", desc="send invalidation ack to the requestor") {
    peek(requestIntraChipL1Network_in, RequestMsg) {
      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.AckCount := 1;
      }
    }
  }


  action(g_issuePUTX, "g", desc="issue PUTX (writeback) to the L2 cache") {
    enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
      out_msg.Dirty := getL1CacheEntry(address).Dirty;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      if (getL1CacheEntry(address).Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=to_l2_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Response_Control;
      DPRINTF(RubySlicc, "%s\n", address);
    }
  }

  action(jj_sendExclusiveUnblock, "\j", desc="send exclusive unblock to the L2 cache") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=to_l2_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Response_Control;
      DPRINTF(RubySlicc, "%s\n", address);
    }
  }

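  // Load/store hits (and the fills in the transitions below) complete by calling back
  // into the attached Sequencer (readCallback / writeCallback), which is what actually
  // returns data to the core; hh_store_hit also marks the line Dirty.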
  action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
    DPRINTF(RubySlicc, "%s\n", getL1CacheEntry(address).DataBlk);
    sequencer.readCallback(address, getL1CacheEntry(address).DataBlk);
  }

  action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
    DPRINTF(RubySlicc, "%s\n", getL1CacheEntry(address).DataBlk);
    sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk);
    getL1CacheEntry(address).Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
    check_allocate(L1_TBEs);
    L1_TBEs.allocate(address);
    L1_TBEs[address].isPrefetch := false;
    L1_TBEs[address].Dirty := getL1CacheEntry(address).Dirty;
    L1_TBEs[address].DataBlk := getL1CacheEntry(address).DataBlk;
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
    profileMsgDelay(2, requestIntraChipL1Network_in.dequeue_getDelayCycles());
  }

  action(o_popIncomingResponseQueue, "o", desc="Pop incoming response queue and profile the delay within this virtual network") {
    profileMsgDelay(3, responseIntraChipL1Network_in.dequeue_getDelayCycles());
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    L1_TBEs.deallocate(address);
  }

  action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
    peek(responseIntraChipL1Network_in, ResponseMsg) {
      getL1CacheEntry(address).DataBlk := in_msg.DataBlk;
      getL1CacheEntry(address).Dirty := in_msg.Dirty;
    }
  }

  action(q_updateAckCount, "q", desc="Update ack count") {
    peek(responseIntraChipL1Network_in, ResponseMsg) {
      L1_TBEs[address].pendingAcks := L1_TBEs[address].pendingAcks - in_msg.AckCount;
      APPEND_TRANSITION_COMMENT(in_msg.AckCount);
      APPEND_TRANSITION_COMMENT(" p: ");
      APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
    }
  }

  action(z_stall, "z", desc="Stall") {
  }

  action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
  }

  action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (L1DcacheMemory.isTagPresent(address) == false) {
      L1DcacheMemory.allocate(address, new Entry);
    }
  }

  action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (L1IcacheMemory.isTagPresent(address) == false) {
      L1IcacheMemory.allocate(address, new Entry);
    }
  }

  action(zz_recycleRequestQueue, "zz", desc="recycle L1 request queue") {
    requestIntraChipL1Network_in.recycle();
  }

  action(z_recycleMandatoryQueue, "\z", desc="recycle L1 mandatory queue") {
    mandatoryQueue_in.recycle();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/Replacement/WriteBack from transient states
  transition({IS, IM, IS_I, M_I, E_I, SM}, {Load, Ifetch, Store, L1_Replacement}) {
    z_recycleMandatoryQueue;
  }
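  // While a line is in a transient state, new processor requests and replacements for it
  // are not handled; they are recycled back onto the mandatory queue and retried once the
  // outstanding transaction has resolved to a stable state.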

  // Transitions from Idle
  transition({NP,I}, L1_Replacement) {
    ff_deallocateL1CacheBlock;
  }

  transition({NP,I}, Load, IS) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    k_popMandatoryQueue;
  }

  transition({NP,I}, Ifetch, IS) {
    pp_allocateL1ICacheBlock;
    i_allocateTBE;
    ai_issueGETINSTR;
    k_popMandatoryQueue;
  }

  transition({NP,I}, Store, IM) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    k_popMandatoryQueue;
  }

  transition({NP, I}, Inv) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  // Transitions from Shared
  transition(S, {Load,Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    c_issueUPGRADE;
    k_popMandatoryQueue;
  }

  transition(S, L1_Replacement, I) {
    ff_deallocateL1CacheBlock;
  }

  transition(S, Inv, I) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  // Transitions from Exclusive

  transition(E, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(E, Store, M) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(E, L1_Replacement, M_I) {
    // silent E replacement??
    i_allocateTBE;
    g_issuePUTX; // send data, but hold in case forwarded request
    ff_deallocateL1CacheBlock;
  }

  transition(E, Inv, I) {
    // don't send data
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(E, Fwd_GETX, I) {
    d_sendDataToRequestor;
    l_popRequestQueue;
  }

  transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    d_sendDataToRequestor;
    d2_sendDataToL2;
    l_popRequestQueue;
  }

  // Transitions from Modified
  transition(M, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(M, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M, L1_Replacement, M_I) {
    i_allocateTBE;
    g_issuePUTX; // send data, but hold in case forwarded request
    ff_deallocateL1CacheBlock;
  }

  transition(M_I, WB_Ack, I) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition(M, Inv, I) {
    f_sendDataToL2;
    l_popRequestQueue;
  }

  transition(M_I, Inv, I) {
    ft_sendDataToL2_fromTBE;
    s_deallocateTBE;
    l_popRequestQueue;
  }

  transition(M, Fwd_GETX, I) {
    d_sendDataToRequestor;
    l_popRequestQueue;
  }

  transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    d_sendDataToRequestor;
    d2_sendDataToL2;
    l_popRequestQueue;
  }

  transition(M_I, Fwd_GETX, I) {
    dt_sendDataToRequestor_fromTBE;
    s_deallocateTBE;
    l_popRequestQueue;
  }

  transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, I) {
    dt_sendDataToRequestor_fromTBE;
    d2t_sendDataToL2_fromTBE;
    s_deallocateTBE;
    l_popRequestQueue;
  }

  // Transitions from IS
  transition({IS, IS_I}, Inv, IS_I) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(IS, Data_all_Acks, S) {
    u_writeDataToL1Cache;
    h_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition(IS_I, Data_all_Acks, I) {
    u_writeDataToL1Cache;
    h_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition(IS, DataS_fromL1, S) {
    u_writeDataToL1Cache;
    j_sendUnblock;
    h_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition(IS_I, DataS_fromL1, I) {
    u_writeDataToL1Cache;
    j_sendUnblock;
    h_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  // directory is blocked when sending exclusive data
  transition(IS_I, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    h_load_hit;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition(IS, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    h_load_hit;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  // Transitions from IM
  transition({IM, SM}, Inv, IM) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(IM, Data, SM) {
    u_writeDataToL1Cache;
    q_updateAckCount;
    o_popIncomingResponseQueue;
  }

  transition(IM, Data_all_Acks, M) {
    u_writeDataToL1Cache;
    hh_store_hit;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  // Transitions from SM
  transition({SM, IM}, Ack) {
    q_updateAckCount;
    o_popIncomingResponseQueue;
  }

  transition(SM, Ack_all, M) {
    jj_sendExclusiveUnblock;
    hh_store_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }
}