/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 *
 */

machine(L1Cache, "MSI Directory L1 Cache CMP") {

  // NODE L1 CACHE
  // From this node's L1 cache TO the network
  // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
  MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="true";
  MessageBuffer dummyFrom1, network="To", virtual_network="1", ordered="false";  // dummy buffer that shouldn't be used
  MessageBuffer dummyFrom2, network="To", virtual_network="2", ordered="false";  // dummy buffer that shouldn't be used
  // a local L1 -> this L2 bank
  MessageBuffer responseFromL1Cache, network="To", virtual_network="3", ordered="false";
  MessageBuffer dummyFrom4, network="To", virtual_network="4", ordered="false";  // dummy buffer that shouldn't be used

  // To this node's L1 cache FROM the network
  MessageBuffer dummyTo0, network="From", virtual_network="0", ordered="false";  // dummy buffer that shouldn't be used
  MessageBuffer dummyTo1, network="From", virtual_network="1", ordered="false";  // dummy buffer that shouldn't be used
  // a L2 bank -> this L1
  MessageBuffer requestToL1Cache, network="From", virtual_network="2", ordered="true";
  // a L2 bank -> this L1
  MessageBuffer responseToL1Cache, network="From", virtual_network="3", ordered="false";
  MessageBuffer dummyTo4, network="From", virtual_network="4", ordered="false";  // dummy buffer that shouldn't be used
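
  // Note: each in_port below that reads one of the dummy buffers immediately
  // calls error(), so they carry no traffic. They appear to exist only so
  // that this controller declares a buffer for every virtual network of the
  // CMP interconnect, used or not.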

  // STATES
  enumeration(State, desc="Cache states", default="L1Cache_State_L1_I") {
    // Base states
    NP, desc="Not present in either cache";
    L1_I, desc="a L1 cache entry Idle";
    L1_S, desc="a L1 cache entry Shared";
    L1_M, desc="a L1 cache entry Modified", format="!b";

    // Transient States
    L1_IS, desc="L1 idle, issued GETS, have not seen response yet";
    L1_ISI, desc="L1 idle, issued GETS, saw INV, still waiting for data";
    L1_IM, desc="L1 idle, issued GETX, have not seen response yet";
    L1_IMI, desc="L1 idle, issued GETX, saw INV, still waiting for data";
    L1_IMS, desc="L1 idle, issued GETX, saw DownGrade, still waiting for data";
    L1_IMSI, desc="L1 idle, issued GETX, saw DownGrade, saw INV, still waiting for data";

    L1_SI, desc="issued PUTS, waiting for response";
    L1_MI, desc="issued PUTX, waiting for response";
  }
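
  // Reading the descs above, the transient-state names compose: the letters
  // after "L1_" give the starting state, the requested state, and then any
  // intervening L2 requests observed while waiting. For example, L1_IMSI is
  // a block that was Idle, issued a GETX toward M, then saw a DownGrade (S)
  // followed by an INV (I) before its data arrived.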

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // L1 events
    Load, desc="Load request from the home processor";
    Ifetch, desc="I-fetch request from the home processor";
    Store, desc="Store request from the home processor";

    // L1 is required to send a response to the L2 immediately
    L1_INV, "INV", desc="L1 Invalidation of M data", format="!r";
    L1_INV_S, "INV", desc="L1 Invalidation of S data", format="!r";
    L1_DownGrade, "Force DownGrade", desc="L2 cache forces an L1 cache in M to downgrade to S and write back the result";

    // receiving of data
    L1_Data, "Data", desc="Data in response to an L1 request, transition to M or S depending on request";
    L1_Data_S, "Data S", desc="Data in response to an L1 request, write data then transition to S";
    L1_Data_I, "Data I", desc="Data in response to an L1 request, write data then transition to I";

    // receiving of acks
    L1_PutAck, "Put Ack", desc="PutS or PutX ack from L2";

    // internally generated requests
    // L1 request to replace a block, results in either a PUTS or PUTX request
    L1_Replacement, desc="L1 Replacement", format="!r";
    // Currently same as replacement, request initiated when a block is in the wrong L1 cache
    L1_WriteBack, desc="on-chip L1 cache must write back to shared L2";
  }
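
  // L1_INV targets an M copy and is answered with the dirty data
  // (e_dataFromL1CacheToL2Cache / f_dataFromTBEToL2Cache below), while
  // L1_INV_S targets an S copy and is answered with a plain INV_ACK
  // (t_sendInvAckToL2Cache).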

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
    State CacheState, desc="cache state";
    DataBlock DataBlk, desc="data for the block";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Address Address, desc="Physical address for this TBE";
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="Buffer for the data block";
    bool isPrefetch, desc="Set if this was caused by a prefetch";
  }

  external_type(CacheMemory) {
    bool cacheAvail(Address);
    Address cacheProbe(Address);
    void allocate(Address);
    void deallocate(Address);
    Entry lookup(Address);
    void changePermission(Address, AccessPermission);
    bool isTagPresent(Address);
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable L1_TBEs, template_hack="<L1Cache_TBE>";

  CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
  CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";

  MessageBuffer mandatoryQueue, ordered="false", rank="100", abstract_chip_ptr="true";
  // the optionalQueue doesn't have to be ordered for correctness;
  // however, enforcing order ensures the prefetches reach the L2 in order
  MessageBuffer optionalQueue, ordered="true", rank="101", abstract_chip_ptr="true";

  Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";

  int cache_state_to_int(State state);

  // inclusive cache returns L1 entries only
  Entry getL1CacheEntry(Address addr), return_by_ref="yes" {
    if (L1DcacheMemory.isTagPresent(addr)) {
      return L1DcacheMemory[addr];
    } else {
      return L1IcacheMemory[addr];
    }
  }

  void changeL1Permission(Address addr, AccessPermission permission) {
    if (L1DcacheMemory.isTagPresent(addr)) {
      return L1DcacheMemory.changePermission(addr, permission);
    } else if(L1IcacheMemory.isTagPresent(addr)) {
      return L1IcacheMemory.changePermission(addr, permission);
    } else {
      error("cannot change permission, L1 block not present");
    }
  }

  bool isL1CacheTagPresent(Address addr) {
    return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
  }
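
  // Invariant: a block resides in at most one of the two L1s at a time.
  // getState() and setState() below assert this, and the mandatory queue
  // logic writes a block back to the L2 before re-fetching it into the
  // other L1.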

  State getState(Address addr) {
    if((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == true){
      DEBUG_EXPR(id);
      DEBUG_EXPR(addr);
    }
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);

    if(L1_TBEs.isPresent(addr)) {
      return L1_TBEs[addr].TBEState;
    } else if (isL1CacheTagPresent(addr)) {
      return getL1CacheEntry(addr).CacheState;
    }
    return State:NP;
  }

  std::string getStateStr(Address addr) {
    return L1Cache_State_to_string(getState(addr));
  }

  // when is this called?
  void setState(Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);

    // MUST CHANGE
    if(L1_TBEs.isPresent(addr)) {
      L1_TBEs[addr].TBEState := state;
    }

    if (isL1CacheTagPresent(addr)) {
      getL1CacheEntry(addr).CacheState := state;

      // Set permission
      if (state == State:L1_I || state == State:L1_SI || state == State:L1_MI) {
        changeL1Permission(addr, AccessPermission:Invalid);
      } else if (state == State:L1_S) {
        changeL1Permission(addr, AccessPermission:Read_Only);
      } else if (state == State:L1_M) {
        changeL1Permission(addr, AccessPermission:Read_Write);
      } else {
        changeL1Permission(addr, AccessPermission:Busy);
      }
    }
  }
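
  // setState() is the one place where access permissions are derived from
  // protocol state, so the permission the sequencer checks can never drift
  // out of sync with the state table: L1_I and the eviction states
  // L1_SI/L1_MI map to Invalid, L1_S to Read_Only, L1_M to Read_Write, and
  // every remaining transient state to Busy.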

  Event mandatory_request_type_to_event(CacheRequestType type) {
    if (type == CacheRequestType:LD) {
      return Event:Load;
    } else if (type == CacheRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid CacheRequestType");
    }
  }

  // ** OUT_PORTS **
  // All ports are to the same CMP network, queue id numbers determine IntraChip Switch location

  out_port(requestIntraChipL1Network_out, RequestMsg, requestFromL1Cache);
  out_port(responseIntraChipL1Network_out, ResponseMsg, responseFromL1Cache);

  // ** IN_PORTS **
  in_port(dummyTo0_in, RequestMsg, dummyTo0) {
    if (dummyTo0_in.isReady()) {
      peek(dummyTo0_in, RequestMsg) {
        DEBUG_EXPR(in_msg.Address);
        DEBUG_EXPR(machineID);
        DEBUG_EXPR(in_msg.Type);
        DEBUG_EXPR(getState(in_msg.Address));
        DEBUG_EXPR(in_msg.RequestorMachId);
      }
      error("dummyTo0 port should not be used");
    }
  }

  in_port(dummyTo1_in, RequestMsg, dummyTo1) {
    if (dummyTo1_in.isReady()) {
      peek(dummyTo1_in, RequestMsg) {
        DEBUG_EXPR(in_msg.Address);
        DEBUG_EXPR(machineID);
        DEBUG_EXPR(in_msg.Type);
        DEBUG_EXPR(getState(in_msg.Address));
        DEBUG_EXPR(in_msg.RequestorMachId);
      }
      error("dummyTo1 port should not be used");
    }
  }

  in_port(dummyTo4_in, ResponseMsg, dummyTo4) {
    if (dummyTo4_in.isReady()) {
      peek(dummyTo4_in, ResponseMsg) {
        DEBUG_EXPR(in_msg.Address);
        DEBUG_EXPR(machineID);
        DEBUG_EXPR(in_msg.Type);
        DEBUG_EXPR(getState(in_msg.Address));
        DEBUG_EXPR(in_msg.SenderMachId);
      }
      error("dummyTo4 port should not be used");
    }
  }

  // Response IntraChip L1 Network - response msg to this L1 cache
  in_port(responseIntraChipL1Network_in, ResponseMsg, responseToL1Cache) {
    if (responseIntraChipL1Network_in.isReady()) {
      peek(responseIntraChipL1Network_in, ResponseMsg) {
        DEBUG_EXPR(in_msg.Address);
        DEBUG_EXPR(in_msg.Destination);
        DEBUG_EXPR(in_msg.SenderMachId);
        DEBUG_EXPR(machineID);
        assert(in_msg.Destination.isElement(machineID));
        if(machineIDToMachineType(in_msg.SenderMachId) == MachineType:L2Cache) {
          if(in_msg.Type == CoherenceResponseType:DATA) {
            trigger(Event:L1_Data, in_msg.Address);  // L1 now has data in its desired state
          } else if(in_msg.Type == CoherenceResponseType:DATA_S) {
            trigger(Event:L1_Data_S, in_msg.Address);  // L1 now has data but must immediately move to S state
          } else if(in_msg.Type == CoherenceResponseType:DATA_I) {
            trigger(Event:L1_Data_I, in_msg.Address);  // L1 now has data but must immediately move to INV state
          } else if(in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:L1_PutAck, in_msg.Address);
          } else {
            error("Invalid L1 response type");
          }
        } else {
          error("A non-L2 cache sent a response to a L1 cache");
        }
      }
    }
  }

  // Request IntraChip L1 Network - request msg to this L1 cache, forwarded from the shared L2
  in_port(requestIntraChipL1Network_in, RequestMsg, requestToL1Cache) {
    if(requestIntraChipL1Network_in.isReady()) {
      peek(requestIntraChipL1Network_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if(machineIDToMachineType(in_msg.RequestorMachId) == MachineType:L2Cache) {
          if(in_msg.Type == CoherenceRequestType:L1_DG) {
            trigger(Event:L1_DownGrade, in_msg.Address);  // Force L1 to downgrade to S state
          } else if (in_msg.Type == CoherenceRequestType:INV) {
            trigger(Event:L1_INV, in_msg.Address);  // L1 must invalidate its modified version
          } else if (in_msg.Type == CoherenceRequestType:INV_S) {
            trigger(Event:L1_INV_S, in_msg.Address);  // L1 must invalidate its shared version
          } else {
            error("Invalid forwarded request type");
          }
        } else {
          error("A non-L2 cache sent a request to a L1 cache");
        }
      }
    }
  }
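
  // The mandatory queue is declared after the network ports; in_port
  // priority in SLICC is tied to declaration order, the intent here being
  // that in-flight responses and forwarded L2 requests are serviced before
  // any new request from the local processor is considered.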

  // Mandatory Queue between the node's CPU and its L1 caches
  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, CacheMsg) {

        // Check for data access to blocks in I-cache and ifetches to blocks in D-cache

        if (in_msg.Type == CacheRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          // Check to see if it is in the OTHER L1
          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
            // The block is in the wrong L1, put the request on the queue to the shared L2
            trigger(Event:L1_WriteBack, in_msg.Address);
          }
          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
            // The tag matches for the L1, so the L1 asks the L2 for it.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
          } else {
            if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
              // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
            } else {
              // No room in the L1, so we need to make room in the L1
              trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
            }
          }
        } else {
          // *** DATA ACCESS ***

          // Check to see if it is in the OTHER L1
          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
            // The block is in the wrong L1, put the request on the queue to the shared L2
            trigger(Event:L1_WriteBack, in_msg.Address);
          }
          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
            // The tag matches for the L1, so the L1 asks the L2 for it
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
          } else {
            if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
              // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
            } else {
              // No room in the L1, so we need to make room in the L1
              trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
            }
          }
        }
      }
    }
  }

  // ACTIONS
  action(a_issueGETS, "a", desc="Issue GETS") {
    peek(mandatoryQueue_in, CacheMsg) {
      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.RequestorMachId := machineID;
        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
        DEBUG_EXPR(address);
        DEBUG_EXPR(out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.L1CacheStateStr := getStateStr(address);
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    peek(mandatoryQueue_in, CacheMsg) {
      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.RequestorMachId := machineID;
        DEBUG_EXPR(machineID);
        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
        DEBUG_EXPR(address);
        DEBUG_EXPR(out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.L1CacheStateStr := getStateStr(address);
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
    peek(mandatoryQueue_in, CacheMsg) {
      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:UPGRADE;
        out_msg.RequestorMachId := machineID;
        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
        DEBUG_EXPR(address);
        DEBUG_EXPR(out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.L1CacheStateStr := getStateStr(address);
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(f_issueGETINSTR, "g", desc="Issue GETINSTR") {
    peek(mandatoryQueue_in, CacheMsg) {
      enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GET_INSTR;
        out_msg.RequestorMachId := machineID;
        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
        DEBUG_EXPR(address);
        DEBUG_EXPR(out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.L1CacheStateStr := getStateStr(address);
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(d_issuePUTX, "d", desc="Issue PUTX") {
    enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.RequestorMachId := machineID;
      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
      DEBUG_EXPR(address);
      DEBUG_EXPR(out_msg.Destination);
      DEBUG_EXPR(out_msg.DataBlk);
      out_msg.MessageSize := MessageSizeType:Data;
      out_msg.L1CacheStateStr := getStateStr(address);
    }
  }

  action(q_issuePUTS, "q", desc="Issue PUTS") {
    enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTS;
      out_msg.RequestorMachId := machineID;
      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
      DEBUG_EXPR(address);
      DEBUG_EXPR(out_msg.Destination);
      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
      out_msg.L1CacheStateStr := getStateStr(address);
    }
  }

  // L1 responding to a L2 request with data
  action(e_dataFromL1CacheToL2Cache, "e", desc="Send data from L1 cache to L2 Cache") {
    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.SenderMachId := machineID;
      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
      out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
      DEBUG_EXPR(address);
      DEBUG_EXPR(out_msg.Destination);
      DEBUG_EXPR(out_msg.DataBlk);
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }

  action(f_dataFromTBEToL2Cache, "f", desc="Send data from L1_TBE to L2 Cache") {
    peek(requestIntraChipL1Network_in, RequestMsg) {
      enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.SenderMachId := machineID;
        out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
        out_msg.DataBlk := L1_TBEs[in_msg.Address].DataBlk;
        DEBUG_EXPR(address);
        DEBUG_EXPR(out_msg.Destination);
        DEBUG_EXPR(out_msg.DataBlk);
        out_msg.MessageSize := MessageSizeType:Data;
      }
    }
  }

  // L1 responding to a L2 request with an invalidation ack
  action(t_sendInvAckToL2Cache, "t", desc="Send Invalidation ack to L2 Cache") {
    enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:INV_ACK;
      out_msg.SenderMachId := machineID;
      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
      DEBUG_EXPR(address);
      DEBUG_EXPR(out_msg.Destination);
      out_msg.MessageSize := MessageSizeType:Control;
    }
  }

  action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
    DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
    sequencer.readCallback(address, getL1CacheEntry(address).DataBlk);
  }

  action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
    DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
    sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk);
  }

  action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
    check_allocate(L1_TBEs);
    L1_TBEs.allocate(address);
    L1_TBEs[address].isPrefetch := false;
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
    profileMsgDelay(2, requestIntraChipL1Network_in.dequeue_getDelayCycles());
  }

  action(o_popIncomingResponseQueue, "o", desc="Pop incoming response queue and profile the delay within this virtual network") {
    profileMsgDelay(3, responseIntraChipL1Network_in.dequeue_getDelayCycles());
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    L1_TBEs.deallocate(address);
  }

  action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
    peek(responseIntraChipL1Network_in, ResponseMsg) {
      getL1CacheEntry(address).DataBlk := in_msg.DataBlk;
    }
  }

  action(x_copyDataFromL1CacheToTBE, "x", desc="Copy data from cache to TBE") {
    L1_TBEs[address].DataBlk := getL1CacheEntry(address).DataBlk;
  }

  action(z_stall, "z", desc="Stall") {
  }
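
  // z_stall performs no work and, crucially, no dequeue: the triggering
  // message stays at the head of its queue, so the same event fires again on
  // a later wakeup once the block has left its transient state.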

  action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
  }

  action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (L1DcacheMemory.isTagPresent(address) == false) {
      L1DcacheMemory.allocate(address);
    }
  }

  action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (L1IcacheMemory.isTagPresent(address) == false) {
      L1IcacheMemory.allocate(address);
    }
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/Replacement/WriteBack from transient states
  transition({L1_IS, L1_IM, L1_ISI, L1_IMI, L1_IMS, L1_IMSI, L1_SI, L1_MI}, {Load, Ifetch, Store, L1_Replacement, L1_WriteBack}) {
    z_stall;
  }

  // Transitions from Idle
  transition({NP,L1_I}, {L1_Replacement, L1_WriteBack}) {
    ff_deallocateL1CacheBlock;
  }
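
  // A block in NP or L1_I holds no valid data, so replacement is just a tag
  // deallocation -- no PUTS/PUTX handshake with the L2 is needed.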

  transition({NP,L1_I}, Load, L1_IS) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    k_popMandatoryQueue;
  }

  transition({NP,L1_I}, Ifetch, L1_IS) {
    pp_allocateL1ICacheBlock;
    i_allocateTBE;
    f_issueGETINSTR;
    k_popMandatoryQueue;
  }

  transition({NP,L1_I}, Store, L1_IM) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    k_popMandatoryQueue;
  }

  // Transitions from Shared
  transition({L1_S}, {Load,Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(L1_S, Store, L1_IM) {
    i_allocateTBE;
    c_issueUPGRADE;
    k_popMandatoryQueue;
  }

  transition(L1_S, {L1_Replacement,L1_WriteBack}, L1_SI) {
    i_allocateTBE;
    q_issuePUTS;
    x_copyDataFromL1CacheToTBE;
    ff_deallocateL1CacheBlock;
  }

  transition(L1_S, L1_INV_S, L1_I) {
    t_sendInvAckToL2Cache;
    l_popRequestQueue;
  }

  // Transitions from Modified
  transition(L1_M, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(L1_M, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(L1_M, {L1_Replacement, L1_WriteBack}, L1_MI) {
    i_allocateTBE;
    d_issuePUTX;
    x_copyDataFromL1CacheToTBE;
    ff_deallocateL1CacheBlock;
  }
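
  // On eviction the data is copied into the TBE before the cache block is
  // deallocated, so if the L2's INV or DownGrade races with the PUTX, the
  // L1_MI transitions below can still supply the data from the TBE
  // (f_dataFromTBEToL2Cache).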

  transition(L1_M, L1_INV, L1_I) {
    e_dataFromL1CacheToL2Cache;
    l_popRequestQueue;
  }

  transition(L1_M, L1_DownGrade, L1_S) {
    e_dataFromL1CacheToL2Cache;
    l_popRequestQueue;
  }

  // Transitions from L1_IS
  transition(L1_IS, L1_INV_S, L1_ISI) {
    t_sendInvAckToL2Cache;
    l_popRequestQueue;
  }

  transition(L1_IS, L1_Data, L1_S) {
    u_writeDataToL1Cache;
    h_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition(L1_IS, L1_Data_I, L1_I) {
    u_writeDataToL1Cache;
    h_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  // Transitions from L1_ISI
  transition(L1_ISI, L1_Data, L1_I) {
    u_writeDataToL1Cache;
    h_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  // Transitions from L1_IM
  transition(L1_IM, L1_INV, L1_IMI) {  // we don't have to respond immediately because we know the data is coming
    l_popRequestQueue;
  }

  transition(L1_IM, L1_INV_S) {
    t_sendInvAckToL2Cache;
    l_popRequestQueue;
  }

  transition(L1_IM, L1_DownGrade, L1_IMS) {
    l_popRequestQueue;
  }

  transition(L1_IM, L1_Data, L1_M) {
    u_writeDataToL1Cache;
    hh_store_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition(L1_IM, L1_Data_S, L1_S) {
    u_writeDataToL1Cache;
    hh_store_hit;
    s_deallocateTBE;
    e_dataFromL1CacheToL2Cache;
    o_popIncomingResponseQueue;
  }

  transition(L1_IM, L1_Data_I, L1_I) {
    u_writeDataToL1Cache;
    hh_store_hit;
    s_deallocateTBE;
    e_dataFromL1CacheToL2Cache;
    o_popIncomingResponseQueue;
  }
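
  // In the L1_Data_S / L1_Data_I cases the pending store is performed
  // locally (hh_store_hit) before the block is handed back to the L2
  // (e_dataFromL1CacheToL2Cache), so the store's effect is included in the
  // data the L2 receives even though the L1 ends up in S or I.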

  // Transitions from L1_IMI - data should arrive and no requests are possible
  transition(L1_IMI, L1_Data, L1_I) {
    u_writeDataToL1Cache;
    hh_store_hit;
    s_deallocateTBE;
    e_dataFromL1CacheToL2Cache;
    o_popIncomingResponseQueue;
  }

  // Transitions from L1_IMS
  transition(L1_IMS, L1_Data, L1_S) {
    u_writeDataToL1Cache;
    hh_store_hit;
    s_deallocateTBE;
    e_dataFromL1CacheToL2Cache;
    o_popIncomingResponseQueue;
  }

  transition(L1_IMS, L1_INV_S, L1_IMSI) {
    l_popRequestQueue;
  }

  // Transitions from L1_IMSI
  transition(L1_IMSI, L1_Data, L1_I) {
    u_writeDataToL1Cache;
    hh_store_hit;
    s_deallocateTBE;
    e_dataFromL1CacheToL2Cache;
    o_popIncomingResponseQueue;
  }

  // Transitions from L1_SI
  transition(L1_SI, L1_INV_S) {
    t_sendInvAckToL2Cache;
    l_popRequestQueue;
  }

  transition(L1_SI, L1_PutAck, L1_I) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  // Transitions from L1_MI
  transition(L1_MI, L1_INV) {
    f_dataFromTBEToL2Cache;
    l_popRequestQueue;
  }

  transition(L1_MI, L1_DownGrade, L1_SI) {
    f_dataFromTBEToL2Cache;
    l_popRequestQueue;
  }

  transition(L1_MI, L1_PutAck, L1_I) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }
}