/*
 * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Blake Hechtman
 */

machine(MachineType:TCC, "TCC Cache")
 : CacheMemory * L2cache;
   bool WB; /*is this cache Writeback?*/
   Cycles l2_request_latency := 50;
   Cycles l2_response_latency := 20;
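   // When WB is true the TCC acts as a writeback cache: write-through traffic
   // from the TCPs is absorbed locally (states M and W) and only sent to the
   // directory by wb_writeBack when the line is evicted or a RdBlk/Atomic
   // forces it out. When WB is false, every write is forwarded immediately
   // (wt_writeThrough) and the TCC never holds dirty data.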

  // From the TCPs or SQCs
  MessageBuffer * requestFromTCP, network="From", virtual_network="1", vnet_type="request";
  // To the Cores. TCC deals only with TCPs/SQCs.
  MessageBuffer * responseToCore, network="To", virtual_network="3", vnet_type="response";
  // From the NB
  MessageBuffer * probeFromNB, network="From", virtual_network="0", vnet_type="request";
  MessageBuffer * responseFromNB, network="From", virtual_network="2", vnet_type="response";
  // To the NB
  MessageBuffer * requestToNB, network="To", virtual_network="0", vnet_type="request";
  MessageBuffer * responseToNB, network="To", virtual_network="2", vnet_type="response";
  MessageBuffer * unblockToNB, network="To", virtual_network="4", vnet_type="unblock";

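  // Internal trigger queue, used to detect when all outstanding atomic
  // operations for a block have completed.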
  MessageBuffer * triggerQueue;

{
  // EVENTS
  enumeration(Event, desc="TCC Events") {
    // Requests coming from the Cores
    RdBlk,          desc="RdBlk event";
    WrVicBlk,       desc="L1 Write Through";
    WrVicBlkBack,   desc="L1 Write Through (dirty cache)";
    Atomic,         desc="Atomic Op";
    AtomicDone,     desc="AtomicOps Complete";
    AtomicNotDone,  desc="AtomicOps not Complete";
    Data,           desc="data message";
    // Coming from this TCC
    L2_Repl,        desc="L2 Replacement";
    // Probes
    PrbInv,         desc="Invalidating probe";
    // Coming from Memory Controller
    WBAck,          desc="writethrough ack from memory";
  }

  // STATES
  state_declaration(State, desc="TCC State", default="TCC_State_I") {
    M, AccessPermission:Read_Write, desc="Modified (dirty cache only)";
    W, AccessPermission:Read_Write, desc="Written (dirty cache only)";
    V, AccessPermission:Read_Only,  desc="Valid";
    I, AccessPermission:Invalid,    desc="Invalid";
    IV, AccessPermission:Busy,      desc="Waiting for Data";
    WI, AccessPermission:Busy,      desc="Waiting on Writethrough Ack";
    A, AccessPermission:Busy,       desc="Invalid, waiting on atomic Data";
  }
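
  // M and W are the dirty states; they are only entered through the
  // WrVicBlkBack transitions, i.e. when WB is true.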

  enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
    DataArrayRead,  desc="Read the data array";
    DataArrayWrite, desc="Write the data array";
    TagArrayRead,   desc="Read the tag array";
    TagArrayWrite,  desc="Write the tag array";
  }


  // STRUCTURES

  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState,    desc="cache state";
    bool Dirty,          desc="Is the data dirty (diff from memory?)";
    DataBlock DataBlk,   desc="Data for the block";
    WriteMask writeMask, desc="Dirty byte mask";
  }

  structure(TBE, desc="...") {
    State TBEState,      desc="Transient state";
    DataBlock DataBlk,   desc="data for the block";
    bool Dirty,          desc="Is the data dirty?";
    bool Shared,         desc="Victim hit by shared probe";
    MachineID From,      desc="Waiting for writeback from...";
    NetDest Destination, desc="Data destination";
    int numAtomics,      desc="number remaining atomics";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  TBETable TBEs, template="<TCC_TBE>", constructor="m_number_of_TBEs";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Addr a);

  MachineID mapAddressToMachine(Addr addr, MachineType mtype);

  // FUNCTION DEFINITIONS
  Tick clockEdge();

  Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", L2cache.lookup(addr));
  }

  DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
    return getCacheEntry(addr).DataBlk;
  }

  bool presentOrAvail(Addr addr) {
    return L2cache.isTagPresent(addr) || L2cache.cacheAvail(addr);
  }
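
  // presentOrAvail is what the in_ports below use to decide whether a fill or
  // a dirty write can be handled in place; if there is neither a hit nor a
  // free way, a victim is picked with L2cache.cacheProbe() and an L2_Repl
  // event is triggered for it first.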

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      functionalMemoryRead(pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
            testAndWrite(addr, tbe.DataBlk, pkt);
    }

    num_functional_writes := num_functional_writes +
          functionalMemoryWrite(pkt);
    return num_functional_writes;
  }
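
  // Functional accesses only consult the TBE and then fall through to memory;
  // the L2 data array itself is not searched by these helpers.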

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe)) {
      return TCC_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return TCC_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(TCC_State_to_permission(state));
    }
  }

  void recordRequestType(RequestType request_type, Addr addr) {
    if (request_type == RequestType:DataArrayRead) {
      L2cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
    } else if (request_type == RequestType:DataArrayWrite) {
      L2cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
    } else if (request_type == RequestType:TagArrayRead) {
      L2cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
    } else if (request_type == RequestType:TagArrayWrite) {
      L2cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
    }
  }

  bool checkResourceAvailable(RequestType request_type, Addr addr) {
    if (request_type == RequestType:DataArrayRead) {
      return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:DataArrayWrite) {
      return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:TagArrayRead) {
      return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else if (request_type == RequestType:TagArrayWrite) {
      return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else {
      error("Invalid RequestType type in checkResourceAvailable");
      return true;
    }
  }


  // ** OUT_PORTS **

  // Three classes of ports
  // Class 1: downward facing network links to NB
  out_port(requestToNB_out, CPURequestMsg, requestToNB);
  out_port(responseToNB_out, ResponseMsg, responseToNB);
  out_port(unblockToNB_out, UnblockMsg, unblockToNB);

  // Class 2: upward facing ports to GPU cores
  out_port(responseToCore_out, ResponseMsg, responseToCore);

  // Class 3: internal trigger queue
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);


  // ** IN_PORTS **
  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
    if (triggerQueue_in.isReady(clockEdge())) {
      peek(triggerQueue_in, TriggerMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (tbe.numAtomics == 0) {
          trigger(Event:AtomicDone, in_msg.addr, cache_entry, tbe);
        } else {
          trigger(Event:AtomicNotDone, in_msg.addr, cache_entry, tbe);
        }
      }
    }
  }
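
  // The trigger message carries no data; the port simply re-checks
  // tbe.numAtomics, so AtomicNotDone is raised (and the trigger dropped) if
  // more atomics were issued after the trigger was posted.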


  in_port(responseFromNB_in, ResponseMsg, responseFromNB) {
    if (responseFromNB_in.isReady(clockEdge())) {
      peek(responseFromNB_in, ResponseMsg, block_on="addr") {
        TBE tbe := TBEs.lookup(in_msg.addr);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (in_msg.Type == CoherenceResponseType:NBSysResp) {
          if (presentOrAvail(in_msg.addr)) {
            trigger(Event:Data, in_msg.addr, cache_entry, tbe);
          } else {
            Addr victim := L2cache.cacheProbe(in_msg.addr);
            trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
          }
        } else if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
          trigger(Event:WBAck, in_msg.addr, cache_entry, tbe);
        } else {
          error("Unexpected Response Message to Core");
        }
      }
    }
  }

  // Finally handling incoming requests (from TCP) and probes (from NB).
  in_port(probeNetwork_in, NBProbeRequestMsg, probeFromNB) {
    if (probeNetwork_in.isReady(clockEdge())) {
      peek(probeNetwork_in, NBProbeRequestMsg) {
        DPRINTF(RubySlicc, "%s\n", in_msg);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs.lookup(in_msg.addr);
        trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
      }
    }
  }
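
  // Every probe from the NB is treated as an invalidating probe; the TCC
  // never supplies data on a probe response (see pi_sendProbeResponseInv).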

  in_port(coreRequestNetwork_in, CPURequestMsg, requestFromTCP, rank=0) {
    if (coreRequestNetwork_in.isReady(clockEdge())) {
      peek(coreRequestNetwork_in, CPURequestMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (in_msg.Type == CoherenceRequestType:WriteThrough) {
          if (WB) {
            if (presentOrAvail(in_msg.addr)) {
              trigger(Event:WrVicBlkBack, in_msg.addr, cache_entry, tbe);
            } else {
              Addr victim := L2cache.cacheProbe(in_msg.addr);
              trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
            }
          } else {
            trigger(Event:WrVicBlk, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:Atomic) {
          trigger(Event:Atomic, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:RdBlk) {
          trigger(Event:RdBlk, in_msg.addr, cache_entry, tbe);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg);
          error("Unexpected Request Message from TCP/SQC");
        }
      }
    }
  }
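
  // When presentOrAvail fails in the ports above, the L2_Repl event is
  // triggered for the victim and the original request or response is not
  // consumed; it stays at the head of its queue and is handled again once the
  // victim's way has been freed.
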
  // BEGIN ACTIONS

  action(i_invL2, "i", desc="invalidate TCC cache block") {
    if (is_valid(cache_entry)) {
      L2cache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(sd_sendData, "sd", desc="send Shared response") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:TDSysResp;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.Dirty := false;
        out_msg.State := CoherenceState:Shared;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(sdr_sendDataResponse, "sdr", desc="send Shared response") {
    enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:TDSysResp;
      out_msg.Sender := machineID;
      out_msg.Destination := tbe.Destination;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.MessageSize := MessageSizeType:Response_Data;
      out_msg.Dirty := false;
      out_msg.State := CoherenceState:Shared;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
    enqueue(unblockToNB_out, UnblockMsg, 1) {
      out_msg.addr := address;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(rd_requestData, "r", desc="Miss in L2, pass on") {
    if (tbe.Destination.count() == 1) {
      peek(coreRequestNetwork_in, CPURequestMsg) {
        enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
          out_msg.addr := address;
          out_msg.Type := in_msg.Type;
          out_msg.Requestor := machineID;
          out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
          out_msg.Shared := false; // unneeded for this request
          out_msg.MessageSize := in_msg.MessageSize;
          DPRINTF(RubySlicc, "%s\n", out_msg);
        }
      }
    }
  }
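
  // Only the requestor that makes tbe.Destination.count() == 1 (the first
  // miss) actually sends a request to the directory; later requestors for the
  // same block are merged into tbe.Destination by t_allocateTBE and are all
  // answered by the single sdr_sendDataResponse when the data returns.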

  action(w_sendResponseWBAck, "w", desc="send WB Ack") {
    peek(responseFromNB_in, ResponseMsg) {
      enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:TDSysWBAck;
        out_msg.Destination.clear();
        out_msg.Destination.add(in_msg.WTRequestor);
        out_msg.Sender := machineID;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(swb_sendWBAck, "swb", desc="send WB Ack") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:TDSysWBAck;
        out_msg.Destination.clear();
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Sender := machineID;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(ar_sendAtomicResponse, "ar", desc="send Atomic Ack") {
    peek(responseFromNB_in, ResponseMsg) {
      enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:TDSysResp;
        out_msg.Destination.add(in_msg.WTRequestor);
        out_msg.Sender := machineID;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
      }
    }
  }

  action(a_allocateBlock, "a", desc="allocate TCC block") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L2cache.allocate(address, new Entry));
      cache_entry.writeMask.clear();
    }
  }

  action(t_allocateTBE, "t", desc="allocate TBE Entry") {
    if (is_invalid(tbe)) {
      check_allocate(TBEs);
      TBEs.allocate(address);
      set_tbe(TBEs.lookup(address));
      tbe.Destination.clear();
      tbe.numAtomics := 0;
    }
    if (coreRequestNetwork_in.isReady(clockEdge())) {
      peek(coreRequestNetwork_in, CPURequestMsg) {
        if (in_msg.Type == CoherenceRequestType:RdBlk || in_msg.Type == CoherenceRequestType:Atomic) {
          tbe.Destination.add(in_msg.Requestor);
        }
      }
    }
  }

  action(dt_deallocateTBE, "dt", desc="Deallocate TBE entry") {
    tbe.Destination.clear();
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(wcb_writeCacheBlock, "wcb", desc="write data to TCC") {
    peek(responseFromNB_in, ResponseMsg) {
      cache_entry.DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
    }
  }

  action(wdb_writeDirtyBytes, "wdb", desc="write dirty bytes to TCC") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      cache_entry.DataBlk.copyPartial(in_msg.DataBlk, in_msg.writeMask);
      cache_entry.writeMask.orMask(in_msg.writeMask);
      DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
    }
  }

  action(wt_writeThrough, "wt", desc="write through data to directory") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
        out_msg.addr := address;
        out_msg.Requestor := machineID;
        out_msg.WTRequestor := in_msg.Requestor;
        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.MessageSize := MessageSizeType:Data;
        out_msg.Type := CoherenceRequestType:WriteThrough;
        out_msg.Dirty := true;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.writeMask.orMask(in_msg.writeMask);
      }
    }
  }

  action(wb_writeBack, "wb", desc="write back data") {
    enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
      out_msg.addr := address;
      out_msg.Requestor := machineID;
      out_msg.WTRequestor := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.MessageSize := MessageSizeType:Data;
      out_msg.Type := CoherenceRequestType:WriteThrough;
      out_msg.Dirty := true;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.writeMask.orMask(cache_entry.writeMask);
    }
  }

  action(at_atomicThrough, "at", desc="send atomic request through to directory") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
        out_msg.addr := address;
        out_msg.Requestor := machineID;
        out_msg.WTRequestor := in_msg.Requestor;
        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.MessageSize := MessageSizeType:Data;
        out_msg.Type := CoherenceRequestType:Atomic;
        out_msg.Dirty := true;
        out_msg.writeMask.orMask(in_msg.writeMask);
      }
    }
  }

  action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
    enqueue(responseToNB_out, ResponseMsg, 1) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC, L3 respond in same way to probes
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.Dirty := false;
      out_msg.Hit := false;
      out_msg.Ntsl := true;
      out_msg.State := CoherenceState:NA;
      out_msg.MessageSize := MessageSizeType:Response_Control;
    }
  }

  action(ut_updateTag, "ut", desc="update Tag (i.e. set MRU)") {
    L2cache.setMRU(address);
  }

  action(p_popRequestQueue, "p", desc="pop request queue") {
    coreRequestNetwork_in.dequeue(clockEdge());
  }

  action(pr_popResponseQueue, "pr", desc="pop response queue") {
    responseFromNB_in.dequeue(clockEdge());
  }

  action(pp_popProbeQueue, "pp", desc="pop probe queue") {
    probeNetwork_in.dequeue(clockEdge());
  }

  action(z_stall, "z", desc="stall") {
    // built-in
  }

  action(ina_incrementNumAtomics, "ina", desc="inc num atomics") {
    tbe.numAtomics := tbe.numAtomics + 1;
  }

  action(dna_decrementNumAtomics, "dna", desc="dec num atomics") {
    tbe.numAtomics := tbe.numAtomics - 1;
    if (tbe.numAtomics == 0) {
      enqueue(triggerQueue_out, TriggerMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := TriggerType:AtomicDone;
      }
    }
  }
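
  // When the last outstanding atomic response arrives, this posts an
  // AtomicDone trigger; the triggerQueue_in port then moves the block from A
  // back to I.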

  action(ptr_popTriggerQueue, "ptr", desc="pop Trigger") {
    triggerQueue_in.dequeue(clockEdge());
  }

  // END ACTIONS

  // BEGIN TRANSITIONS
  // transitions from base
  // Assumptions for ArrayRead/Write
  // TBE checked before tags
  // Data Read/Write requires Tag Read

  // Stalling transitions do NOT check the tag array...and if they do,
  // they can cause a resource stall deadlock!

  transition(WI, {RdBlk, WrVicBlk, Atomic, WrVicBlkBack}) { //TagArrayRead} {
    z_stall;
  }
  transition(A, {RdBlk, WrVicBlk, WrVicBlkBack}) { //TagArrayRead} {
    z_stall;
  }
  transition(IV, {WrVicBlk, Atomic, WrVicBlkBack}) { //TagArrayRead} {
    z_stall;
  }
  transition({M, V}, RdBlk) {TagArrayRead, DataArrayRead} {
    sd_sendData;
    ut_updateTag;
    p_popRequestQueue;
  }
  transition(W, RdBlk, WI) {TagArrayRead, DataArrayRead} {
    t_allocateTBE;
    wb_writeBack;
  }
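  // Note that in the W -> WI transition above the RdBlk is not popped: it
  // stays at the head of the request queue, stalls while the line is in WI,
  // and is handled again as an ordinary miss from state I once the writeback
  // has been acknowledged.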

  transition(I, RdBlk, IV) {TagArrayRead} {
    t_allocateTBE;
    rd_requestData;
    p_popRequestQueue;
  }

  transition(IV, RdBlk) {
    t_allocateTBE;
    rd_requestData;
    p_popRequestQueue;
  }

  transition({V, I}, Atomic, A) {TagArrayRead} {
    i_invL2;
    t_allocateTBE;
    at_atomicThrough;
    ina_incrementNumAtomics;
    p_popRequestQueue;
  }
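
  // Atomics are never performed in the TCC: any local copy is invalidated and
  // the operation is forwarded to the directory (at_atomicThrough), with
  // tbe.numAtomics counting the responses that are still outstanding.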

  transition(A, Atomic) {
    at_atomicThrough;
    ina_incrementNumAtomics;
    p_popRequestQueue;
  }

  transition({M, W}, Atomic, WI) {TagArrayRead} {
    t_allocateTBE;
    wb_writeBack;
  }

  transition(I, WrVicBlk) {TagArrayRead} {
    wt_writeThrough;
    p_popRequestQueue;
  }

  transition(V, WrVicBlk) {TagArrayRead, DataArrayWrite} {
    ut_updateTag;
    wdb_writeDirtyBytes;
    wt_writeThrough;
    p_popRequestQueue;
  }

  transition({V, M}, WrVicBlkBack, M) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    ut_updateTag;
    swb_sendWBAck;
    wdb_writeDirtyBytes;
    p_popRequestQueue;
  }

  transition(W, WrVicBlkBack) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    ut_updateTag;
    swb_sendWBAck;
    wdb_writeDirtyBytes;
    p_popRequestQueue;
  }

  transition(I, WrVicBlkBack, W) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    a_allocateBlock;
    ut_updateTag;
    swb_sendWBAck;
    wdb_writeDirtyBytes;
    p_popRequestQueue;
  }

  transition({W, M}, L2_Repl, WI) {TagArrayRead, DataArrayRead} {
    t_allocateTBE;
    wb_writeBack;
    i_invL2;
  }

  transition({I, V}, L2_Repl, I) {TagArrayRead, TagArrayWrite} {
    i_invL2;
  }

  transition({A, IV, WI}, L2_Repl) {
    i_invL2;
  }

  transition({I, V}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
    pi_sendProbeResponseInv;
    pp_popProbeQueue;
  }

  transition(M, PrbInv, W) {TagArrayRead, TagArrayWrite} {
    pi_sendProbeResponseInv;
    pp_popProbeQueue;
  }
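  // A probe that hits a dirty (M) block is answered without data (Hit and
  // Dirty are false, Ntsl is true) and the block is kept in W, so the dirty
  // data can still be written back later.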

  transition(W, PrbInv) {TagArrayRead} {
    pi_sendProbeResponseInv;
    pp_popProbeQueue;
  }

  transition({A, IV, WI}, PrbInv) {
    pi_sendProbeResponseInv;
    pp_popProbeQueue;
  }

  transition(IV, Data, V) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    a_allocateBlock;
    ut_updateTag;
    wcb_writeCacheBlock;
    sdr_sendDataResponse;
    pr_popResponseQueue;
    dt_deallocateTBE;
  }
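  // The fill both installs the block and, via sdr_sendDataResponse, forwards
  // the data to every requestor recorded in tbe.Destination before unblocking
  // the directory.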

  transition(A, Data) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    a_allocateBlock;
    ar_sendAtomicResponse;
    dna_decrementNumAtomics;
    pr_popResponseQueue;
  }

  transition(A, AtomicDone, I) {TagArrayRead, TagArrayWrite} {
    dt_deallocateTBE;
    ptr_popTriggerQueue;
  }

  transition(A, AtomicNotDone) {TagArrayRead} {
    ptr_popTriggerQueue;
  }

  // M, W should not see WBAck as the cache is in WB mode
  // WBAcks do not need to check tags
  transition({I, V, IV, A}, WBAck) {
    w_sendResponseWBAck;
    pr_popResponseQueue;
  }

  transition(WI, WBAck, I) {
    dt_deallocateTBE;
    pr_popResponseQueue;
  }
}