mem-ruby: Replace SLICC queueMemory calls with enqueue
gem5.git: src/mem/ruby/protocol/MI_example-dir.sm
/*
 * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:Directory, "Directory protocol")
    : DirectoryMemory * directory;
      Cycles directory_latency := 12;
      Cycles to_memory_controller_latency := 1;

      MessageBuffer * forwardFromDir, network="To", virtual_network="3",
            vnet_type="forward";
      MessageBuffer * responseFromDir, network="To", virtual_network="4",
            vnet_type="response";
      MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
            vnet_type="response";

      MessageBuffer * requestToDir, network="From", virtual_network="2",
            vnet_type="request";
      MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
            vnet_type="request";

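      // Buffers to and from the memory controller. As the change description
      // says, memory reads and writebacks are now issued by enqueuing on
      // requestToMemory (via memQueue_out below) and completions are consumed
      // from responseFromMemory, in place of the older queueMemory* calls.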
      MessageBuffer * requestToMemory;
      MessageBuffer * responseFromMemory;
{
  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, AccessPermission:Read_Write, desc="Invalid";
    M, AccessPermission:Invalid, desc="Modified";

    M_DRD, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA read";
    M_DWR, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA write";

    M_DWRI, AccessPermission:Busy, desc="Intermediate state M_DWR-->I";
    M_DRDI, AccessPermission:Busy, desc="Intermediate state M_DRD-->I";

    IM, AccessPermission:Busy, desc="Intermediate state I-->M";
    MI, AccessPermission:Busy, desc="Intermediate state M-->I";
    ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
    ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
  }

  // Events
  enumeration(Event, desc="Directory events") {
    // processor requests
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    PUTX_NotOwner, desc="A PUTX arrives from a non-owner";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry", main="false") {
    State DirectoryState, desc="Directory state";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Addr PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Data to be written (DMA write only)";
    int Len, desc="...";
    MachineID DmaRequestor, desc="DMA requestor";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  // ** OBJECTS **
  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

  Tick clockEdge();
  Cycles ticksToCycles(Tick t);
  Tick cyclesToTicks(Cycles c);
  void set_tbe(TBE b);
  void unset_tbe();

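  // Looks up the directory entry for addr; a new entry is allocated lazily on
  // the first access to a block.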
  Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
    return dir_entry;
  }

  State getState(TBE tbe, Addr addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (directory.isPresent(addr)) {
      return getDirectoryEntry(addr).DirectoryState;
    } else {
      return State:I;
    }
  }

  void setState(TBE tbe, Addr addr, State state) {

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (directory.isPresent(addr)) {

      if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      getDirectoryEntry(addr).DirectoryState := state;

      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return Directory_State_to_permission(tbe.TBEState);
    }

    if (directory.isPresent(addr)) {
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Addr addr, State state) {
    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
    }
  }

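  // Functional reads are serviced from a matching TBE (in-flight DMA data)
  // when one exists, otherwise from the backing memory; functional writes
  // update both the TBE (if any) and memory.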
  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      functionalMemoryRead(pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, tbe.DataBlk, pkt);
    }

    num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
    return num_functional_writes;
  }

  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
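  // Off-chip memory requests are sent by enqueuing MemoryMsg entries on this
  // port (backed by the requestToMemory buffer), replacing the former
  // queueMemory* helper calls referred to in the change description.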
  out_port(memQueue_out, MemoryMsg, requestToMemory);

  // ** IN_PORTS **
  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady(clockEdge())) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        TBE tbe := TBEs[in_msg.LineAddress];
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          trigger(Event:DMA_WRITE, in_msg.LineAddress, tbe);
        } else {
          error("Invalid message");
        }
      }
    }
  }

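  // CPU requests from the caches. A PUTX is split into PUTX vs. PUTX_NotOwner
  // depending on whether the requestor matches the recorded owner of the block.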
  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady(clockEdge())) {
      peek(requestQueue_in, RequestMsg) {
        TBE tbe := TBEs[in_msg.addr];
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.addr, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.addr, tbe);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          if (getDirectoryEntry(in_msg.addr).Owner.isElement(in_msg.Requestor)) {
            trigger(Event:PUTX, in_msg.addr, tbe);
          } else {
            trigger(Event:PUTX_NotOwner, in_msg.addr, tbe);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }

  //added by SS
  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, responseFromMemory) {
    if (memQueue_in.isReady(clockEdge())) {
      peek(memQueue_in, MemoryMsg) {
        TBE tbe := TBEs[in_msg.addr];
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.addr, tbe);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.addr, tbe);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(forwardNetwork_out, RequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.OriginalRequestorMachId;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
        assert(is_valid(tbe));
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;

        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
        assert(is_valid(tbe));
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;

        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
      assert(is_valid(tbe));
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(tbe.DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
    peek(requestQueue_in, RequestMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Requestor);
    }
  }

  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      APPEND_TRANSITION_COMMENT("Own: ");
      APPEND_TRANSITION_COMMENT(getDirectoryEntry(in_msg.addr).Owner);
      APPEND_TRANSITION_COMMENT("Req: ");
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination := getDirectoryEntry(in_msg.addr).Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := machineID;
        out_msg.Destination := getDirectoryEntry(in_msg.PhysicalAddress).Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue(clockEdge());
  }

  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    dmaRequestQueue_in.dequeue(clockEdge());
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.PhysicalAddress;
      tbe.Len := in_msg.Len;
      tbe.DmaRequestor := in_msg.Requestor;
    }
  }

  action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DmaRequestor := in_msg.Requestor;
    }
  }

  action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(z_recycleRequestQueue, "z", desc="recycle request queue") {
    requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
    dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

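  // The following actions hand requests to the memory controller by enqueuing
  // directly on memQueue_out with to_memory_controller_latency; the matching
  // MEMORY_READ / MEMORY_WB completions come back through memQueue_in above.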
  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
        out_msg.addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := in_msg.Requestor;
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.Len := 0;
      }
    }
  }

  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
        out_msg.addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := in_msg.Requestor;
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.Len := 0;
      }
    }
  }

  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
        out_msg.addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := in_msg.Requestor;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := in_msg.Len;
      }
    }
  }

  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
        out_msg.addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := in_msg.Requestor;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Len := tbe.Len;
      }
    }
  }

  action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
        out_msg.addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := in_msg.Requestor;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := 0;
      }
    }
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue(clockEdge());
  }

  // TRANSITIONS
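  // Requests that hit a block in a transient state are recycled until the
  // in-flight memory or DMA operation completes.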
  transition({M_DRD, M_DWR, M_DWRI, M_DRDI}, GETX) {
    z_recycleRequestQueue;
  }

  transition({IM, MI, ID, ID_W}, {GETX, GETS, PUTX, PUTX_NotOwner}) {
    z_recycleRequestQueue;
  }

  transition({IM, MI, ID, ID_W}, {DMA_READ, DMA_WRITE}) {
    y_recycleDMARequestQueue;
  }

  transition(I, GETX, IM) {
    //d_sendData;
    v_allocateTBEFromRequestNet;
    qf_queueMemoryFetchRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  transition(IM, Memory_Data, M) {
    d_sendData;
    //e_ownerIsRequestor;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(I, DMA_READ, ID) {
    //dr_sendDMAData;
    r_allocateTbeForDmaRead;
    qf_queueMemoryFetchRequestDMA;
    p_popIncomingDMARequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    //p_popIncomingDMARequestQueue;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(I, DMA_WRITE, ID_W) {
    v_allocateTBE;
    qw_queueMemoryWBRequest_partial;
    p_popIncomingDMARequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(M, DMA_READ, M_DRD) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  transition(M_DRD, PUTX, M_DRDI) {
    drp_sendDMAData;
    c_clearOwner;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(M_DRDI, Memory_Ack, I) {
    l_sendWriteBackAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  transition(M_DWR, PUTX, M_DWRI) {
    qw_queueMemoryWBRequest_partialTBE;
    c_clearOwner;
    i_popIncomingRequestQueue;
  }

  transition(M_DWRI, Memory_Ack, I) {
    l_sendWriteBackAck;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(M, GETX, M) {
    f_forwardRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTX, MI) {
    c_clearOwner;
    v_allocateTBEFromRequestNet;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(MI, Memory_Ack, I) {
    l_sendWriteBackAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(M, PUTX_NotOwner, M) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(I, PUTX_NotOwner, I) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }
}