/*
 * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(Directory, "Directory protocol")
: DirectoryMemory * directory,
  MemoryControl * memBuffer,
  int directory_latency = 12
{

  MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
  MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
  MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true", vnet_type="response";

  MessageBuffer requestToDir, network="From", virtual_network="2", ordered="true", vnet_type="request";
  MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
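
  // The network="To"/"From" attributes are relative to this controller,
  // and the virtual_network numbers must line up with the cache and DMA
  // controllers of this protocol. The two request networks (vnets 0 and
  // 2) and the DMA response network (vnet 1) are declared ordered,
  // presumably so requests and DMA responses cannot be reordered in
  // flight; the forward and response networks are unordered.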

  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, AccessPermission:Read_Write, desc="Invalid";
    M, AccessPermission:Invalid, desc="Modified";

    M_DRD, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA read";
    M_DWR, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA write";

    M_DWRI, AccessPermission:Busy, desc="Intermediate state M_DWR-->I";
    M_DRDI, AccessPermission:Busy, desc="Intermediate state M_DRD-->I";

    IM, AccessPermission:Busy, desc="Intermediate state I-->M";
    MI, AccessPermission:Busy, desc="Intermediate state M-->I";
    ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
    ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
  }
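
  // Rough lifecycle of a block, as implemented by the transitions below:
  //   I --GETX-->      IM    --Memory_Data--> M   (fetch line for a processor)
  //   M --PUTX-->      MI    --Memory_Ack-->  I   (owner writes the line back)
  //   I --DMA_READ-->  ID    --Memory_Data--> I   (DMA reads a clean line)
  //   I --DMA_WRITE--> ID_W  --Memory_Ack-->  I   (DMA writes a clean line)
  //   M --DMA_READ-->  M_DRD --PUTX--> M_DRDI --Memory_Ack--> I
  //   M --DMA_WRITE--> M_DWR --PUTX--> M_DWRI --Memory_Ack--> I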

  // Events
  enumeration(Event, desc="Directory events") {
    // processor requests
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    PUTX_NotOwner, desc="A PUTX arrives from a requestor that is not the current owner";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Data to be written (DMA write only)";
    int Len, desc="...";
    MachineID DmaRequestor, desc="DMA requestor";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // ** OBJECTS **
  TBETable TBEs, template="<Directory_TBE>";

  void set_tbe(TBE b);
  void unset_tbe();
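
  // set_tbe()/unset_tbe() bind and clear the implicit "tbe" variable
  // that the actions below reference (e.g. tbe.DmaRequestor); the TBE
  // passed as the third argument to trigger() in the in_ports is
  // likewise made visible to the fired transition's actions.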

  Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
    return dir_entry;
  }
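
  // Directory entries are allocated on demand: looking up an address
  // that DirectoryMemory has not seen yet allocates a fresh Entry,
  // which starts in the declared default state I.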

  State getState(TBE tbe, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (directory.isPresent(addr)) {
      return getDirectoryEntry(addr).DirectoryState;
    } else {
      return State:I;
    }
  }

  void setState(TBE tbe, Address addr, State state) {

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (directory.isPresent(addr)) {

      if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      getDirectoryEntry(addr).DirectoryState := state;

      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
        directory.invalidateBlock(addr);
      }
    }
  }
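
  // The asserts above capture the protocol's ownership invariants: a
  // block entering M must have exactly one owner and no sharers, and a
  // block returning to I must have neither before its entry is
  // invalidated and reclaimed.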

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return Directory_State_to_permission(tbe.TBEState);
    }

    if (directory.isPresent(addr)) {
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Address addr, State state) {
    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
    }
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    return getDirectoryEntry(addr).DataBlk;
  }

  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);

  //added by SS
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        TBE tbe := TBEs[in_msg.LineAddress];
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          trigger(Event:DMA_WRITE, in_msg.LineAddress, tbe);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady()) {
      peek(requestQueue_in, RequestMsg) {
        TBE tbe := TBEs[in_msg.Address];
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address, tbe);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          if (getDirectoryEntry(in_msg.Address).Owner.isElement(in_msg.Requestor)) {
            trigger(Event:PUTX, in_msg.Address, tbe);
          } else {
            trigger(Event:PUTX_NotOwner, in_msg.Address, tbe);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }
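
  // PUTX is split into two events: a PUTX from the recorded owner is a
  // legitimate writeback, while one from any other machine (e.g. an
  // owner that already lost the block to a racing GETX) fires
  // PUTX_NotOwner and is nacked by b_sendWriteBackNack below.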

  //added by SS
  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        TBE tbe := TBEs[in_msg.Address];
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address, tbe);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address, tbe);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.OriginalRequestorMachId;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        assert(is_valid(tbe));
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        assert(is_valid(tbe));
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
      assert(is_valid(tbe));
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(tbe.DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
    peek(requestQueue_in, RequestMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Requestor);
    }
  }

  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      APPEND_TRANSITION_COMMENT("Own: ");
      APPEND_TRANSITION_COMMENT(getDirectoryEntry(in_msg.Address).Owner);
      APPEND_TRANSITION_COMMENT("Req: ");
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination := getDirectoryEntry(in_msg.Address).Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := machineID;
        out_msg.Destination := getDirectoryEntry(in_msg.PhysicalAddress).Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    dmaRequestQueue_in.dequeue();
  }

  action(l_writeDataToMemory, "pl", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      // assert(in_msg.Dirty);
      // assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      //getDirectoryEntry(in_msg.Address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
    }
  }

  action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    assert(is_valid(tbe));
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
  }
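
  // copyPartial merges only tbe.Len bytes starting at the block offset
  // of tbe.PhysicalAddress, so a sub-line DMA write updates just the
  // bytes the DMA engine actually wrote rather than replacing the
  // whole line.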

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.PhysicalAddress;
      tbe.Len := in_msg.Len;
      tbe.DmaRequestor := in_msg.Requestor;
    }
  }

  action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DmaRequestor := in_msg.Requestor;
    }
  }

  action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(z_recycleRequestQueue, "z", desc="recycle request queue") {
    requestQueue_in.recycle();
  }

  action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
    dmaRequestQueue_in.recycle();
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        //out_msg.OriginalRequestorMachId := machineID;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        //out_msg.OriginalRequestorMachId := machineID;
        //out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.PhysicalAddress), in_msg.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        // get incoming data
        // out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(w_writeDataToMemoryFromTBE, "\w", desc="Write data to directory memory from TBE") {
    //getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
    assert(is_valid(tbe));
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk,
                                                   addressOffset(tbe.PhysicalAddress),
                                                   tbe.Len);
  }

  // TRANSITIONS

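  // Each transition reads transition(current_state(s), event(s),
  // next_state) { actions }, with the actions executed in order;
  // omitting the next_state leaves the block's state unchanged.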

  transition({M_DRD, M_DWR, M_DWRI, M_DRDI}, GETX) {
    z_recycleRequestQueue;
  }

  transition({IM, MI, ID, ID_W}, {GETX, GETS, PUTX, PUTX_NotOwner}) {
    z_recycleRequestQueue;
  }

  transition({IM, MI, ID, ID_W}, {DMA_READ, DMA_WRITE}) {
    y_recycleDMARequestQueue;
  }
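
  // A request that finds its block in a transient state is not
  // dropped: recycle() re-enqueues the message on its queue so the
  // request is retried after the outstanding memory access or
  // invalidation completes.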

  transition(I, GETX, IM) {
    //d_sendData;
    qf_queueMemoryFetchRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  transition(IM, Memory_Data, M) {
    d_sendData;
    //e_ownerIsRequestor;
    l_popMemQueue;
  }

  transition(I, DMA_READ, ID) {
    //dr_sendDMAData;
    r_allocateTbeForDmaRead;
    qf_queueMemoryFetchRequestDMA;
    p_popIncomingDMARequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    //p_popIncomingDMARequestQueue;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(I, DMA_WRITE, ID_W) {
    v_allocateTBE;
    qw_queueMemoryWBRequest_partial;
    p_popIncomingDMARequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    dwt_writeDMADataFromTBE;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(M, DMA_READ, M_DRD) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  transition(M_DRD, PUTX, M_DRDI) {
    l_writeDataToMemory;
    drp_sendDMAData;
    c_clearOwner;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(M_DRDI, Memory_Ack, I) {
    l_sendWriteBackAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  transition(M_DWR, PUTX, M_DWRI) {
    l_writeDataToMemory;
    qw_queueMemoryWBRequest_partialTBE;
    c_clearOwner;
    i_popIncomingRequestQueue;
  }

  transition(M_DWRI, Memory_Ack, I) {
    w_writeDataToMemoryFromTBE;
    l_sendWriteBackAck;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(M, GETX, M) {
    f_forwardRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTX, MI) {
    l_writeDataToMemory;
    c_clearOwner;
    v_allocateTBEFromRequestNet;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(MI, Memory_Ack, I) {
    w_writeDataToMemoryFromTBE;
    l_sendWriteBackAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(M, PUTX_NotOwner, M) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(I, PUTX_NotOwner, I) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

}