/*
 * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(Directory, "Directory protocol")
    : DirectoryMemory * directory;
      MemoryControl * memBuffer;
      Cycles directory_latency := 12;

      MessageBuffer * forwardFromDir, network="To", virtual_network="3",
            ordered="false", vnet_type="forward";
      MessageBuffer * responseFromDir, network="To", virtual_network="4",
            ordered="false", vnet_type="response";
      MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
            ordered="true", vnet_type="response";

      MessageBuffer * requestToDir, network="From", virtual_network="2",
            ordered="true", vnet_type="request";
      MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
            ordered="true", vnet_type="request";
{
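    // Note: each message class travels on its own virtual network so
    // that responses are never blocked behind requests -- the usual
    // request/forward/response split used to avoid protocol deadlock.
    // Per the declarations above, the DMA and CPU request networks and
    // the DMA response network are point-to-point ordered; the forward
    // and response networks to the caches are not.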
    // STATES
    state_declaration(State, desc="Directory states", default="Directory_State_I") {
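        // Note the inverted permissions relative to a cache: when the
        // directory is in I no cache holds the block, so memory's copy
        // is authoritative (Read_Write here); when some cache owns the
        // block in M, the directory's copy may be stale (Invalid).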
        // Base states
        I, AccessPermission:Read_Write, desc="Invalid";
        M, AccessPermission:Invalid, desc="Modified";

        M_DRD, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA read";
        M_DWR, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA write";

        M_DWRI, AccessPermission:Busy, desc="Intermediate state M_DWR-->I";
        M_DRDI, AccessPermission:Busy, desc="Intermediate state M_DRD-->I";

        IM, AccessPermission:Busy, desc="Intermediate state I-->M";
        MI, AccessPermission:Busy, desc="Intermediate state M-->I";
        ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
        ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
    }

    // Events
    enumeration(Event, desc="Directory events") {
        // processor requests
        GETX, desc="A GETX arrives";
        GETS, desc="A GETS arrives";
        PUTX, desc="A PUTX arrives from the owner";
        PUTX_NotOwner, desc="A PUTX arrives from a non-owner";

        // DMA requests
        DMA_READ, desc="A DMA Read memory request";
        DMA_WRITE, desc="A DMA Write memory request";

        // Memory Controller
        Memory_Data, desc="Fetched data from memory arrives";
        Memory_Ack, desc="Writeback Ack from memory arrives";
    }

    // TYPES

    // DirectoryEntry
    structure(Entry, desc="...", interface="AbstractEntry") {
        State DirectoryState, desc="Directory state";
        DataBlock DataBlk, desc="data for the block";
        NetDest Sharers, desc="Sharers for this block";
        NetDest Owner, desc="Owner of this block";
    }

    // TBE entries for DMA requests
    structure(TBE, desc="TBE entries for outstanding DMA requests") {
        Address PhysicalAddress, desc="physical address";
        State TBEState, desc="Transient State";
        DataBlock DataBlk, desc="Data to be written (DMA write only)";
        int Len, desc="...";
        MachineID DmaRequestor, desc="DMA requestor";
    }

    structure(TBETable, external="yes") {
        TBE lookup(Address);
        void allocate(Address);
        void deallocate(Address);
        bool isPresent(Address);
    }
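    // TBETable is implemented in C++ (external="yes"); SLICC sees only
    // the interface above. The constructor argument below sizes the
    // table with m_number_of_TBEs entries, keyed by line address.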

    // ** OBJECTS **
    TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

    void set_tbe(TBE b);
    void unset_tbe();

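    // Directory entries are allocated lazily: the first access to an
    // address creates its entry, so the lookup below never returns an
    // invalid pointer.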
    Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
        Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

        if (is_valid(dir_entry)) {
            return dir_entry;
        }

        dir_entry := static_cast(Entry, "pointer",
                                 directory.allocate(addr, new Entry));
        return dir_entry;
    }

    State getState(TBE tbe, Address addr) {
        if (is_valid(tbe)) {
            return tbe.TBEState;
        } else if (directory.isPresent(addr)) {
            return getDirectoryEntry(addr).DirectoryState;
        } else {
            return State:I;
        }
    }

    void setState(TBE tbe, Address addr, State state) {

        if (is_valid(tbe)) {
            tbe.TBEState := state;
        }

        if (directory.isPresent(addr)) {

            if (state == State:M) {
                assert(getDirectoryEntry(addr).Owner.count() == 1);
                assert(getDirectoryEntry(addr).Sharers.count() == 0);
            }

            getDirectoryEntry(addr).DirectoryState := state;

            if (state == State:I) {
                assert(getDirectoryEntry(addr).Owner.count() == 0);
                assert(getDirectoryEntry(addr).Sharers.count() == 0);
                directory.invalidateBlock(addr);
            }
        }
    }

    AccessPermission getAccessPermission(Address addr) {
        TBE tbe := TBEs[addr];
        if (is_valid(tbe)) {
            return Directory_State_to_permission(tbe.TBEState);
        }

        if (directory.isPresent(addr)) {
            return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
        }

        return AccessPermission:NotPresent;
    }

    void setAccessPermission(Address addr, State state) {
        if (directory.isPresent(addr)) {
            getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
        }
    }

    DataBlock getDataBlock(Address addr), return_by_ref="yes" {
        TBE tbe := TBEs[addr];
        if (is_valid(tbe)) {
            return tbe.DataBlk;
        }

        return getDirectoryEntry(addr).DataBlk;
    }

    // ** OUT_PORTS **
    out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
    out_port(responseNetwork_out, ResponseMsg, responseFromDir);
    out_port(requestQueue_out, RequestMsg, requestToDir); // For recycling requests
    out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);

    //added by SS
    out_port(memQueue_out, MemoryMsg, memBuffer);
    // ** IN_PORTS **
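    // SLICC services in_ports bottom-up (last declared, highest
    // priority), so memory responses below win over new CPU requests,
    // which in turn win over new DMA requests.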

    in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
        if (dmaRequestQueue_in.isReady()) {
            peek(dmaRequestQueue_in, DMARequestMsg) {
                TBE tbe := TBEs[in_msg.LineAddress];
                if (in_msg.Type == DMARequestType:READ) {
                    trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
                } else if (in_msg.Type == DMARequestType:WRITE) {
                    trigger(Event:DMA_WRITE, in_msg.LineAddress, tbe);
                } else {
                    error("Invalid message");
                }
            }
        }
    }

    in_port(requestQueue_in, RequestMsg, requestToDir) {
        if (requestQueue_in.isReady()) {
            peek(requestQueue_in, RequestMsg) {
                TBE tbe := TBEs[in_msg.Addr];
                if (in_msg.Type == CoherenceRequestType:GETS) {
                    trigger(Event:GETS, in_msg.Addr, tbe);
                } else if (in_msg.Type == CoherenceRequestType:GETX) {
                    trigger(Event:GETX, in_msg.Addr, tbe);
                } else if (in_msg.Type == CoherenceRequestType:PUTX) {
                    if (getDirectoryEntry(in_msg.Addr).Owner.isElement(in_msg.Requestor)) {
                        trigger(Event:PUTX, in_msg.Addr, tbe);
                    } else {
                        trigger(Event:PUTX_NotOwner, in_msg.Addr, tbe);
                    }
                } else {
                    error("Invalid message");
                }
            }
        }
    }

    //added by SS
    // off-chip memory request/response is done
    in_port(memQueue_in, MemoryMsg, memBuffer) {
        if (memQueue_in.isReady()) {
            peek(memQueue_in, MemoryMsg) {
                TBE tbe := TBEs[in_msg.Addr];
                if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
                    trigger(Event:Memory_Data, in_msg.Addr, tbe);
                } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
                    trigger(Event:Memory_Ack, in_msg.Addr, tbe);
                } else {
                    DPRINTF(RubySlicc, "%s\n", in_msg.Type);
                    error("Invalid message");
                }
            }
        }
    }

    // Actions

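    // Each action(name, "shorthand", desc=...) pairs a callable action
    // with the short code that appears in the generated protocol
    // tables; the transitions at the bottom of the file compose these
    // actions in order.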
    action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
        peek(requestQueue_in, RequestMsg) {
            enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
                out_msg.Addr := address;
                out_msg.Type := CoherenceRequestType:WB_ACK;
                out_msg.Requestor := in_msg.Requestor;
                out_msg.Destination.add(in_msg.Requestor);
                out_msg.MessageSize := MessageSizeType:Writeback_Control;
            }
        }
    }

    action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
        peek(memQueue_in, MemoryMsg) {
            enqueue(forwardNetwork_out, RequestMsg, 1) {
                out_msg.Addr := address;
                out_msg.Type := CoherenceRequestType:WB_ACK;
                out_msg.Requestor := in_msg.OriginalRequestorMachId;
                out_msg.Destination.add(in_msg.OriginalRequestorMachId);
                out_msg.MessageSize := MessageSizeType:Writeback_Control;
            }
        }
    }

    action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
        peek(requestQueue_in, RequestMsg) {
            enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
                out_msg.Addr := address;
                out_msg.Type := CoherenceRequestType:WB_NACK;
                out_msg.Requestor := in_msg.Requestor;
                out_msg.Destination.add(in_msg.Requestor);
                out_msg.MessageSize := MessageSizeType:Writeback_Control;
            }
        }
    }

    action(c_clearOwner, "c", desc="Clear the owner field") {
        getDirectoryEntry(address).Owner.clear();
    }

    action(d_sendData, "d", desc="Send data to requestor") {
        peek(memQueue_in, MemoryMsg) {
            enqueue(responseNetwork_out, ResponseMsg, 1) {
                out_msg.Addr := address;
                out_msg.Type := CoherenceResponseType:DATA;
                out_msg.Sender := machineID;
                out_msg.Destination.add(in_msg.OriginalRequestorMachId);
                out_msg.DataBlk := in_msg.DataBlk;
                out_msg.MessageSize := MessageSizeType:Response_Data;
            }
        }
    }

    action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
        peek(memQueue_in, MemoryMsg) {
            enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
                assert(is_valid(tbe));
                out_msg.PhysicalAddress := address;
                out_msg.LineAddress := address;
                out_msg.Type := DMAResponseType:DATA;
                // we send the entire data block and rely on the dma
                // controller to split it up if need be
                out_msg.DataBlk := in_msg.DataBlk;
                out_msg.Destination.add(tbe.DmaRequestor);
                out_msg.MessageSize := MessageSizeType:Response_Data;
            }
        }
    }

    action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
        peek(requestQueue_in, RequestMsg) {
            enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
                assert(is_valid(tbe));
                out_msg.PhysicalAddress := address;
                out_msg.LineAddress := address;
                out_msg.Type := DMAResponseType:DATA;
                // we send the entire data block and rely on the dma
                // controller to split it up if need be
                out_msg.DataBlk := in_msg.DataBlk;
                out_msg.Destination.add(tbe.DmaRequestor);
                out_msg.MessageSize := MessageSizeType:Response_Data;
            }
        }
    }

    action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
        enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
            assert(is_valid(tbe));
            out_msg.PhysicalAddress := address;
            out_msg.LineAddress := address;
            out_msg.Type := DMAResponseType:ACK;
            out_msg.Destination.add(tbe.DmaRequestor);
            out_msg.MessageSize := MessageSizeType:Writeback_Control;
        }
    }

    action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
        peek(requestQueue_in, RequestMsg) {
            getDirectoryEntry(address).Owner.clear();
            getDirectoryEntry(address).Owner.add(in_msg.Requestor);
        }
    }

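    // 3-hop forwarding: rather than fetching the dirty block itself,
    // the directory hands the GETX/GETS straight to the current owner,
    // which responds to the requestor directly on the response network.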
    action(f_forwardRequest, "f", desc="Forward request to owner") {
        peek(requestQueue_in, RequestMsg) {
            APPEND_TRANSITION_COMMENT("Own: ");
            APPEND_TRANSITION_COMMENT(getDirectoryEntry(in_msg.Addr).Owner);
            APPEND_TRANSITION_COMMENT("Req: ");
            APPEND_TRANSITION_COMMENT(in_msg.Requestor);
            enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
                out_msg.Addr := address;
                out_msg.Type := in_msg.Type;
                out_msg.Requestor := in_msg.Requestor;
                out_msg.Destination := getDirectoryEntry(in_msg.Addr).Owner;
                out_msg.MessageSize := MessageSizeType:Writeback_Control;
            }
        }
    }

    action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
        peek(dmaRequestQueue_in, DMARequestMsg) {
            enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
                out_msg.Addr := address;
                out_msg.Type := CoherenceRequestType:INV;
                out_msg.Requestor := machineID;
                out_msg.Destination := getDirectoryEntry(in_msg.PhysicalAddress).Owner;
                out_msg.MessageSize := MessageSizeType:Writeback_Control;
            }
        }
    }

    action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
        requestQueue_in.dequeue();
    }

    action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
        dmaRequestQueue_in.dequeue();
    }

    action(l_writeDataToMemory, "pl", desc="Write PUTX data to memory") {
        peek(requestQueue_in, RequestMsg) {
            // assert(in_msg.Dirty);
            // assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
            getDirectoryEntry(in_msg.Addr).DataBlk := in_msg.DataBlk;
            //getDirectoryEntry(in_msg.Addr).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Addr), in_msg.Len);
        }
    }

    action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
        assert(is_valid(tbe));
        getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
    }

    action(v_allocateTBE, "v", desc="Allocate TBE") {
        peek(dmaRequestQueue_in, DMARequestMsg) {
            TBEs.allocate(address);
            set_tbe(TBEs[address]);
            tbe.DataBlk := in_msg.DataBlk;
            tbe.PhysicalAddress := in_msg.PhysicalAddress;
            tbe.Len := in_msg.Len;
            tbe.DmaRequestor := in_msg.Requestor;
        }
    }

    action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
        peek(dmaRequestQueue_in, DMARequestMsg) {
            TBEs.allocate(address);
            set_tbe(TBEs[address]);
            tbe.DmaRequestor := in_msg.Requestor;
        }
    }

    action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
        peek(requestQueue_in, RequestMsg) {
            TBEs.allocate(address);
            set_tbe(TBEs[address]);
            tbe.DataBlk := in_msg.DataBlk;
        }
    }

    action(w_deallocateTBE, "w", desc="Deallocate TBE") {
        TBEs.deallocate(address);
        unset_tbe();
    }

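    // recycle() differs from dequeue(): the message is not consumed but
    // made ready again after a recycle latency, which is how transient
    // states stall requests they cannot yet handle without losing them.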
    action(z_recycleRequestQueue, "z", desc="recycle request queue") {
        requestQueue_in.recycle();
    }

    action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
        dmaRequestQueue_in.recycle();
    }

    action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
        peek(requestQueue_in, RequestMsg) {
            enqueue(memQueue_out, MemoryMsg, 1) {
                out_msg.Addr := address;
                out_msg.Type := MemoryRequestType:MEMORY_READ;
                out_msg.Sender := machineID;
                out_msg.OriginalRequestorMachId := in_msg.Requestor;
                out_msg.MessageSize := in_msg.MessageSize;
                out_msg.DataBlk := getDirectoryEntry(in_msg.Addr).DataBlk;
                DPRINTF(RubySlicc, "%s\n", out_msg);
            }
        }
    }

    action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
        peek(dmaRequestQueue_in, DMARequestMsg) {
            enqueue(memQueue_out, MemoryMsg, 1) {
                out_msg.Addr := address;
                out_msg.Type := MemoryRequestType:MEMORY_READ;
                out_msg.Sender := machineID;
                //out_msg.OriginalRequestorMachId := machineID;
                out_msg.MessageSize := in_msg.MessageSize;
                out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
                DPRINTF(RubySlicc, "%s\n", out_msg);
            }
        }
    }

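    // DMA writes can be narrower than a cache line. copyPartial() merges
    // only Len bytes at the block offset of the DMA address into the
    // outgoing block, leaving the rest of the line intact.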
    action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
        peek(dmaRequestQueue_in, DMARequestMsg) {
            enqueue(memQueue_out, MemoryMsg, 1) {
                out_msg.Addr := address;
                out_msg.Type := MemoryRequestType:MEMORY_WB;
                //out_msg.OriginalRequestorMachId := machineID;
                //out_msg.DataBlk := in_msg.DataBlk;
                out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.PhysicalAddress), in_msg.Len);
                out_msg.MessageSize := in_msg.MessageSize;
                //out_msg.Prefetch := in_msg.Prefetch;

                DPRINTF(RubySlicc, "%s\n", out_msg);
            }
        }
    }

    action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
        peek(requestQueue_in, RequestMsg) {
            enqueue(memQueue_out, MemoryMsg, 1) {
                assert(is_valid(tbe));
                out_msg.Addr := address;
                out_msg.Type := MemoryRequestType:MEMORY_WB;
                out_msg.OriginalRequestorMachId := in_msg.Requestor;
                // get incoming data
                // out_msg.DataBlk := in_msg.DataBlk;
                out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
                out_msg.MessageSize := in_msg.MessageSize;
                //out_msg.Prefetch := in_msg.Prefetch;

                DPRINTF(RubySlicc, "%s\n", out_msg);
            }
        }
    }

    action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
        peek(requestQueue_in, RequestMsg) {
            enqueue(memQueue_out, MemoryMsg, 1) {
                out_msg.Addr := address;
                out_msg.Type := MemoryRequestType:MEMORY_WB;
                out_msg.Sender := machineID;
                out_msg.OriginalRequestorMachId := in_msg.Requestor;
                out_msg.DataBlk := in_msg.DataBlk;
                out_msg.MessageSize := in_msg.MessageSize;

                DPRINTF(RubySlicc, "%s\n", out_msg);
            }
        }
    }

    action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
        memQueue_in.dequeue();
    }

    action(w_writeDataToMemoryFromTBE, "\w", desc="Write data to directory memory from TBE") {
        assert(is_valid(tbe));
        getDirectoryEntry(address).DataBlk := tbe.DataBlk;
    }

    // TRANSITIONS

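    // Each transition(initial_state, event[, next_state]) { actions }
    // fires the listed actions in order when the event arrives in that
    // state; the first three transitions simply recycle requests that
    // hit a block in a transient state.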
    transition({M_DRD, M_DWR, M_DWRI, M_DRDI}, GETX) {
        z_recycleRequestQueue;
    }

    transition({IM, MI, ID, ID_W}, {GETX, GETS, PUTX, PUTX_NotOwner}) {
        z_recycleRequestQueue;
    }

    transition({IM, MI, ID, ID_W}, {DMA_READ, DMA_WRITE}) {
        y_recycleDMARequestQueue;
    }

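    // Demand-miss walkthrough: a GETX in I records the requestor as
    // owner and queues a memory read (I -> IM); when Memory_Data
    // returns, the block goes to the requestor and the line settles in M.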
    transition(I, GETX, IM) {
        //d_sendData;
        qf_queueMemoryFetchRequest;
        e_ownerIsRequestor;
        i_popIncomingRequestQueue;
    }

    transition(IM, Memory_Data, M) {
        d_sendData;
        //e_ownerIsRequestor;
        l_popMemQueue;
    }

    transition(I, DMA_READ, ID) {
        //dr_sendDMAData;
        r_allocateTbeForDmaRead;
        qf_queueMemoryFetchRequestDMA;
        p_popIncomingDMARequestQueue;
    }

    transition(ID, Memory_Data, I) {
        dr_sendDMAData;
        //p_popIncomingDMARequestQueue;
        w_deallocateTBE;
        l_popMemQueue;
    }

    transition(I, DMA_WRITE, ID_W) {
        v_allocateTBE;
        qw_queueMemoryWBRequest_partial;
        p_popIncomingDMARequestQueue;
    }

    transition(ID_W, Memory_Ack, I) {
        dwt_writeDMADataFromTBE;
        da_sendDMAAck;
        w_deallocateTBE;
        l_popMemQueue;
    }

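    // When a cache owns the block (M), DMA requests must first pull the
    // dirty copy back: the directory invalidates the owner, services the
    // DMA from the PUTX data, and writes the result back to memory.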
    transition(M, DMA_READ, M_DRD) {
        v_allocateTBE;
        inv_sendCacheInvalidate;
        p_popIncomingDMARequestQueue;
    }

    transition(M_DRD, PUTX, M_DRDI) {
        l_writeDataToMemory;
        drp_sendDMAData;
        c_clearOwner;
        l_queueMemoryWBRequest;
        i_popIncomingRequestQueue;
    }

    transition(M_DRDI, Memory_Ack, I) {
        l_sendWriteBackAck;
        w_deallocateTBE;
        l_popMemQueue;
    }

    transition(M, DMA_WRITE, M_DWR) {
        v_allocateTBE;
        inv_sendCacheInvalidate;
        p_popIncomingDMARequestQueue;
    }

    transition(M_DWR, PUTX, M_DWRI) {
        l_writeDataToMemory;
        qw_queueMemoryWBRequest_partialTBE;
        c_clearOwner;
        i_popIncomingRequestQueue;
    }

    transition(M_DWRI, Memory_Ack, I) {
        w_writeDataToMemoryFromTBE;
        l_sendWriteBackAck;
        da_sendDMAAck;
        w_deallocateTBE;
        l_popMemQueue;
    }

    transition(M, GETX, M) {
        f_forwardRequest;
        e_ownerIsRequestor;
        i_popIncomingRequestQueue;
    }

    transition(M, PUTX, MI) {
        c_clearOwner;
        v_allocateTBEFromRequestNet;
        l_queueMemoryWBRequest;
        i_popIncomingRequestQueue;
    }

    transition(MI, Memory_Ack, I) {
        w_writeDataToMemoryFromTBE;
        l_sendWriteBackAck;
        w_deallocateTBE;
        l_popMemQueue;
    }

    transition(M, PUTX_NotOwner, M) {
        b_sendWriteBackNack;
        i_popIncomingRequestQueue;
    }

    transition(I, PUTX_NotOwner, I) {
        b_sendWriteBackNack;
        i_popIncomingRequestQueue;
    }

}