/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
 */

// This file is copied from Yasuko Watanabe's prefetch / memory protocol
// Copied here by aep 12/14/07


machine(Directory, "MESI_CMP_filter_directory protocol")
 : DirectoryMemory * directory,
   MemoryControl * memBuffer,
   Cycles to_mem_ctrl_latency = 1,
   Cycles directory_latency = 6
{
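
  // The parameters above (directory, memBuffer, and the two latencies) become
  // configuration parameters of the generated Directory_Controller Python
  // object.  A rough sketch of how a Ruby config script might instantiate this
  // controller; the surrounding object names are assumptions, not taken from
  // this file:
  //
  //   dir_cntrl = Directory_Controller(version = i,
  //                                    directory = RubyDirectoryMemory(version = i),
  //                                    memBuffer = MemoryControl(version = i),  # assumed class name
  //                                    to_mem_ctrl_latency = 1,
  //                                    directory_latency = 6,
  //                                    ruby_system = ruby_system)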

  MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false", vnet_type="request";
  MessageBuffer responseToDir, network="From", virtual_network="1", ordered="false", vnet_type="response";

  MessageBuffer requestFromDir, network="To", virtual_network="0", ordered="false", vnet_type="request";
  MessageBuffer responseFromDir, network="To", virtual_network="1", ordered="false", vnet_type="response";

  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, AccessPermission:Read_Write, desc="dir is the owner and memory is up-to-date, all other copies are Invalid";
    ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
    ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";

    M, AccessPermission:Maybe_Stale, desc="memory copy may be stale, i.e. other modified copies may exist";
    IM, AccessPermission:Busy, desc="Intermediate state I->M";
    MI, AccessPermission:Busy, desc="Intermediate state M->I";
    M_DRD, AccessPermission:Busy, desc="DMA read to a modified block; invalidation sent, waiting for the owner's data";
    M_DRDI, AccessPermission:Busy, desc="DMA read data forwarded; waiting for the memory writeback ack";
    M_DWR, AccessPermission:Busy, desc="DMA write to a modified block; invalidation sent, waiting for the owner's data";
    M_DWRI, AccessPermission:Busy, desc="DMA write pending; waiting for the memory writeback ack";
  }

  // Events
  enumeration(Event, desc="Directory events") {
    Fetch, desc="A memory fetch arrives";
    Data, desc="writeback data arrives";
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
    //added by SS for dma
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";
    CleanReplacement, desc="Clean Replacement in L2 cache";
  }

  // TYPES

  // DirectoryEntry
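  // Note: this controller only maintains the Owner field below; Sharers is
  // declared but never populated here (the assertions in setState expect it
  // to remain empty).  In this protocol, sharing among the L1s is tracked by
  // the inclusive L2, not by the directory.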
  structure(Entry, desc="Directory entry for a memory block", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
  }

  // TBE entries for DMA requests
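  // A TBE is only allocated for a DMA write that finds the line in M
  // (transition M -> M_DWR below); DMA reads, and DMA accesses to an
  // Invalid line, complete without one.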
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Data to be written (DMA write only)";
    int Len, desc="Length of the DMA request";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }


  // ** OBJECTS **

  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

  void set_tbe(TBE tbe);
  void unset_tbe();
  void wakeUpBuffers(Address a);

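  // Look up the directory entry for addr, allocating one on first touch;
  // newly allocated blocks start in the default state I.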
  Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
    return dir_entry;
  }

  State getState(TBE tbe, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (directory.isPresent(addr)) {
      return getDirectoryEntry(addr).DirectoryState;
    } else {
      return State:I;
    }
  }

  void setState(TBE tbe, Address addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (directory.isPresent(addr)) {
      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      } else if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      getDirectoryEntry(addr).DirectoryState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(tbe.TBEState));
      return Directory_State_to_permission(tbe.TBEState);
    }

    if (directory.isPresent(addr)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
    return AccessPermission:NotPresent;
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return tbe.DataBlk;
    }

    return getDirectoryEntry(addr).DataBlk;
  }

  void setAccessPermission(Address addr, State state) {
    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
    }
  }

  bool isGETRequest(CoherenceRequestType type) {
    return (type == CoherenceRequestType:GETS) ||
           (type == CoherenceRequestType:GET_INSTR) ||
           (type == CoherenceRequestType:GETX);
  }


  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

  in_port(requestNetwork_in, RequestMsg, requestToDir, rank = 0) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (isGETRequest(in_msg.Type)) {
          trigger(Event:Fetch, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:DMA_READ, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg);
          error("Invalid message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToDir, rank = 1) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
          trigger(Event:Data, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:CleanReplacement, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer, rank = 2) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Actions
  action(a_sendAck, "a", desc="Send ack to L2") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:MEMORY_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Sender);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:MEMORY_DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(aa_sendAck, "aa", desc="Send ack to L2") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:MEMORY_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue();
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue();
  }

  action(l_popMemQueue, "q", desc="Pop off-chip memory queue") {
    memQueue_in.dequeue();
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Sender;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  //added by SS for dma
  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := machineID;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    requestNetwork_in.dequeue();
  }

  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(map_Address_to_DMA(address));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dw_writeDMAData, "dw", desc="DMA Write data to memory") {
    peek(requestNetwork_in, RequestMsg) {
      getDirectoryEntry(address).DataBlk.copyPartial(in_msg.DataBlk,
          addressOffset(in_msg.Address), in_msg.Len);
    }
  }

  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := machineID;
        //out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(address), in_msg.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:ACK;
      out_msg.Destination.add(map_Address_to_DMA(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

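  // z_stallAndWaitRequest parks a request on a per-address wait buffer while
  // the line is in a transient state; kd_wakeUpDependents releases those
  // stalled messages once the line settles.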
  action(z_stallAndWaitRequest, "z", desc="stall and wait the request queue") {
    stall_and_wait(requestNetwork_in, address);
  }

  action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
    requestNetwork_in.recycle();
  }

  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
    peek(requestNetwork_in, RequestMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Requestor);
    }
  }

  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:INV;
        out_msg.Sender := machineID;
        out_msg.Destination := getDirectoryEntry(address).Owner;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(map_Address_to_DMA(address));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(requestNetwork_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.Address;
      tbe.Len := in_msg.Len;
    }
  }

  action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    assert(is_valid(tbe));
    //getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, tbe.Offset, tbe.Len);
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk,
        addressOffset(tbe.PhysicalAddress), tbe.Len);
  }

  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Sender;
        //out_msg.DataBlk := in_msg.DataBlk;
        //out_msg.DataBlk.copyPartial(tbe.DataBlk, tbe.Offset, tbe.Len);
        out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }


  // TRANSITIONS
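
  // Summary of the flows implemented below (taken from the transitions
  // themselves):
  //   L2 fetch:         I --Fetch--> IM --Memory_Data--> M
  //   Clean eviction:   M --CleanReplacement--> I
  //   Dirty writeback:  M --Data--> MI --Memory_Ack--> I
  //   DMA read  (in I): I --DMA_READ--> ID --Memory_Data--> I
  //   DMA write (in I): I --DMA_WRITE--> ID_W --Memory_Ack--> I
  //   DMA read  (in M): M --DMA_READ--> M_DRD --Data--> M_DRDI --Memory_Ack--> I
  //   DMA write (in M): M --DMA_WRITE--> M_DWR --Data--> M_DWRI --Memory_Ack--> I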

  transition(I, Fetch, IM) {
    qf_queueMemoryFetchRequest;
    e_ownerIsRequestor;
    j_popIncomingRequestQueue;
  }

  transition(IM, Memory_Data, M) {
    d_sendData;
    l_popMemQueue;
    kd_wakeUpDependents;
  }

  //added by SS
  transition(M, CleanReplacement, I) {
    c_clearOwner;
    a_sendAck;
    k_popIncomingResponseQueue;
  }

  transition(M, Data, MI) {
    m_writeDataToMemory;
    qw_queueMemoryWBRequest;
    k_popIncomingResponseQueue;
  }

  transition(MI, Memory_Ack, I) {
    c_clearOwner;
    aa_sendAck;
    l_popMemQueue;
    kd_wakeUpDependents;
  }

  //added by SS for dma support
  transition(I, DMA_READ, ID) {
    qf_queueMemoryFetchRequestDMA;
    j_popIncomingRequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    l_popMemQueue;
    kd_wakeUpDependents;
  }

  transition(I, DMA_WRITE, ID_W) {
    dw_writeDMAData;
    qw_queueMemoryWBRequest_partial;
    j_popIncomingRequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    da_sendDMAAck;
    l_popMemQueue;
    kd_wakeUpDependents;
  }

  transition({ID, ID_W, M_DRDI, M_DWRI, IM, MI}, {Fetch, Data}) {
    z_stallAndWaitRequest;
  }

  transition({ID, ID_W, M_DRD, M_DRDI, M_DWR, M_DWRI, IM, MI}, {DMA_WRITE, DMA_READ}) {
    zz_recycleDMAQueue;
  }

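  // DMA access to a block in M: invalidate the current owner, wait for its
  // writeback data, forward it to the DMA controller (read) or merge the
  // partial write from the TBE (write), update memory, and end in I.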
  transition(M, DMA_READ, M_DRD) {
    inv_sendCacheInvalidate;
    j_popIncomingRequestQueue;
  }

  transition(M_DRD, Data, M_DRDI) {
    drp_sendDMAData;
    m_writeDataToMemory;
    qw_queueMemoryWBRequest;
    k_popIncomingResponseQueue;
  }

  transition(M_DRDI, Memory_Ack, I) {
    aa_sendAck;
    c_clearOwner;
    l_popMemQueue;
    kd_wakeUpDependents;
  }

  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    j_popIncomingRequestQueue;
  }

  transition(M_DWR, Data, M_DWRI) {
    m_writeDataToMemory;
    qw_queueMemoryWBRequest_partialTBE;
    k_popIncomingResponseQueue;
  }

  transition(M_DWRI, Memory_Ack, I) {
    dwt_writeDMADataFromTBE;
    aa_sendAck;
    c_clearOwner;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
    kd_wakeUpDependents;
  }
}