baffe24120a060d02030af86ce6dff3b83bdebfb
[gem5.git] / src / mem / protocol / MI_example-dir.sm
1
machine(Directory, "Directory protocol")
 : DirectoryMemory * directory,
   MemoryControl * memBuffer,
   int directory_latency = 12
{

  // Outbound virtual networks: forwarded requests/invalidations to caches
  // (vnet 3), data responses to caches (vnet 4), and responses back to the
  // DMA controller (vnet 1, ordered).
  MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
  MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
  MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true", vnet_type="response";

  // Inbound virtual networks: cache requests (vnet 2) and DMA requests
  // (vnet 0); both ordered.
  MessageBuffer requestToDir, network="From", virtual_network="2", ordered="true", vnet_type="request";
  MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
  // STATES
  // Directory states.  AccessPermission reflects what the directory copy of
  // the block is good for: in I memory holds the only copy (Read_Write); in
  // M some cache owns the block, so the directory copy is stale (Invalid).
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, AccessPermission:Read_Write, desc="Invalid";
    M, AccessPermission:Invalid, desc="Modified";

    // Waiting for the owner's PUTX after sending an invalidation on behalf
    // of a DMA transfer.
    M_DRD, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA read";
    M_DWR, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA write";

    // Waiting for the memory writeback ack before completing the DMA.
    M_DWRI, AccessPermission:Busy, desc="Intermediate state M_DWR-->I";
    M_DRDI, AccessPermission:Busy, desc="Intermediate state M_DRD-->I";

    // Waiting on the memory controller for a cache or DMA request.
    IM, AccessPermission:Busy, desc="Intermediate state I-->M";
    MI, AccessPermission:Busy, desc="Intermediate state M-->I";
    ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
    ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
  }
32
  // Events
  enumeration(Event, desc="Directory events") {
    // processor requests
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    // Same incoming message type as PUTX, but the requestor does not match
    // the recorded owner (see requestQueue_in); it is nacked.
    PUTX_NotOwner, desc="A PUTX arrives";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
  }
49
  // TYPES

  // DirectoryEntry: per-block directory state kept in DirectoryMemory.
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
  }

  // TBE entries for DMA requests
  // Also reused for cache PUTX writebacks (see v_allocateTBEFromRequestNet).
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Data to be written (DMA write only)";
    int Len, desc="...";
    MachineID DmaRequestor, desc="DMA requestor";
  }

  // External table of transaction buffer entries, keyed by line address.
  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }
75
  // ** OBJECTS **
  TBETable TBEs, template_hack="<Directory_TBE>";

  // Bind/unbind the implicit 'tbe' variable used inside actions.
  void set_tbe(TBE b);
  void unset_tbe();

  // Fetch this block's directory entry, downcast from AbstractEntry.
  Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
    return static_cast(Entry, directory[addr]);
  }
85
  // Current state of a block: a live TBE's transient state takes priority
  // over the stable directory state; unknown blocks default to I.
  State getState(TBE tbe, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (directory.isPresent(addr)) {
      return getDirectoryEntry(addr).DirectoryState;
    } else {
      return State:I;
    }
  }
95
  // Record a state change in both the TBE (if any) and the directory entry.
  // Sanity-checks the sharer/owner invariants on entry to M and I, and
  // releases the directory entry when the block returns to I.
  void setState(TBE tbe, Address addr, State state) {

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (directory.isPresent(addr)) {

      // M requires exactly one owner and no sharers.
      if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      getDirectoryEntry(addr).DirectoryState := state;

      // I requires no owner and no sharers; free the entry.
      if (state == State:I)  {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
        directory.invalidateBlock(addr);
      }
    }
  }
118
  // Map the block's current (transient or stable) state to a permission for
  // functional accesses.
  // NOTE(review): unlike setAccessPermission, this path does not guard the
  // directory lookup with directory.isPresent(addr) — presumably callers only
  // query mapped addresses; verify against the functional-access path.
  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return Directory_State_to_permission(tbe.TBEState);
    }

    return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
  }

  // Push the permission derived from 'state' into the directory entry.
  void setAccessPermission(Address addr, State state) {
    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
    }
  }
133
  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  // NOTE(review): declared with ResponseMsg although requestToDir carries
  // RequestMsg; apparently harmless since this port is only a recycling
  // handle — confirm against SLICC port-type checking.
  out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);

  //added by SS
  // Requests to the off-chip memory controller.
  out_port(memQueue_out, MemoryMsg, memBuffer);
  // ** IN_PORTS **

  // DMA controller requests: classify into DMA_READ / DMA_WRITE events,
  // triggered on the line-aligned address.
  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        TBE tbe := TBEs[in_msg.LineAddress];
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          trigger(Event:DMA_WRITE, in_msg.LineAddress, tbe);
        } else {
          error("Invalid message");
        }
      }
    }
  }
158
  // Cache requests: GETS/GETX map directly to events; PUTX is split into
  // PUTX (requestor is the recorded owner) vs PUTX_NotOwner (stale
  // writeback, to be nacked).
  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady()) {
      peek(requestQueue_in, RequestMsg) {
        TBE tbe := TBEs[in_msg.Address];
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address, tbe);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          if (getDirectoryEntry(in_msg.Address).Owner.isElement(in_msg.Requestor)) {
            trigger(Event:PUTX, in_msg.Address, tbe);
          } else {
            trigger(Event:PUTX_NotOwner, in_msg.Address, tbe);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }
179
  //added by SS
  // off-chip memory request/response is done
  // Memory controller completions: fetched data (Memory_Data) or writeback
  // acknowledgements (Memory_Ack).
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        TBE tbe := TBEs[in_msg.Address];
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address, tbe);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address, tbe);
        } else {
          DPRINTF(RubySlicc,"%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }
197
  // Actions

  // Ack a PUTX directly in response to the incoming cache request; charged
  // the configured directory latency.
  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  // Ack a writeback after memory has acknowledged it; the original
  // requestor is recovered from the memory response message.
  action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.OriginalRequestorMachId;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  // Nack a PUTX that did not come from the recorded owner.
  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
235
  // Drop the owner record (block is leaving M).
  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  // Forward fetched memory data to the cache that requested it.
  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  // Complete a DMA read with data fetched from memory; the DMA requestor
  // was recorded in the TBE when the request arrived.
  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        assert(is_valid(tbe));
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        out_msg.DataBlk := in_msg.DataBlk;   // we send the entire data block and rely on the dma controller to split it up if need be
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  // Complete a DMA read with data carried by the owner's incoming PUTX
  // (avoids a round trip to memory).
  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        assert(is_valid(tbe));
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        out_msg.DataBlk := in_msg.DataBlk;   // we send the entire data block and rely on the dma controller to split it up if need be
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  // Complete a DMA write with an ACK to the recorded DMA requestor.
  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
      assert(is_valid(tbe));
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(tbe.DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  // Record the requesting cache as the (sole) new owner.
  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
    peek(requestQueue_in, RequestMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Requestor);
    }
  }
300
  // Forward a GETX/GETS to the current owner, which will supply the data
  // cache-to-cache; owner and requestor are logged in the transition trace.
  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      APPEND_TRANSITION_COMMENT("Own: ");
      APPEND_TRANSITION_COMMENT(getDirectoryEntry(in_msg.Address).Owner);
      APPEND_TRANSITION_COMMENT("Req: ");
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination := getDirectoryEntry(in_msg.Address).Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  // Invalidate the owner's copy on behalf of a DMA transfer; the owner will
  // respond with a PUTX carrying the dirty data.
  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := machineID;
        out_msg.Destination := getDirectoryEntry(in_msg.PhysicalAddress).Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    dmaRequestQueue_in.dequeue();
  }
336
  // Copy PUTX data into the directory's copy of the block.
  action(l_writeDataToMemory, "pl", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      // assert(in_msg.Dirty);
      // assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      //getDirectoryEntry(in_msg.Address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
    }
  }

  // Merge the (possibly sub-line) DMA write data held in the TBE into the
  // directory's copy of the block.
  action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    assert(is_valid(tbe));
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
  }

  // Allocate a TBE for a DMA write, capturing data, address, length and
  // requestor from the incoming DMA message.
  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.PhysicalAddress;
      tbe.Len := in_msg.Len;
      tbe.DmaRequestor := in_msg.Requestor;
    }
  }

  // Allocate a TBE for a DMA read; only the requestor is needed to route
  // the eventual data response.
  action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DmaRequestor := in_msg.Requestor;
    }
  }

  // Allocate a TBE for a cache PUTX, buffering the writeback data until the
  // memory ack arrives.
  action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  // Stall handling: requeue the message at the tail for a later retry.
  action(z_recycleRequestQueue, "z", desc="recycle request queue") {
    requestQueue_in.recycle();
  }

  action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
    dmaRequestQueue_in.recycle();
  }

  // Issue a memory read on behalf of a cache GETX; the requestor is carried
  // through so the eventual Memory_Data response can be routed back.
  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        DPRINTF(RubySlicc,"%s\n", out_msg);
      }
    }
  }

  // Issue a memory read on behalf of a DMA read; no original requestor is
  // recorded here — the DMA requestor lives in the TBE instead.
  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        //out_msg.OriginalRequestorMachId := machineID;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc,"%s\n", out_msg);
      }
    }
  }

  // Issue a (possibly sub-line) memory writeback directly from an incoming
  // DMA write message.
  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        //out_msg.OriginalRequestorMachId := machineID;
        //out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.PhysicalAddress), in_msg.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc,"%s\n", out_msg);
      }
    }
  }

  // Issue a (possibly sub-line) memory writeback using the DMA data buffered
  // in the TBE, triggered by the owner's PUTX.
  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        // get incoming data
        // out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc,"%s\n", out_msg);
      }
    }
  }

  // Issue a full-line memory writeback carrying the PUTX data.
  action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc,"%s\n", out_msg);
      }
    }
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  // Merge the DMA data buffered in the TBE into the directory's copy of the
  // block (sub-line write via copyPartial).
  action(w_writeDataToMemoryFromTBE, "\w", desc="Write date to directory memory from TBE") {
    //getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
    assert(is_valid(tbe));
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk,
                                                   addressOffset(tbe.PhysicalAddress),
                                                   tbe.Len);
  }
484
  // TRANSITIONS

  // Stall cache requests while a DMA transfer is mid-flight on this block.
  transition({M_DRD, M_DWR, M_DWRI, M_DRDI}, GETX) {
    z_recycleRequestQueue;
  }

  // Stall all cache requests while waiting on the memory controller.
  transition({IM, MI, ID, ID_W}, {GETX, GETS, PUTX, PUTX_NotOwner} ) {
    z_recycleRequestQueue;
  }

  // Only one outstanding DMA transfer per block: stall new DMA requests.
  transition({IM, MI, ID, ID_W}, {DMA_READ, DMA_WRITE} ) {
    y_recycleDMARequestQueue;
  }


  // Cache GETX on an idle block: fetch from memory, record the new owner.
  transition(I, GETX, IM) {
    //d_sendData;
    qf_queueMemoryFetchRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  transition(IM, Memory_Data, M) {
    d_sendData;
    //e_ownerIsRequestor;
    l_popMemQueue;
  }


  // DMA read of an idle block: serve it from memory.
  transition(I, DMA_READ, ID) {
    //dr_sendDMAData;
    r_allocateTbeForDmaRead;
    qf_queueMemoryFetchRequestDMA;
    p_popIncomingDMARequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    //p_popIncomingDMARequestQueue;
    w_deallocateTBE;
    l_popMemQueue;
  }



  // DMA write of an idle block: write straight through to memory.
  transition(I, DMA_WRITE, ID_W) {
    v_allocateTBE;
    qw_queueMemoryWBRequest_partial;
    p_popIncomingDMARequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    dwt_writeDMADataFromTBE;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  // DMA read of an owned block: invalidate the owner and wait for its PUTX.
  transition(M, DMA_READ, M_DRD) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  // Owner's PUTX arrives: its data both answers the DMA read and is written
  // back to memory.
  transition(M_DRD, PUTX, M_DRDI) {
    l_writeDataToMemory;
    drp_sendDMAData;
    c_clearOwner;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(M_DRDI, Memory_Ack, I) {
    l_sendWriteBackAck;
    w_deallocateTBE;
    l_popMemQueue;
  }


  // DMA write of an owned block: invalidate the owner and wait for its PUTX.
  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  // Owner's PUTX arrives: merge the buffered DMA data over it and write the
  // result back to memory.
  transition(M_DWR, PUTX, M_DWRI) {
    l_writeDataToMemory;
    qw_queueMemoryWBRequest_partialTBE;
    c_clearOwner;
    i_popIncomingRequestQueue;
  }

  transition(M_DWRI, Memory_Ack, I) {
    w_writeDataToMemoryFromTBE;
    l_sendWriteBackAck;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  // Ownership transfer: forward the GETX to the old owner (cache-to-cache
  // transfer) and record the new one.
  transition(M, GETX, M) {
    f_forwardRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  // Owner writes back voluntarily: buffer the data in a TBE until memory acks.
  transition(M, PUTX, MI) {
    l_writeDataToMemory;
    c_clearOwner;
    v_allocateTBEFromRequestNet;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(MI, Memory_Ack, I) {
    w_writeDataToMemoryFromTBE;
    l_sendWriteBackAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  // Stale writebacks (requestor is no longer the owner) are nacked.
  transition(M, PUTX_NotOwner, M) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(I, PUTX_NotOwner, I) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

}