src/mem/protocol/MOESI_hammer-dir.sm
1 /*
2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
3 * Copyright (c) 2009 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * AMD's contributions to the MOESI hammer protocol do not constitute an
30 * endorsement of its similarity to any AMD products.
31 *
32 * Authors: Milo Martin
33 * Brad Beckmann
34 */
35
36 machine(Directory, "AMD Hammer-like protocol")
37 : DirectoryMemory * directory,
38 CacheMemory * probeFilter,
39 MemoryControl * memBuffer,
40 int memory_controller_latency = 2,
41 bool probe_filter_enabled = false,
42 bool full_bit_dir_enabled = false
43 {
44
45 MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false";
46 MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false";
47 //
48 // For a finite buffered network, note that the DMA response network only
49 // works on this relatively low-numbered (lower-priority) virtual network
50 // because the trigger queue decouples cache responses from DMA responses.
51 //
52 MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true";
53
54 MessageBuffer unblockToDir, network="From", virtual_network="5", ordered="false";
55 MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false";
56 MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", recycle_latency="1";
57 MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true";
58
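//
// Virtual network usage, as declared above:
//   vnet 0: incoming DMA requests (ordered)
//   vnet 1: outgoing DMA responses (ordered)
//   vnet 2: incoming cache requests
//   vnet 3: outgoing forwarded requests / probes
//   vnet 4: data and ack responses (both directions)
//   vnet 5: incoming unblocks and writeback data
//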
59 // STATES
60 enumeration(State, desc="Directory states", default="Directory_State_E") {
61 // Base states
62 NX, desc="Not Owner, probe filter entry exists, block in O at Owner";
63 NO, desc="Not Owner, probe filter entry exists, block in E/M at Owner";
64 S, desc="Data clean, probe filter entry exists pointing to the current owner";
65 O, desc="Data clean, probe filter entry exists";
66 E, desc="Exclusive Owner, no probe filter entry";
67
68 O_R, desc="Was data Owner, replacing probe filter entry";
69 S_R, desc="Was Not Owner or Sharer, replacing probe filter entry";
70 NO_R, desc="Was Not Owner or Sharer, replacing probe filter entry";
71
72 NO_B, "NO^B", desc="Not Owner, Blocked";
73 NO_B_X, "NO^B", desc="Not Owner, Blocked, next queued request GETX";
74 NO_B_S, "NO^B", desc="Not Owner, Blocked, next queued request GETS";
75 NO_B_S_W, "NO^B", desc="Not Owner, Blocked, forwarded merged GETS, waiting for responses";
76 O_B, "O^B", desc="Owner, Blocked";
77 NO_B_W, desc="Not Owner, Blocked, waiting for Dram";
78 O_B_W, desc="Owner, Blocked, waiting for Dram";
79 NO_W, desc="Not Owner, waiting for Dram";
80 O_W, desc="Owner, waiting for Dram";
81 NO_DW_B_W, desc="Not Owner, Dma Write waiting for Dram and cache responses";
82 NO_DR_B_W, desc="Not Owner, Dma Read waiting for Dram and cache responses";
83 NO_DR_B_D, desc="Not Owner, Dma Read waiting for cache responses including dirty data";
84 NO_DR_B, desc="Not Owner, Dma Read waiting for cache responses";
85 NO_DW_W, desc="Not Owner, Dma Write waiting for Dram";
86 O_DR_B_W, desc="Owner, Dma Read waiting for Dram and cache responses";
87 O_DR_B, desc="Owner, Dma Read waiting for cache responses";
88 WB, desc="Blocked on a writeback";
89 WB_O_W, desc="Blocked on memory write, will go to O";
90 WB_E_W, desc="Blocked on memory write, will go to E";
91 }
92
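//
// Transient state naming convention (from the descriptions above): a _B
// suffix means the directory is blocked waiting for an unblock message, _W
// means it is waiting on DRAM, _R means a probe filter entry is being
// replaced, and DR/DW mark outstanding DMA reads/writes.
//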
93 // Events
94 enumeration(Event, desc="Directory events") {
95 GETX, desc="A GETX arrives";
96 GETS, desc="A GETS arrives";
97 PUT, desc="A PUT arrives";
98 Unblock, desc="An unblock message arrives";
99 UnblockS, desc="An unblock message arrives, the unblocker is a sharer";
100 UnblockM, desc="An unblock message arrives, the unblocker is the new owner";
101 Writeback_Clean, desc="The final part of a PutX (no data)";
102 Writeback_Dirty, desc="The final part of a PutX (data)";
103 Writeback_Exclusive_Clean, desc="The final part of a PutX (no data, exclusive)";
104 Writeback_Exclusive_Dirty, desc="The final part of a PutX (data, exclusive)";
105
106 // Probe filter
107 Pf_Replacement, desc="probe filter replacement";
108
109 // DMA requests
110 DMA_READ, desc="A DMA Read memory request";
111 DMA_WRITE, desc="A DMA Write memory request";
112
113 // Memory Controller
114 Memory_Data, desc="Fetched data from memory arrives";
115 Memory_Ack, desc="Writeback Ack from memory arrives";
116
117 // Cache responses required to handle DMA
118 Ack, desc="Received an ack message";
119 Shared_Ack, desc="Received an ack message, responder has a shared copy";
120 Shared_Data, desc="Received a data message, responder has a shared copy";
121 Data, desc="Received a data message, responder had an owned or exclusive copy and gave it to us";
122 Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";
123
124 // Triggers
125 All_acks_and_shared_data, desc="Received shared data and message acks";
126 All_acks_and_owner_data, desc="Received shared data and message acks";
127 All_acks_and_data_no_sharers, desc="Received all acks and no other processor has a shared copy";
128 All_Unblocks, desc="Received all unblocks for a merged gets request";
129 }
130
131 // TYPES
132
133 // DirectoryEntry
134 structure(Entry, desc="...", interface="AbstractEntry") {
135 State DirectoryState, desc="Directory state";
136 DataBlock DataBlk, desc="data for the block";
137 }
138
139 // ProbeFilterEntry
140 structure(PfEntry, desc="...", interface="AbstractCacheEntry") {
141 State PfState, desc="Directory state";
142 MachineID Owner, desc="Owner node";
143 DataBlock DataBlk, desc="data for the block";
144 Set Sharers, desc="sharing vector for full bit directory";
145 }
146
147 // TBE entries for DMA requests
148 structure(TBE, desc="TBE entries for outstanding DMA requests") {
149 Address PhysicalAddress, desc="physical address";
150 State TBEState, desc="Transient State";
151 CoherenceResponseType ResponseType, desc="The type for the subsequent response message";
152 int Acks, default="0", desc="The number of acks that the waiting response represents";
153 int SilentAcks, default="0", desc="The number of silent acks associated with this transaction";
154 DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to merged with system memory";
155 DataBlock DataBlk, desc="The current view of system memory";
156 int Len, desc="...";
157 MachineID DmaRequestor, desc="DMA requestor";
158 NetDest GetSRequestors, desc="GETS merged requestors";
159 int NumPendingMsgs, desc="Number of pending acks/messages";
160 bool CacheDirty, default="false", desc="Indicates whether a cache has responded with dirty data";
161 bool Sharers, default="false", desc="Indicates whether a cache has indicated it is currently a sharer";
162 bool Owned, default="false", desc="Indicates whether a cache has indicated it is currently the owner";
163 }
164
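//
// Note on the counters above: NumPendingMsgs tracks responses the directory
// itself is still waiting for (DMA and probe filter replacement flows),
// while Acks/SilentAcks are passed back to the original cache requestor
// with the memory data (see d_sendData) so it knows how many probe
// responses to expect.
//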
165 external_type(TBETable) {
166 TBE lookup(Address);
167 void allocate(Address);
168 void deallocate(Address);
169 bool isPresent(Address);
170 }
171
172 void set_cache_entry(AbstractCacheEntry b);
173 void unset_cache_entry();
174 void set_tbe(TBE a);
175 void unset_tbe();
176
177 // ** OBJECTS **
178
179 Set fwd_set;
180
181 TBETable TBEs, template_hack="<Directory_TBE>";
182
183 Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
184 return static_cast(Entry, directory[addr]);
185 }
186
187 PfEntry getProbeFilterEntry(Address addr), return_by_pointer="yes" {
188 if(probe_filter_enabled) {
189 PfEntry pfEntry := static_cast(PfEntry, "pointer", probeFilter.lookup(addr));
190 return pfEntry;
191 }
192 return OOD;
193 }
194
195 State getState(TBE tbe, PfEntry pf_entry, Address addr) {
196 if (is_valid(tbe)) {
197 return tbe.TBEState;
198 } else {
199 if (probe_filter_enabled || full_bit_dir_enabled) {
200 if (is_valid(pf_entry)) {
201 assert(pf_entry.PfState == getDirectoryEntry(addr).DirectoryState);
202 } else {
203 assert(getDirectoryEntry(addr).DirectoryState == State:E);
204 }
205 }
206 return getDirectoryEntry(addr).DirectoryState;
207 }
208 }
209
210 void setState(TBE tbe, PfEntry pf_entry, Address addr, State state) {
211 if (is_valid(tbe)) {
212 tbe.TBEState := state;
213 }
214 if (probe_filter_enabled || full_bit_dir_enabled) {
215 if (is_valid(pf_entry)) {
216 pf_entry.PfState := state;
217 }
218 if (state == State:NX || state == State:NO || state == State:S || state == State:O) {
219 assert(is_valid(pf_entry));
220 }
221 }
222 if (state == State:E || state == State:NX || state == State:NO || state == State:S ||
223 state == State:O) {
224 assert(is_valid(tbe) == false);
225 }
226 getDirectoryEntry(addr).DirectoryState := state;
227 }
228
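//
// Invariants enforced by setState: when the probe filter or full-bit
// directory is enabled, the states NX, NO, S, and O require a valid probe
// filter entry, and none of the stable states (E, NX, NO, S, O) may have a
// TBE allocated.
//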
229 Event cache_request_to_event(CoherenceRequestType type) {
230 if (type == CoherenceRequestType:GETS) {
231 return Event:GETS;
232 } else if (type == CoherenceRequestType:GETX) {
233 return Event:GETX;
234 } else {
235 error("Invalid CoherenceRequestType");
236 }
237 }
238
239 MessageBuffer triggerQueue, ordered="true";
240
241 // ** OUT_PORTS **
242 out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
243 out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
244 out_port(responseNetwork_out, ResponseMsg, responseFromDir);
245 out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
246 out_port(triggerQueue_out, TriggerMsg, triggerQueue);
247
248 //
249 // Memory buffer for memory controller to DIMM communication
250 //
251 out_port(memQueue_out, MemoryMsg, memBuffer);
252
253 // ** IN_PORTS **
254
255 // Trigger Queue
256 in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
257 if (triggerQueue_in.isReady()) {
258 peek(triggerQueue_in, TriggerMsg) {
259 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
260 TBE tbe := TBEs[in_msg.Address];
261 if (in_msg.Type == TriggerType:ALL_ACKS) {
262 trigger(Event:All_acks_and_owner_data, in_msg.Address,
263 pf_entry, tbe);
264 } else if (in_msg.Type == TriggerType:ALL_ACKS_OWNER_EXISTS) {
265 trigger(Event:All_acks_and_shared_data, in_msg.Address,
266 pf_entry, tbe);
267 } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
268 trigger(Event:All_acks_and_data_no_sharers, in_msg.Address,
269 pf_entry, tbe);
270 } else if (in_msg.Type == TriggerType:ALL_UNBLOCKS) {
271 trigger(Event:All_Unblocks, in_msg.Address,
272 pf_entry, tbe);
273 } else {
274 error("Unexpected message");
275 }
276 }
277 }
278 }
279
280 in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
281 if (unblockNetwork_in.isReady()) {
282 peek(unblockNetwork_in, ResponseMsg) {
283 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
284 TBE tbe := TBEs[in_msg.Address];
285 if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
286 trigger(Event:Unblock, in_msg.Address, pf_entry, tbe);
287 } else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
288 trigger(Event:UnblockS, in_msg.Address, pf_entry, tbe);
289 } else if (in_msg.Type == CoherenceResponseType:UNBLOCKM) {
290 trigger(Event:UnblockM, in_msg.Address, pf_entry, tbe);
291 } else if (in_msg.Type == CoherenceResponseType:WB_CLEAN) {
292 trigger(Event:Writeback_Clean, in_msg.Address, pf_entry, tbe);
293 } else if (in_msg.Type == CoherenceResponseType:WB_DIRTY) {
294 trigger(Event:Writeback_Dirty, in_msg.Address, pf_entry, tbe);
295 } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_CLEAN) {
296 trigger(Event:Writeback_Exclusive_Clean, in_msg.Address,
297 pf_entry, tbe);
298 } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
299 trigger(Event:Writeback_Exclusive_Dirty, in_msg.Address,
300 pf_entry, tbe);
301 } else {
302 error("Invalid message");
303 }
304 }
305 }
306 }
307
308 // Response Network
309 in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
310 if (responseToDir_in.isReady()) {
311 peek(responseToDir_in, ResponseMsg) {
312 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
313 TBE tbe := TBEs[in_msg.Address];
314 if (in_msg.Type == CoherenceResponseType:ACK) {
315 trigger(Event:Ack, in_msg.Address, pf_entry, tbe);
316 } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
317 trigger(Event:Shared_Ack, in_msg.Address, pf_entry, tbe);
318 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
319 trigger(Event:Shared_Data, in_msg.Address, pf_entry, tbe);
320 } else if (in_msg.Type == CoherenceResponseType:DATA) {
321 trigger(Event:Data, in_msg.Address, pf_entry, tbe);
322 } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
323 trigger(Event:Exclusive_Data, in_msg.Address, pf_entry, tbe);
324 } else {
325 error("Unexpected message");
326 }
327 }
328 }
329 }
330
331 // off-chip memory request/response is done
332 in_port(memQueue_in, MemoryMsg, memBuffer, rank=2) {
333 if (memQueue_in.isReady()) {
334 peek(memQueue_in, MemoryMsg) {
335 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
336 TBE tbe := TBEs[in_msg.Address];
337 if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
338 trigger(Event:Memory_Data, in_msg.Address, pf_entry, tbe);
339 } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
340 trigger(Event:Memory_Ack, in_msg.Address, pf_entry, tbe);
341 } else {
342 DPRINTF(RubySlicc, "%d\n", in_msg.Type);
343 error("Invalid message");
344 }
345 }
346 }
347 }
348
349 in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
350 if (requestQueue_in.isReady()) {
351 peek(requestQueue_in, RequestMsg) {
352 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
353 TBE tbe := TBEs[in_msg.Address];
354 if (in_msg.Type == CoherenceRequestType:PUT) {
355 trigger(Event:PUT, in_msg.Address, pf_entry, tbe);
356 } else {
357 if (probe_filter_enabled || full_bit_dir_enabled) {
358 if (is_valid(pf_entry)) {
359 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
360 pf_entry, tbe);
361 } else {
362 if (probeFilter.cacheAvail(in_msg.Address)) {
363 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
364 pf_entry, tbe);
365 } else {
366 trigger(Event:Pf_Replacement,
367 probeFilter.cacheProbe(in_msg.Address),
368 getProbeFilterEntry(probeFilter.cacheProbe(in_msg.Address)),
369 TBEs[probeFilter.cacheProbe(in_msg.Address)]);
370 }
371 }
372 } else {
373 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
374 pf_entry, tbe);
375 }
376 }
377 }
378 }
379 }
380
381 in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
382 if (dmaRequestQueue_in.isReady()) {
383 peek(dmaRequestQueue_in, DMARequestMsg) {
384 PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
385 TBE tbe := TBEs[in_msg.LineAddress];
386 if (in_msg.Type == DMARequestType:READ) {
387 trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
388 } else if (in_msg.Type == DMARequestType:WRITE) {
389 trigger(Event:DMA_WRITE, in_msg.LineAddress, pf_entry, tbe);
390 } else {
391 error("Invalid message");
392 }
393 }
394 }
395 }
396
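//
// The rank arguments above order the in_ports: by SLICC convention,
// higher-ranked ports are polled first, so locally generated triggers and
// unblocks are consumed before cache responses, memory responses, new CPU
// requests, and finally DMA requests.
//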
397 // Actions
398
399 action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry" ) {
400 if (probe_filter_enabled || full_bit_dir_enabled) {
401 assert(is_valid(cache_entry));
402 probeFilter.setMRU(address);
403 }
404 }
405
406 action(auno_assertUnblockerNotOwner, "auno", desc="assert unblocker not owner") {
407 if (probe_filter_enabled || full_bit_dir_enabled) {
408 assert(is_valid(cache_entry));
409 peek(unblockNetwork_in, ResponseMsg) {
410 assert(cache_entry.Owner != in_msg.Sender);
411 if (full_bit_dir_enabled) {
412 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)) == false);
413 }
414 }
415 }
416 }
417
418 action(uo_updateOwnerIfPf, "uo", desc="update owner") {
419 if (probe_filter_enabled || full_bit_dir_enabled) {
420 assert(is_valid(cache_entry));
421 peek(unblockNetwork_in, ResponseMsg) {
422 cache_entry.Owner := in_msg.Sender;
423 if (full_bit_dir_enabled) {
424 cache_entry.Sharers.clear();
425 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
426 APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
427 DPRINTF(RubySlicc, "Sharers = %d\n", cache_entry.Sharers);
428 }
429 }
430 }
431 }
432
433 action(us_updateSharerIfFBD, "us", desc="update sharer if full-bit directory") {
434 if (full_bit_dir_enabled) {
435 assert(probeFilter.isTagPresent(address));
436 peek(unblockNetwork_in, ResponseMsg) {
437 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
438 }
439 }
440 }
441
442 action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
443 peek(requestQueue_in, RequestMsg) {
444 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
445 out_msg.Address := address;
446 out_msg.Type := CoherenceRequestType:WB_ACK;
447 out_msg.Requestor := in_msg.Requestor;
448 out_msg.Destination.add(in_msg.Requestor);
449 out_msg.MessageSize := MessageSizeType:Writeback_Control;
450 }
451 }
452 }
453
454 action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
455 peek(requestQueue_in, RequestMsg) {
456 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
457 out_msg.Address := address;
458 out_msg.Type := CoherenceRequestType:WB_NACK;
459 out_msg.Requestor := in_msg.Requestor;
460 out_msg.Destination.add(in_msg.Requestor);
461 out_msg.MessageSize := MessageSizeType:Writeback_Control;
462 }
463 }
464 }
465
466 action(pfa_probeFilterAllocate, "pfa", desc="Allocate ProbeFilterEntry") {
467 if (probe_filter_enabled || full_bit_dir_enabled) {
468 peek(requestQueue_in, RequestMsg) {
469 set_cache_entry(probeFilter.allocate(address, new PfEntry));
470 cache_entry.Owner := in_msg.Requestor;
471 }
472 }
473 }
474
475 action(pfd_probeFilterDeallocate, "pfd", desc="Deallocate ProbeFilterEntry") {
476 if (probe_filter_enabled || full_bit_dir_enabled) {
477 probeFilter.deallocate(address);
478 unset_cache_entry();
479 }
480 }
481
482 action(ppfd_possibleProbeFilterDeallocate, "ppfd", desc="Deallocate ProbeFilterEntry if present") {
483 if ((probe_filter_enabled || full_bit_dir_enabled) && is_valid(cache_entry)) {
484 probeFilter.deallocate(address);
485 unset_cache_entry();
486 }
487 }
488
489 action(v_allocateTBE, "v", desc="Allocate TBE") {
490 peek(requestQueue_in, RequestMsg) {
491 TBEs.allocate(address);
492 set_tbe(TBEs[address]);
493 tbe.PhysicalAddress := address;
494 tbe.ResponseType := CoherenceResponseType:NULL;
495 }
496 }
497
498 action(vd_allocateDmaRequestInTBE, "vd", desc="Allocate TBE and record the DMA request") {
499 peek(dmaRequestQueue_in, DMARequestMsg) {
500 TBEs.allocate(address);
501 set_tbe(TBEs[address]);
502 tbe.DmaDataBlk := in_msg.DataBlk;
503 tbe.PhysicalAddress := in_msg.PhysicalAddress;
504 tbe.Len := in_msg.Len;
505 tbe.DmaRequestor := in_msg.Requestor;
506 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
507 //
508 // One ack for each last-level cache
509 //
510 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
511 //
512 // Assume initially that the caches store a clean copy and that memory
513 // will provide the data
514 //
515 tbe.CacheDirty := false;
516 }
517 }
518
519 action(pa_setPendingMsgsToAll, "pa", desc="set pending msgs to all") {
520 assert(is_valid(tbe));
521 if (full_bit_dir_enabled) {
522 assert(is_valid(cache_entry));
523 tbe.NumPendingMsgs := cache_entry.Sharers.count();
524 } else {
525 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
526 }
527 }
528
529 action(po_setPendingMsgsToOne, "po", desc="set pending msgs to one") {
530 assert(is_valid(tbe));
531 tbe.NumPendingMsgs := 1;
532 }
533
534 action(w_deallocateTBE, "w", desc="Deallocate TBE") {
535 TBEs.deallocate(address);
536 unset_tbe();
537 }
538
539 action(sa_setAcksToOne, "sa", desc="Forwarded request, set the ack amount to one") {
540 assert(is_valid(tbe));
541 peek(requestQueue_in, RequestMsg) {
542 if (full_bit_dir_enabled) {
543 assert(is_valid(cache_entry));
544 //
545 // If we are using the full-bit directory and no sharers exist beyond
546 // the requestor, then we must set the ack number to all, not one
547 //
548 fwd_set := cache_entry.Sharers;
549 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
550 if (fwd_set.count() > 0) {
551 tbe.Acks := 1;
552 tbe.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
553 tbe.SilentAcks := tbe.SilentAcks - 1;
554 } else {
555 tbe.Acks := machineCount(MachineType:L1Cache);
556 tbe.SilentAcks := 0;
557 }
558 } else {
559 tbe.Acks := 1;
560 }
561 }
562 }
563
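//
// Ack-count convention: tbe.Acks is handed back to the requestor with the
// memory data (see d_sendData). It is 1 when the request was forwarded as a
// directed probe, and machineCount(L1Cache) when no probes beyond the
// requestor are needed. Under the full-bit directory, tbe.SilentAcks
// accounts for the caches that were never probed because the forward set
// was narrowed to the actual sharers.
//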
564 action(saa_setAcksToAllIfPF, "saa", desc="Non-forwarded request, set the ack amount to all") {
565 assert(is_valid(tbe));
566 if (probe_filter_enabled || full_bit_dir_enabled) {
567 tbe.Acks := machineCount(MachineType:L1Cache);
568 tbe.SilentAcks := 0;
569 } else {
570 tbe.Acks := 1;
571 }
572 }
573
574 action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
575 peek(responseToDir_in, ResponseMsg) {
576 assert(is_valid(tbe));
577 assert(in_msg.Acks > 0);
578 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
579 //
580 // Note that cache data responses will have an ack count of 2. However,
581 // directory DMA requests must wait for acks from all LLC caches, so
582 // only decrement by 1.
583 //
584 if ((in_msg.Type == CoherenceResponseType:DATA_SHARED) ||
585 (in_msg.Type == CoherenceResponseType:DATA) ||
586 (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE)) {
587 tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
588 } else {
589 tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
590 }
591 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
592 }
593 }
594
595 action(mu_decrementNumberOfUnblocks, "mu", desc="Decrement the number of messages for which we're waiting") {
596 peek(unblockNetwork_in, ResponseMsg) {
597 assert(is_valid(tbe));
598 assert(in_msg.Type == CoherenceResponseType:UNBLOCKS);
599 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
600 tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
601 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
602 }
603 }
604
605 action(n_popResponseQueue, "n", desc="Pop response queue") {
606 responseToDir_in.dequeue();
607 }
608
609 action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
610 assert(is_valid(tbe));
611 if (tbe.NumPendingMsgs == 0) {
612 enqueue(triggerQueue_out, TriggerMsg) {
613 out_msg.Address := address;
614 if (tbe.Sharers) {
615 if (tbe.Owned) {
616 out_msg.Type := TriggerType:ALL_ACKS_OWNER_EXISTS;
617 } else {
618 out_msg.Type := TriggerType:ALL_ACKS;
619 }
620 } else {
621 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
622 }
623 }
624 }
625 }
626
627 action(os_checkForMergedGetSCompletion, "os", desc="Check for merged GETS completion") {
628 assert(is_valid(tbe));
629 if (tbe.NumPendingMsgs == 0) {
630 enqueue(triggerQueue_out, TriggerMsg) {
631 out_msg.Address := address;
632 out_msg.Type := TriggerType:ALL_UNBLOCKS;
633 }
634 }
635 }
636
637 action(sp_setPendingMsgsToMergedSharers, "sp", desc="Set pending messages to waiting sharers") {
638 assert(is_valid(tbe));
639 tbe.NumPendingMsgs := tbe.GetSRequestors.count();
640 }
641
642 action(spa_setPendingAcksToZeroIfPF, "spa", desc="if probe filter, no need to wait for acks") {
643 if (probe_filter_enabled || full_bit_dir_enabled) {
644 assert(is_valid(tbe));
645 tbe.NumPendingMsgs := 0;
646 }
647 }
648
649 action(sc_signalCompletionIfPF, "sc", desc="indicate that we should skip waiting for cpu acks") {
650 assert(is_valid(tbe));
651 if (tbe.NumPendingMsgs == 0) {
652 assert(probe_filter_enabled || full_bit_dir_enabled);
653 enqueue(triggerQueue_out, TriggerMsg) {
654 out_msg.Address := address;
655 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
656 }
657 }
658 }
659
660 action(d_sendData, "d", desc="Send data to requestor") {
661 peek(memQueue_in, MemoryMsg) {
662 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
663 assert(is_valid(tbe));
664 out_msg.Address := address;
665 out_msg.Type := tbe.ResponseType;
666 out_msg.Sender := machineID;
667 out_msg.Destination.add(in_msg.OriginalRequestorMachId);
668 out_msg.DataBlk := in_msg.DataBlk;
669 DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
670 out_msg.Dirty := false; // By definition, the block is now clean
671 out_msg.Acks := tbe.Acks;
672 out_msg.SilentAcks := tbe.SilentAcks;
673 DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
674 assert(out_msg.Acks > 0);
675 out_msg.MessageSize := MessageSizeType:Response_Data;
676 }
677 }
678 }
679
680 action(dr_sendDmaData, "dr", desc="Send Data to DMA controller from memory") {
681 peek(memQueue_in, MemoryMsg) {
682 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
683 assert(is_valid(tbe));
684 out_msg.PhysicalAddress := address;
685 out_msg.LineAddress := address;
686 out_msg.Type := DMAResponseType:DATA;
687 //
688 // we send the entire data block and rely on the dma controller to
689 // split it up if need be
690 //
691 out_msg.DataBlk := in_msg.DataBlk;
692 out_msg.Destination.add(tbe.DmaRequestor);
693 out_msg.MessageSize := MessageSizeType:Response_Data;
694 }
695 }
696 }
697
698 action(dt_sendDmaDataFromTbe, "dt", desc="Send Data to DMA controller from tbe") {
699 peek(triggerQueue_in, TriggerMsg) {
700 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
701 assert(is_valid(tbe));
702 out_msg.PhysicalAddress := address;
703 out_msg.LineAddress := address;
704 out_msg.Type := DMAResponseType:DATA;
705 //
706 // we send the entire data block and rely on the dma controller to
707 // split it up if need be
708 //
709 out_msg.DataBlk := tbe.DataBlk;
710 out_msg.Destination.add(tbe.DmaRequestor);
711 out_msg.MessageSize := MessageSizeType:Response_Data;
712 }
713 }
714 }
715
716 action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
717 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
718 assert(is_valid(tbe));
719 out_msg.PhysicalAddress := address;
720 out_msg.LineAddress := address;
721 out_msg.Type := DMAResponseType:ACK;
722 out_msg.Destination.add(tbe.DmaRequestor);
723 out_msg.MessageSize := MessageSizeType:Writeback_Control;
724 }
725 }
726
727 action(rx_recordExclusiveInTBE, "rx", desc="Record Exclusive in TBE") {
728 peek(requestQueue_in, RequestMsg) {
729 assert(is_valid(tbe));
730 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
731 }
732 }
733
734 action(r_recordDataInTBE, "rt", desc="Record Data in TBE") {
735 peek(requestQueue_in, RequestMsg) {
736 assert(is_valid(tbe));
737 if (full_bit_dir_enabled) {
738 fwd_set := cache_entry.Sharers;
739 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
740 if (fwd_set.count() > 0) {
741 tbe.ResponseType := CoherenceResponseType:DATA;
742 } else {
743 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
744 }
745 } else {
746 tbe.ResponseType := CoherenceResponseType:DATA;
747 }
748 }
749 }
750
751 action(rs_recordGetSRequestor, "rs", desc="Record GETS requestor in TBE") {
752 peek(requestQueue_in, RequestMsg) {
753 assert(is_valid(tbe));
754 tbe.GetSRequestors.add(in_msg.Requestor);
755 }
756 }
757
758 action(r_setSharerBit, "r", desc="We saw other sharers") {
759 assert(is_valid(tbe));
760 tbe.Sharers := true;
761 }
762
763 action(so_setOwnerBit, "so", desc="We saw the owner") {
764 assert(is_valid(tbe));
765 tbe.Sharers := true;
766 tbe.Owned := true;
767 }
768
769 action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
770 peek(requestQueue_in, RequestMsg) {
771 enqueue(memQueue_out, MemoryMsg, latency="1") {
772 out_msg.Address := address;
773 out_msg.Type := MemoryRequestType:MEMORY_READ;
774 out_msg.Sender := machineID;
775 out_msg.OriginalRequestorMachId := in_msg.Requestor;
776 out_msg.MessageSize := in_msg.MessageSize;
777 out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
778 DPRINTF(RubySlicc, "%s\n", out_msg);
779 }
780 }
781 }
782
783 action(qd_queueMemoryRequestFromDmaRead, "qd", desc="Queue off-chip fetch request") {
784 peek(dmaRequestQueue_in, DMARequestMsg) {
785 enqueue(memQueue_out, MemoryMsg, latency="1") {
786 out_msg.Address := address;
787 out_msg.Type := MemoryRequestType:MEMORY_READ;
788 out_msg.Sender := machineID;
789 out_msg.OriginalRequestorMachId := in_msg.Requestor;
790 out_msg.MessageSize := in_msg.MessageSize;
791 out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
792 DPRINTF(RubySlicc, "%s\n", out_msg);
793 }
794 }
795 }
796
797 action(fn_forwardRequestIfNecessary, "fn", desc="Forward requests if necessary") {
798 assert(is_valid(tbe));
799 if ((machineCount(MachineType:L1Cache) > 1) && (tbe.Acks <= 1)) {
800 if (full_bit_dir_enabled) {
801 assert(is_valid(cache_entry));
802 peek(requestQueue_in, RequestMsg) {
803 fwd_set := cache_entry.Sharers;
804 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
805 if (fwd_set.count() > 0) {
806 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
807 out_msg.Address := address;
808 out_msg.Type := in_msg.Type;
809 out_msg.Requestor := in_msg.Requestor;
810 out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
811 out_msg.MessageSize := MessageSizeType:Multicast_Control;
812 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
813 out_msg.ForwardRequestTime := get_time();
814 assert(tbe.SilentAcks > 0);
815 out_msg.SilentAcks := tbe.SilentAcks;
816 }
817 }
818 }
819 } else {
820 peek(requestQueue_in, RequestMsg) {
821 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
822 out_msg.Address := address;
823 out_msg.Type := in_msg.Type;
824 out_msg.Requestor := in_msg.Requestor;
825 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
826 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
827 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
828 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
829 out_msg.ForwardRequestTime := get_time();
830 }
831 }
832 }
833 }
834 }
835
836 action(ia_invalidateAllRequest, "ia", desc="invalidate all copies") {
837 if (machineCount(MachineType:L1Cache) > 1) {
838 if (full_bit_dir_enabled) {
839 assert(cache_entry.Sharers.count() > 0);
840 peek(requestQueue_in, RequestMsg) {
841 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
842 out_msg.Address := address;
843 out_msg.Type := CoherenceRequestType:INV;
844 out_msg.Requestor := machineID;
845 out_msg.Destination.setNetDest(MachineType:L1Cache, cache_entry.Sharers);
846 out_msg.MessageSize := MessageSizeType:Multicast_Control;
847 }
848 }
849 } else {
850 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
851 out_msg.Address := address;
852 out_msg.Type := CoherenceRequestType:INV;
853 out_msg.Requestor := machineID;
854 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
855 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
856 }
857 }
858 }
859 }
860
861 action(io_invalidateOwnerRequest, "io", desc="invalidate the owner's copy") {
862 if (machineCount(MachineType:L1Cache) > 1) {
863 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
864 assert(is_valid(cache_entry));
865 out_msg.Address := address;
866 out_msg.Type := CoherenceRequestType:INV;
867 out_msg.Requestor := machineID;
868 out_msg.Destination.add(cache_entry.Owner);
869 out_msg.MessageSize := MessageSizeType:Request_Control;
870 out_msg.DirectedProbe := true;
871 }
872 }
873 }
874
875 action(fb_forwardRequestBcast, "fb", desc="Forward requests to all nodes") {
876 if (machineCount(MachineType:L1Cache) > 1) {
877 peek(requestQueue_in, RequestMsg) {
878 if (full_bit_dir_enabled) {
879 fwd_set := cache_entry.Sharers;
880 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
881 if (fwd_set.count() > 0) {
882 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
883 out_msg.Address := address;
884 out_msg.Type := in_msg.Type;
885 out_msg.Requestor := in_msg.Requestor;
886 out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
887 out_msg.MessageSize := MessageSizeType:Multicast_Control;
888 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
889 out_msg.ForwardRequestTime := get_time();
890 out_msg.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
891 out_msg.SilentAcks := out_msg.SilentAcks - 1;
892 }
893 }
894 } else {
895 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
896 out_msg.Address := address;
897 out_msg.Type := in_msg.Type;
898 out_msg.Requestor := in_msg.Requestor;
899 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
900 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
901 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
902 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
903 out_msg.ForwardRequestTime := get_time();
904 }
905 }
906 }
907 }
908 }
909
910 action(fr_forwardMergeReadRequestsToOwner, "frr", desc="Forward coalesced read request to owner") {
911 assert(machineCount(MachineType:L1Cache) > 1);
912 //
913 // Fixme! The unblock network should not stall on the forward network. Add a trigger queue to
914 // decouple the two.
915 //
916 peek(unblockNetwork_in, ResponseMsg) {
917 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
918 assert(is_valid(tbe));
919 out_msg.Address := address;
920 out_msg.Type := CoherenceRequestType:MERGED_GETS;
921 out_msg.MergedRequestors := tbe.GetSRequestors;
922 if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
923 out_msg.Destination.add(in_msg.CurOwner);
924 } else {
925 out_msg.Destination.add(in_msg.Sender);
926 }
927 out_msg.MessageSize := MessageSizeType:Request_Control;
928 out_msg.InitialRequestTime := zero_time();
929 out_msg.ForwardRequestTime := get_time();
930 }
931 }
932 }
933
934 action(fc_forwardRequestConditionalOwner, "fc", desc="Forward request to one or more nodes") {
935 assert(machineCount(MachineType:L1Cache) > 1);
936 if (probe_filter_enabled || full_bit_dir_enabled) {
937 peek(requestQueue_in, RequestMsg) {
938 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
939 assert(is_valid(cache_entry));
940 out_msg.Address := address;
941 out_msg.Type := in_msg.Type;
942 out_msg.Requestor := in_msg.Requestor;
943 out_msg.Destination.add(cache_entry.Owner);
944 out_msg.MessageSize := MessageSizeType:Request_Control;
945 out_msg.DirectedProbe := true;
946 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
947 out_msg.ForwardRequestTime := get_time();
948 }
949 }
950 } else {
951 peek(requestQueue_in, RequestMsg) {
952 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
953 out_msg.Address := address;
954 out_msg.Type := in_msg.Type;
955 out_msg.Requestor := in_msg.Requestor;
956 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
957 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
958 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
959 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
960 out_msg.ForwardRequestTime := get_time();
961 }
962 }
963 }
964 }
965
966 action(f_forwardWriteFromDma, "fw", desc="Broadcast GETX on behalf of the DMA write") {
967 assert(is_valid(tbe));
968 if (tbe.NumPendingMsgs > 0) {
969 peek(dmaRequestQueue_in, DMARequestMsg) {
970 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
971 out_msg.Address := address;
972 out_msg.Type := CoherenceRequestType:GETX;
973 //
974 // Send to all L1 caches, since the requestor is the memory controller
975 // itself
976 //
977 out_msg.Requestor := machineID;
978 out_msg.Destination.broadcast(MachineType:L1Cache);
979 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
980 }
981 }
982 }
983 }
984
985 action(f_forwardReadFromDma, "fr", desc="Broadcast GETS on behalf of the DMA read") {
986 assert(is_valid(tbe));
987 if (tbe.NumPendingMsgs > 0) {
988 peek(dmaRequestQueue_in, DMARequestMsg) {
989 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
990 out_msg.Address := address;
991 out_msg.Type := CoherenceRequestType:GETS;
992 //
993 // Send to all L1 caches, since the requestor is the memory controller
994 // itself
995 //
996 out_msg.Requestor := machineID;
997 out_msg.Destination.broadcast(MachineType:L1Cache);
998 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
999 }
1000 }
1001 }
1002 }
1003
1004 action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
1005 requestQueue_in.dequeue();
1006 }
1007
1008 action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
1009 peek(unblockNetwork_in, ResponseMsg) {
1010 APPEND_TRANSITION_COMMENT(in_msg.Sender);
1011 }
1012 unblockNetwork_in.dequeue();
1013 }
1014
1015 action(k_wakeUpDependents, "k", desc="wake-up dependents") {
1016 wake_up_dependents(address);
1017 }
1018
1019 action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
1020 memQueue_in.dequeue();
1021 }
1022
1023 action(g_popTriggerQueue, "g", desc="Pop trigger queue") {
1024 triggerQueue_in.dequeue();
1025 }
1026
1027 action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
1028 dmaRequestQueue_in.dequeue();
1029 }
1030
1031 action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait on the dma request queue") {
1032 peek(dmaRequestQueue_in, DMARequestMsg) {
1033 APPEND_TRANSITION_COMMENT(in_msg.Requestor);
1034 }
1035 stall_and_wait(dmaRequestQueue_in, address);
1036 }
1037
1038 action(r_recordMemoryData, "rd", desc="record data from memory to TBE") {
1039 peek(memQueue_in, MemoryMsg) {
1040 assert(is_valid(tbe));
1041 if (tbe.CacheDirty == false) {
1042 tbe.DataBlk := in_msg.DataBlk;
1043 }
1044 }
1045 }
1046
1047 action(r_recordCacheData, "rc", desc="record data from cache response to TBE") {
1048 peek(responseToDir_in, ResponseMsg) {
1049 assert(is_valid(tbe));
1050 tbe.CacheDirty := true;
1051 tbe.DataBlk := in_msg.DataBlk;
1052 }
1053 }
1054
1055 action(wr_writeResponseDataToMemory, "wr", desc="Write response data to memory") {
1056 peek(responseToDir_in, ResponseMsg) {
1057 getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
1058 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1059 in_msg.Address, in_msg.DataBlk);
1060 }
1061 }
1062
1063 action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
1064 peek(unblockNetwork_in, ResponseMsg) {
1065 assert(in_msg.Dirty);
1066 assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
1067 getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
1068 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1069 in_msg.Address, in_msg.DataBlk);
1070 }
1071 }
1072
1073 action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
1074 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1075 assert(is_valid(tbe));
1076 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
1077 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1078 getDirectoryEntry(address).DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
1079 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1080 }
1081
1082 action(wdt_writeDataFromTBE, "wdt", desc="Write data to memory from TBE") {
1083 assert(is_valid(tbe));
1084 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1085 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
1086 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1087 }
1088
1089 action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
1090 assert(is_valid(tbe));
1091 assert(tbe.CacheDirty);
1092 }
1093
1094 action(ano_assertNotOwner, "ano", desc="Assert that the requestor is not the current owner") {
1095 if (probe_filter_enabled || full_bit_dir_enabled) {
1096 peek(requestQueue_in, RequestMsg) {
1097 assert(is_valid(cache_entry));
1098 assert(cache_entry.Owner != in_msg.Requestor);
1099 }
1100 }
1101 }
1102
1103 action(ans_assertNotSharer, "ans", desc="Assert that the requestor is not a current sharer") {
1104 if (full_bit_dir_enabled) {
1105 peek(requestQueue_in, RequestMsg) {
1106 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Requestor)) == false);
1107 }
1108 }
1109 }
1110
1111 action(rs_removeSharer, "s", desc="remove current sharer") {
1112 if (full_bit_dir_enabled) {
1113 peek(unblockNetwork_in, ResponseMsg) {
1114 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)));
1115 cache_entry.Sharers.remove(machineIDToNodeID(in_msg.Sender));
1116 }
1117 }
1118 }
1119
1120 action(cs_clearSharers, "cs", desc="clear current sharers") {
1121 if (full_bit_dir_enabled) {
1122 peek(requestQueue_in, RequestMsg) {
1123 cache_entry.Sharers.clear();
1124 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Requestor));
1125 }
1126 }
1127 }
1128
1129 action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
1130 peek(unblockNetwork_in, ResponseMsg) {
1131 enqueue(memQueue_out, MemoryMsg, latency="1") {
1132 out_msg.Address := address;
1133 out_msg.Type := MemoryRequestType:MEMORY_WB;
1134 DPRINTF(RubySlicc, "%s\n", out_msg);
1135 }
1136 }
1137 }
1138
1139 action(ld_queueMemoryDmaWrite, "ld", desc="Write DMA data to memory") {
1140 enqueue(memQueue_out, MemoryMsg, latency="1") {
1141 assert(is_valid(tbe));
1142 out_msg.Address := address;
1143 out_msg.Type := MemoryRequestType:MEMORY_WB;
1144 // first, initialize the data blk to the current version of system memory
1145 out_msg.DataBlk := tbe.DataBlk;
1146 // then add the dma write data
1147 out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
1148 DPRINTF(RubySlicc, "%s\n", out_msg);
1149 }
1150 }
1151
1152 action(ll_checkIncomingWriteback, "\l", desc="Check PUTX/PUTO response message") {
1153 peek(unblockNetwork_in, ResponseMsg) {
1154 assert(in_msg.Dirty == false);
1155 assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
1156
1157 // NOTE: The following check would not be valid in a real
1158 // implementation. We include the data in the "dataless"
1159 // message so we can assert the clean data matches the datablock
1160 // in memory
1161 assert(getDirectoryEntry(address).DataBlk == in_msg.DataBlk);
1162 }
1163 }
1164
1165 action(z_stallAndWaitRequest, "z", desc="Stall and wait on the request queue") {
1166 peek(requestQueue_in, RequestMsg) {
1167 APPEND_TRANSITION_COMMENT(in_msg.Requestor);
1168 }
1169 stall_and_wait(requestQueue_in, address);
1170 }
1171
1172 // TRANSITIONS
1173
1174 // Transitions out of E state
1175 transition(E, GETX, NO_B_W) {
1176 pfa_probeFilterAllocate;
1177 v_allocateTBE;
1178 rx_recordExclusiveInTBE;
1179 saa_setAcksToAllIfPF;
1180 qf_queueMemoryFetchRequest;
1181 fn_forwardRequestIfNecessary;
1182 i_popIncomingRequestQueue;
1183 }
1184
1185 transition(E, GETS, NO_B_W) {
1186 pfa_probeFilterAllocate;
1187 v_allocateTBE;
1188 rx_recordExclusiveInTBE;
1189 saa_setAcksToAllIfPF;
1190 qf_queueMemoryFetchRequest;
1191 fn_forwardRequestIfNecessary;
1192 i_popIncomingRequestQueue;
1193 }
1194
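//
// In E no cache holds the block and there is no probe filter entry, so a
// GETS/GETX allocates a new probe filter entry for the requestor, fetches
// the block from memory, and (when running without a probe filter) also
// broadcasts the request so the requestor can collect acks from the other
// caches.
//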
1195 transition(E, DMA_READ, NO_DR_B_W) {
1196 vd_allocateDmaRequestInTBE;
1197 qd_queueMemoryRequestFromDmaRead;
1198 spa_setPendingAcksToZeroIfPF;
1199 f_forwardReadFromDma;
1200 p_popDmaRequestQueue;
1201 }
1202
1203 transition(E, DMA_WRITE, NO_DW_B_W) {
1204 vd_allocateDmaRequestInTBE;
1205 spa_setPendingAcksToZeroIfPF;
1206 sc_signalCompletionIfPF;
1207 f_forwardWriteFromDma;
1208 p_popDmaRequestQueue;
1209 }
1210
1211 // Transitions out of O state
1212 transition(O, GETX, NO_B_W) {
1213 r_setMRU;
1214 v_allocateTBE;
1215 r_recordDataInTBE;
1216 sa_setAcksToOne;
1217 qf_queueMemoryFetchRequest;
1218 fb_forwardRequestBcast;
1219 cs_clearSharers;
1220 i_popIncomingRequestQueue;
1221 }
1222
1223 // This transition is dumb: if a shared copy exists on-chip, then that copy
1224 // should provide the data, not slow off-chip DRAM. The problem is that the
1225 // current caches don't provide data in the S state
1226 transition(O, GETS, O_B_W) {
1227 r_setMRU;
1228 v_allocateTBE;
1229 r_recordDataInTBE;
1230 saa_setAcksToAllIfPF;
1231 qf_queueMemoryFetchRequest;
1232 fn_forwardRequestIfNecessary;
1233 i_popIncomingRequestQueue;
1234 }
1235
1236 transition(O, DMA_READ, O_DR_B_W) {
1237 vd_allocateDmaRequestInTBE;
1238 spa_setPendingAcksToZeroIfPF;
1239 qd_queueMemoryRequestFromDmaRead;
1240 f_forwardReadFromDma;
1241 p_popDmaRequestQueue;
1242 }
1243
1244 transition(O, Pf_Replacement, O_R) {
1245 v_allocateTBE;
1246 pa_setPendingMsgsToAll;
1247 ia_invalidateAllRequest;
1248 pfd_probeFilterDeallocate;
1249 }
1250
1251 transition(S, Pf_Replacement, S_R) {
1252 v_allocateTBE;
1253 pa_setPendingMsgsToAll;
1254 ia_invalidateAllRequest;
1255 pfd_probeFilterDeallocate;
1256 }
1257
1258 transition(NO, Pf_Replacement, NO_R) {
1259 v_allocateTBE;
1260 po_setPendingMsgsToOne;
1261 io_invalidateOwnerRequest;
1262 pfd_probeFilterDeallocate;
1263 }
1264
1265 transition(NX, Pf_Replacement, NO_R) {
1266 v_allocateTBE;
1267 pa_setPendingMsgsToAll;
1268 ia_invalidateAllRequest;
1269 pfd_probeFilterDeallocate;
1270 }
1271
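//
// Probe filter replacement: when a request misses in the probe filter and
// no way is available, requestQueue_in retriggers on the victim address
// (Pf_Replacement above). The victim's cached copies are invalidated, its
// entry is deallocated, and the directory waits in O_R/S_R/NO_R for the
// acks (and any dirty data) before returning to E and waking the stalled
// request.
//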
1272 transition({O, S, NO, NX}, DMA_WRITE, NO_DW_B_W) {
1273 vd_allocateDmaRequestInTBE;
1274 f_forwardWriteFromDma;
1275 p_popDmaRequestQueue;
1276 }
1277
1278 // Transitions out of NO state
1279 transition(NX, GETX, NO_B) {
1280 r_setMRU;
1281 fb_forwardRequestBcast;
1282 cs_clearSharers;
1283 i_popIncomingRequestQueue;
1284 }
1285
1286 // Transitions out of NO state
1287 transition(NO, GETX, NO_B) {
1288 r_setMRU;
1289 ano_assertNotOwner;
1290 fc_forwardRequestConditionalOwner;
1291 cs_clearSharers;
1292 i_popIncomingRequestQueue;
1293 }
1294
1295 transition(S, GETX, NO_B) {
1296 r_setMRU;
1297 fb_forwardRequestBcast;
1298 cs_clearSharers;
1299 i_popIncomingRequestQueue;
1300 }
1301
1302 transition(S, GETS, NO_B) {
1303 r_setMRU;
1304 ano_assertNotOwner;
1305 fb_forwardRequestBcast;
1306 i_popIncomingRequestQueue;
1307 }
1308
1309 transition(NO, GETS, NO_B) {
1310 r_setMRU;
1311 ano_assertNotOwner;
1312 ans_assertNotSharer;
1313 fc_forwardRequestConditionalOwner;
1314 i_popIncomingRequestQueue;
1315 }
1316
1317 transition(NX, GETS, NO_B) {
1318 r_setMRU;
1319 ano_assertNotOwner;
1320 fc_forwardRequestConditionalOwner;
1321 i_popIncomingRequestQueue;
1322 }
1323
1324 transition({NO, NX, S}, PUT, WB) {
1325 //
1326 // note that the PUT requestor may not be the current owner if an invalidate
1327 // raced with PUT
1328 //
1329 a_sendWriteBackAck;
1330 i_popIncomingRequestQueue;
1331 }
1332
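//
// Writeback handshake: a PUT is acknowledged with WB_ACK and the directory
// moves to WB; the cache then sends the actual writeback (WB_CLEAN,
// WB_DIRTY, or the exclusive variants) on the unblock network, handled by
// the WB transitions below. If the directory believes it owns the block
// (O or E), the PUT is nacked instead.
//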
1333 transition({NO, NX, S}, DMA_READ, NO_DR_B_D) {
1334 vd_allocateDmaRequestInTBE;
1335 f_forwardReadFromDma;
1336 p_popDmaRequestQueue;
1337 }
1338
1339 // Nack PUT requests when races cause us to believe we own the data
1340 transition({O, E}, PUT) {
1341 b_sendWriteBackNack;
1342 i_popIncomingRequestQueue;
1343 }
1344
1345 // Blocked transient states
1346 transition({NO_B_X, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
1347 NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
1348 NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R},
1349 {GETS, GETX, PUT, Pf_Replacement}) {
1350 z_stallAndWaitRequest;
1351 }
1352
1353 transition(NO_B, GETX, NO_B_X) {
1354 z_stallAndWaitRequest;
1355 }
1356
1357 transition(NO_B, {PUT, Pf_Replacement}) {
1358 z_stallAndWaitRequest;
1359 }
1360
1361 transition(NO_B_S, {GETX, PUT, Pf_Replacement}) {
1362 z_stallAndWaitRequest;
1363 }
1364
1365 transition({NO_B_X, NO_B, NO_B_S, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
1366 NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
1367 NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R},
1368 {DMA_READ, DMA_WRITE}) {
1369 zd_stallAndWaitDMARequest;
1370 }
1371
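//
// Requests and DMA requests that cannot be handled in the transient states
// above are parked with stall_and_wait and are rescheduled by
// k_wakeUpDependents once the line leaves the blocking state.
//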
1372 // merge GETS into one response
1373 transition(NO_B, GETS, NO_B_S) {
1374 v_allocateTBE;
1375 rs_recordGetSRequestor;
1376 i_popIncomingRequestQueue;
1377 }
1378
1379 transition(NO_B_S, GETS) {
1380 rs_recordGetSRequestor;
1381 i_popIncomingRequestQueue;
1382 }
1383
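//
// Merged GETS: while blocked on an outstanding GETS (NO_B), additional GETS
// requests for the same line are recorded in tbe.GetSRequestors rather than
// stalled. On the unblock, a single MERGED_GETS is forwarded to the current
// owner and the directory waits in NO_B_S_W for one UnblockS per merged
// requestor before settling in NX.
//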
1384 // unblock responses
1385 transition({NO_B, NO_B_X}, UnblockS, NX) {
1386 us_updateSharerIfFBD;
1387 k_wakeUpDependents;
1388 j_popIncomingUnblockQueue;
1389 }
1390
1391 transition({NO_B, NO_B_X}, UnblockM, NO) {
1392 uo_updateOwnerIfPf;
1393 us_updateSharerIfFBD;
1394 k_wakeUpDependents;
1395 j_popIncomingUnblockQueue;
1396 }
1397
1398 transition(NO_B_S, UnblockS, NO_B_S_W) {
1399 us_updateSharerIfFBD;
1400 fr_forwardMergeReadRequestsToOwner;
1401 sp_setPendingMsgsToMergedSharers;
1402 j_popIncomingUnblockQueue;
1403 }
1404
1405 transition(NO_B_S, UnblockM, NO_B_S_W) {
1406 uo_updateOwnerIfPf;
1407 fr_forwardMergeReadRequestsToOwner;
1408 sp_setPendingMsgsToMergedSharers;
1409 j_popIncomingUnblockQueue;
1410 }
1411
1412 transition(NO_B_S_W, UnblockS) {
1413 us_updateSharerIfFBD;
1414 mu_decrementNumberOfUnblocks;
1415 os_checkForMergedGetSCompletion;
1416 j_popIncomingUnblockQueue;
1417 }
1418
1419 transition(NO_B_S_W, All_Unblocks, NX) {
1420 w_deallocateTBE;
1421 k_wakeUpDependents;
1422 g_popTriggerQueue;
1423 }
1424
1425 transition(O_B, UnblockS, O) {
1426 us_updateSharerIfFBD;
1427 k_wakeUpDependents;
1428 j_popIncomingUnblockQueue;
1429 }
1430
1431 transition(O_B, UnblockM, NO) {
1432 us_updateSharerIfFBD;
1433 uo_updateOwnerIfPf;
1434 k_wakeUpDependents;
1435 j_popIncomingUnblockQueue;
1436 }
1437
1438 transition(NO_B_W, Memory_Data, NO_B) {
1439 d_sendData;
1440 w_deallocateTBE;
1441 l_popMemQueue;
1442 }
1443
1444 transition(NO_DR_B_W, Memory_Data, NO_DR_B) {
1445 r_recordMemoryData;
1446 o_checkForCompletion;
1447 l_popMemQueue;
1448 }
1449
1450 transition(O_DR_B_W, Memory_Data, O_DR_B) {
1451 r_recordMemoryData;
1452 dr_sendDmaData;
1453 o_checkForCompletion;
1454 l_popMemQueue;
1455 }
1456
1457 transition({NO_DR_B, O_DR_B, NO_DR_B_D, NO_DW_B_W}, Ack) {
1458 m_decrementNumberOfMessages;
1459 o_checkForCompletion;
1460 n_popResponseQueue;
1461 }
1462
1463 transition({O_R, S_R, NO_R}, Ack) {
1464 m_decrementNumberOfMessages;
1465 o_checkForCompletion;
1466 n_popResponseQueue;
1467 }
1468
1469 transition(S_R, Data) {
1470 wr_writeResponseDataToMemory;
1471 m_decrementNumberOfMessages;
1472 o_checkForCompletion;
1473 n_popResponseQueue;
1474 }
1475
1476 transition(NO_R, {Data, Exclusive_Data}) {
1477 wr_writeResponseDataToMemory;
1478 m_decrementNumberOfMessages;
1479 o_checkForCompletion;
1480 n_popResponseQueue;
1481 }
1482
1483 transition({O_R, S_R, NO_R}, All_acks_and_data_no_sharers, E) {
1484 w_deallocateTBE;
1485 k_wakeUpDependents;
1486 g_popTriggerQueue;
1487 }
1488
1489 transition({NO_DR_B_W, O_DR_B_W}, Ack) {
1490 m_decrementNumberOfMessages;
1491 n_popResponseQueue;
1492 }
1493
1494 transition(NO_DR_B_W, Shared_Ack) {
1495 m_decrementNumberOfMessages;
1496 r_setSharerBit;
1497 n_popResponseQueue;
1498 }
1499
1500 transition(O_DR_B, Shared_Ack) {
1501 m_decrementNumberOfMessages;
1502 so_setOwnerBit;
1503 o_checkForCompletion;
1504 n_popResponseQueue;
1505 }
1506
1507 transition(O_DR_B_W, Shared_Ack) {
1508 m_decrementNumberOfMessages;
1509 so_setOwnerBit;
1510 n_popResponseQueue;
1511 }
1512
1513 transition({NO_DR_B, NO_DR_B_D}, Shared_Ack) {
1514 m_decrementNumberOfMessages;
1515 r_setSharerBit;
1516 o_checkForCompletion;
1517 n_popResponseQueue;
1518 }
1519
1520 transition(NO_DR_B_W, Shared_Data) {
1521 r_recordCacheData;
1522 m_decrementNumberOfMessages;
1523 so_setOwnerBit;
1524 o_checkForCompletion;
1525 n_popResponseQueue;
1526 }
1527
1528 transition({NO_DR_B, NO_DR_B_D}, Shared_Data) {
1529 r_recordCacheData;
1530 m_decrementNumberOfMessages;
1531 so_setOwnerBit;
1532 o_checkForCompletion;
1533 n_popResponseQueue;
1534 }
1535
1536 transition(NO_DR_B_W, {Exclusive_Data, Data}) {
1537 r_recordCacheData;
1538 m_decrementNumberOfMessages;
1539 n_popResponseQueue;
1540 }
1541
1542 transition({NO_DR_B, NO_DR_B_D, NO_DW_B_W}, {Exclusive_Data, Data}) {
1543 r_recordCacheData;
1544 m_decrementNumberOfMessages;
1545 o_checkForCompletion;
1546 n_popResponseQueue;
1547 }
1548
1549 transition(NO_DR_B, All_acks_and_owner_data, O) {
1550 //
1551 // Note that the DMA consistency model allows us to send the DMA device
1552 // a response as soon as we receive valid data and prior to receiving
1553 // all acks. However, to simplify the protocol we wait for all acks.
1554 //
1555 dt_sendDmaDataFromTbe;
1556 wdt_writeDataFromTBE;
1557 w_deallocateTBE;
1558 k_wakeUpDependents;
1559 g_popTriggerQueue;
1560 }
1561
1562 transition(NO_DR_B, All_acks_and_shared_data, S) {
1563 //
1564 // Note that the DMA consistency model allows us to send the DMA device
1565 // a response as soon as we receive valid data and prior to receiving
1566 // all acks. However, to simplify the protocol we wait for all acks.
1567 //
1568 dt_sendDmaDataFromTbe;
1569 wdt_writeDataFromTBE;
1570 w_deallocateTBE;
1571 k_wakeUpDependents;
1572 g_popTriggerQueue;
1573 }
1574
1575 transition(NO_DR_B_D, All_acks_and_owner_data, O) {
1576 //
1577 // Note that the DMA consistency model allows us to send the DMA device
1578 // a response as soon as we receive valid data and prior to receiving
1579 // all acks. However, to simplify the protocol we wait for all acks.
1580 //
1581 dt_sendDmaDataFromTbe;
1582 wdt_writeDataFromTBE;
1583 w_deallocateTBE;
1584 k_wakeUpDependents;
1585 g_popTriggerQueue;
1586 }
1587
1588 transition(NO_DR_B_D, All_acks_and_shared_data, S) {
1589 //
1590 // Note that the DMA consistency model allows us to send the DMA device
1591 // a response as soon as we receive valid data and prior to receiving
1592 // all acks. However, to simplify the protocol we wait for all acks.
1593 //
1594 dt_sendDmaDataFromTbe;
1595 wdt_writeDataFromTBE;
1596 w_deallocateTBE;
1597 k_wakeUpDependents;
1598 g_popTriggerQueue;
1599 }
1600
1601 transition(O_DR_B, All_acks_and_owner_data, O) {
1602 wdt_writeDataFromTBE;
1603 w_deallocateTBE;
1604 k_wakeUpDependents;
1605 g_popTriggerQueue;
1606 }
1607
1608 transition(O_DR_B, All_acks_and_data_no_sharers, E) {
1609 wdt_writeDataFromTBE;
1610 w_deallocateTBE;
1611 pfd_probeFilterDeallocate;
1612 k_wakeUpDependents;
1613 g_popTriggerQueue;
1614 }
1615
1616 transition(NO_DR_B, All_acks_and_data_no_sharers, E) {
1617 //
1618 // Note that the DMA consistency model allows us to send the DMA device
1619 // a response as soon as we receive valid data and prior to receiving
1620 // all acks. However, to simplify the protocol we wait for all acks.
1621 //
1622 dt_sendDmaDataFromTbe;
1623 wdt_writeDataFromTBE;
1624 w_deallocateTBE;
1625 ppfd_possibleProbeFilterDeallocate;
1626 k_wakeUpDependents;
1627 g_popTriggerQueue;
1628 }
1629
1630 transition(NO_DR_B_D, All_acks_and_data_no_sharers, E) {
1631 a_assertCacheData;
1632 //
1633 // Note that the DMA consistency model allows us to send the DMA device
1634 // a response as soon as we receive valid data and prior to receiving
1635 // all acks. However, to simplify the protocol we wait for all acks.
1636 //
1637 dt_sendDmaDataFromTbe;
1638 wdt_writeDataFromTBE;
1639 w_deallocateTBE;
1640 ppfd_possibleProbeFilterDeallocate;
1641 k_wakeUpDependents;
1642 g_popTriggerQueue;
1643 }
1644
1645 transition(NO_DW_B_W, All_acks_and_data_no_sharers, NO_DW_W) {
1646 dwt_writeDmaDataFromTBE;
1647 ld_queueMemoryDmaWrite;
1648 g_popTriggerQueue;
1649 }
1650
1651 transition(NO_DW_W, Memory_Ack, E) {
1652 da_sendDmaAck;
1653 w_deallocateTBE;
1654 ppfd_possibleProbeFilterDeallocate;
1655 k_wakeUpDependents;
1656 l_popMemQueue;
1657 }
1658
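//
// DMA write completion: the directory broadcasts a GETX on behalf of the
// DMA controller, collects data and acks from the caches, merges the
// (possibly partial) DMA data into the latest copy of the block, queues the
// writeback to memory, and only acks the DMA controller once memory
// acknowledges the write (NO_DW_W, Memory_Ack above).
//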
1659 transition(O_B_W, Memory_Data, O_B) {
1660 d_sendData;
1661 w_deallocateTBE;
1662 l_popMemQueue;
1663 }
1664
1665 transition(NO_B_W, UnblockM, NO_W) {
1666 uo_updateOwnerIfPf;
1667 j_popIncomingUnblockQueue;
1668 }
1669
1670 transition(NO_B_W, UnblockS, NO_W) {
1671 us_updateSharerIfFBD;
1672 j_popIncomingUnblockQueue;
1673 }
1674
1675 transition(O_B_W, UnblockS, O_W) {
1676 us_updateSharerIfFBD;
1677 j_popIncomingUnblockQueue;
1678 }
1679
1680 transition(NO_W, Memory_Data, NO) {
1681 w_deallocateTBE;
1682 k_wakeUpDependents;
1683 l_popMemQueue;
1684 }
1685
1686 transition(O_W, Memory_Data, O) {
1687 w_deallocateTBE;
1688 k_wakeUpDependents;
1689 l_popMemQueue;
1690 }
1691
1692 // WB State Transitions
1693 transition(WB, Writeback_Dirty, WB_O_W) {
1694 l_writeDataToMemory;
1695 rs_removeSharer;
1696 l_queueMemoryWBRequest;
1697 j_popIncomingUnblockQueue;
1698 }
1699
1700 transition(WB, Writeback_Exclusive_Dirty, WB_E_W) {
1701 l_writeDataToMemory;
1702 rs_removeSharer;
1703 l_queueMemoryWBRequest;
1704 j_popIncomingUnblockQueue;
1705 }
1706
1707 transition(WB_E_W, Memory_Ack, E) {
1708 pfd_probeFilterDeallocate;
1709 k_wakeUpDependents;
1710 l_popMemQueue;
1711 }
1712
1713 transition(WB_O_W, Memory_Ack, O) {
1714 k_wakeUpDependents;
1715 l_popMemQueue;
1716 }
1717
1718 transition(WB, Writeback_Clean, O) {
1719 ll_checkIncomingWriteback;
1720 rs_removeSharer;
1721 k_wakeUpDependents;
1722 j_popIncomingUnblockQueue;
1723 }
1724
1725 transition(WB, Writeback_Exclusive_Clean, E) {
1726 ll_checkIncomingWriteback;
1727 rs_removeSharer;
1728 pfd_probeFilterDeallocate;
1729 k_wakeUpDependents;
1730 j_popIncomingUnblockQueue;
1731 }
1732
1733 transition(WB, Unblock, NX) {
1734 auno_assertUnblockerNotOwner;
1735 k_wakeUpDependents;
1736 j_popIncomingUnblockQueue;
1737 }
1738 }