// Directory controller state machine (gem5 Ruby / SLICC).
// Parameters:
//   directory_latency      - enqueue latency (cycles) for requests forwarded
//                            to caches (see forwardNetwork_out enqueues below)
//   dma_select_low_bit /
//   dma_select_num_bits    - address bit-field used by mapAddressToRange to
//                            pick the destination DMA controller
2 machine(Directory, "Directory protocol")
3 : int directory_latency,
4 int dma_select_low_bit,
5 int dma_select_num_bits
// Outgoing ("To") networks: forwarded coherence requests (vnet 2, unordered),
// data/ack responses to caches (vnet 1, unordered), and DMA responses
// (vnet 4, ordered).
8 MessageBuffer forwardFromDir, network="To", virtual_network="2", ordered="false";
9 MessageBuffer responseFromDir, network="To", virtual_network="1", ordered="false";
10 MessageBuffer dmaRequestFromDir, network="To", virtual_network="4", ordered="true";
// Incoming ("From") networks: cache requests (vnet 0) and DMA requests
// (vnet 5); both ordered so requests are observed in arrival order.
12 MessageBuffer requestToDir, network="From", virtual_network="0", ordered="true";
13 MessageBuffer dmaRequestToDir, network="From", virtual_network="5", ordered="true";
// Directory states. Default is I (invalid / memory-owned).
// NOTE(review): the stable-state declarations (I, M) used throughout the
// transitions below fall outside this excerpt (original lines 17-20) —
// confirm against the full file.
16 enumeration(State, desc="Directory states", default="Directory_State_I") {
// Blocked waiting for the owner's PUTX after invalidating for a DMA access:
21 M_DRD, desc="Blocked on an invalidation for a DMA read";
22 M_DWR, desc="Blocked on an invalidation for a DMA write";
// Waiting for the memory write ack that completes a DMA write after PUTX:
24 M_DWRI, desc="Intermediate state M_DWR-->I";
// Transient states for ordinary cache requests (waiting on memory):
26 IM, desc="Intermediate state I-->M";
27 MI, desc="Intermediate state M-->I";
// Transient states for DMA accesses that arrive while in I:
28 ID, desc="Intermediate state for DMA_READ when in I";
29 ID_W, desc="Intermediate state for DMA_WRITE when in I";
// Events that trigger transitions; raised by the in_port logic below.
33 enumeration(Event, desc="Directory events") {
// Cache-initiated requests (from requestQueue_in):
35 GETX, desc="A GETX arrives";
36 GETS, desc="A GETS arrives";
37 PUTX, desc="A PUTX arrives";
// PUTX whose sender is not the recorded owner (stale writeback) — nacked.
38 PUTX_NotOwner, desc="A PUTX arrives";
// DMA-initiated requests (from dmaRequestQueue_in):
41 DMA_READ, desc="A DMA Read memory request";
42 DMA_WRITE, desc="A DMA Write memory request";
// Completions from the off-chip memory controller (from memQueue_in):
45 Memory_Data, desc="Fetched data from memory arrives";
46 Memory_Ack, desc="Writeback Ack from memory arrives";
// Per-block directory entry. Owner is a NetDest set even though this
// protocol allows at most one owner (see the count() asserts in setState).
52 structure(Entry, desc="...") {
53 State DirectoryState, desc="Directory state";
54 DataBlk DataBlk, desc="data for the block";
55 NetDest Sharers, desc="Sharers for this block";
56 NetDest Owner, desc="Owner of this block";
// Interface to the C++-implemented directory storage (declared externally).
59 external_type(DirectoryMemory) {
60 Entry lookup(Address);
61 bool isPresent(Address);
62 void invalidateBlock(Address);
// C++-implemented memory controller; usable as both an in_port source
// (completions) and an out_port sink (requests).
65 external_type(MemoryControl, inport="yes", outport="yes") {
70 // TBE entries for DMA requests
// One TBE tracks each in-flight DMA transaction (and the PUTX-writeback
// case, see v_allocateTBEFromRequestNet).
// NOTE(review): actions below read TBEs[address].Len (dwt_writeDMADataFromTBE,
// qw_queueMemoryWBRequest_partialTBE) but no Len field is visible in this
// excerpt — presumably declared in the elided lines (orig. 75-77); confirm.
71 structure(TBE, desc="TBE entries for outstanding DMA requests") {
72 Address PhysicalAddress, desc="physical address";
73 State TBEState, desc="Transient State";
74 DataBlock DataBlk, desc="Data to be written (DMA write only)";
// Externally-implemented table of TBEs, keyed by block address.
78 external_type(TBETable) {
80 void allocate(Address);
81 void deallocate(Address);
82 bool isPresent(Address);
// Controller members, constructed by factory calls into RubySystem using
// names supplied through the machine's configuration (m_cfg).
86 DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory_name"])';
88 MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_controller_name"])';
// template_hack binds the generic TBETable to this machine's TBE structure.
90 TBETable TBEs, template_hack="<Directory_TBE>";
// Current state of a block: an outstanding TBE (transient DMA state) takes
// priority over the stable directory entry.
// NOTE(review): the fallback return for an address present in neither
// structure lies outside this excerpt (orig. lines 97-101) — presumably
// returns State:I; confirm against the full file.
92 State getState(Address addr) {
93 if (TBEs.isPresent(addr)) {
94 return TBEs[addr].TBEState;
95 } else if (directory.isPresent(addr)) {
96 return directory[addr].DirectoryState;
// Record a new state, mirroring getState: update the TBE if one exists,
// and always keep the directory entry in sync. Sanity asserts enforce the
// MI invariants on stable states before the write.
102 void setState(Address addr, State state) {
104 if (TBEs.isPresent(addr)) {
105 TBEs[addr].TBEState := state;
108 if (directory.isPresent(addr)) {
// I: nobody may hold the block; M: exactly one owner and no sharers.
110 if (state == State:I) {
111 assert(directory[addr].Owner.count() == 0);
112 assert(directory[addr].Sharers.count() == 0);
113 } else if (state == State:M) {
114 assert(directory[addr].Owner.count() == 1);
115 assert(directory[addr].Sharers.count() == 0);
118 directory[addr].DirectoryState := state;
// Output ports onto the "To" networks and to the memory controller.
123 out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
124 out_port(responseNetwork_out, ResponseMsg, responseFromDir);
// NOTE(review): declared with ResponseMsg although requestToDir carries
// RequestMsg on the in_port side; used only for recycling, so the message
// type is never constructed here — confirm this matches upstream intent.
125 out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
126 out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaRequestFromDir);
129 out_port(memQueue_out, MemoryMsg, memBuffer);
// Incoming DMA requests: map READ/WRITE message types onto the
// corresponding events, keyed by the request's line address.
132 in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
133 if (dmaRequestQueue_in.isReady()) {
134 peek(dmaRequestQueue_in, DMARequestMsg) {
135 if (in_msg.Type == DMARequestType:READ) {
136 trigger(Event:DMA_READ, in_msg.LineAddress);
137 } else if (in_msg.Type == DMARequestType:WRITE) {
138 trigger(Event:DMA_WRITE, in_msg.LineAddress);
// Any other DMA message type is a protocol error.
140 error("Invalid message");
// Incoming cache requests. PUTX is split into two events depending on
// whether the sender matches the recorded owner, so stale writebacks can
// be nacked instead of overwriting memory.
146 in_port(requestQueue_in, RequestMsg, requestToDir) {
147 if (requestQueue_in.isReady()) {
148 peek(requestQueue_in, RequestMsg) {
149 if (in_msg.Type == CoherenceRequestType:GETS) {
150 trigger(Event:GETS, in_msg.Address);
151 } else if (in_msg.Type == CoherenceRequestType:GETX) {
152 trigger(Event:GETX, in_msg.Address);
153 } else if (in_msg.Type == CoherenceRequestType:PUTX) {
154 if (directory[in_msg.Address].Owner.isElement(in_msg.Requestor)) {
155 trigger(Event:PUTX, in_msg.Address);
157 trigger(Event:PUTX_NotOwner, in_msg.Address);
// Any other coherence request type is a protocol error.
160 error("Invalid message");
167 // off-chip memory request/response is done
// Completions from the memory controller: read data becomes Memory_Data,
// writeback completion becomes Memory_Ack.
168 in_port(memQueue_in, MemoryMsg, memBuffer) {
169 if (memQueue_in.isReady()) {
170 peek(memQueue_in, MemoryMsg) {
171 if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
172 trigger(Event:Memory_Data, in_msg.Address);
173 } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
174 trigger(Event:Memory_Ack, in_msg.Address);
176 DEBUG_EXPR(in_msg.Type);
177 error("Invalid message");
// Ack a writeback whose requestor came in on the request network.
185 action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
186 peek(requestQueue_in, RequestMsg) {
187 enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
188 out_msg.Address := address;
189 out_msg.Type := CoherenceRequestType:WB_ACK;
190 out_msg.Requestor := in_msg.Requestor;
191 out_msg.Destination.add(in_msg.Requestor);
192 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Ack a writeback after the memory controller confirms it; the original
// requestor's id was carried through the MemoryMsg.
197 action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
198 peek(memQueue_in, MemoryMsg) {
199 enqueue(forwardNetwork_out, RequestMsg, latency="1") {
200 out_msg.Address := address;
201 out_msg.Type := CoherenceRequestType:WB_ACK;
202 out_msg.Requestor := in_msg.OriginalRequestorMachId;
203 out_msg.Destination.add(in_msg.OriginalRequestorMachId);
204 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Reject a writeback from a cache that is not the recorded owner.
209 action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
210 peek(requestQueue_in, RequestMsg) {
211 enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
212 out_msg.Address := address;
213 out_msg.Type := CoherenceRequestType:WB_NACK;
214 out_msg.Requestor := in_msg.Requestor;
215 out_msg.Destination.add(in_msg.Requestor);
216 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Drop the current owner (used when a block returns to I).
221 action(c_clearOwner, "c", desc="Clear the owner field") {
222 directory[address].Owner.clear();
// Forward memory-fetched data to the cache that originally requested it.
225 action(d_sendData, "d", desc="Send data to requestor") {
226 peek(memQueue_in, MemoryMsg) {
227 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
228 out_msg.Address := address;
229 out_msg.Type := CoherenceResponseType:DATA;
230 out_msg.Sender := machineID;
231 out_msg.Destination.add(in_msg.OriginalRequestorMachId);
232 out_msg.DataBlk := in_msg.DataBlk;
233 out_msg.MessageSize := MessageSizeType:Response_Data;
// Forward memory-fetched data to the DMA controller selected by the
// configured address bit-field.
238 action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
239 peek(memQueue_in, MemoryMsg) {
240 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
241 out_msg.PhysicalAddress := address;
242 out_msg.LineAddress := address;
243 out_msg.Type := DMAResponseType:DATA;
244 out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
245 out_msg.Destination.add(mapAddressToRange(address, MachineType:DMA,
246 dma_select_low_bit, dma_select_num_bits));
247 out_msg.MessageSize := MessageSizeType:Response_Data;
// Satisfy a DMA read directly from the data carried by an incoming PUTX
// (the owner's copy is fresher than memory).
254 action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
255 peek(requestQueue_in, RequestMsg) {
256 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
257 out_msg.PhysicalAddress := address;
258 out_msg.LineAddress := address;
259 out_msg.Type := DMAResponseType:DATA;
260 out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
261 out_msg.Destination.add(mapAddressToRange(address, MachineType:DMA,
262 dma_select_low_bit, dma_select_num_bits));
263 out_msg.MessageSize := MessageSizeType:Response_Data;
// Ack a completed DMA write back to the issuing DMA controller.
268 action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
269 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
270 out_msg.PhysicalAddress := address;
271 out_msg.LineAddress := address;
272 out_msg.Type := DMAResponseType:ACK;
273 out_msg.Destination.add(mapAddressToRange(address, MachineType:DMA,
274 dma_select_low_bit, dma_select_num_bits));
275 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Remove the directory entry for this block entirely.
279 action(d_deallocateDirectory, "\d", desc="Deallocate Directory Entry") {
280 directory.invalidateBlock(address);
// Replace the owner with the cache that just requested exclusive access.
283 action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
284 peek(requestQueue_in, RequestMsg) {
285 directory[address].Owner.clear();
286 directory[address].Owner.add(in_msg.Requestor);
// Forward a cache request unchanged to the current owner (cache-to-cache
// transfer path); the transition-comment macros aid trace debugging only.
290 action(f_forwardRequest, "f", desc="Forward request to owner") {
291 peek(requestQueue_in, RequestMsg) {
292 APPEND_TRANSITION_COMMENT("Own: ");
293 APPEND_TRANSITION_COMMENT(directory[in_msg.Address].Owner);
294 APPEND_TRANSITION_COMMENT("Req: ");
295 APPEND_TRANSITION_COMMENT(in_msg.Requestor);
296 enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
297 out_msg.Address := address;
298 out_msg.Type := in_msg.Type;
299 out_msg.Requestor := in_msg.Requestor;
300 out_msg.Destination := directory[in_msg.Address].Owner;
301 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Tell the owner to invalidate (and write back) its copy so a DMA access
// can proceed; the directory itself is the requestor here.
306 action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
307 peek(dmaRequestQueue_in, DMARequestMsg) {
308 enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
309 out_msg.Address := address;
310 out_msg.Type := CoherenceRequestType:INV;
311 out_msg.Requestor := machineID;
312 out_msg.Destination := directory[in_msg.PhysicalAddress].Owner;
313 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Consume the head message of each incoming queue once it is handled.
318 action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
319 requestQueue_in.dequeue();
322 action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
323 dmaRequestQueue_in.dequeue();
// Copy PUTX writeback data into the directory's copy of the block.
326 action(l_writeDataToMemory, "l", desc="Write PUTX data to memory") {
327 peek(requestQueue_in, RequestMsg) {
328 // assert(in_msg.Dirty);
329 // assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
330 directory[in_msg.Address].DataBlk := in_msg.DataBlk;
331 DEBUG_EXPR(in_msg.Address);
332 DEBUG_EXPR(in_msg.DataBlk);
// Merge only the DMA write's byte range (offset/Len from the TBE) into the
// directory's block, preserving the rest.
// NOTE(review): relies on TBEs[address].Len, not visible in the TBE
// structure shown in this excerpt — confirm the field exists in the full file.
336 action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
337 directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
// Allocate a TBE capturing an in-flight DMA request's data, address and length.
340 action(v_allocateTBE, "v", desc="Allocate TBE") {
341 peek(dmaRequestQueue_in, DMARequestMsg) {
342 TBEs.allocate(address);
343 TBEs[address].DataBlk := in_msg.DataBlk;
344 TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
345 TBEs[address].Len := in_msg.Len;
// Allocate a TBE from a PUTX on the request network (M-->I writeback path);
// only the data block needs saving.
349 action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
350 peek(requestQueue_in, RequestMsg) {
351 TBEs.allocate(address);
352 TBEs[address].DataBlk := in_msg.DataBlk;
// Release the TBE once the transaction completes.
356 action(w_deallocateTBE, "w", desc="Deallocate TBE") {
357 TBEs.deallocate(address);
// Requeue the head request so it is retried later (used while in
// transient states that cannot service it yet).
360 action(z_recycleRequestQueue, "z", desc="recycle request queue") {
361 requestQueue_in.recycle();
// Issue an off-chip read on behalf of a cache request, tagging it with the
// original requestor so the reply can be routed back (see d_sendData).
365 action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
366 peek(requestQueue_in, RequestMsg) {
367 enqueue(memQueue_out, MemoryMsg, latency="1") {
368 out_msg.Address := address;
369 out_msg.Type := MemoryRequestType:MEMORY_READ;
370 out_msg.Sender := machineID;
371 out_msg.OriginalRequestorMachId := in_msg.Requestor;
372 out_msg.MessageSize := in_msg.MessageSize;
373 out_msg.DataBlk := directory[in_msg.Address].DataBlk;
// Issue an off-chip read on behalf of a DMA request; no original-requestor
// tag is set (the DMA reply is routed via mapAddressToRange instead).
379 action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
380 peek(dmaRequestQueue_in, DMARequestMsg) {
381 enqueue(memQueue_out, MemoryMsg, latency="1") {
382 out_msg.Address := address;
383 out_msg.Type := MemoryRequestType:MEMORY_READ;
384 out_msg.Sender := machineID;
385 //out_msg.OriginalRequestorMachId := machineID;
386 out_msg.MessageSize := in_msg.MessageSize;
387 out_msg.DataBlk := directory[address].DataBlk;
// Issue a partial off-chip writeback directly from an incoming DMA write:
// only the DMA request's offset/Len byte range is merged into the outgoing block.
393 action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
394 peek(dmaRequestQueue_in, DMARequestMsg) {
395 enqueue(memQueue_out, MemoryMsg, latency="1") {
396 out_msg.Address := address;
397 out_msg.Type := MemoryRequestType:MEMORY_WB;
398 //out_msg.OriginalRequestorMachId := machineID;
399 //out_msg.DataBlk := in_msg.DataBlk;
400 out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.PhysicalAddress), in_msg.Len);
401 out_msg.MessageSize := in_msg.MessageSize;
402 //out_msg.Prefetch := in_msg.Prefetch;
// Same partial writeback, but the DMA data comes from the TBE (used when a
// PUTX arrived while a DMA write was pending, state M_DWR).
// NOTE(review): reads TBEs[address].Len — see the TBE structure note above;
// confirm the field exists in the full file.
409 action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
410 peek(requestQueue_in, RequestMsg) {
411 enqueue(memQueue_out, MemoryMsg, latency="1") {
412 out_msg.Address := address;
413 out_msg.Type := MemoryRequestType:MEMORY_WB;
414 out_msg.OriginalRequestorMachId := in_msg.Requestor;
415 //out_msg.DataBlk := in_msg.DataBlk;
416 out_msg.DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
417 out_msg.MessageSize := in_msg.MessageSize;
418 //out_msg.Prefetch := in_msg.Prefetch;
// Issue a full-block off-chip writeback for PUTX data.
427 action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
428 peek(requestQueue_in, RequestMsg) {
429 enqueue(memQueue_out, MemoryMsg, latency="1") {
430 out_msg.Address := address;
431 out_msg.Type := MemoryRequestType:MEMORY_WB;
432 out_msg.OriginalRequestorMachId := in_msg.Requestor;
433 out_msg.DataBlk := in_msg.DataBlk;
434 out_msg.MessageSize := in_msg.MessageSize;
435 //out_msg.Prefetch := in_msg.Prefetch;
// Consume the head memory-controller completion message.
442 action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
443 memQueue_in.dequeue();
// Copy the TBE's saved (full) data block into the directory's copy.
446 action(w_writeDataToMemoryFromTBE, "\w", desc="Write date to directory memory from TBE") {
447 directory[address].DataBlk := TBEs[address].DataBlk;
// Stall-by-recycle: while blocked on a DMA-triggered invalidation, or in
// any other transient state, requeue conflicting requests for a later retry.
452 transition({M_DRD, M_DWR}, GETX) {
453 z_recycleRequestQueue;
456 transition({IM, MI, ID, ID_W}, {GETX, GETS, DMA_READ, DMA_WRITE, PUTX, PUTX_NotOwner} ) {
457 z_recycleRequestQueue;
// Invalid-state paths: cache GETX and DMA read/write are all serviced from
// memory, passing through the matching transient state.
// NOTE(review): several action lines inside these transitions fall in the
// elided portions of this listing; the actions shown are the visible subset.
460 transition(I, GETX, IM) {
462 qf_queueMemoryFetchRequest;
464 i_popIncomingRequestQueue;
// Memory data completes the I-->M fill.
467 transition(IM, Memory_Data, M) {
469 //e_ownerIsRequestor;
// DMA read from I: fetch from memory, reply handled on Memory_Data.
474 transition(I, DMA_READ, ID) {
476 qf_queueMemoryFetchRequestDMA;
477 p_popIncomingDMARequestQueue;
480 transition(ID, Memory_Data, I) {
482 //p_popIncomingDMARequestQueue;
// DMA write from I: partial writeback straight to memory; ack on Memory_Ack.
488 transition(I, DMA_WRITE, ID_W) {
490 qw_queueMemoryWBRequest_partial;
491 p_popIncomingDMARequestQueue;
494 transition(ID_W, Memory_Ack, I) {
495 dwt_writeDMADataFromTBE;
// DMA access while a cache owns the block: invalidate the owner first and
// wait (M_DRD / M_DWR) for its PUTX before completing the DMA operation.
// NOTE(review): some action lines within these transitions are elided from
// this listing.
501 transition(M, DMA_READ, M_DRD) {
502 inv_sendCacheInvalidate;
503 p_popIncomingDMARequestQueue;
// Owner's PUTX arrives: DMA read is served from the PUTX data (see
// drp_sendDMAData) and the block returns to I.
506 transition(M_DRD, PUTX, I) {
510 d_deallocateDirectory;
511 i_popIncomingRequestQueue;
514 transition(M, DMA_WRITE, M_DWR) {
516 inv_sendCacheInvalidate;
517 p_popIncomingDMARequestQueue;
// Owner's PUTX arrives: merge the pending DMA write (from the TBE) into the
// writeback and wait for the memory ack in M_DWRI.
520 transition(M_DWR, PUTX, M_DWRI) {
521 qw_queueMemoryWBRequest_partialTBE;
523 i_popIncomingRequestQueue;
526 transition(M_DWRI, Memory_Ack, I) {
527 w_writeDataToMemoryFromTBE;
531 d_deallocateDirectory;
// Cache requests while a cache owns the block.
// NOTE(review): some action lines within these transitions are elided from
// this listing (e.g. the forwarding/ack actions at orig. 536-537, 542, 557, 562).
// GETX in M: ownership transfer, block stays M (forwarding handled by
// actions in the elided lines).
535 transition(M, GETX, M) {
538 i_popIncomingRequestQueue;
// Owner writes back: save data in a TBE, write to memory, ack on Memory_Ack.
541 transition(M, PUTX, MI) {
543 v_allocateTBEFromRequestNet;
544 l_queueMemoryWBRequest;
545 i_popIncomingRequestQueue;
548 transition(MI, Memory_Ack, I) {
549 w_writeDataToMemoryFromTBE;
552 d_deallocateDirectory;
// Stale PUTX from a non-owner is dropped/nacked without state change.
556 transition(M, PUTX_NotOwner, M) {
558 i_popIncomingRequestQueue;
561 transition(I, PUTX_NotOwner, I) {
563 i_popIncomingRequestQueue;