// DMA controller state machine: accepts read/write requests from the
// attached DMASequencer and turns them into directory requests.
// NOTE(review): this is an excerpt; some lines and closing braces are
// not visible here.
2 machine(DMA, "DMA Controller")
3 : DMASequencer * dma_sequencer,
// Latencies (in cycles) applied when enqueuing outgoing messages below.
4 int request_latency = 14,
5 int response_latency = 14
// Incoming buffers (network="From"): two "goo" vnets plus directory
// responses on vnet 2.
8 MessageBuffer goo1, network="From", virtual_network="0", ordered="false", vnet_type="goo";
9 MessageBuffer goo2, network="From", virtual_network="1", ordered="false", vnet_type="goo";
10 MessageBuffer responseFromDir, network="From", virtual_network="2", ordered="false", vnet_type="response";
// Outgoing buffers (network="To"): requests on vnet 1, responses
// (unblocks) on vnet 2, plus the "foo" vnet.
12 MessageBuffer foo1, network="To", virtual_network="0", ordered="false", vnet_type="foo";
13 MessageBuffer reqToDir, network="To", virtual_network="1", ordered="false", vnet_type="request";
14 MessageBuffer respToDir, network="To", virtual_network="2", ordered="false", vnet_type="dmaresponse";
// Controller states: READY waits for a new sequencer request; the BUSY
// states hold while a read (BUSY_RD) or write (BUSY_WR) is outstanding.
16 state_declaration(State, desc="DMA states", default="DMA_State_READY") {
17 READY, AccessPermission:Invalid, desc="Ready to accept a new request";
18 BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
19 BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
// Events: ReadRequest/WriteRequest arrive from the sequencer,
// Data/DMA_Ack/Inv_Ack from the directory response network, and
// All_Acks from the local trigger queue.
22 enumeration(Event, desc="DMA events") {
23 ReadRequest, desc="A new read request";
24 WriteRequest, desc="A new write request";
25 Data, desc="Data from a DMA memory read";
26 DMA_Ack, desc="DMA write to memory completed";
27 Inv_Ack, desc="Invalidation Ack from a sharer";
28 All_Acks, desc="All acks received";
// Per-request transaction buffer entry: line address, outstanding ack
// count, and (for reads) the data block returned by the directory.
31 structure(TBE, desc="...") {
32 Address address, desc="Physical address";
33 int NumAcks, default="0", desc="Number of Acks pending";
34 DataBlock DataBlk, desc="Data";
// Interface to the external (C++) DMASequencer object.
// NOTE(review): ackCallback() is invoked below but its declaration is
// not visible in this excerpt -- presumably on an elided line; confirm.
37 structure(DMASequencer, external = "yes") {
39 void dataCallback(DataBlock);
// External TBE table, keyed by line address.
42 structure(TBETable, external = "yes") {
44 void allocate(Address);
45 void deallocate(Address);
46 bool isPresent(Address);
// Controller-local queues and the TBE table instance.
49 MessageBuffer mandatoryQueue, ordered="false";
50 MessageBuffer triggerQueue, ordered="true";
51 TBETable TBEs, template_hack="<DMA_TBE>";
// State/permission accessor boilerplate required by SLICC. The DMA
// controller caches no lines: permission is always NotPresent and
// getDataBlock() is an error. (Some function bodies are elided in
// this excerpt.)
57 State getState(TBE tbe, Address addr) {
60 void setState(TBE tbe, Address addr, State state) {
64 AccessPermission getAccessPermission(Address addr) {
65 return AccessPermission:NotPresent;
68 void setAccessPermission(Address addr, State state) {
71 DataBlock getDataBlock(Address addr), return_by_ref="yes" {
72 error("DMA Controller does not support getDataBlock().\n");
// Output ports: requests and responses to the directory, the foo1
// vnet, and the local trigger queue used for All_Acks completion.
75 out_port(reqToDirectory_out, RequestMsg, reqToDir, desc="...");
76 out_port(respToDirectory_out, ResponseMsg, respToDir, desc="...");
77 out_port(foo1_out, ResponseMsg, foo1, desc="...");
78 out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");
// Input ports for the two "goo" virtual networks. Their peek bodies
// are not visible in this excerpt; no trigger() call is visible for
// either port -- presumably they only assert/error; confirm against
// the full file.
80 in_port(goo1_in, RequestMsg, goo1) {
81 if (goo1_in.isReady()) {
82 peek(goo1_in, RequestMsg) {
88 in_port(goo2_in, RequestMsg, goo2) {
89 if (goo2_in.isReady()) {
90 peek(goo2_in, RequestMsg) {
// Requests arriving from the attached DMASequencer: LD triggers
// ReadRequest, ST triggers WriteRequest, keyed by the line address.
96 in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
97 if (dmaRequestQueue_in.isReady()) {
98 peek(dmaRequestQueue_in, SequencerMsg) {
99 if (in_msg.Type == SequencerRequestType:LD ) {
100 trigger(Event:ReadRequest, in_msg.LineAddress,
101 TBEs[in_msg.LineAddress]);
102 } else if (in_msg.Type == SequencerRequestType:ST) {
103 trigger(Event:WriteRequest, in_msg.LineAddress,
104 TBEs[in_msg.LineAddress]);
// Any other sequencer request type is a protocol error.
106 error("Invalid request type");
// Responses from the directory/sharers. The message carries a full
// physical address, so it is normalized with makeLineAddress() before
// the TBE lookup and trigger.
112 in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
113 if (dmaResponseQueue_in.isReady()) {
114 peek( dmaResponseQueue_in, ResponseMsg) {
115 if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
116 trigger(Event:DMA_Ack, makeLineAddress(in_msg.Address),
117 TBEs[makeLineAddress(in_msg.Address)]);
// Both exclusive and shared data responses map to Event:Data.
118 } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
119 in_msg.Type == CoherenceResponseType:DATA) {
120 trigger(Event:Data, makeLineAddress(in_msg.Address),
121 TBEs[makeLineAddress(in_msg.Address)]);
122 } else if (in_msg.Type == CoherenceResponseType:ACK) {
123 trigger(Event:Inv_Ack, makeLineAddress(in_msg.Address),
124 TBEs[makeLineAddress(in_msg.Address)]);
126 error("Invalid response type");
// Local trigger queue: o_checkForCompletion enqueues ALL_ACKS here
// once the pending ack count reaches zero; that fires Event:All_Acks.
133 in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
134 if (triggerQueue_in.isReady()) {
135 peek(triggerQueue_in, TriggerMsg) {
136 if (in_msg.Type == TriggerType:ALL_ACKS) {
137 trigger(Event:All_Acks, in_msg.Address, TBEs[in_msg.Address]);
139 error("Unexpected message");
// Issue a DMA_READ request to the home directory. The outgoing message
// carries the full (possibly unaligned) physical address and byte
// length from the sequencer request; the destination directory is
// derived from the triggering (line) address.
145 action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
146 peek(dmaRequestQueue_in, SequencerMsg) {
147 enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
148 out_msg.Address := in_msg.PhysicalAddress;
149 out_msg.Type := CoherenceRequestType:DMA_READ;
150 out_msg.DataBlk := in_msg.DataBlk;
151 out_msg.Len := in_msg.Len;
152 out_msg.Destination.add(map_Address_to_Directory(address));
153 out_msg.Requestor := machineID;
154 out_msg.RequestorMachine := MachineType:DMA;
155 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Issue a DMA_WRITE request to the home directory. Mirrors
// s_sendReadRequest except for the request type; DataBlk carries the
// data to be written.
160 action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
161 peek(dmaRequestQueue_in, SequencerMsg) {
162 enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
163 out_msg.Address := in_msg.PhysicalAddress;
164 out_msg.Type := CoherenceRequestType:DMA_WRITE;
165 out_msg.DataBlk := in_msg.DataBlk;
166 out_msg.Len := in_msg.Len;
167 out_msg.Destination.add(map_Address_to_Directory(address));
168 out_msg.Requestor := machineID;
169 out_msg.RequestorMachine := MachineType:DMA;
170 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Notify the sequencer that its write request has completed.
175 action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
176 dma_sequencer.ackCallback();
// If no acks remain pending, enqueue an ALL_ACKS trigger message;
// triggerQueue_in then fires Event:All_Acks for this address.
179 action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
180 assert(is_valid(tbe));
181 if (tbe.NumAcks == 0) {
182 enqueue(triggerQueue_out, TriggerMsg) {
183 out_msg.Address := address;
184 out_msg.Type := TriggerType:ALL_ACKS;
// Subtract the message's Acks field from the pending count.
// NOTE(review): the BUSY_WR/DMA_Ack transition comments that this
// "actually increases" -- presumably Acks is negative in that message;
// confirm against the directory's response construction.
189 action(u_updateAckCount, "u", desc="Update ack count") {
190 peek(dmaResponseQueue_in, ResponseMsg) {
191 assert(is_valid(tbe));
192 tbe.NumAcks := tbe.NumAcks - in_msg.Acks;
// Send UNBLOCK_EXCLUSIVE to the home directory so it can retire the
// outstanding DMA transaction for this line.
196 action( u_sendExclusiveUnblockToDir, "\u", desc="send exclusive unblock to directory") {
197 enqueue(respToDirectory_out, ResponseMsg, latency=response_latency) {
198 out_msg.Address := address;
199 out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
200 out_msg.Destination.add(map_Address_to_Directory(address));
201 out_msg.Sender := machineID;
202 out_msg.SenderMachine := MachineType:DMA;
203 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Dequeue the head message from the sequencer request queue.
207 action(p_popRequestQueue, "p", desc="Pop request queue") {
208 dmaRequestQueue_in.dequeue();
// Dequeue the head message from the directory response queue.
// Fix: desc previously said "Pop request queue" -- a copy-paste from
// p_popRequestQueue; this action pops the RESPONSE queue.
211 action(p_popResponseQueue, "\p", desc="Pop response queue") {
212 dmaResponseQueue_in.dequeue();
// Dequeue the head message from the local trigger queue.
215 action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
216 triggerQueue_in.dequeue();
// Copy the data block from the directory response into the TBE so it
// can later be delivered to the sequencer.
219 action(t_updateTBEData, "t", desc="Update TBE Data") {
220 peek(dmaResponseQueue_in, ResponseMsg) {
221 assert(is_valid(tbe));
222 tbe.DataBlk := in_msg.DataBlk;
// Deliver the buffered read data (from the TBE) to the sequencer.
226 action(d_dataCallbackFromTBE, "/d", desc="data callback with data from TBE") {
227 assert(is_valid(tbe));
228 dma_sequencer.dataCallback(tbe.DataBlk);
// Allocate a TBE for this address and bind it as the transition's tbe.
231 action(v_allocateTBE, "v", desc="Allocate TBE entry") {
232 TBEs.allocate(address);
233 set_tbe(TBEs[address]);
// Free this address's TBE once the transaction completes.
236 action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
237 TBEs.deallocate(address);
// Stall (empty action): used while the controller is busy with an
// outstanding request. Body not visible in this excerpt.
241 action(z_stall, "z", desc="dma is busy..stall") {
// Transition table. NOTE(review): some action lists are elided in this
// excerpt, so each transition below may invoke actions not shown.
// READY + ReadRequest -> BUSY_RD (read issued to directory).
247 transition(READY, ReadRequest, BUSY_RD) {
// Invalidation ack while reading: recheck completion (stays BUSY_RD).
253 transition(BUSY_RD, Inv_Ack) {
255 o_checkForCompletion;
// Data response completes the read: deliver data to the sequencer.
259 transition(BUSY_RD, Data, READY) {
261 d_dataCallbackFromTBE;
264 //o_checkForCompletion;
// All acks collected on a read path: also completes with data.
268 transition(BUSY_RD, All_Acks, READY) {
269 d_dataCallbackFromTBE;
270 //u_sendExclusiveUnblockToDir;
// READY + WriteRequest -> BUSY_WR (write issued to directory).
275 transition(READY, WriteRequest, BUSY_WR) {
// Invalidation ack while writing: recheck completion (stays BUSY_WR).
281 transition(BUSY_WR, Inv_Ack) {
283 o_checkForCompletion;
// Directory acknowledged the DMA write: fold its Acks field into the
// pending count and recheck completion.
287 transition(BUSY_WR, DMA_Ack) {
288 u_updateAckCount; // actually increases
289 o_checkForCompletion;
// Write fully acked: unblock the directory and return to READY.
293 transition(BUSY_WR, All_Acks, READY) {
295 u_sendExclusiveUnblockToDir;