// DMA engine controller: turns sequencer load/store requests into
// DMA_READ / DMA_WRITE coherence requests to the directory and collects
// the resulting data and acknowledgements.
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer,
  Cycles request_latency = 14,   // enqueue latency for outgoing requests
  Cycles response_latency = 14   // enqueue latency for outgoing responses
{
  // Responses arriving from the directory/owners: DATA, DATA_EXCLUSIVE,
  // DMA_ACK and invalidation ACKs (see dmaResponseQueue_in).
  MessageBuffer responseFromDir, network="From", virtual_network="2", ordered="false", vnet_type="response";

  // Outgoing DMA_READ / DMA_WRITE requests to the directory.
  MessageBuffer reqToDir, network="To", virtual_network="1", ordered="false", vnet_type="request";
  // Outgoing unblock responses to the directory.
  MessageBuffer respToDir, network="To", virtual_network="2", ordered="false", vnet_type="dmaresponse";
  // Controller states. The DMA engine services one request at a time:
  // READY while idle, BUSY_RD / BUSY_WR while a read or write is in flight.
  state_declaration(State, desc="DMA states", default="DMA_State_READY") {
    READY, AccessPermission:Invalid, desc="Ready to accept a new request";
    BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
    BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
  }
  // Events driving the state machine: new requests from the sequencer,
  // responses from the network, and the internally-generated All_Acks.
  enumeration(Event, desc="DMA events") {
    ReadRequest, desc="A new read request";
    WriteRequest, desc="A new write request";
    Data, desc="Data from a DMA memory read";
    DMA_Ack, desc="DMA write to memory completed";
    Inv_Ack, desc="Invalidation Ack from a sharer";
    All_Acks, desc="All acks received";
  }
  // Transaction Buffer Entry: per-outstanding-request bookkeeping.
  structure(TBE, desc="...") {
    Address address, desc="Physical address";
    // Decremented by u_updateAckCount; o_checkForCompletion fires
    // All_Acks when this reaches zero.
    int NumAcks, default="0", desc="Number of Acks pending";
    DataBlock DataBlk, desc="Data";
  }
33 structure(DMASequencer, external = "yes") {
35 void dataCallback(DataBlock);
38 structure(TBETable, external = "yes") {
40 void allocate(Address);
41 void deallocate(Address);
42 bool isPresent(Address);
  // Request queue filled by the DMASequencer (consumed by dmaRequestQueue_in).
  MessageBuffer mandatoryQueue, ordered="false";
  // Internal self-trigger queue used to raise All_Acks (must stay ordered).
  MessageBuffer triggerQueue, ordered="true";
  // One TBE per in-flight DMA line address.
  TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
53 State getState(TBE tbe, Address addr) {
56 void setState(TBE tbe, Address addr, State state) {
  // DMA lines are never cached here, so no address is ever "present".
  AccessPermission getAccessPermission(Address addr) {
    return AccessPermission:NotPresent;
  }
  // Access permissions are not tracked by the DMA controller.
  // NOTE(review): body not visible in this chunk; presumed empty -- confirm.
  void setAccessPermission(Address addr, State state) {
  }
  // Functional access into a DMA engine is meaningless; fail loudly.
  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    error("DMA Controller does not support getDataBlock().\n");
  }
  // Outgoing ports: requests and unblocks to the directory, plus the
  // internal trigger loop-back used for All_Acks.
  out_port(reqToDirectory_out, RequestMsg, reqToDir, desc="...");
  out_port(respToDirectory_out, ResponseMsg, respToDir, desc="...");
  out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");
  // New DMA requests from the sequencer: LD -> ReadRequest, ST -> WriteRequest.
  // Triggers on the line address; the TBE lookup may yield an invalid entry
  // when no transaction is outstanding yet.
  in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, SequencerMsg) {
        if (in_msg.Type == SequencerRequestType:LD ) {
          trigger(Event:ReadRequest, in_msg.LineAddress,
                  TBEs[in_msg.LineAddress]);
        } else if (in_msg.Type == SequencerRequestType:ST) {
          trigger(Event:WriteRequest, in_msg.LineAddress,
                  TBEs[in_msg.LineAddress]);
        } else {
          error("Invalid request type");
        }
      }
    }
  }
  // Responses from the directory/owners. The message carries a byte address,
  // so it is normalized with makeLineAddress before triggering.
  in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
    if (dmaResponseQueue_in.isReady()) {
      peek( dmaResponseQueue_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
          // Write completed at memory.
          trigger(Event:DMA_Ack, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
                   in_msg.Type == CoherenceResponseType:DATA) {
          // Read data returned (exclusive or shared -- handled identically).
          trigger(Event:Data, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          // Invalidation ack from a sharer.
          trigger(Event:Inv_Ack, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else {
          error("Invalid response type");
        }
      }
    }
  }
  // Internal trigger loop-back: o_checkForCompletion enqueues an ALL_ACKS
  // message here once every pending ack has been counted.
  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {
        if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_Acks, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }
  // Forward the sequencer's read as a DMA_READ to the owning directory.
  action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
    peek(dmaRequestQueue_in, SequencerMsg) {
      enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
        // Byte address and length come from the sequencer message; the
        // destination is mapped from 'address' (the triggering line address).
        out_msg.Address := in_msg.PhysicalAddress;
        out_msg.Type := CoherenceRequestType:DMA_READ;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := in_msg.Len;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := MachineType:DMA;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
  // Forward the sequencer's store as a DMA_WRITE to the owning directory.
  // Mirrors s_sendReadRequest except for the request type.
  action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
    peek(dmaRequestQueue_in, SequencerMsg) {
      enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
        out_msg.Address := in_msg.PhysicalAddress;
        out_msg.Type := CoherenceRequestType:DMA_WRITE;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := in_msg.Len;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := MachineType:DMA;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
  // Complete a DMA write back to the sequencer.
  action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
    dma_sequencer.ackCallback();
  }
  // If no acks remain outstanding, raise All_Acks via the trigger queue.
  // Run after every ack-count update so completion is detected promptly.
  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumAcks == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        out_msg.Type := TriggerType:ALL_ACKS;
      }
    }
  }
  // Fold the ack count carried by the incoming response into the TBE.
  // Subtracting lets a single message both raise (negative Acks) and lower
  // the pending count.
  action(u_updateAckCount, "u", desc="Update ack count") {
    peek(dmaResponseQueue_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.NumAcks := tbe.NumAcks - in_msg.Acks;
    }
  }
  // Tell the directory this transaction is finished so it can unblock
  // the line for other requestors.
  action( u_sendExclusiveUnblockToDir, "\u", desc="send exclusive unblock to directory") {
    enqueue(respToDirectory_out, ResponseMsg, latency=response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:DMA;
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
  // Consume the sequencer request currently at the head of mandatoryQueue.
  action(p_popRequestQueue, "p", desc="Pop request queue") {
    dmaRequestQueue_in.dequeue();
  }
190 action(p_popResponseQueue, "\p", desc="Pop request queue") {
191 dmaResponseQueue_in.dequeue();
  // Consume the internal trigger message (All_Acks) just handled.
  action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
    triggerQueue_in.dequeue();
  }
  // Stash the returned read data in the TBE for the later dataCallback.
  action(t_updateTBEData, "t", desc="Update TBE Data") {
    peek(dmaResponseQueue_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.DataBlk := in_msg.DataBlk;
    }
  }
  // Complete a DMA read back to the sequencer with the data saved by
  // t_updateTBEData.
  action(d_dataCallbackFromTBE, "/d", desc="data callback with data from TBE") {
    assert(is_valid(tbe));
    dma_sequencer.dataCallback(tbe.DataBlk);
  }
  // Allocate a TBE for this line and bind it to the implicit 'tbe' handle
  // used by subsequent actions in the same transition.
  action(v_allocateTBE, "v", desc="Allocate TBE entry") {
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
  }
215 action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
216 TBEs.deallocate(address);
  // Deliberately empty: scheduling this action without popping the input
  // queue leaves the message in place, stalling it until the DMA is READY.
  // NOTE(review): body not visible in this chunk; presumed empty -- confirm.
  action(z_stall, "z", desc="dma is busy..stall") {
  }
  // Transition table: READY -> BUSY_RD/BUSY_WR on a new request; back to
  // READY when the data arrives (reads) or all acks are in (writes).
  //
  // NOTE(review): several interleaved lines of this chunk were lost in
  // extraction -- most transitions below are missing action statements
  // (e.g. the TBE allocate / queue-pop / ack-update calls) and closing
  // braces. Only the surviving lines are kept; restore the bodies from the
  // full file before relying on them.

  transition(READY, ReadRequest, BUSY_RD) {
    // [actions elided in this view]

  transition(BUSY_RD, Inv_Ack) {
    // [ack-count update elided in this view]
    o_checkForCompletion;

  transition(BUSY_RD, Data, READY) {
    // [TBE data update elided in this view]
    d_dataCallbackFromTBE;
    //o_checkForCompletion;

  transition(BUSY_RD, All_Acks, READY) {
    d_dataCallbackFromTBE;
    //u_sendExclusiveUnblockToDir;

  transition(READY, WriteRequest, BUSY_WR) {
    // [actions elided in this view]

  transition(BUSY_WR, Inv_Ack) {
    // [ack-count update elided in this view]
    o_checkForCompletion;

  transition(BUSY_WR, DMA_Ack) {
    // "actually increases": presumably the DMA_ACK message carries a
    // non-positive Acks value so the subtraction in u_updateAckCount raises
    // the pending count -- TODO confirm against the directory protocol.
    u_updateAckCount; // actually increases
    o_checkForCompletion;

  transition(BUSY_WR, All_Acks, READY) {
    // [write-completion callback elided in this view]
    u_sendExclusiveUnblockToDir;