2 * Copyright (c) 2009-2013 Mark D. Hill and David A. Wood
3 * Copyright (c) 2010-2011 Advanced Micro Devices, Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// DMA controller state machine. Bridges the attached DMASequencer to the
// directory: DMA reads/writes go out on reqToDir, and data/acks/completions
// come back on responseFromDir.
30 machine(DMA, "DMA Controller")
31 : DMASequencer * dma_sequencer;
// Enqueue latencies (in cycles) for outgoing request and response messages.
32 Cycles request_latency := 14;
33 Cycles response_latency := 14;
// Network message buffers: directory responses arrive on virtual network 2;
// requests to the directory leave on virtual network 1; responses (e.g.
// unblocks) to the directory leave on virtual network 2.
35 MessageBuffer * responseFromDir, network="From", virtual_network="2",
38 MessageBuffer * reqToDir, network="To", virtual_network="1",
40 MessageBuffer * respToDir, network="To", virtual_network="2",
41 vnet_type="dmaresponse";
// Controller states. The controller idles in READY and moves to BUSY_RD or
// BUSY_WR while a single outstanding DMA read or write is in flight.
44 state_declaration(State, desc="DMA states", default="DMA_State_READY") {
45 READY, AccessPermission:Invalid, desc="Ready to accept a new request";
46 BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
47 BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
// Events that drive the state machine: new requests from the sequencer,
// responses from the directory/sharers, and the internally-generated
// All_Acks trigger (see o_checkForCompletion).
50 enumeration(Event, desc="DMA events") {
51 ReadRequest, desc="A new read request";
52 WriteRequest, desc="A new write request";
53 Data, desc="Data from a DMA memory read";
54 DMA_Ack, desc="DMA write to memory completed";
55 Inv_Ack, desc="Invalidation Ack from a sharer";
56 All_Acks, desc="All acks received";
// Transaction Buffer Entry: per-request bookkeeping for the in-flight DMA
// transaction (pending ack count and the data block for reads).
59 structure(TBE, desc="...") {
60 Addr address, desc="Physical address";
61 int NumAcks, default="0", desc="Number of Acks pending";
62 DataBlock DataBlk, desc="Data";
// Externally-implemented TBE table (C++ side). Only deallocate() is visible
// in this view; other methods (allocate/lookup/isPresent) are used below.
65 structure(TBETable, external = "yes") {
68 void deallocate(Addr);
// mandatoryQueue carries requests injected by the DMASequencer (consumed by
// dmaRequestQueue_in); triggerQueue is the controller's internal self-queue
// used to deliver the All_Acks event.
72 MessageBuffer mandatoryQueue;
73 MessageBuffer triggerQueue;
74 TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
// Standard SLICC state-interface hooks. NOTE(review): the bodies of
// getState/setState/setAccessPermission are not fully visible in this chunk.
80 State getState(TBE tbe, Addr addr) {
83 void setState(TBE tbe, Addr addr, State state) {
// The DMA controller caches no blocks, so it never grants access permission.
87 AccessPermission getAccessPermission(Addr addr) {
88 return AccessPermission:NotPresent;
91 void setAccessPermission(Addr addr, State state) {
// Functional accesses are unsupported here: the DMA controller holds no
// coherent copy of memory, so both hooks raise a fatal error.
94 void functionalRead(Addr addr, Packet *pkt) {
95 error("DMA does not support functional read.");
98 int functionalWrite(Addr addr, Packet *pkt) {
99 error("DMA does not support functional write.");
// Outgoing ports: requests and responses to the directory, plus the internal
// trigger queue used to schedule the All_Acks event back to ourselves.
102 out_port(reqToDirectory_out, RequestMsg, reqToDir, desc="...");
103 out_port(respToDirectory_out, ResponseMsg, respToDir, desc="...");
104 out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");
// Requests from the DMASequencer: map LD/ST to ReadRequest/WriteRequest
// events, keyed by the request's line address.
106 in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
107 if (dmaRequestQueue_in.isReady()) {
108 peek(dmaRequestQueue_in, SequencerMsg) {
109 if (in_msg.Type == SequencerRequestType:LD ) {
110 trigger(Event:ReadRequest, in_msg.LineAddress,
111 TBEs.lookup(in_msg.LineAddress));
112 } else if (in_msg.Type == SequencerRequestType:ST) {
113 trigger(Event:WriteRequest, in_msg.LineAddress,
114 TBEs.lookup(in_msg.LineAddress));
// Any other sequencer request type is a protocol error.
116 error("Invalid request type");
// Responses from the directory / sharers. DMA_ACK completes a write;
// DATA/DATA_EXCLUSIVE carries read data; ACK is an invalidation ack from a
// sharer. All are keyed by the line address of the message's address.
122 in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
123 if (dmaResponseQueue_in.isReady()) {
124 peek( dmaResponseQueue_in, ResponseMsg) {
125 if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
126 trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
127 TBEs.lookup(makeLineAddress(in_msg.addr)));
128 } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
129 in_msg.Type == CoherenceResponseType:DATA) {
130 trigger(Event:Data, makeLineAddress(in_msg.addr),
131 TBEs.lookup(makeLineAddress(in_msg.addr)));
132 } else if (in_msg.Type == CoherenceResponseType:ACK) {
133 trigger(Event:Inv_Ack, makeLineAddress(in_msg.addr),
134 TBEs.lookup(makeLineAddress(in_msg.addr)));
// Any other response type is a protocol error.
136 error("Invalid response type");
// Internal trigger queue: o_checkForCompletion enqueues ALL_ACKS here once
// the pending ack count reaches zero; it is turned into the All_Acks event.
143 in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
144 if (triggerQueue_in.isReady()) {
145 peek(triggerQueue_in, TriggerMsg) {
146 if (in_msg.Type == TriggerType:ALL_ACKS) {
147 trigger(Event:All_Acks, in_msg.addr, TBEs.lookup(in_msg.addr));
149 error("Unexpected message");
// Forward the sequencer's read as a DMA_READ request to the directory.
155 action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
156 peek(dmaRequestQueue_in, SequencerMsg) {
157 enqueue(reqToDirectory_out, RequestMsg, request_latency) {
158 out_msg.addr := in_msg.PhysicalAddress;
159 out_msg.Type := CoherenceRequestType:DMA_READ;
160 out_msg.DataBlk := in_msg.DataBlk;
// Len allows sub-line DMA transfers; the directory uses it to bound the copy.
161 out_msg.Len := in_msg.Len;
// NOTE(review): destination is computed from the transition's `address`
// (the line address) while addr above is in_msg.PhysicalAddress — presumably
// both map to the same directory; confirm against map_Address_to_Directory.
162 out_msg.Destination.add(map_Address_to_Directory(address));
163 out_msg.Requestor := machineID;
164 out_msg.RequestorMachine := MachineType:DMA;
165 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Forward the sequencer's write as a DMA_WRITE request (with data and
// length) to the directory.
170 action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
171 peek(dmaRequestQueue_in, SequencerMsg) {
172 enqueue(reqToDirectory_out, RequestMsg, request_latency) {
173 out_msg.addr := in_msg.PhysicalAddress;
174 out_msg.Type := CoherenceRequestType:DMA_WRITE;
175 out_msg.DataBlk := in_msg.DataBlk;
176 out_msg.Len := in_msg.Len;
// NOTE(review): same addr/address asymmetry as s_sendReadRequest.
177 out_msg.Destination.add(map_Address_to_Directory(address));
178 out_msg.Requestor := machineID;
179 out_msg.RequestorMachine := MachineType:DMA;
180 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Tell the DMASequencer that its write request has completed.
185 action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
186 dma_sequencer.ackCallback();
// If no acks remain outstanding, schedule the All_Acks event by enqueueing
// an ALL_ACKS trigger message to ourselves (consumed by triggerQueue_in).
189 action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
190 assert(is_valid(tbe));
191 if (tbe.NumAcks == 0) {
192 enqueue(triggerQueue_out, TriggerMsg) {
193 out_msg.addr := address;
194 out_msg.Type := TriggerType:ALL_ACKS;
// Decrement the pending ack count by the Acks field of the incoming
// response (a negative Acks value therefore increases the count — see the
// BUSY_WR/DMA_Ack transition comment).
199 action(u_updateAckCount, "u", desc="Update ack count") {
200 peek(dmaResponseQueue_in, ResponseMsg) {
201 assert(is_valid(tbe));
202 tbe.NumAcks := tbe.NumAcks - in_msg.Acks;
// Send UNBLOCK_EXCLUSIVE to the directory so it can retire the transaction
// and accept new requests for this line.
206 action( u_sendExclusiveUnblockToDir, "\u", desc="send exclusive unblock to directory") {
207 enqueue(respToDirectory_out, ResponseMsg, response_latency) {
208 out_msg.addr := address;
209 out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
210 out_msg.Destination.add(map_Address_to_Directory(address));
211 out_msg.Sender := machineID;
212 out_msg.SenderMachine := MachineType:DMA;
213 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Consume the sequencer request message once its transition has fired.
217 action(p_popRequestQueue, "p", desc="Pop request queue") {
218 dmaRequestQueue_in.dequeue();
// Consume the directory response message once its transition has fired.
// (Fixed copy-paste error: desc said "Pop request queue" but this action
// pops the response queue.)
221 action(p_popResponseQueue, "\p", desc="Pop response queue") {
222 dmaResponseQueue_in.dequeue();
// Consume the internal trigger message once its transition has fired.
225 action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
226 triggerQueue_in.dequeue();
// Stash the returned data block in the TBE so d_dataCallbackFromTBE can
// deliver it to the sequencer later.
229 action(t_updateTBEData, "t", desc="Update TBE Data") {
230 peek(dmaResponseQueue_in, ResponseMsg) {
231 assert(is_valid(tbe));
232 tbe.DataBlk := in_msg.DataBlk;
// Deliver the buffered read data (stored by t_updateTBEData) to the
// DMASequencer.
236 action(d_dataCallbackFromTBE, "/d", desc="data callback with data from TBE") {
237 assert(is_valid(tbe));
238 dma_sequencer.dataCallback(tbe.DataBlk);
// Allocate a TBE for this line and bind it to the transition's `tbe`
// variable so subsequent actions can use it.
241 action(v_allocateTBE, "v", desc="Allocate TBE entry") {
242 TBEs.allocate(address);
243 set_tbe(TBEs.lookup(address));
// Release the TBE at the end of the transaction.
246 action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
247 TBEs.deallocate(address);
// Transition table. NOTE(review): several action lists are not fully
// visible in this chunk; comments below describe only the visible actions.
// READY + ReadRequest -> BUSY_RD: start a DMA read (action list truncated).
252 transition(READY, ReadRequest, BUSY_RD) {
// BUSY_RD + Inv_Ack: count a sharer's invalidation ack, maybe complete.
258 transition(BUSY_RD, Inv_Ack) {
260 o_checkForCompletion;
// BUSY_RD + Data -> READY: read data arrived; return it to the sequencer.
264 transition(BUSY_RD, Data, READY) {
266 d_dataCallbackFromTBE;
269 //o_checkForCompletion;
// BUSY_RD + All_Acks -> READY: all acks in; return buffered data.
273 transition(BUSY_RD, All_Acks, READY) {
274 d_dataCallbackFromTBE;
275 //u_sendExclusiveUnblockToDir;
// READY + WriteRequest -> BUSY_WR: start a DMA write (action list truncated).
280 transition(READY, WriteRequest, BUSY_WR) {
// BUSY_WR + Inv_Ack: count a sharer's invalidation ack, maybe complete.
286 transition(BUSY_WR, Inv_Ack) {
288 o_checkForCompletion;
// BUSY_WR + DMA_Ack: the directory acknowledged the write; the Acks field
// here adjusts NumAcks (per the original comment, it actually increases it),
// then completion is re-checked.
292 transition(BUSY_WR, DMA_Ack) {
293 u_updateAckCount; // actually increases
294 o_checkForCompletion;
// BUSY_WR + All_Acks -> READY: write fully acknowledged; unblock the
// directory (remaining actions truncated in this view).
298 transition(BUSY_WR, All_Acks, READY) {
300 u_sendExclusiveUnblockToDir;