ruby: restrict Address to being a type and not a variable name
[gem5.git] / src / mem / protocol / MOESI_CMP_directory-dma.sm
1 /*
2 * Copyright (c) 2009-2013 Mark D. Hill and David A. Wood
3 * Copyright (c) 2010-2011 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
// DMA controller state machine for the MOESI_CMP_directory protocol.
// Issues DMA read/write requests to the directory on behalf of the
// DMASequencer and collects the resulting data / acknowledgments.
// Note: the controller tracks a single global state (cur_state) rather
// than per-address state, so it services one DMA request at a time.
machine(DMA, "DMA Controller")
    : DMASequencer * dma_sequencer,
      Cycles request_latency = 14,
      Cycles response_latency = 14
{
    // Network ports: responses arrive from the directory on vnet 2;
    // requests go out on vnet 1, unblock/ack responses on vnet 2.
    MessageBuffer responseFromDir, network="From", virtual_network="2", ordered="false", vnet_type="response";

    MessageBuffer reqToDir, network="To", virtual_network="1", ordered="false", vnet_type="request";
    MessageBuffer respToDir, network="To", virtual_network="2", ordered="false", vnet_type="dmaresponse";

    state_declaration(State, desc="DMA states", default="DMA_State_READY") {
        READY, AccessPermission:Invalid, desc="Ready to accept a new request";
        BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
        BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
    }

    enumeration(Event, desc="DMA events") {
        ReadRequest, desc="A new read request";
        WriteRequest, desc="A new write request";
        Data, desc="Data from a DMA memory read";
        DMA_Ack, desc="DMA write to memory completed";
        Inv_Ack, desc="Invalidation Ack from a sharer";
        All_Acks, desc="All acks received";
    }

    // Transaction Buffer Entry: per-request bookkeeping while a DMA
    // operation is in flight.
    structure(TBE, desc="...") {
        Address address, desc="Physical address";
        int NumAcks, default="0", desc="Number of Acks pending";
        DataBlock DataBlk, desc="Data";
    }

    // External C++ interface of the DMASequencer (completion callbacks).
    structure(DMASequencer, external = "yes") {
        void ackCallback();
        void dataCallback(DataBlock);
    }

    structure(TBETable, external = "yes") {
        TBE lookup(Address);
        void allocate(Address);
        void deallocate(Address);
        bool isPresent(Address);
    }

    MessageBuffer mandatoryQueue, ordered="false";
    MessageBuffer triggerQueue, ordered="true";
    TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
    // Single global state: only one DMA request is outstanding at a time,
    // so getState/setState below ignore the address argument.
    State cur_state;

    void set_tbe(TBE b);
    void unset_tbe();

    State getState(TBE tbe, Address addr) {
        return cur_state;
    }
    void setState(TBE tbe, Address addr, State state) {
        cur_state := state;
    }

    // The DMA controller never owns coherent copies of a block, so it
    // reports NotPresent for all addresses.
    AccessPermission getAccessPermission(Address addr) {
        return AccessPermission:NotPresent;
    }

    void setAccessPermission(Address addr, State state) {
    }

    DataBlock getDataBlock(Address addr), return_by_ref="yes" {
        error("DMA Controller does not support getDataBlock().\n");
    }

    out_port(reqToDirectory_out, RequestMsg, reqToDir, desc="...");
    out_port(respToDirectory_out, ResponseMsg, respToDir, desc="...");
    out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");

    // New DMA requests arriving from the sequencer.
    in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
        if (dmaRequestQueue_in.isReady()) {
            peek(dmaRequestQueue_in, SequencerMsg) {
                if (in_msg.Type == SequencerRequestType:LD ) {
                    trigger(Event:ReadRequest, in_msg.LineAddress,
                            TBEs[in_msg.LineAddress]);
                } else if (in_msg.Type == SequencerRequestType:ST) {
                    trigger(Event:WriteRequest, in_msg.LineAddress,
                            TBEs[in_msg.LineAddress]);
                } else {
                    error("Invalid request type");
                }
            }
        }
    }

    // Responses (data, DMA completion acks, invalidation acks) from the
    // directory / sharers.
    in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
        if (dmaResponseQueue_in.isReady()) {
            peek( dmaResponseQueue_in, ResponseMsg) {
                if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
                    trigger(Event:DMA_Ack, makeLineAddress(in_msg.Addr),
                            TBEs[makeLineAddress(in_msg.Addr)]);
                } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
                           in_msg.Type == CoherenceResponseType:DATA) {
                    trigger(Event:Data, makeLineAddress(in_msg.Addr),
                            TBEs[makeLineAddress(in_msg.Addr)]);
                } else if (in_msg.Type == CoherenceResponseType:ACK) {
                    trigger(Event:Inv_Ack, makeLineAddress(in_msg.Addr),
                            TBEs[makeLineAddress(in_msg.Addr)]);
                } else {
                    error("Invalid response type");
                }
            }
        }
    }

    // Trigger Queue: internal events (e.g. "all acks collected").
    in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
        if (triggerQueue_in.isReady()) {
            peek(triggerQueue_in, TriggerMsg) {
                if (in_msg.Type == TriggerType:ALL_ACKS) {
                    trigger(Event:All_Acks, in_msg.Addr, TBEs[in_msg.Addr]);
                } else {
                    error("Unexpected message");
                }
            }
        }
    }

    action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
        peek(dmaRequestQueue_in, SequencerMsg) {
            enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
                out_msg.Addr := in_msg.PhysicalAddress;
                out_msg.Type := CoherenceRequestType:DMA_READ;
                out_msg.DataBlk := in_msg.DataBlk;
                out_msg.Len := in_msg.Len;
                out_msg.Destination.add(map_Address_to_Directory(address));
                out_msg.Requestor := machineID;
                out_msg.RequestorMachine := MachineType:DMA;
                out_msg.MessageSize := MessageSizeType:Writeback_Control;
            }
        }
    }

    action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
        peek(dmaRequestQueue_in, SequencerMsg) {
            enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
                out_msg.Addr := in_msg.PhysicalAddress;
                out_msg.Type := CoherenceRequestType:DMA_WRITE;
                out_msg.DataBlk := in_msg.DataBlk;
                out_msg.Len := in_msg.Len;
                out_msg.Destination.add(map_Address_to_Directory(address));
                out_msg.Requestor := machineID;
                out_msg.RequestorMachine := MachineType:DMA;
                out_msg.MessageSize := MessageSizeType:Writeback_Control;
            }
        }
    }

    action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
        dma_sequencer.ackCallback();
    }

    // If no acks remain outstanding, schedule the ALL_ACKS trigger event.
    action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
        assert(is_valid(tbe));
        if (tbe.NumAcks == 0) {
            enqueue(triggerQueue_out, TriggerMsg) {
                out_msg.Addr := address;
                out_msg.Type := TriggerType:ALL_ACKS;
            }
        }
    }

    // Subtract the acks carried by this response from the pending count.
    action(u_updateAckCount, "u", desc="Update ack count") {
        peek(dmaResponseQueue_in, ResponseMsg) {
            assert(is_valid(tbe));
            tbe.NumAcks := tbe.NumAcks - in_msg.Acks;
        }
    }

    action( u_sendExclusiveUnblockToDir, "\u", desc="send exclusive unblock to directory") {
        enqueue(respToDirectory_out, ResponseMsg, latency=response_latency) {
            out_msg.Addr := address;
            out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
            out_msg.Destination.add(map_Address_to_Directory(address));
            out_msg.Sender := machineID;
            out_msg.SenderMachine := MachineType:DMA;
            out_msg.MessageSize := MessageSizeType:Writeback_Control;
        }
    }

    action(p_popRequestQueue, "p", desc="Pop request queue") {
        dmaRequestQueue_in.dequeue();
    }

    // BUG FIX: desc previously said "Pop request queue" (copy-paste from
    // p_popRequestQueue) although this action pops the response queue.
    action(p_popResponseQueue, "\p", desc="Pop response queue") {
        dmaResponseQueue_in.dequeue();
    }

    action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
        triggerQueue_in.dequeue();
    }

    action(t_updateTBEData, "t", desc="Update TBE Data") {
        peek(dmaResponseQueue_in, ResponseMsg) {
            assert(is_valid(tbe));
            tbe.DataBlk := in_msg.DataBlk;
        }
    }

    action(d_dataCallbackFromTBE, "/d", desc="data callback with data from TBE") {
        assert(is_valid(tbe));
        dma_sequencer.dataCallback(tbe.DataBlk);
    }

    action(v_allocateTBE, "v", desc="Allocate TBE entry") {
        TBEs.allocate(address);
        set_tbe(TBEs[address]);
    }

    action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
        TBEs.deallocate(address);
        unset_tbe();
    }

    action(z_stall, "z", desc="dma is busy..stall") {
        // do nothing
    }


    // ---- Read path ----------------------------------------------------

    transition(READY, ReadRequest, BUSY_RD) {
        s_sendReadRequest;
        v_allocateTBE;
        p_popRequestQueue;
    }

    transition(BUSY_RD, Inv_Ack) {
        u_updateAckCount;
        o_checkForCompletion;
        p_popResponseQueue;
    }

    // Data arrives: hand it to the sequencer and return to READY.
    transition(BUSY_RD, Data, READY) {
        t_updateTBEData;
        d_dataCallbackFromTBE;
        w_deallocateTBE;
        //u_updateAckCount;
        //o_checkForCompletion;
        p_popResponseQueue;
    }

    transition(BUSY_RD, All_Acks, READY) {
        d_dataCallbackFromTBE;
        //u_sendExclusiveUnblockToDir;
        w_deallocateTBE;
        p_popTriggerQueue;
    }

    // ---- Write path ---------------------------------------------------

    transition(READY, WriteRequest, BUSY_WR) {
        s_sendWriteRequest;
        v_allocateTBE;
        p_popRequestQueue;
    }

    transition(BUSY_WR, Inv_Ack) {
        u_updateAckCount;
        o_checkForCompletion;
        p_popResponseQueue;
    }

    transition(BUSY_WR, DMA_Ack) {
        u_updateAckCount; // actually increases
        o_checkForCompletion;
        p_popResponseQueue;
    }

    // All acks collected: notify the sequencer, unblock the directory,
    // and return to READY.
    transition(BUSY_WR, All_Acks, READY) {
        a_ackCallback;
        u_sendExclusiveUnblockToDir;
        w_deallocateTBE;
        p_popTriggerQueue;
    }
}