/*
 * Copyright (c) 2009-2013 Mark D. Hill and David A. Wood
 * Copyright (c) 2010-2011 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

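// DMA controller for the MOESI_CMP_directory protocol. It accepts LD/ST
// requests from the attached DMASequencer, forwards them to the directory
// as DMA_READ/DMA_WRITE coherence requests, collects the returned data and
// acks, and reports completion back through the sequencer's callbacks.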
machine(MachineType:DMA, "DMA Controller")
    : DMASequencer * dma_sequencer;
      Cycles request_latency := 14;
      Cycles response_latency := 14;

      MessageBuffer * responseFromDir, network="From", virtual_network="2",
            vnet_type="response";

      MessageBuffer * reqToDir, network="To", virtual_network="1",
            vnet_type="request";
      MessageBuffer * respToDir, network="To", virtual_network="2",
            vnet_type="dmaresponse";

      MessageBuffer * mandatoryQueue;
      MessageBuffer * triggerQueue;
{
  state_declaration(State, desc="DMA states", default="DMA_State_READY") {
    READY, AccessPermission:Invalid, desc="Ready to accept a new request";
    BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
    BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
  }

  enumeration(Event, desc="DMA events") {
    ReadRequest,  desc="A new read request";
    WriteRequest, desc="A new write request";
    Data,         desc="Data from a DMA memory read";
    DMA_Ack,      desc="DMA write to memory completed";
    Inv_Ack,      desc="Invalidation Ack from a sharer";
    All_Acks,     desc="All acks received";
  }

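  // A TBE (transaction buffer entry) tracks a single outstanding DMA
  // request: its address, the data being read or written, and the number
  // of acks still pending before the request can complete.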
  structure(TBE, desc="...") {
    Addr address, desc="Physical address";
    int NumAcks, default="0", desc="Number of Acks pending";
    DataBlock DataBlk, desc="Data";
  }

  structure(TBETable, external = "yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
76
77 Tick clockEdge();
78 void set_tbe(TBE b);
79 void unset_tbe();
80 void wakeUpAllBuffers();
81 MachineID mapAddressToMachine(Addr addr, MachineType mtype);
82
83 State getState(TBE tbe, Addr addr) {
84 return cur_state;
85 }
86 void setState(TBE tbe, Addr addr, State state) {
87 cur_state := state;
88 }
89
90 AccessPermission getAccessPermission(Addr addr) {
91 return AccessPermission:NotPresent;
92 }
93
94 void setAccessPermission(Addr addr, State state) {
95 }
96
97 void functionalRead(Addr addr, Packet *pkt) {
98 error("DMA does not support functional read.");
99 }
100
101 int functionalWrite(Addr addr, Packet *pkt) {
102 error("DMA does not support functional write.");
103 }
104
105 out_port(reqToDirectory_out, RequestMsg, reqToDir, desc="...");
106 out_port(respToDirectory_out, ResponseMsg, respToDir, desc="...");
107 out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");
108
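  // Request Queue - LD/ST requests arriving from the DMASequencer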
  in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
    if (dmaRequestQueue_in.isReady(clockEdge())) {
      peek(dmaRequestQueue_in, SequencerMsg) {
        if (in_msg.Type == SequencerRequestType:LD) {
          trigger(Event:ReadRequest, in_msg.LineAddress,
                  TBEs[in_msg.LineAddress]);
        } else if (in_msg.Type == SequencerRequestType:ST) {
          trigger(Event:WriteRequest, in_msg.LineAddress,
                  TBEs[in_msg.LineAddress]);
        } else {
          error("Invalid request type");
        }
      }
    }
  }

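  // Response Queue - data, DMA acks, and invalidation acks returned over
  // the response network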
  in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
    if (dmaResponseQueue_in.isReady(clockEdge())) {
      peek(dmaResponseQueue_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
          trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
                  TBEs[makeLineAddress(in_msg.addr)]);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
                   in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, makeLineAddress(in_msg.addr),
                  TBEs[makeLineAddress(in_msg.addr)]);
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Inv_Ack, makeLineAddress(in_msg.addr),
                  TBEs[makeLineAddress(in_msg.addr)]);
        } else {
          error("Invalid response type");
        }
      }
    }
  }

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
    if (triggerQueue_in.isReady(clockEdge())) {
      peek(triggerQueue_in, TriggerMsg) {
        if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_Acks, in_msg.addr, TBEs[in_msg.addr]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

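  // Actions
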
  action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
    peek(dmaRequestQueue_in, SequencerMsg) {
      enqueue(reqToDirectory_out, RequestMsg, request_latency) {
        out_msg.addr := in_msg.PhysicalAddress;
        out_msg.Type := CoherenceRequestType:DMA_READ;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := in_msg.Len;
        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := MachineType:DMA;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
    peek(dmaRequestQueue_in, SequencerMsg) {
      enqueue(reqToDirectory_out, RequestMsg, request_latency) {
        out_msg.addr := in_msg.PhysicalAddress;
        out_msg.Type := CoherenceRequestType:DMA_WRITE;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := in_msg.Len;
        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := MachineType:DMA;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(a_ackCallback, "a", desc="Notify the DMA sequencer that the write request completed") {
    dma_sequencer.ackCallback(address);
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumAcks == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.addr := address;
        out_msg.Type := TriggerType:ALL_ACKS;
      }
    }
  }

  action(u_updateAckCount, "u", desc="Update ack count") {
    peek(dmaResponseQueue_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.NumAcks := tbe.NumAcks - in_msg.Acks;
    }
  }

  action(u_sendExclusiveUnblockToDir, "\u", desc="send exclusive unblock to directory") {
    enqueue(respToDirectory_out, ResponseMsg, response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:DMA;
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(p_popRequestQueue, "p", desc="Pop request queue") {
    dmaRequestQueue_in.dequeue(clockEdge());
  }

  action(p_popResponseQueue, "\p", desc="Pop response queue") {
    dmaResponseQueue_in.dequeue(clockEdge());
  }

  action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
    triggerQueue_in.dequeue(clockEdge());
  }

  action(t_updateTBEData, "t", desc="Update TBE Data") {
    peek(dmaResponseQueue_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.DataBlk := in_msg.DataBlk;
    }
  }

  action(d_dataCallbackFromTBE, "/d", desc="data callback with data from TBE") {
    assert(is_valid(tbe));
    dma_sequencer.dataCallback(tbe.DataBlk, address);
  }

  action(v_allocateTBE, "v", desc="Allocate TBE entry") {
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(zz_stallAndWaitRequestQueue, "zz", desc="...") {
    stall_and_wait(dmaRequestQueue_in, address);
  }

  action(wkad_wakeUpAllDependents, "wkad", desc="wake-up all dependents") {
    wakeUpAllBuffers();
  }

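  // Transitions
  //
  // Read:  READY -ReadRequest-> BUSY_RD, which completes back to READY on
  //        Data (or All_Acks).
  // Write: READY -WriteRequest-> BUSY_WR, which collects Inv_Acks and the
  //        directory's DMA_Ack and completes back to READY on All_Acks.
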
  transition(READY, ReadRequest, BUSY_RD) {
    s_sendReadRequest;
    v_allocateTBE;
    p_popRequestQueue;
  }

  transition(BUSY_RD, Inv_Ack) {
    u_updateAckCount;
    o_checkForCompletion;
    p_popResponseQueue;
  }

  transition(BUSY_RD, Data, READY) {
    t_updateTBEData;
    d_dataCallbackFromTBE;
    w_deallocateTBE;
    //u_updateAckCount;
    //o_checkForCompletion;
    p_popResponseQueue;
    wkad_wakeUpAllDependents;
  }

  transition(BUSY_RD, All_Acks, READY) {
    d_dataCallbackFromTBE;
    //u_sendExclusiveUnblockToDir;
    w_deallocateTBE;
    p_popTriggerQueue;
    wkad_wakeUpAllDependents;
  }

  transition(READY, WriteRequest, BUSY_WR) {
    s_sendWriteRequest;
    v_allocateTBE;
    p_popRequestQueue;
  }

  transition(BUSY_WR, Inv_Ack) {
    u_updateAckCount;
    o_checkForCompletion;
    p_popResponseQueue;
  }

  transition(BUSY_WR, DMA_Ack) {
    u_updateAckCount; // actually increases
    o_checkForCompletion;
    p_popResponseQueue;
  }

  transition(BUSY_WR, All_Acks, READY) {
    a_ackCallback;
    u_sendExclusiveUnblockToDir;
    w_deallocateTBE;
    p_popTriggerQueue;
    wkad_wakeUpAllDependents;
  }

  transition({BUSY_RD, BUSY_WR}, {ReadRequest, WriteRequest}) {
    zz_stallAndWaitRequestQueue;
  }
}