// gem5 Ruby protocol source: src/mem/protocol/MOESI_CMP_directory-dma.sm
// (captured at commit: "ruby: cleaning up RubyQueue and RubyNetwork dprintfs")
// DMA controller for the MOESI_CMP_directory protocol: issues DMA
// read/write requests to the directory and collects responses/acks.
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer,
  int request_latency = 14,
  int response_latency = 14
{

  // Incoming virtual networks.  Nothing is expected on networks 0 and 1
  // (the goo1_in/goo2_in ports below assert(false) on any arrival); the
  // directory's responses arrive on virtual network 2.
  MessageBuffer goo1, network="From", virtual_network="0", ordered="false";
  MessageBuffer goo2, network="From", virtual_network="1", ordered="false";
  MessageBuffer responseFromDir, network="From", virtual_network="2", ordered="false";

  // Outgoing virtual networks: requests to the directory on network 1,
  // unblock responses on network 2.  foo1/foo1_out is never enqueued to
  // by any action in this file.
  MessageBuffer foo1, network="To", virtual_network="0", ordered="false";
  MessageBuffer reqToDir, network="To", virtual_network="1", ordered="false";
  MessageBuffer respToDir, network="To", virtual_network="2", ordered="false";
15
16 enumeration(State, desc="DMA states", default="DMA_State_READY") {
17 READY, desc="Ready to accept a new request";
18 BUSY_RD, desc="Busy: currently processing a request";
19 BUSY_WR, desc="Busy: currently processing a request";
20 }
21
  // Events that drive the DMA state machine (triggered by the in_ports).
  enumeration(Event, desc="DMA events") {
    ReadRequest, desc="A new read request";
    WriteRequest, desc="A new write request";
    Data, desc="Data from a DMA memory read";
    DMA_Ack, desc="DMA write to memory completed";
    Inv_Ack, desc="Invalidation Ack from a sharer";
    All_Acks, desc="All acks received";
  }
30
  // Transaction Buffer Entry: per-request bookkeeping held while a DMA
  // transfer is in flight (allocated in v_allocateTBE, freed in
  // w_deallocateTBE).
  structure(TBE, desc="...") {
    Address address, desc="Physical address";
    int NumAcks, default="0", desc="Number of Acks pending";
    DataBlock DataBlk, desc="Data";
  }
36
  // Interface to the DMASequencer that issued the request; used to signal
  // write completion (ackCallback) and deliver read data (dataCallback).
  external_type(DMASequencer) {
    void ackCallback();
    void dataCallback(DataBlock);
  }

  // Table of in-flight transactions, keyed by line address.
  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }
48
  // Queue of incoming requests from the DMASequencer.
  MessageBuffer mandatoryQueue, ordered="false";
  // Internal (ordered) queue used to signal that all expected acks arrived.
  MessageBuffer triggerQueue, ordered="true";
  TBETable TBEs, template_hack="<DMA_TBE>";
  // Single shared state: the DMA engine processes one request at a time,
  // so per-address state is unnecessary (getState ignores addr).
  State cur_state;

  void set_tbe(TBE b);
  void unset_tbe();

  // Every address reports the controller's single current state.
  State getState(TBE tbe, Address addr) {
    return cur_state;
  }
  void setState(TBE tbe, Address addr, State state) {
    cur_state := state;
  }
63
  // Outgoing ports; virtual-network assignments follow the "To"
  // MessageBuffer declarations above.
  out_port(reqToDirectory_out, RequestMsg, reqToDir, desc="...");
  out_port(respToDirectory_out, ResponseMsg, respToDir, desc="...");
  out_port(foo1_out, ResponseMsg, foo1, desc="...");
  out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");
68
  // Nothing should ever arrive for the DMA controller on virtual
  // networks 0 and 1; trap immediately if a message shows up.
  in_port(goo1_in, RequestMsg, goo1) {
    if (goo1_in.isReady()) {
      peek(goo1_in, RequestMsg) {
        assert(false);
      }
    }
  }

  in_port(goo2_in, RequestMsg, goo2) {
    if (goo2_in.isReady()) {
      peek(goo2_in, RequestMsg) {
        assert(false);
      }
    }
  }
84
  // Requests from the DMASequencer: map LD -> ReadRequest and
  // ST -> WriteRequest, triggering on the request's line address.
  in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, SequencerMsg) {
        if (in_msg.Type == SequencerRequestType:LD ) {
          trigger(Event:ReadRequest, in_msg.LineAddress,
                  TBEs[in_msg.LineAddress]);
        } else if (in_msg.Type == SequencerRequestType:ST) {
          trigger(Event:WriteRequest, in_msg.LineAddress,
                  TBEs[in_msg.LineAddress]);
        } else {
          error("Invalid request type");
        }
      }
    }
  }
100
  // Responses from the directory (virtual network 2).  The message's
  // Address may be byte-granular, so trigger on its line address.
  in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
    if (dmaResponseQueue_in.isReady()) {
      peek( dmaResponseQueue_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
          // Directory confirms a DMA write completed.
          trigger(Event:DMA_Ack, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
                   in_msg.Type == CoherenceResponseType:DATA) {
          // Data returned for an outstanding DMA read.
          trigger(Event:Data, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          // Invalidation ack from a sharer.
          trigger(Event:Inv_Ack, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else {
          error("Invalid response type");
        }
      }
    }
  }
120
  // Trigger Queue: internal port that fires All_Acks once
  // o_checkForCompletion has observed the ack count reach zero.
  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {
        if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_Acks, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }
133
  action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
    peek(dmaRequestQueue_in, SequencerMsg) {
      enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
        // The message carries the byte-granularity PhysicalAddress (plus
        // Len), while the destination directory is looked up with the
        // line-granularity trigger `address`.
        out_msg.Address := in_msg.PhysicalAddress;
        out_msg.Type := CoherenceRequestType:DMA_READ;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := in_msg.Len;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Requestor := machineID;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  // Mirrors s_sendReadRequest, but issues a DMA_WRITE carrying the data
  // block to be written.
  action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
    peek(dmaRequestQueue_in, SequencerMsg) {
      enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
        out_msg.Address := in_msg.PhysicalAddress;
        out_msg.Type := CoherenceRequestType:DMA_WRITE;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := in_msg.Len;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Requestor := machineID;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
161
  // Report write completion back to the issuing DMASequencer.
  action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
    dma_sequencer.ackCallback();
  }

  // If no acks remain outstanding, schedule the ALL_ACKS trigger on the
  // internal trigger queue.
  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumAcks == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        out_msg.Type := TriggerType:ALL_ACKS;
      }
    }
  }

  // Subtract the acks carried by the incoming response.  NOTE(review):
  // the BUSY_WR/DMA_Ack transition comments that this "actually
  // increases" the count — presumably the directory sends a negative
  // Acks value there; confirm against the directory machine.
  action(u_updateAckCount, "u", desc="Update ack count") {
    peek(dmaResponseQueue_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.NumAcks := tbe.NumAcks - in_msg.Acks;
    }
  }

  // Tell the directory the DMA transaction is finished so it can
  // unblock the line.
  action( u_sendExclusiveUnblockToDir, "\u", desc="send exclusive unblock to directory") {
    enqueue(respToDirectory_out, ResponseMsg, latency=response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
191
192 action(p_popRequestQueue, "p", desc="Pop request queue") {
193 dmaRequestQueue_in.dequeue();
194 }
195
196 action(p_popResponseQueue, "\p", desc="Pop request queue") {
197 dmaResponseQueue_in.dequeue();
198 }
199
200 action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
201 triggerQueue_in.dequeue();
202 }
203
  // Stash the response's data block in the TBE for the later callback.
  action(t_updateTBEData, "t", desc="Update TBE Data") {
    peek(dmaResponseQueue_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.DataBlk := in_msg.DataBlk;
    }
  }

  // Deliver the buffered read data to the DMASequencer.
  action(d_dataCallbackFromTBE, "/d", desc="data callback with data from TBE") {
    assert(is_valid(tbe));
    dma_sequencer.dataCallback(tbe.DataBlk);
  }

  action(v_allocateTBE, "v", desc="Allocate TBE entry") {
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  // Placeholder action: leave the message queued and retry later.
  action(z_stall, "z", desc="dma is busy..stall") {
    // do nothing
  }
229
230
231
  // READY: accept a new read, send it to the directory, go busy.
  transition(READY, ReadRequest, BUSY_RD) {
    s_sendReadRequest;
    v_allocateTBE;
    p_popRequestQueue;
  }

  // Count invalidation acks arriving while the read is outstanding.
  transition(BUSY_RD, Inv_Ack) {
    u_updateAckCount;
    o_checkForCompletion;
    p_popResponseQueue;
  }

  // Read data arrived: hand it to the sequencer and finish the read.
  transition(BUSY_RD, Data, READY) {
    t_updateTBEData;
    d_dataCallbackFromTBE;
    w_deallocateTBE;
    //u_updateAckCount;
    //o_checkForCompletion;
    p_popResponseQueue;
  }

  // All expected acks arrived: complete the read with the buffered data.
  transition(BUSY_RD, All_Acks, READY) {
    d_dataCallbackFromTBE;
    //u_sendExclusiveUnblockToDir;
    w_deallocateTBE;
    p_popTriggerQueue;
  }

  // READY: accept a new write, send it to the directory, go busy.
  transition(READY, WriteRequest, BUSY_WR) {
    s_sendWriteRequest;
    v_allocateTBE;
    p_popRequestQueue;
  }

  // Count invalidation acks arriving while the write is outstanding.
  transition(BUSY_WR, Inv_Ack) {
    u_updateAckCount;
    o_checkForCompletion;
    p_popResponseQueue;
  }

  // Directory confirmed the write; adjust the outstanding-ack count.
  transition(BUSY_WR, DMA_Ack) {
    u_updateAckCount; // actually increases
    o_checkForCompletion;
    p_popResponseQueue;
  }

  // Write fully acknowledged: notify the sequencer and unblock the
  // directory before returning to READY.
  transition(BUSY_WR, All_Acks, READY) {
    a_ackCallback;
    u_sendExclusiveUnblockToDir;
    w_deallocateTBE;
    p_popTriggerQueue;
  }
}