// gem5: src/mem/protocol/MOESI_CMP_directory-dma.sm

machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer,
  int request_latency = 14,
  int response_latency = 14
{

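  // Three virtual networks in each direction. Only responseFromDir,
  // reqToDir, and respToDir carry real traffic; goo1, goo2, and foo1
  // appear to be placeholders so the DMA controller participates in
  // all three virtual networks (see the assert(false) handlers below).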
  MessageBuffer goo1, network="From", virtual_network="0", ordered="false";
  MessageBuffer goo2, network="From", virtual_network="1", ordered="false";
  MessageBuffer responseFromDir, network="From", virtual_network="2", ordered="false";

  MessageBuffer foo1, network="To", virtual_network="0", ordered="false";
  MessageBuffer reqToDir, network="To", virtual_network="1", ordered="false";
  MessageBuffer respToDir, network="To", virtual_network="2", ordered="false";

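  // The controller services one request at a time, so three states
  // suffice: idle, mid-read, and mid-write.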
  enumeration(State, desc="DMA states", default="DMA_State_READY") {
    READY, desc="Ready to accept a new request";
    BUSY_RD, desc="Busy: currently processing a read request";
    BUSY_WR, desc="Busy: currently processing a write request";
  }

  enumeration(Event, desc="DMA events") {
    ReadRequest, desc="A new read request";
    WriteRequest, desc="A new write request";
    Data, desc="Data from a DMA memory read";
    DMA_Ack, desc="DMA write to memory completed";
    Inv_Ack, desc="Invalidation Ack from a sharer";
    All_Acks, desc="All acks received";
  }

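  // Transaction Buffer Entry for the single in-flight request: the
  // line address, the number of acks still expected, and the data
  // block being read or written.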
  structure(TBE, desc="DMA transaction buffer entry") {
    Address address, desc="Physical address";
    int NumAcks, default="0", desc="Number of Acks pending";
    DataBlock DataBlk, desc="Data";
  }

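  // Interfaces implemented in C++ outside SLICC: the DMASequencer
  // that issues requests and receives completion callbacks, and the
  // table holding transaction buffer entries.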
  external_type(DMASequencer) {
    void ackCallback();
    void dataCallback(DataBlock);
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  MessageBuffer mandatoryQueue, ordered="false";
  MessageBuffer triggerQueue, ordered="true";
  TBETable TBEs, template_hack="<DMA_TBE>";
  State cur_state;

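  // With at most one outstanding transaction, a single state variable
  // stands in for a per-address state table; the address argument is
  // ignored.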
  State getState(Address addr) {
    return cur_state;
  }
  void setState(Address addr, State state) {
    cur_state := state;
  }

  out_port(reqToDirectory_out, RequestMsg, reqToDir, desc="Requests to the directory");
  out_port(respToDirectory_out, ResponseMsg, respToDir, desc="Responses to the directory");
  out_port(foo1_out, ResponseMsg, foo1, desc="Unused in this machine");
  out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="Internal trigger events");

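  // No coherence requests should ever arrive at the DMA controller on
  // these networks; a message here indicates misrouted traffic, hence
  // the assert(false).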
  in_port(goo1_in, RequestMsg, goo1) {
    if (goo1_in.isReady()) {
      peek(goo1_in, RequestMsg) {
        assert(false);
      }
    }
  }

  in_port(goo2_in, RequestMsg, goo2) {
    if (goo2_in.isReady()) {
      peek(goo2_in, RequestMsg) {
        assert(false);
      }
    }
  }

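  // Requests from the DMASequencer arrive on the mandatory queue:
  // loads trigger ReadRequest and stores trigger WriteRequest, keyed
  // on the cache-line address.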
  in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="Requests from the DMA sequencer") {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, SequencerMsg) {
        if (in_msg.Type == SequencerRequestType:LD) {
          trigger(Event:ReadRequest, in_msg.LineAddress);
        } else if (in_msg.Type == SequencerRequestType:ST) {
          trigger(Event:WriteRequest, in_msg.LineAddress);
        } else {
          error("Invalid request type");
        }
      }
    }
  }

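  // Responses from the directory: DMA_ACK signals a completed write,
  // DATA/DATA_EXCLUSIVE deliver read data, and ACK counts an
  // invalidation acknowledgment from a sharer.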
  in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="Responses from the directory") {
    if (dmaResponseQueue_in.isReady()) {
      peek(dmaResponseQueue_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
          trigger(Event:DMA_Ack, makeLineAddress(in_msg.Address));
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
                   in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, makeLineAddress(in_msg.Address));
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Inv_Ack, makeLineAddress(in_msg.Address));
        } else {
          error("Invalid response type");
        }
      }
    }
  }

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {
        if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_Acks, in_msg.Address);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

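  // Actions. The two send actions forward the sequencer's request,
  // including the byte count (Len), to the directory that owns the
  // address.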
  action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
    peek(dmaRequestQueue_in, SequencerMsg) {
      enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
        out_msg.Address := in_msg.PhysicalAddress;
        out_msg.Type := CoherenceRequestType:DMA_READ;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := in_msg.Len;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Requestor := machineID;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
    peek(dmaRequestQueue_in, SequencerMsg) {
      enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
        out_msg.Address := in_msg.PhysicalAddress;
        out_msg.Type := CoherenceRequestType:DMA_WRITE;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := in_msg.Len;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Requestor := machineID;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(a_ackCallback, "a", desc="Notify the DMA sequencer that the write request completed") {
    dma_sequencer.ackCallback();
  }

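  // Completion is detected by counting acks down to zero; when the
  // count reaches zero, an ALL_ACKS trigger is posted so the final
  // steps run as their own event.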
  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    if (TBEs[address].NumAcks == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        out_msg.Type := TriggerType:ALL_ACKS;
      }
    }
  }

  action(u_updateAckCount, "u", desc="Update ack count") {
    peek(dmaResponseQueue_in, ResponseMsg) {
      TBEs[address].NumAcks := TBEs[address].NumAcks - in_msg.Acks;
    }
  }

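  // Once a write completes, the directory is presumably still blocking
  // the line on our behalf; UNBLOCK_EXCLUSIVE releases it.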
  action(u_sendExclusiveUnblockToDir, "\u", desc="Send exclusive unblock to directory") {
    enqueue(respToDirectory_out, ResponseMsg, latency=response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(p_popRequestQueue, "p", desc="Pop request queue") {
    dmaRequestQueue_in.dequeue();
  }

  action(p_popResponseQueue, "\p", desc="Pop response queue") {
    dmaResponseQueue_in.dequeue();
  }

  action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
    triggerQueue_in.dequeue();
  }

  action(t_updateTBEData, "t", desc="Update TBE Data") {
    peek(dmaResponseQueue_in, ResponseMsg) {
      TBEs[address].DataBlk := in_msg.DataBlk;
    }
  }

  action(d_dataCallbackFromTBE, "/d", desc="Data callback with data from TBE") {
    dma_sequencer.dataCallback(TBEs[address].DataBlk);
  }

  action(v_allocateTBE, "v", desc="Allocate TBE entry") {
    TBEs.allocate(address);
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
    TBEs.deallocate(address);
  }

  action(z_stall, "z", desc="DMA is busy; stall") {
    // do nothing
  }

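  // Transitions. A read moves READY -> BUSY_RD and returns to READY
  // when the data arrives (or when all acks are in); a write moves
  // READY -> BUSY_WR and completes on All_Acks, after the directory's
  // DMA_Ack and any sharer invalidation acks have been counted.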
  transition(READY, ReadRequest, BUSY_RD) {
    s_sendReadRequest;
    v_allocateTBE;
    p_popRequestQueue;
  }

  transition(BUSY_RD, Inv_Ack) {
    u_updateAckCount;
    o_checkForCompletion;
    p_popResponseQueue;
  }

  transition(BUSY_RD, Data, READY) {
    t_updateTBEData;
    d_dataCallbackFromTBE;
    w_deallocateTBE;
    //u_updateAckCount;
    //o_checkForCompletion;
    p_popResponseQueue;
  }

  transition(BUSY_RD, All_Acks, READY) {
    d_dataCallbackFromTBE;
    //u_sendExclusiveUnblockToDir;
    w_deallocateTBE;
    p_popTriggerQueue;
  }

  transition(READY, WriteRequest, BUSY_WR) {
    s_sendWriteRequest;
    v_allocateTBE;
    p_popRequestQueue;
  }

  transition(BUSY_WR, Inv_Ack) {
    u_updateAckCount;
    o_checkForCompletion;
    p_popResponseQueue;
  }

  transition(BUSY_WR, DMA_Ack) {
    u_updateAckCount; // actually increases
    o_checkForCompletion;
    p_popResponseQueue;
  }

  transition(BUSY_WR, All_Acks, READY) {
    a_ackCallback;
    u_sendExclusiveUnblockToDir;
    w_deallocateTBE;
    p_popTriggerQueue;
  }
}