O3, Ruby: Forward invalidations from Ruby to O3 CPU
src/mem/protocol/MI_example-cache.sm
machine(L1Cache, "MI Example L1 Cache")
: Sequencer * sequencer,
  CacheMemory * cacheMemory,
  int cache_response_latency = 12,
  int issue_latency = 2,
  bool send_evictions
{
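
  // Configuration notes: cache_response_latency and issue_latency are
  // expressed in Ruby cycles. send_evictions should be enabled when the
  // attached core needs to observe invalidations and evictions (the O3
  // CPU this patch targets); see forward_eviction_to_cpu below.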

  // NETWORK BUFFERS
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="true", vnet_type="request";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="true", vnet_type="response";

  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="true", vnet_type="forward";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="true", vnet_type="response";

  // STATES
  state_declaration(State, desc="Cache states") {
    I, AccessPermission:Invalid, desc="Not Present/Invalid";
    II, AccessPermission:Busy, desc="Not Present/Invalid, issued PUT";
    M, AccessPermission:Read_Write, desc="Modified";
    MI, AccessPermission:Busy, desc="Modified, issued PUT";
    MII, AccessPermission:Busy, desc="Modified, issued PUTX, received nack";

    IS, AccessPermission:Busy, desc="Issued request for LOAD/IFETCH";
    IM, AccessPermission:Busy, desc="Issued request for STORE/ATOMIC";
  }
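
  // Note: I and M are the only stable states in this protocol; the
  // remaining states are transient, held only while a demand request or
  // writeback is outstanding.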

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // From processor
    Load,       desc="Load request from processor";
    Ifetch,     desc="Ifetch request from processor";
    Store,      desc="Store request from processor";

    Data,       desc="Data from network";
    Fwd_GETX,   desc="Forward from network";

    Inv,        desc="Invalidate request from dir";

    Replacement,    desc="Replace a block";
    Writeback_Ack,  desc="Ack from the directory for a writeback";
    Writeback_Nack, desc="Nack from the directory for a writeback";
  }

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState,  desc="cache state";
    bool Dirty,        desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="Data in the block";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState,    desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // STRUCTURES
  TBETable TBEs, template_hack="<L1Cache_TBE>";

  // PROTOTYPES
  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();

  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", cacheMemory.lookup(address));
  }

  // FUNCTIONS
  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    }
    else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    else {
      return State:I;
    }
  }

  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Address addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    return getCacheEntry(addr).DataBlk;
  }

  GenericMachineType getNondirectHitMachType(MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      //
      // NOTE direct local hits should not call this
      //
      return GenericMachineType:L1Cache_wCC;
    } else {
      return ConvertMachToGenericMach(machineIDToMachineType(sender));
    }
  }
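
  // The generic machine type computed above is passed to the sequencer's
  // read/write callbacks (rx_load_hit / sx_store_hit below) so the fill can
  // be attributed to where the data came from, e.g. a cache-to-cache
  // transfer from another L1 rather than the directory.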

  // NETWORK PORTS

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);

  in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
    if (forwardRequestNetwork_in.isReady()) {
      peek(forwardRequestNetwork_in, RequestMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Fwd_GETX, in_msg.Address, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
        }
        else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.Address, cache_entry, tbe);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToCache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address, cache_entry, tbe);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  // Mandatory Queue
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        Entry cache_entry := getCacheEntry(in_msg.LineAddress);
        if (is_invalid(cache_entry) &&
            cacheMemory.cacheAvail(in_msg.LineAddress) == false) {
          // make room for the block
          trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
                  getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
                  TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
        }
        else {
          trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                  cache_entry, TBEs[in_msg.LineAddress]);
        }
      }
    }
  }
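
  // Note: when the requested line is absent and its set is full, the port
  // above triggers Replacement on the victim chosen by cacheProbe() instead
  // of the demand event; the request is not dequeued and is retried once
  // the victim's writeback has freed a way.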

  // ACTIONS

  action(a_issueRequest, "a", desc="Issue a request") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Control;
    }
  }

  action(b_issuePUT, "b", desc="Issue a PUT request") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(cache_entry));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(cacheMemory.allocate(address, new Entry));
    }
  }

  action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
    if (is_valid(cache_entry)) {
      cacheMemory.deallocate(address);
      unset_cache_entry();
    }
  }

  action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
    mandatoryQueue_in.dequeue();
  }

  action(n_popResponseQueue, "n", desc="Pop the response queue") {
    profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
  }

  action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
    profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
  }

  action(p_profileMiss, "p", desc="Profile cache miss") {
    peek(mandatoryQueue_in, RubyRequest) {
      cacheMemory.profileMiss(in_msg);
    }
  }

  action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address,
                           GenericMachineType:L1Cache,
                           cache_entry.DataBlk);
  }

  action(rx_load_hit, "rx", desc="External load completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
      sequencer.readCallback(address,
                             getNondirectHitMachType(in_msg.Sender),
                             cache_entry.DataBlk);
    }
  }

  action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.writeCallback(address,
                            GenericMachineType:L1Cache,
                            cache_entry.DataBlk);
  }

  action(sx_store_hit, "sx", desc="External store completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
      sequencer.writeCallback(address,
                              getNondirectHitMachType(in_msg.Sender),
                              cache_entry.DataBlk);
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to the cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }
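
  // This action implements the forwarding this patch adds: whenever a
  // valid block is invalidated (Inv, Fwd_GETX) or replaced, the sequencer's
  // evictionCallback() notifies the attached CPU, allowing an out-of-order
  // core such as O3 to squash speculatively executed loads to that address.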

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    tbe.DataBlk := cache_entry.DataBlk;
  }

  action(z_stall, "z", desc="stall") {
    // do nothing
  }

  // TRANSITIONS

  transition({IS, IM, MI, II}, {Load, Ifetch, Store, Replacement}) {
    z_stall;
  }

  transition({IS, IM}, {Fwd_GETX, Inv}) {
    z_stall;
  }

  transition(MI, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(M, Store) {
    s_store_hit;
    m_popMandatoryQueue;
  }

  transition(M, {Load, Ifetch}) {
    r_load_hit;
    m_popMandatoryQueue;
  }

  transition(I, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(I, Store, IM) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(I, {Load, Ifetch}, IS) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(IS, Data, M) {
    u_writeDataToCache;
    rx_load_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IM, Data, M) {
    u_writeDataToCache;
    sx_store_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(M, Fwd_GETX, I) {
    e_sendData;
    forward_eviction_to_cpu;
    o_popForwardedRequestQueue;
  }

  transition(I, Replacement) {
    h_deallocateL1CacheBlock;
  }

  transition(M, {Replacement, Inv}, MI) {
    v_allocateTBE;
    b_issuePUT;
    x_copyDataFromCacheToTBE;
    forward_eviction_to_cpu;
    h_deallocateL1CacheBlock;
  }

  transition(MI, Writeback_Ack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(MI, Fwd_GETX, II) {
    ee_sendDataFromTBE;
    o_popForwardedRequestQueue;
  }

  transition(MI, Writeback_Nack, MII) {
    o_popForwardedRequestQueue;
  }

  transition(MII, Fwd_GETX, I) {
    ee_sendDataFromTBE;
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(II, Writeback_Nack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }
}