// SLICC controller: the L1 cache of the two-state MI example protocol.
// NOTE(review): this listing is non-contiguous — the leading number on each
// line is the original file's line number, and some lines are elided.
2 machine(L1Cache, "MI Example L1 Cache")
// Controller parameters: the processor-side Sequencer, the cache array,
// and the latency (cycles) applied to responses sourced from this cache.
3 : Sequencer * sequencer,
4 CacheMemory * cacheMemory,
5 int cache_response_latency = 12,
// Outgoing ("To" network) ordered buffers: coherence requests (vnet 2)
// and data/ack responses (vnet 4) issued by this cache.
10 MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="true";
11 MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="true";
// Incoming ("From" network) ordered buffers: requests forwarded by the
// directory (vnet 3) and data responses destined for this cache (vnet 4).
13 MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="true";
14 MessageBuffer responseToCache, network="From", virtual_network="4", ordered="true";
// Cache-line states: one stable valid state (M — declared on a line elided
// from this listing, but referenced below in setState) plus transient
// states used while a request or writeback is in flight.
17 enumeration(State, desc="Cache states") {
18 I, desc="Not Present/Invalid";
// Transient: invalid, but a PUT (writeback) is still outstanding.
19 II, desc="Not Present/Invalid, issued PUT";
// Transient: line was Modified; writeback (PUT) issued, awaiting ack/nack.
21 MI, desc="Modified, issued PUT";
22 MII, desc="Modified, issued PUTX, received nack";
// Transient: miss outstanding for a read (IS) or a write (IM).
24 IS, desc="Issued request for LOAD/IFETCH";
25 IM, desc="Issued request for STORE/ATOMIC";
// Events that trigger transitions: processor requests from the mandatory
// queue, messages from the network, and internally generated replacements.
29 enumeration(Event, desc="Cache events") {
32 Load, desc="Load request from processor";
33 Ifetch, desc="Ifetch request from processor";
34 Store, desc="Store request from processor";
// Data response arriving on responseToCache (vnet 4).
36 Data, desc="Data from network";
// Directory forwarded another cache's GETX to us (we hold the line).
37 Fwd_GETX, desc="Forward from network";
39 Inv, desc="Invalidate request from dir";
// Generated locally when a miss needs a victim's cache frame.
41 Replacement, desc="Replace a block";
42 Writeback_Ack, desc="Ack from the directory for a writeback";
43 Writeback_Nack, desc="Nack from the directory for a writeback";
46 // STRUCTURE DEFINITIONS
// Unordered queue of CPU requests fed by the Sequencer.
48 MessageBuffer mandatoryQueue, ordered="false";
// Per-line cache entry: protocol state, dirty flag, and the data block.
51 structure(Entry, desc="...", interface="AbstractCacheEntry") {
52 State CacheState, desc="cache state";
53 bool Dirty, desc="Is the data dirty (different than memory)?";
54 DataBlock DataBlk, desc="Data in the block";
// Transaction Buffer Entry: tracks an in-flight miss or writeback; holds
// a data copy so a forwarded request can be answered mid-writeback.
59 structure(TBE, desc="...") {
60 State TBEState, desc="Transient state";
61 DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
// C++-implemented table of TBEs, keyed by block address.
64 external_type(TBETable) {
66 void allocate(Address);
67 void deallocate(Address);
68 bool isPresent(Address);
74 TBETable TBEs, template_hack="<L1Cache_TBE>";
// SLICC built-ins that bind/unbind the implicit cache_entry variable
// used inside actions.
77 void set_cache_entry(AbstractCacheEntry a);
78 void unset_cache_entry();
// Look up the Entry for a block address; returns an invalid pointer when
// the block is not resident (callers test with is_valid/is_invalid).
82 Entry getCacheEntry(Address address), return_by_pointer="yes" {
83 return static_cast(Entry, "pointer", cacheMemory.lookup(address));
// Map a CPU request type to the protocol Event that drives the transition
// table. NOTE(review): the return statements for each branch (original
// lines 89/91/93) are elided from this listing — presumably Event:Load,
// Event:Ifetch, Event:Store respectively; confirm against the full file.
87 Event mandatory_request_type_to_event(CacheRequestType type) {
88 if (type == CacheRequestType:LD) {
90 } else if (type == CacheRequestType:IFETCH) {
92 } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
// Any other request type is a protocol error (aborts simulation).
95 error("Invalid CacheRequestType");
// Resolve the current state of a block: an elided branch (original lines
// 100-103) presumably consults the TBE first; otherwise the cache entry's
// state is used; the fall-through default is elided from this listing.
99 State getState(TBE tbe, Entry cache_entry, Address addr) {
104 else if (is_valid(cache_entry)) {
105 return cache_entry.CacheState;
// Record a new state in both the TBE (if any) and the cache entry (if
// any), and derive the access permission from the stable state.
112 void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
115 tbe.TBEState := state;
118 if (is_valid(cache_entry)) {
119 cache_entry.CacheState := state;
// Only M grants read/write; every other state blocks CPU access.
120 if (state == State:M) {
121 cache_entry.changePermission(AccessPermission:Read_Write);
123 cache_entry.changePermission(AccessPermission:Invalid);
// Classify where a remote hit was serviced, for profiling: another L1
// means a cache-to-cache transfer (wCC); otherwise convert the sender's
// machine type directly.
128 GenericMachineType getNondirectHitMachType(MachineID sender) {
129 if (machineIDToMachineType(sender) == MachineType:L1Cache) {
131 // NOTE direct local hits should not call this
133 return GenericMachineType:L1Cache_wCC;
135 return ConvertMachToGenericMach(machineIDToMachineType(sender));
// Output ports: bind the message type and outgoing buffer for enqueue().
142 out_port(requestNetwork_out, RequestMsg, requestFromCache);
143 out_port(responseNetwork_out, ResponseMsg, responseFromCache);
// Input port for directory-forwarded requests. peek() exposes the head
// message as in_msg and blocks further messages for the same Address
// until the triggered transition completes.
145 in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
146 if (forwardRequestNetwork_in.isReady()) {
147 peek(forwardRequestNetwork_in, RequestMsg, block_on="Address") {
149 Entry cache_entry := getCacheEntry(in_msg.Address);
150 TBE tbe := TBEs[in_msg.Address];
// Dispatch on the coherence request type carried by the message.
152 if (in_msg.Type == CoherenceRequestType:GETX) {
153 trigger(Event:Fwd_GETX, in_msg.Address, cache_entry, tbe);
155 else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
156 trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
158 else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
159 trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
161 else if (in_msg.Type == CoherenceRequestType:INV) {
162 trigger(Event:Inv, in_msg.Address, cache_entry, tbe);
// Unknown request types abort the simulation.
165 error("Unexpected message");
// Input port for data responses (vnet 4); the only legal message here is
// DATA, which completes an outstanding IS/IM miss.
171 in_port(responseNetwork_in, ResponseMsg, responseToCache) {
172 if (responseNetwork_in.isReady()) {
173 peek(responseNetwork_in, ResponseMsg, block_on="Address") {
175 Entry cache_entry := getCacheEntry(in_msg.Address);
176 TBE tbe := TBEs[in_msg.Address];
178 if (in_msg.Type == CoherenceResponseType:DATA) {
179 trigger(Event:Data, in_msg.Address, cache_entry, tbe);
182 error("Unexpected message");
// Input port for CPU requests. Blocks per LineAddress so multiple CPU
// requests to the same line are serialized.
189 in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
190 if (mandatoryQueue_in.isReady()) {
191 peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
193 Entry cache_entry := getCacheEntry(in_msg.LineAddress);
// If the line is not resident and its set is full, first evict the
// victim chosen by cacheProbe: trigger Replacement on the VICTIM's
// address (note all three trigger args use the victim, not in_msg).
194 if (is_invalid(cache_entry) &&
195 cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
196 // make room for the block
197 trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
198 getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
199 TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
// Otherwise dispatch the CPU request as a Load/Ifetch/Store event.
202 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
203 cache_entry, TBEs[in_msg.LineAddress]);
// ACTIONS — invoked by transitions; 'address', 'cache_entry' and 'tbe'
// are the values passed to trigger().
// Issue a GETX to the home directory (MI has a single request type, so
// reads and writes both request exclusive ownership).
211 action(a_issueRequest, "a", desc="Issue a request") {
212 enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
213 out_msg.Address := address;
214 out_msg.Type := CoherenceRequestType:GETX;
215 out_msg.Requestor := machineID;
216 out_msg.Destination.add(map_Address_to_Directory(address));
217 out_msg.MessageSize := MessageSizeType:Control;
// Issue a PUTX (writeback) carrying the dirty data to the directory.
221 action(b_issuePUT, "b", desc="Issue a PUT request") {
222 enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
223 assert(is_valid(cache_entry));
224 out_msg.Address := address;
225 out_msg.Type := CoherenceRequestType:PUTX;
226 out_msg.Requestor := machineID;
227 out_msg.Destination.add(map_Address_to_Directory(address));
228 out_msg.DataBlk := cache_entry.DataBlk;
229 out_msg.MessageSize := MessageSizeType:Data;
// Answer a forwarded GETX with data taken from the cache entry; the
// requestor's ID comes from the message at the head of the forward queue.
234 action(e_sendData, "e", desc="Send data from cache to requestor") {
235 peek(forwardRequestNetwork_in, RequestMsg) {
236 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
237 assert(is_valid(cache_entry));
238 out_msg.Address := address;
239 out_msg.Type := CoherenceResponseType:DATA;
240 out_msg.Sender := machineID;
241 out_msg.Destination.add(in_msg.Requestor);
242 out_msg.DataBlk := cache_entry.DataBlk;
243 out_msg.MessageSize := MessageSizeType:Response_Data;
// Same as e_sendData but sources the data from the TBE — used when the
// cache entry is already gone (writeback in flight, e.g. state MI).
248 action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
249 peek(forwardRequestNetwork_in, RequestMsg) {
250 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
251 assert(is_valid(tbe));
252 out_msg.Address := address;
253 out_msg.Type := CoherenceResponseType:DATA;
254 out_msg.Sender := machineID;
255 out_msg.Destination.add(in_msg.Requestor);
256 out_msg.DataBlk := tbe.DataBlk;
257 out_msg.MessageSize := MessageSizeType:Response_Data;
// Allocate a cache frame for 'address' and bind it as cache_entry.
// NOTE(review): original line 264 is elided here — presumably an 'else'
// so allocation happens only when no entry exists; confirm in full file.
262 action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
263 if (is_valid(cache_entry)) {
265 set_cache_entry(cacheMemory.allocate(address, new Entry));
// Free the victim's cache frame (no-op semantics when entry invalid).
269 action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
270 if (is_valid(cache_entry)) {
271 cacheMemory.deallocate(address);
// Queue-pop actions; the network pops also record per-vnet delay stats.
276 action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
277 mandatoryQueue_in.dequeue();
280 action(n_popResponseQueue, "n", desc="Pop the response queue") {
281 profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
284 action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
285 profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
// Record a miss against the CPU request at the head of the queue.
288 action(p_profileMiss, "p", desc="Profile cache miss") {
289 peek(mandatoryQueue_in, CacheMsg) {
290 cacheMemory.profileMiss(in_msg);
// Complete a local load hit: hand the data back to the Sequencer,
// attributing the hit to this L1.
294 action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
295 assert(is_valid(cache_entry));
296 DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
297 sequencer.readCallback(address,
298 GenericMachineType:L1Cache,
299 cache_entry.DataBlk);
// Complete a load that missed: attribute the fill to the responding
// machine (peeked from the response queue) for profiling.
302 action(rx_load_hit, "rx", desc="External load completed.") {
303 peek(responseNetwork_in, ResponseMsg) {
304 assert(is_valid(cache_entry));
305 DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
306 sequencer.readCallback(address,
307 getNondirectHitMachType(in_msg.Sender),
308 cache_entry.DataBlk);
// Store counterparts of the two actions above. Note writeCallback may
// modify cache_entry.DataBlk in place (the Sequencer applies the store).
312 action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
313 assert(is_valid(cache_entry));
314 DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
315 sequencer.writeCallback(address,
316 GenericMachineType:L1Cache,
317 cache_entry.DataBlk);
320 action(sx_store_hit, "sx", desc="External store completed.") {
321 peek(responseNetwork_in, ResponseMsg) {
322 assert(is_valid(cache_entry));
323 DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
324 sequencer.writeCallback(address,
325 getNondirectHitMachType(in_msg.Sender),
326 cache_entry.DataBlk);
// Install the data block from an incoming DATA response into the cache.
330 action(u_writeDataToCache, "u", desc="Write data to the cache") {
331 peek(responseNetwork_in, ResponseMsg) {
332 assert(is_valid(cache_entry));
333 cache_entry.DataBlk := in_msg.DataBlk;
// Allocate a TBE for this address and bind it as the implicit 'tbe'.
338 action(v_allocateTBE, "v", desc="Allocate TBE") {
339 TBEs.allocate(address);
340 set_tbe(TBEs[address]);
344 action(w_deallocateTBE, "w", desc="Deallocate TBE") {
345 TBEs.deallocate(address);
// Preserve the dirty data in the TBE before the cache frame is freed,
// so a concurrent forwarded GETX can still be answered (ee_sendDataFromTBE).
349 action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
350 assert(is_valid(cache_entry));
351 assert(is_valid(tbe));
352 tbe.DataBlk := cache_entry.DataBlk;
// Stall: leave the triggering message at the head of its queue (body
// elided in this listing).
355 action(z_stall, "z", desc="stall") {
// TRANSITIONS — (current state, event[, next state]) { actions... }.
// Many action lists are partially elided in this listing; comments below
// describe only what is visible.
// CPU requests arriving in any transient state stall (action elided,
// presumably z_stall).
361 transition({IS, IM, MI, II}, {Load, Ifetch, Store, Replacement}) {
365 transition({IS, IM}, {Fwd_GETX, Inv}) {
// An Inv racing with our writeback: just consume it, the PUT is in flight.
369 transition(MI, Inv) {
370 o_popForwardedRequestQueue;
// Hits in M: no state change, service locally.
373 transition(M, Store) {
378 transition(M, {Load, Ifetch}) {
384 o_popForwardedRequestQueue;
// Misses from I: allocate a frame, (elided) issue GETX, go transient.
387 transition(I, Store, IM) {
389 i_allocateL1CacheBlock;
395 transition(I, {Load, Ifetch}, IS) {
397 i_allocateL1CacheBlock;
// Data response completes the miss and installs M (actions elided).
403 transition(IS, Data, M) {
410 transition(IM, Data, M) {
// Ownership forwarded away: send data to the new owner and invalidate.
417 transition(M, Fwd_GETX, I) {
419 o_popForwardedRequestQueue;
// Replacing an invalid frame only needs the frame freed.
422 transition(I, Replacement) {
423 h_deallocateL1CacheBlock;
// Evict/invalidate a dirty line: stash data in a TBE, free the frame,
// and wait in MI for the directory's writeback ack/nack.
426 transition(M, {Replacement,Inv}, MI) {
429 x_copyDataFromCacheToTBE;
430 h_deallocateL1CacheBlock;
433 transition(MI, Writeback_Ack, I) {
435 o_popForwardedRequestQueue;
// GETX raced ahead of our PUT: supply data (elided; TBE path) and move
// to II to await the writeback response.
438 transition(MI, Fwd_GETX, II) {
440 o_popForwardedRequestQueue;
// Directory nacked the PUT because a GETX is coming: MII waits for it.
443 transition(MI, Writeback_Nack, MII) {
444 o_popForwardedRequestQueue;
447 transition(MII, Fwd_GETX, I) {
450 o_popForwardedRequestQueue;
// Nack after we already forwarded the data (II): safe to drop to I.
453 transition(II, Writeback_Nack, I) {
455 o_popForwardedRequestQueue;