/*
 * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

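// MI_example DMA controller: forwards DMA read/write requests from the
// attached DMASequencer to the directory and relays the directory's
// DATA/ACK responses back. Only one transaction is tracked at a time
// (a single cur_state, no per-address TBE table).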
machine(DMA, "DMA Controller")
    : DMASequencer * dma_sequencer,
      int request_latency = 6
{

  MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response", no_vector="true";
  MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request", no_vector="true";

  state_declaration(State, desc="DMA states", default="DMA_State_READY") {
    READY, AccessPermission:Invalid, desc="Ready to accept a new request";
    BUSY_RD, AccessPermission:Busy, desc="Busy: processing a read request";
    BUSY_WR, AccessPermission:Busy, desc="Busy: processing a write request";
  }

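  // ReadRequest/WriteRequest arrive from the DMASequencer on the mandatory
  // queue; Data and Ack arrive from the directory on the response network.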
  enumeration(Event, desc="DMA events") {
    ReadRequest, desc="A new read request";
    WriteRequest, desc="A new write request";
    Data, desc="Data from a DMA memory read";
    Ack, desc="DMA write to memory completed";
  }

  MessageBuffer mandatoryQueue, ordered="false", no_vector="true";
  State cur_state, no_vector="true";

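  // With only one outstanding transaction, the per-address state interface
  // just reads and writes the single cur_state variable.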
  State getState(Address addr) {
    return cur_state;
  }

  void setState(Address addr, State state) {
    cur_state := state;
  }

  AccessPermission getAccessPermission(Address addr) {
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Address addr, State state) {
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    error("DMA Controller does not support getDataBlock function.\n");
  }

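  // Ports: requests go out to the directory; incoming DMASequencer requests
  // and directory responses are peeked and turned into events.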
  out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");

  in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, SequencerMsg) {
        if (in_msg.Type == SequencerRequestType:LD) {
          trigger(Event:ReadRequest, in_msg.LineAddress);
        } else if (in_msg.Type == SequencerRequestType:ST) {
          trigger(Event:WriteRequest, in_msg.LineAddress);
        } else {
          error("Invalid request type");
        }
      }
    }
  }

  in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
    if (dmaResponseQueue_in.isReady()) {
      peek(dmaResponseQueue_in, DMAResponseMsg) {
        if (in_msg.Type == DMAResponseType:ACK) {
          trigger(Event:Ack, in_msg.LineAddress);
        } else if (in_msg.Type == DMAResponseType:DATA) {
          trigger(Event:Data, in_msg.LineAddress);
        } else {
          error("Invalid response type");
        }
      }
    }
  }

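  // Actions: build directory request messages, hand responses back to the
  // DMASequencer, and pop the consumed queues.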
  action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
    peek(dmaRequestQueue_in, SequencerMsg) {
      enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
        out_msg.PhysicalAddress := in_msg.PhysicalAddress;
        out_msg.LineAddress := in_msg.LineAddress;
        out_msg.Type := DMARequestType:READ;
        out_msg.Requestor := machineID;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := in_msg.Len;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
    peek(dmaRequestQueue_in, SequencerMsg) {
      enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
        out_msg.PhysicalAddress := in_msg.PhysicalAddress;
        out_msg.LineAddress := in_msg.LineAddress;
        out_msg.Type := DMARequestType:WRITE;
        out_msg.Requestor := machineID;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := in_msg.Len;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
    peek(dmaResponseQueue_in, DMAResponseMsg) {
      dma_sequencer.ackCallback();
    }
  }

  action(d_dataCallback, "d", desc="Write data to dma sequencer") {
    peek(dmaResponseQueue_in, DMAResponseMsg) {
      dma_sequencer.dataCallback(in_msg.DataBlk);
    }
  }

  action(p_popRequestQueue, "p", desc="Pop request queue") {
    dmaRequestQueue_in.dequeue();
  }

  action(p_popResponseQueue, "\p", desc="Pop response queue") {
    dmaResponseQueue_in.dequeue();
  }

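  // Transitions: READY accepts a single read or write; the matching BUSY
  // state waits for the directory's Data or Ack before returning to READY.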
  transition(READY, ReadRequest, BUSY_RD) {
    s_sendReadRequest;
    p_popRequestQueue;
  }

  transition(READY, WriteRequest, BUSY_WR) {
    s_sendWriteRequest;
    p_popRequestQueue;
  }

  transition(BUSY_RD, Data, READY) {
    d_dataCallback;
    p_popResponseQueue;
  }

  transition(BUSY_WR, Ack, READY) {
    a_ackCallback;
    p_popResponseQueue;
  }
}