src/mem/protocol/MOESI_CMP_token-dma.sm
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


machine(DMA, "DMA Controller")
    : DMASequencer * dma_sequencer;
      Cycles request_latency := 6;

      // Message Queues
      MessageBuffer * responseFromDir, network="From", virtual_network="5",
            vnet_type="response";
      MessageBuffer * reqToDirectory, network="To", virtual_network="0",
            vnet_type="request";

      MessageBuffer * mandatoryQueue;
{
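  // This controller fronts the DMASequencer for the MOESI_CMP_token protocol:
  // DMA requests arrive on mandatoryQueue, are forwarded to the directory
  // through reqToDirectory (virtual network 0), and the directory's data/ack
  // responses come back on responseFromDir (virtual network 5).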
  state_declaration(State, desc="DMA states", default="DMA_State_READY") {
    READY, AccessPermission:Invalid, desc="Ready to accept a new request";
    BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
    BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
  }

  enumeration(Event, desc="DMA events") {
    ReadRequest, desc="A new read request";
    WriteRequest, desc="A new write request";
    Data, desc="Data from a DMA memory read";
    Ack, desc="DMA write to memory completed";
  }

  State cur_state;

  Tick clockEdge();

  State getState(Addr addr) {
    return cur_state;
  }

  void setState(Addr addr, State state) {
    cur_state := state;
  }

  AccessPermission getAccessPermission(Addr addr) {
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Addr addr, State state) {
  }

  void functionalRead(Addr addr, Packet *pkt) {
    error("DMA does not support functional read.");
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    error("DMA does not support functional write.");
  }

  out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");

  in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
    if (dmaRequestQueue_in.isReady(clockEdge())) {
      peek(dmaRequestQueue_in, SequencerMsg) {
        if (in_msg.Type == SequencerRequestType:LD) {
          trigger(Event:ReadRequest, in_msg.LineAddress);
        } else if (in_msg.Type == SequencerRequestType:ST) {
          trigger(Event:WriteRequest, in_msg.LineAddress);
        } else {
          error("Invalid request type");
        }
      }
    }
  }

  in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
    if (dmaResponseQueue_in.isReady(clockEdge())) {
      peek(dmaResponseQueue_in, DMAResponseMsg) {
        if (in_msg.Type == DMAResponseType:ACK) {
          trigger(Event:Ack, in_msg.LineAddress);
        } else if (in_msg.Type == DMAResponseType:DATA) {
          trigger(Event:Data, in_msg.LineAddress);
        } else {
          error("Invalid response type");
        }
      }
    }
  }
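  // Actions: the two send actions repackage the incoming SequencerMsg as a
  // DMARequestMsg addressed to the home directory; the two callback actions
  // hand the directory's DMAResponseMsg (data or ack) back to the
  // DMASequencer.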

  action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
    peek(dmaRequestQueue_in, SequencerMsg) {
      enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
        out_msg.PhysicalAddress := in_msg.PhysicalAddress;
        out_msg.LineAddress := in_msg.LineAddress;
        out_msg.Type := DMARequestType:READ;
        out_msg.Requestor := machineID;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := in_msg.Len;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
    peek(dmaRequestQueue_in, SequencerMsg) {
      enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
        out_msg.PhysicalAddress := in_msg.PhysicalAddress;
        out_msg.LineAddress := in_msg.LineAddress;
        out_msg.Type := DMARequestType:WRITE;
        out_msg.Requestor := machineID;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := in_msg.Len;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
    peek(dmaResponseQueue_in, DMAResponseMsg) {
      dma_sequencer.ackCallback();
    }
  }

  action(d_dataCallback, "d", desc="Write data to dma sequencer") {
    peek(dmaResponseQueue_in, DMAResponseMsg) {
      dma_sequencer.dataCallback(in_msg.DataBlk);
    }
  }

  action(p_popRequestQueue, "p", desc="Pop request queue") {
    dmaRequestQueue_in.dequeue(clockEdge());
  }
  action(p_popResponseQueue, "\p", desc="Pop response queue") {
    dmaResponseQueue_in.dequeue(clockEdge());
  }

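  // Transition table: READY accepts one request at a time and moves to
  // BUSY_RD or BUSY_WR while the directory services it; the matching Data or
  // Ack response returns the controller to READY.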
  transition(READY, ReadRequest, BUSY_RD) {
    s_sendReadRequest;
    p_popRequestQueue;
  }

  transition(READY, WriteRequest, BUSY_WR) {
    s_sendWriteRequest;
    p_popRequestQueue;
  }

  transition(BUSY_RD, Data, READY) {
    d_dataCallback;
    p_popResponseQueue;
  }

  transition(BUSY_WR, Ack, READY) {
    a_ackCallback;
    p_popResponseQueue;
  }
}