src/mem/protocol/MOESI_CMP_token-dma.sm
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(DMA, "DMA Controller")
    : DMASequencer * dma_sequencer;
      Cycles request_latency := 6;

      // Message Queues
      MessageBuffer * responseFromDir, network="From", virtual_network="5",
            ordered="true", vnet_type="response";
      MessageBuffer * reqToDirectory, network="To", virtual_network="0",
            ordered="false", vnet_type="request";

{
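  // The DMA controller is a simple three-state machine: it sits in READY
  // until the sequencer hands it a read or write, then waits in BUSY_RD or
  // BUSY_WR for the directory's response before accepting another request.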
  state_declaration(State, desc="DMA states", default="DMA_State_READY") {
    READY, AccessPermission:Invalid, desc="Ready to accept a new request";
    BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
    BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
  }

  enumeration(Event, desc="DMA events") {
    ReadRequest,  desc="A new read request";
    WriteRequest, desc="A new write request";
    Data,         desc="Data from a DMA memory read";
    Ack,          desc="DMA write to memory completed";
  }

  structure(DMASequencer, external="yes") {
    void ackCallback();
    void dataCallback(DataBlock);
  }

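  // Only one DMA transaction is outstanding at a time, so the controller
  // keeps a single state variable; getState/setState below ignore the
  // address argument for that reason.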
  MessageBuffer mandatoryQueue, ordered="false";
  State cur_state;

  State getState(Address addr) {
    return cur_state;
  }

  void setState(Address addr, State state) {
    cur_state := state;
  }

  AccessPermission getAccessPermission(Address addr) {
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Address addr, State state) {
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    error("DMA Controller does not support getDataBlock function.\n");
  }

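  // All outgoing requests leave through this port onto the reqToDirectory
  // buffer.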
  out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");

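  // Requests from the attached DMASequencer arrive on the mandatory queue;
  // loads become ReadRequest events and stores become WriteRequest events.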
  in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, SequencerMsg) {
        if (in_msg.Type == SequencerRequestType:LD) {
          trigger(Event:ReadRequest, in_msg.LineAddress);
        } else if (in_msg.Type == SequencerRequestType:ST) {
          trigger(Event:WriteRequest, in_msg.LineAddress);
        } else {
          error("Invalid request type");
        }
      }
    }
  }

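  // Responses from the directory: DATA completes an outstanding read,
  // ACK completes an outstanding write.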
  in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
    if (dmaResponseQueue_in.isReady()) {
      peek(dmaResponseQueue_in, DMAResponseMsg) {
        if (in_msg.Type == DMAResponseType:ACK) {
          trigger(Event:Ack, in_msg.LineAddress);
        } else if (in_msg.Type == DMAResponseType:DATA) {
          trigger(Event:Data, in_msg.LineAddress);
        } else {
          error("Invalid response type");
        }
      }
    }
  }

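  // Actions: forward the sequencer's request to the home directory for the
  // line, and report the directory's response back through the sequencer's
  // data/ack callbacks.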
  action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
    peek(dmaRequestQueue_in, SequencerMsg) {
      enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
        out_msg.PhysicalAddress := in_msg.PhysicalAddress;
        out_msg.LineAddress := in_msg.LineAddress;
        out_msg.Type := DMARequestType:READ;
        out_msg.Requestor := machineID;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := in_msg.Len;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
    peek(dmaRequestQueue_in, SequencerMsg) {
      enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
        out_msg.PhysicalAddress := in_msg.PhysicalAddress;
        out_msg.LineAddress := in_msg.LineAddress;
        out_msg.Type := DMARequestType:WRITE;
        out_msg.Requestor := machineID;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := in_msg.Len;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
    peek(dmaResponseQueue_in, DMAResponseMsg) {
      dma_sequencer.ackCallback();
    }
  }

  action(d_dataCallback, "d", desc="Write data to dma sequencer") {
    peek(dmaResponseQueue_in, DMAResponseMsg) {
      dma_sequencer.dataCallback(in_msg.DataBlk);
    }
  }

  action(p_popRequestQueue, "p", desc="Pop request queue") {
    dmaRequestQueue_in.dequeue();
  }

  action(p_popResponseQueue, "\p", desc="Pop response queue") {
    dmaResponseQueue_in.dequeue();
  }

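  // Transition table: a new request moves READY to the matching busy state;
  // the directory's Data or Ack response returns the controller to READY.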
  transition(READY, ReadRequest, BUSY_RD) {
    s_sendReadRequest;
    p_popRequestQueue;
  }

  transition(READY, WriteRequest, BUSY_WR) {
    s_sendWriteRequest;
    p_popRequestQueue;
  }

  transition(BUSY_RD, Data, READY) {
    d_dataCallback;
    p_popResponseQueue;
  }

  transition(BUSY_WR, Ack, READY) {
    a_ackCallback;
    p_popResponseQueue;
  }
}