// ruby: cleaning up RubyQueue and RubyNetwork dprintfs
// [gem5.git] / src / mem / protocol / MOESI_CMP_token-dma.sm
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
28
29
30 machine(DMA, "DMA Controller")
31 : DMASequencer * dma_sequencer,
32 int request_latency = 6
33 {
34
35 MessageBuffer responseFromDir, network="From", virtual_network="5", ordered="true", no_vector="true";
36 MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", no_vector="true";
37
38 enumeration(State, desc="DMA states", default="DMA_State_READY") {
39 READY, desc="Ready to accept a new request";
40 BUSY_RD, desc="Busy: currently processing a request";
41 BUSY_WR, desc="Busy: currently processing a request";
42 }
43
44 enumeration(Event, desc="DMA events") {
45 ReadRequest, desc="A new read request";
46 WriteRequest, desc="A new write request";
47 Data, desc="Data from a DMA memory read";
48 Ack, desc="DMA write to memory completed";
49 }
50
51 external_type(DMASequencer) {
52 void ackCallback();
53 void dataCallback(DataBlock);
54 }
55
56 MessageBuffer mandatoryQueue, ordered="false", no_vector="true";
57 State cur_state, no_vector="true";
58
59 State getState(Address addr) {
60 return cur_state;
61 }
62 void setState(Address addr, State state) {
63 cur_state := state;
64 }
65
66 out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
67
68 in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
69 if (dmaRequestQueue_in.isReady()) {
70 peek(dmaRequestQueue_in, SequencerMsg) {
71 if (in_msg.Type == SequencerRequestType:LD ) {
72 trigger(Event:ReadRequest, in_msg.LineAddress);
73 } else if (in_msg.Type == SequencerRequestType:ST) {
74 trigger(Event:WriteRequest, in_msg.LineAddress);
75 } else {
76 error("Invalid request type");
77 }
78 }
79 }
80 }
81
82 in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
83 if (dmaResponseQueue_in.isReady()) {
84 peek( dmaResponseQueue_in, DMAResponseMsg) {
85 if (in_msg.Type == DMAResponseType:ACK) {
86 trigger(Event:Ack, in_msg.LineAddress);
87 } else if (in_msg.Type == DMAResponseType:DATA) {
88 trigger(Event:Data, in_msg.LineAddress);
89 } else {
90 error("Invalid response type");
91 }
92 }
93 }
94 }
95
96 action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
97 peek(dmaRequestQueue_in, SequencerMsg) {
98 enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
99 out_msg.PhysicalAddress := in_msg.PhysicalAddress;
100 out_msg.LineAddress := in_msg.LineAddress;
101 out_msg.Type := DMARequestType:READ;
102 out_msg.Requestor := machineID;
103 out_msg.DataBlk := in_msg.DataBlk;
104 out_msg.Len := in_msg.Len;
105 out_msg.Destination.add(map_Address_to_Directory(address));
106 out_msg.MessageSize := MessageSizeType:Writeback_Control;
107 }
108 }
109 }
110
111 action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
112 peek(dmaRequestQueue_in, SequencerMsg) {
113 enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
114 out_msg.PhysicalAddress := in_msg.PhysicalAddress;
115 out_msg.LineAddress := in_msg.LineAddress;
116 out_msg.Type := DMARequestType:WRITE;
117 out_msg.Requestor := machineID;
118 out_msg.DataBlk := in_msg.DataBlk;
119 out_msg.Len := in_msg.Len;
120 out_msg.Destination.add(map_Address_to_Directory(address));
121 out_msg.MessageSize := MessageSizeType:Writeback_Control;
122 }
123 }
124 }
125
126 action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
127 peek (dmaResponseQueue_in, DMAResponseMsg) {
128 dma_sequencer.ackCallback();
129 }
130 }
131
132 action(d_dataCallback, "d", desc="Write data to dma sequencer") {
133 peek (dmaResponseQueue_in, DMAResponseMsg) {
134 dma_sequencer.dataCallback(in_msg.DataBlk);
135 }
136 }
137
138 action(p_popRequestQueue, "p", desc="Pop request queue") {
139 dmaRequestQueue_in.dequeue();
140 }
141
142 action(p_popResponseQueue, "\p", desc="Pop request queue") {
143 dmaResponseQueue_in.dequeue();
144 }
145
146 transition(READY, ReadRequest, BUSY_RD) {
147 s_sendReadRequest;
148 p_popRequestQueue;
149 }
150
151 transition(READY, WriteRequest, BUSY_WR) {
152 s_sendWriteRequest;
153 p_popRequestQueue;
154 }
155
156 transition(BUSY_RD, Data, READY) {
157 d_dataCallback;
158 p_popResponseQueue;
159 }
160
161 transition(BUSY_WR, Ack, READY) {
162 a_ackCallback;
163 p_popResponseQueue;
164 }
165 }