// ruby: cleaning up RubyQueue and RubyNetwork dprintfs
// [gem5.git] / src / mem / protocol / MOESI_hammer-dma.sm
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


30 machine(DMA, "DMA Controller")
31 : DMASequencer * dma_sequencer,
32 int request_latency = 6
33 {
34
35 MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", no_vector="true";
36 MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", no_vector="true";
37
38 enumeration(State, desc="DMA states", default="DMA_State_READY") {
39 READY, desc="Ready to accept a new request";
40 BUSY_RD, desc="Busy: currently processing a request";
41 BUSY_WR, desc="Busy: currently processing a request";
42 }
43
44 enumeration(Event, desc="DMA events") {
45 ReadRequest, desc="A new read request";
46 WriteRequest, desc="A new write request";
47 Data, desc="Data from a DMA memory read";
48 Ack, desc="DMA write to memory completed";
49 }
50
51 MessageBuffer mandatoryQueue, ordered="false", no_vector="true";
52 State cur_state, no_vector="true";
53
54 State getState(Address addr) {
55 return cur_state;
56 }
57 void setState(Address addr, State state) {
58 cur_state := state;
59 }
60
61 out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
62
63 in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
64 if (dmaRequestQueue_in.isReady()) {
65 peek(dmaRequestQueue_in, SequencerMsg) {
66 if (in_msg.Type == SequencerRequestType:LD ) {
67 trigger(Event:ReadRequest, in_msg.LineAddress);
68 } else if (in_msg.Type == SequencerRequestType:ST) {
69 trigger(Event:WriteRequest, in_msg.LineAddress);
70 } else {
71 error("Invalid request type");
72 }
73 }
74 }
75 }
76
77 in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
78 if (dmaResponseQueue_in.isReady()) {
79 peek( dmaResponseQueue_in, DMAResponseMsg) {
80 if (in_msg.Type == DMAResponseType:ACK) {
81 trigger(Event:Ack, in_msg.LineAddress);
82 } else if (in_msg.Type == DMAResponseType:DATA) {
83 trigger(Event:Data, in_msg.LineAddress);
84 } else {
85 error("Invalid response type");
86 }
87 }
88 }
89 }
90
91 action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
92 peek(dmaRequestQueue_in, SequencerMsg) {
93 enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
94 out_msg.PhysicalAddress := in_msg.PhysicalAddress;
95 out_msg.LineAddress := in_msg.LineAddress;
96 out_msg.Type := DMARequestType:READ;
97 out_msg.Requestor := machineID;
98 out_msg.DataBlk := in_msg.DataBlk;
99 out_msg.Len := in_msg.Len;
100 out_msg.Destination.add(map_Address_to_Directory(address));
101 out_msg.MessageSize := MessageSizeType:Writeback_Control;
102 }
103 }
104 }
105
106 action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
107 peek(dmaRequestQueue_in, SequencerMsg) {
108 enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
109 out_msg.PhysicalAddress := in_msg.PhysicalAddress;
110 out_msg.LineAddress := in_msg.LineAddress;
111 out_msg.Type := DMARequestType:WRITE;
112 out_msg.Requestor := machineID;
113 out_msg.DataBlk := in_msg.DataBlk;
114 out_msg.Len := in_msg.Len;
115 out_msg.Destination.add(map_Address_to_Directory(address));
116 out_msg.MessageSize := MessageSizeType:Writeback_Control;
117 }
118 }
119 }
120
121 action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
122 peek (dmaResponseQueue_in, DMAResponseMsg) {
123 dma_sequencer.ackCallback();
124 }
125 }
126
127 action(d_dataCallback, "d", desc="Write data to dma sequencer") {
128 peek (dmaResponseQueue_in, DMAResponseMsg) {
129 dma_sequencer.dataCallback(in_msg.DataBlk);
130 }
131 }
132
133 action(p_popRequestQueue, "p", desc="Pop request queue") {
134 dmaRequestQueue_in.dequeue();
135 }
136
137 action(p_popResponseQueue, "\p", desc="Pop request queue") {
138 dmaResponseQueue_in.dequeue();
139 }
140
141 transition(READY, ReadRequest, BUSY_RD) {
142 s_sendReadRequest;
143 p_popRequestQueue;
144 }
145
146 transition(READY, WriteRequest, BUSY_WR) {
147 s_sendWriteRequest;
148 p_popRequestQueue;
149 }
150
151 transition(BUSY_RD, Data, READY) {
152 d_dataCallback;
153 p_popResponseQueue;
154 }
155
156 transition(BUSY_WR, Ack, READY) {
157 a_ackCallback;
158 p_popResponseQueue;
159 }
160 }