962775cd3356c1fc0af9ebea6cee37a534be7a73
[mesa.git] / src / gallium / drivers / cell / ppu / cell_batch.c
1 /**************************************************************************
2 *
3 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "cell_context.h"
30 #include "cell_batch.h"
31 #include "cell_fence.h"
32 #include "cell_spu.h"
33
34
35
36 /**
37 * Search the buffer pool for an empty/free buffer and return its index.
38 * Buffers are used for storing vertex data, state and commands which
39 * will be sent to the SPUs.
40 * If no empty buffers are available, wait for one.
41 * \return buffer index in [0, CELL_NUM_BUFFERS-1]
42 */
43 uint
44 cell_get_empty_buffer(struct cell_context *cell)
45 {
46 static uint prev_buffer = 0;
47 uint buf = (prev_buffer + 1) % CELL_NUM_BUFFERS;
48 uint tries = 0;
49
50 /* Find a buffer that's marked as free by all SPUs */
51 while (1) {
52 uint spu, num_free = 0;
53
54 for (spu = 0; spu < cell->num_spus; spu++) {
55 if (cell->buffer_status[spu][buf][0] == CELL_BUFFER_STATUS_FREE) {
56 num_free++;
57
58 if (num_free == cell->num_spus) {
59 /* found a free buffer, now mark status as used */
60 for (spu = 0; spu < cell->num_spus; spu++) {
61 cell->buffer_status[spu][buf][0] = CELL_BUFFER_STATUS_USED;
62 }
63 /*
64 printf("PPU: ALLOC BUFFER %u, %u tries\n", buf, tries);
65 */
66 prev_buffer = buf;
67
68 /* release tex buffer associated w/ prev use of this batch buf */
69 cell_free_fenced_buffers(cell, &cell->fenced_buffers[buf]);
70
71 return buf;
72 }
73 }
74 else {
75 break;
76 }
77 }
78
79 /* try next buf */
80 buf = (buf + 1) % CELL_NUM_BUFFERS;
81
82 tries++;
83 if (tries == 100) {
84 /*
85 printf("PPU WAITING for buffer...\n");
86 */
87 }
88 }
89 }
90
91
92 /**
93 * Append a fence command to the current batch buffer.
94 * Note that we're sure there's always room for this because of the
95 * adjusted size check in cell_batch_free_space().
96 */
97 static void
98 emit_fence(struct cell_context *cell)
99 {
100 const uint batch = cell->cur_batch;
101 const uint size = cell->buffer_size[batch];
102 struct cell_command_fence *fence_cmd;
103 struct cell_fence *fence = &cell->fenced_buffers[batch].fence;
104 uint i;
105
106 /* set fence status to emitted, not yet signalled */
107 for (i = 0; i < cell->num_spus; i++) {
108 fence->status[i][0] = CELL_FENCE_EMITTED;
109 }
110
111 ASSERT(size + sizeof(struct cell_command_fence) <= CELL_BUFFER_SIZE);
112
113 fence_cmd = (struct cell_command_fence *) (cell->buffer[batch] + size);
114 fence_cmd->opcode = CELL_CMD_FENCE;
115 fence_cmd->fence = fence;
116
117 /* update batch buffer size */
118 cell->buffer_size[batch] = size + sizeof(struct cell_command_fence);
119 assert(sizeof(struct cell_command_fence) % 8 == 0);
120 }
121
122
/**
 * Flush the current batch buffer to the SPUs.
 * An empty buffer will be found and set as the new current batch buffer
 * for subsequent commands/data.
 */
void
cell_batch_flush(struct cell_context *cell)
{
   /* detects (via assert below) an accidental re-entrant flush */
   static boolean flushing = FALSE;
   uint batch = cell->cur_batch;
   uint size = cell->buffer_size[batch];
   uint spu, cmd_word;

   assert(!flushing);

   /* nothing to send */
   if (size == 0)
      return;

   /* Before we use this batch buffer, make sure any fenced texture buffers
    * are released.
    */
   if (cell->fenced_buffers[batch].head) {
      emit_fence(cell);
      /* emit_fence() appended a command, so re-read the buffer size */
      size = cell->buffer_size[batch];
   }

   flushing = TRUE;

   assert(batch < CELL_NUM_BUFFERS);

   /*
   printf("cell_batch_dispatch: buf %u at %p, size %u\n",
          batch, &cell->buffer[batch][0], size);
   */

   /*
    * Build "BATCH" command and send to all SPUs.
    * Command word layout: opcode in low byte, buffer index in bits 8..15,
    * byte size in the upper bits.
    */
   cmd_word = CELL_CMD_BATCH | (batch << 8) | (size << 16);

   for (spu = 0; spu < cell->num_spus; spu++) {
      assert(cell->buffer_status[spu][batch][0] == CELL_BUFFER_STATUS_USED);
      send_mbox_message(cell_global.spe_contexts[spu], cmd_word);
   }

   /* When the SPUs are done copying the buffer into their locals stores
    * they'll write a BUFFER_STATUS_FREE message into the buffer_status[]
    * array indicating that the PPU can re-use the buffer.
    */

   /* may busy-wait here until an SPU frees a buffer */
   batch = cell_get_empty_buffer(cell);

   cell->buffer_size[batch] = 0; /* empty */
   cell->cur_batch = batch;

   flushing = FALSE;
}
180
181
182 /**
183 * Return the number of bytes free in the current batch buffer.
184 */
185 uint
186 cell_batch_free_space(const struct cell_context *cell)
187 {
188 uint free = CELL_BUFFER_SIZE - cell->buffer_size[cell->cur_batch];
189 free -= sizeof(struct cell_command_fence);
190 return free;
191 }
192
193
194 /**
195 * Append data to the current batch buffer.
196 * \param data address of block of bytes to append
197 * \param bytes size of block of bytes
198 */
199 void
200 cell_batch_append(struct cell_context *cell, const void *data, uint bytes)
201 {
202 uint size;
203
204 ASSERT(bytes % 8 == 0);
205 ASSERT(bytes <= CELL_BUFFER_SIZE);
206 ASSERT(cell->cur_batch >= 0);
207
208 #ifdef ASSERT
209 {
210 uint spu;
211 for (spu = 0; spu < cell->num_spus; spu++) {
212 ASSERT(cell->buffer_status[spu][cell->cur_batch][0]
213 == CELL_BUFFER_STATUS_USED);
214 }
215 }
216 #endif
217
218 size = cell->buffer_size[cell->cur_batch];
219
220 if (bytes > cell_batch_free_space(cell)) {
221 cell_batch_flush(cell);
222 size = 0;
223 }
224
225 ASSERT(size + bytes <= CELL_BUFFER_SIZE);
226
227 memcpy(cell->buffer[cell->cur_batch] + size, data, bytes);
228
229 cell->buffer_size[cell->cur_batch] = size + bytes;
230 }
231
232
/**
 * Allocate space in the current batch buffer for 'bytes' space.
 * \param bytes number of bytes to allocate (must be a multiple of 8)
 * \return address in batch buffer to put data
 */
void *
cell_batch_alloc(struct cell_context *cell, uint bytes)
{
   /* alignment of 1 == no particular alignment required */
   return cell_batch_alloc_aligned(cell, bytes, 1);
}
242
243
244 /**
245 * Same as \sa cell_batch_alloc, but return an address at a particular
246 * alignment.
247 */
248 void *
249 cell_batch_alloc_aligned(struct cell_context *cell, uint bytes,
250 uint alignment)
251 {
252 void *pos;
253 uint size, padbytes;
254
255 ASSERT(bytes % 8 == 0);
256 ASSERT(bytes <= CELL_BUFFER_SIZE);
257 ASSERT(alignment > 0);
258 ASSERT(cell->cur_batch >= 0);
259
260 #ifdef ASSERT
261 {
262 uint spu;
263 for (spu = 0; spu < cell->num_spus; spu++) {
264 ASSERT(cell->buffer_status[spu][cell->cur_batch][0]
265 == CELL_BUFFER_STATUS_USED);
266 }
267 }
268 #endif
269
270 size = cell->buffer_size[cell->cur_batch];
271
272 padbytes = (alignment - (size % alignment)) % alignment;
273
274 if (padbytes + bytes > cell_batch_free_space(cell)) {
275 cell_batch_flush(cell);
276 size = 0;
277 }
278 else {
279 size += padbytes;
280 }
281
282 ASSERT(size % alignment == 0);
283 ASSERT(size + bytes <= CELL_BUFFER_SIZE);
284
285 pos = (void *) (cell->buffer[cell->cur_batch] + size);
286
287 cell->buffer_size[cell->cur_batch] = size + bytes;
288
289 return pos;
290 }
291
292
293 /**
294 * One-time init of batch buffers.
295 */
296 void
297 cell_init_batch_buffers(struct cell_context *cell)
298 {
299 uint spu, buf;
300
301 /* init command, vertex/index buffer info */
302 for (buf = 0; buf < CELL_NUM_BUFFERS; buf++) {
303 cell->buffer_size[buf] = 0;
304
305 /* init batch buffer status values,
306 * mark 0th buffer as used, rest as free.
307 */
308 for (spu = 0; spu < cell->num_spus; spu++) {
309 if (buf == 0)
310 cell->buffer_status[spu][buf][0] = CELL_BUFFER_STATUS_USED;
311 else
312 cell->buffer_status[spu][buf][0] = CELL_BUFFER_STATUS_FREE;
313 }
314 }
315 }