1 /**************************************************************************
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
29 #include "cell_context.h"
30 #include "cell_batch.h"
31 #include "cell_fence.h"
37 * Search the buffer pool for an empty/free buffer and return its index.
38 * Buffers are used for storing vertex data, state and commands which
39 * will be sent to the SPUs.
40 * If no empty buffers are available, wait for one.
41 * \return buffer index in [0, CELL_NUM_BUFFERS-1]
44 cell_get_empty_buffer(struct cell_context
*cell
)
46 static uint prev_buffer
= 0;
47 uint buf
= (prev_buffer
+ 1) % CELL_NUM_BUFFERS
;
50 /* Find a buffer that's marked as free by all SPUs */
52 uint spu
, num_free
= 0;
54 for (spu
= 0; spu
< cell
->num_spus
; spu
++) {
55 if (cell
->buffer_status
[spu
][buf
][0] == CELL_BUFFER_STATUS_FREE
) {
58 if (num_free
== cell
->num_spus
) {
59 /* found a free buffer, now mark status as used */
60 for (spu
= 0; spu
< cell
->num_spus
; spu
++) {
61 cell
->buffer_status
[spu
][buf
][0] = CELL_BUFFER_STATUS_USED
;
64 printf("PPU: ALLOC BUFFER %u, %u tries\n", buf, tries);
68 /* release tex buffer associated w/ prev use of this batch buf */
69 cell_free_fenced_buffers(cell
, &cell
->fenced_buffers
[buf
]);
80 buf
= (buf
+ 1) % CELL_NUM_BUFFERS
;
85 printf("PPU WAITING for buffer...\n");
93 * Append a fence command to the current batch buffer.
94 * Note that we're sure there's always room for this because of the
95 * adjusted size check in cell_batch_free_space().
98 emit_fence(struct cell_context
*cell
)
100 const uint batch
= cell
->cur_batch
;
101 const uint size
= cell
->buffer_size
[batch
];
102 struct cell_command_fence
*fence_cmd
;
103 struct cell_fence
*fence
= &cell
->fenced_buffers
[batch
].fence
;
106 /* set fence status to emitted, not yet signalled */
107 for (i
= 0; i
< cell
->num_spus
; i
++) {
108 fence
->status
[i
][0] = CELL_FENCE_EMITTED
;
111 ASSERT(size
+ sizeof(struct cell_command_fence
) <= CELL_BUFFER_SIZE
);
113 fence_cmd
= (struct cell_command_fence
*) (cell
->buffer
[batch
] + size
);
114 fence_cmd
->opcode
= CELL_CMD_FENCE
;
115 fence_cmd
->fence
= fence
;
117 /* update batch buffer size */
118 cell
->buffer_size
[batch
] = size
+ sizeof(struct cell_command_fence
);
119 assert(sizeof(struct cell_command_fence
) % 8 == 0);
124 * Flush the current batch buffer to the SPUs.
125 * An empty buffer will be found and set as the new current batch buffer
126 * for subsequent commands/data.
129 cell_batch_flush(struct cell_context
*cell
)
131 static boolean flushing
= FALSE
;
132 uint batch
= cell
->cur_batch
;
133 uint size
= cell
->buffer_size
[batch
];
141 /* Before we use this batch buffer, make sure any fenced texture buffers
144 if (cell
->fenced_buffers
[batch
].head
) {
146 size
= cell
->buffer_size
[batch
];
151 assert(batch
< CELL_NUM_BUFFERS
);
154 printf("cell_batch_dispatch: buf %u at %p, size %u\n",
155 batch, &cell->buffer[batch][0], size);
159 * Build "BATCH" command and send to all SPUs.
161 cmd_word
= CELL_CMD_BATCH
| (batch
<< 8) | (size
<< 16);
163 for (spu
= 0; spu
< cell
->num_spus
; spu
++) {
164 assert(cell
->buffer_status
[spu
][batch
][0] == CELL_BUFFER_STATUS_USED
);
165 send_mbox_message(cell_global
.spe_contexts
[spu
], cmd_word
);
168 /* When the SPUs are done copying the buffer into their locals stores
169 * they'll write a BUFFER_STATUS_FREE message into the buffer_status[]
170 * array indicating that the PPU can re-use the buffer.
173 batch
= cell_get_empty_buffer(cell
);
175 cell
->buffer_size
[batch
] = 0; /* empty */
176 cell
->cur_batch
= batch
;
183 * Return the number of bytes free in the current batch buffer.
186 cell_batch_free_space(const struct cell_context
*cell
)
188 uint free
= CELL_BUFFER_SIZE
- cell
->buffer_size
[cell
->cur_batch
];
189 free
-= sizeof(struct cell_command_fence
);
195 * Append data to the current batch buffer.
196 * \param data address of block of bytes to append
197 * \param bytes size of block of bytes
200 cell_batch_append(struct cell_context
*cell
, const void *data
, uint bytes
)
204 ASSERT(bytes
% 8 == 0);
205 ASSERT(bytes
<= CELL_BUFFER_SIZE
);
206 ASSERT(cell
->cur_batch
>= 0);
211 for (spu
= 0; spu
< cell
->num_spus
; spu
++) {
212 ASSERT(cell
->buffer_status
[spu
][cell
->cur_batch
][0]
213 == CELL_BUFFER_STATUS_USED
);
218 size
= cell
->buffer_size
[cell
->cur_batch
];
220 if (bytes
> cell_batch_free_space(cell
)) {
221 cell_batch_flush(cell
);
225 ASSERT(size
+ bytes
<= CELL_BUFFER_SIZE
);
227 memcpy(cell
->buffer
[cell
->cur_batch
] + size
, data
, bytes
);
229 cell
->buffer_size
[cell
->cur_batch
] = size
+ bytes
;
234 * Allocate space in the current batch buffer for 'bytes' space.
235 * \return address in batch buffer to put data
238 cell_batch_alloc(struct cell_context
*cell
, uint bytes
)
240 return cell_batch_alloc_aligned(cell
, bytes
, 1);
245 * Same as \sa cell_batch_alloc, but return an address at a particular
249 cell_batch_alloc_aligned(struct cell_context
*cell
, uint bytes
,
255 ASSERT(bytes
% 8 == 0);
256 ASSERT(bytes
<= CELL_BUFFER_SIZE
);
257 ASSERT(alignment
> 0);
258 ASSERT(cell
->cur_batch
>= 0);
263 for (spu
= 0; spu
< cell
->num_spus
; spu
++) {
264 ASSERT(cell
->buffer_status
[spu
][cell
->cur_batch
][0]
265 == CELL_BUFFER_STATUS_USED
);
270 size
= cell
->buffer_size
[cell
->cur_batch
];
272 padbytes
= (alignment
- (size
% alignment
)) % alignment
;
274 if (padbytes
+ bytes
> cell_batch_free_space(cell
)) {
275 cell_batch_flush(cell
);
282 ASSERT(size
% alignment
== 0);
283 ASSERT(size
+ bytes
<= CELL_BUFFER_SIZE
);
285 pos
= (void *) (cell
->buffer
[cell
->cur_batch
] + size
);
287 cell
->buffer_size
[cell
->cur_batch
] = size
+ bytes
;
294 * One-time init of batch buffers.
297 cell_init_batch_buffers(struct cell_context
*cell
)
301 /* init command, vertex/index buffer info */
302 for (buf
= 0; buf
< CELL_NUM_BUFFERS
; buf
++) {
303 cell
->buffer_size
[buf
] = 0;
305 /* init batch buffer status values,
306 * mark 0th buffer as used, rest as free.
308 for (spu
= 0; spu
< cell
->num_spus
; spu
++) {
310 cell
->buffer_status
[spu
][buf
][0] = CELL_BUFFER_STATUS_USED
;
312 cell
->buffer_status
[spu
][buf
][0] = CELL_BUFFER_STATUS_FREE
;