/* mesa.git: src/gallium/drivers/cell/ppu/cell_state_emit.c */
/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "pipe/p_inlines.h"
#include "util/u_memory.h"
#include "cell_context.h"
#include "cell_gen_fragment.h"
#include "cell_state.h"
#include "cell_state_emit.h"
#include "cell_batch.h"
#include "cell_texture.h"
#include "draw/draw_context.h"
#include "draw/draw_private.h"


/**
 * Find/create a cell_command_fragment_ops object corresponding to the
 * current blend/stencil/z/colormask/etc. state.
 */
static struct cell_command_fragment_ops *
lookup_fragment_ops(struct cell_context *cell)
{
   struct cell_fragment_ops_key key;
   struct cell_command_fragment_ops *ops;

   /*
    * Build key
    */
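   /* Note: the memset below matters because the keymap module hashes and
    * compares the key as raw bytes, so any padding inside the struct must
    * be zeroed for lookups to match.
    */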
   memset(&key, 0, sizeof(key));
   key.blend = *cell->blend;
   key.blend_color = cell->blend_color;
   key.dsa = *cell->depth_stencil;

   if (cell->framebuffer.cbufs[0])
      key.color_format = cell->framebuffer.cbufs[0]->format;
   else
      key.color_format = PIPE_FORMAT_NONE;

   if (cell->framebuffer.zsbuf)
      key.zs_format = cell->framebuffer.zsbuf->format;
   else
      key.zs_format = PIPE_FORMAT_NONE;

   /*
    * Look up key in cache.
    */
   ops = (struct cell_command_fragment_ops *)
      util_keymap_lookup(cell->fragment_ops_cache, &key);

   /*
    * If not found, create/save new fragment ops command.
    */
   if (!ops) {
      struct spe_function spe_code_front, spe_code_back;
      unsigned int facing_dependent, total_code_size;

      if (0)
         debug_printf("**** Create New Fragment Ops\n");

      /* Prepare the buffers that will hold the generated code.  The
       * "0" passed in for the size means that a default-sized code
       * buffer will be used.
       */
      spe_init_func(&spe_code_front, 0);
      spe_init_func(&spe_code_back, 0);

      /* Generate new code.  Always generate new code for both front-facing
       * and back-facing fragments, even if it's the same code in both
       * cases.
       */
      cell_gen_fragment_function(cell, CELL_FACING_FRONT, &spe_code_front);
      cell_gen_fragment_function(cell, CELL_FACING_BACK, &spe_code_back);

      /* Make sure the code is a multiple of 8 bytes long; this is
       * required to ensure that the dual-pipe instruction alignment
       * is correct.  It's also important for the SPU code unpacking,
       * which assumes 8-byte boundaries.
       */
      unsigned int front_code_size = spe_code_size(&spe_code_front);
      while (front_code_size % 8 != 0) {
         spe_lnop(&spe_code_front);
         front_code_size = spe_code_size(&spe_code_front);
      }
      unsigned int back_code_size = spe_code_size(&spe_code_back);
      while (back_code_size % 8 != 0) {
         spe_lnop(&spe_code_back);
         back_code_size = spe_code_size(&spe_code_back);
      }
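      /* (Each SPE instruction is 4 bytes, so each padding loop above
       * appends at most one lnop.)
       */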

      /* Determine whether the generated code is facing-dependent, i.e.
       * whether the code differs for front-facing vs. back-facing
       * fragments.
       */
      if (front_code_size == back_code_size &&
          memcmp(spe_code_front.store, spe_code_back.store,
                 front_code_size) == 0) {
         /* Code is identical; only need one copy. */
         facing_dependent = 0;
         total_code_size = front_code_size;
      }
      else {
         /* Code is different for front-facing and back-facing fragments.
          * Need to send both copies.
          */
         facing_dependent = 1;
         total_code_size = front_code_size + back_code_size;
      }

      /* Allocate the new fragment ops command.  Note that this structure
       * has variant length based on the total code size required.
       */
      ops = CALLOC_VARIANT_LENGTH_STRUCT(cell_command_fragment_ops,
                                         total_code_size);
      if (!ops) {
         /* out of memory */
         spe_release_func(&spe_code_front);
         spe_release_func(&spe_code_back);
         return NULL;
      }

      /* populate the new cell_command_fragment_ops object */
      ops->opcode[0] = CELL_CMD_STATE_FRAGMENT_OPS;
      ops->total_code_size = total_code_size;
      ops->front_code_index = 0;
      memcpy(ops->code, spe_code_front.store, front_code_size);
      if (facing_dependent) {
         /* We have separate front- and back-facing code.  Append the
          * back-facing code to the buffer.  Be careful because the code
          * size is in bytes, but the buffer is of unsigned elements.
          */
         ops->back_code_index =
            front_code_size / sizeof(spe_code_front.store[0]);
         memcpy(ops->code + ops->back_code_index,
                spe_code_back.store, back_code_size);
      }
      else {
         /* Use the same code for front- and back-facing fragments */
         ops->back_code_index = ops->front_code_index;
      }

      /* Set the fields for the fallback case.  Note that these fields
       * (and the whole fallback case) will eventually go away.
       */
      ops->dsa = *cell->depth_stencil;
      ops->blend = *cell->blend;
      ops->blend_color = cell->blend_color;

      /* insert cell_command_fragment_ops object into keymap/cache */
      util_keymap_insert(cell->fragment_ops_cache, &key, ops, NULL);

      /* release rtasm buffers */
      spe_release_func(&spe_code_front);
      spe_release_func(&spe_code_back);
   }
   else {
      if (0)
         debug_printf("**** Re-use Fragment Ops\n");
   }

   return ops;
}
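

/*
 * For reference, a minimal sketch of how the fragment_ops_cache used
 * above might be created at context-creation time with the u_keymap
 * utility ("util/u_keymap.h").  This is illustrative only -- the real
 * setup lives in cell_context.c -- and the delete callback name is
 * hypothetical.
 */
#if 0
static void
fragment_ops_delete(const struct keymap *km, const void *key,
                    void *data, void *user)
{
   FREE(data);   /* free a cached cell_command_fragment_ops record */
}

/* ...in cell_create_context(): */
cell->fragment_ops_cache =
   util_new_keymap(sizeof(struct cell_fragment_ops_key),
                   ~0,                    /* no limit on entries */
                   fragment_ops_delete);
#endif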



static void
emit_state_cmd(struct cell_context *cell, uint cmd,
               const void *state, uint state_size)
{
   uint32_t *dst = (uint32_t *)
      cell_batch_alloc16(cell, ROUNDUP16(sizeof(opcode_t) + state_size));
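   /* The opcode occupies the first 16-byte qword of the command
    * (sizeof(opcode_t) here); the state data is copied just past it,
    * at an offset of four uint32_t's.
    */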
   *dst = cmd;
   memcpy(dst + 4, state, state_size);
}


/**
 * For state marked as 'dirty', construct a state-update command block
 * and insert it into the current batch buffer.
 */
void
cell_emit_state(struct cell_context *cell)
{
   if (cell->dirty & CELL_NEW_FRAMEBUFFER) {
      struct pipe_surface *cbuf = cell->framebuffer.cbufs[0];
      struct pipe_surface *zbuf = cell->framebuffer.zsbuf;
      STATIC_ASSERT(sizeof(struct cell_command_framebuffer) % 16 == 0);
      struct cell_command_framebuffer *fb
         = cell_batch_alloc16(cell, sizeof(*fb));
      fb->opcode[0] = CELL_CMD_STATE_FRAMEBUFFER;
      fb->color_start = cell->cbuf_map[0];
      fb->color_format = cbuf ? cbuf->format : PIPE_FORMAT_NONE;
      fb->depth_start = cell->zsbuf_map;
      fb->depth_format = zbuf ? zbuf->format : PIPE_FORMAT_NONE;
      fb->width = cell->framebuffer.width;
      fb->height = cell->framebuffer.height;
#if 0
      printf("EMIT color format %s\n", pf_name(fb->color_format));
      printf("EMIT depth format %s\n", pf_name(fb->depth_format));
#endif
   }

   if (cell->dirty & (CELL_NEW_RASTERIZER)) {
      STATIC_ASSERT(sizeof(struct cell_command_rasterizer) % 16 == 0);
      struct cell_command_rasterizer *rast =
         cell_batch_alloc16(cell, sizeof(*rast));
      rast->opcode[0] = CELL_CMD_STATE_RASTERIZER;
      rast->rasterizer = *cell->rasterizer;
   }

   if (cell->dirty & (CELL_NEW_FS)) {
      /* Send new fragment program to SPUs */
      STATIC_ASSERT(sizeof(struct cell_command_fragment_program) % 16 == 0);
      struct cell_command_fragment_program *fp
         = cell_batch_alloc16(cell, sizeof(*fp));
      fp->opcode[0] = CELL_CMD_STATE_FRAGMENT_PROGRAM;
      fp->num_inst = cell->fs->code.num_inst;
      memcpy(&fp->code, cell->fs->code.store,
             SPU_MAX_FRAGMENT_PROGRAM_INSTS * SPE_INST_SIZE);
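      /* (The full maximum-size instruction buffer is copied; fp->num_inst
       * tells the SPUs how many instructions are actually meaningful.)
       */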
      if (0) {
         int i;
         printf("PPU Emit CELL_CMD_STATE_FRAGMENT_PROGRAM:\n");
         for (i = 0; i < fp->num_inst; i++) {
            printf("  %3d: 0x%08x\n", i, fp->code[i]);
         }
      }
   }

   if (cell->dirty & (CELL_NEW_FS_CONSTANTS)) {
      const uint shader = PIPE_SHADER_FRAGMENT;
      const uint num_const =
         cell->constants[shader].buffer->size / sizeof(float);
      uint i, j;
      float *buf = cell_batch_alloc16(cell,
                         ROUNDUP16(32 + num_const * sizeof(float)));
      uint32_t *ibuf = (uint32_t *) buf;
      const float *constants = pipe_buffer_map(cell->pipe.screen,
                                               cell->constants[shader].buffer,
                                               PIPE_BUFFER_USAGE_CPU_READ);
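      /* Batch layout: one 16-byte qword for the opcode, one for the
       * constant count, then the constants themselves starting at float
       * index 8 (byte offset 32).
       */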
      ibuf[0] = CELL_CMD_STATE_FS_CONSTANTS;
      ibuf[4] = num_const;
      j = 8;
      for (i = 0; i < num_const; i++) {
         buf[j++] = constants[i];
      }
      pipe_buffer_unmap(cell->pipe.screen, cell->constants[shader].buffer);
   }

   if (cell->dirty & (CELL_NEW_FRAMEBUFFER |
                      CELL_NEW_DEPTH_STENCIL |
                      CELL_NEW_BLEND)) {
      struct cell_command_fragment_ops *fops, *fops_cmd;
      /* Note that cell_command_fragment_ops is a variant-sized record */
      fops = lookup_fragment_ops(cell);
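      /* Copy the complete variant-length record (fixed-size header plus
       * the appended SPE code) into the batch buffer.
       */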
      if (fops) {
         fops_cmd = cell_batch_alloc16(cell,
                        ROUNDUP16(sizeof(*fops_cmd) + fops->total_code_size));
         memcpy(fops_cmd, fops, sizeof(*fops) + fops->total_code_size);
      }
   }

   if (cell->dirty & CELL_NEW_SAMPLER) {
      uint i;
      for (i = 0; i < CELL_MAX_SAMPLERS; i++) {
         if (cell->dirty_samplers & (1 << i)) {
            if (cell->sampler[i]) {
               STATIC_ASSERT(sizeof(struct cell_command_sampler) % 16 == 0);
               struct cell_command_sampler *sampler
                  = cell_batch_alloc16(cell, sizeof(*sampler));
               sampler->opcode[0] = CELL_CMD_STATE_SAMPLER;
               sampler->unit = i;
               sampler->state = *cell->sampler[i];
            }
         }
      }
      cell->dirty_samplers = 0x0;
   }

   if (cell->dirty & CELL_NEW_TEXTURE) {
      uint i;
      for (i = 0; i < CELL_MAX_SAMPLERS; i++) {
         if (cell->dirty_textures & (1 << i)) {
            STATIC_ASSERT(sizeof(struct cell_command_texture) % 16 == 0);
            struct cell_command_texture *texture
               = (struct cell_command_texture *)
                 cell_batch_alloc16(cell, sizeof(*texture));
            texture->opcode[0] = CELL_CMD_STATE_TEXTURE;
            texture->unit = i;
            if (cell->texture[i]) {
               uint level;
               for (level = 0; level < CELL_MAX_TEXTURE_LEVELS; level++) {
                  texture->start[level] = cell->texture[i]->tiled_mapped[level];
                  texture->width[level] = cell->texture[i]->base.width[level];
                  texture->height[level] = cell->texture[i]->base.height[level];
                  texture->depth[level] = cell->texture[i]->base.depth[level];
               }
               texture->target = cell->texture[i]->base.target;
            }
            else {
               uint level;
               for (level = 0; level < CELL_MAX_TEXTURE_LEVELS; level++) {
                  texture->start[level] = NULL;
                  texture->width[level] = 0;
                  texture->height[level] = 0;
                  texture->depth[level] = 0;
               }
               texture->target = 0;
            }
         }
      }
      cell->dirty_textures = 0x0;
   }

   if (cell->dirty & CELL_NEW_VERTEX_INFO) {
      emit_state_cmd(cell, CELL_CMD_STATE_VERTEX_INFO,
                     &cell->vertex_info, sizeof(struct vertex_info));
   }

#if 0
   if (cell->dirty & CELL_NEW_VS) {
      const struct draw_context *const draw = cell->draw;
      struct cell_shader_info info;

      info.num_outputs = draw_num_vs_outputs(draw);
      info.declarations = (uintptr_t) draw->vs.machine.Declarations;
      info.num_declarations = draw->vs.machine.NumDeclarations;
      info.instructions = (uintptr_t) draw->vs.machine.Instructions;
      info.num_instructions = draw->vs.machine.NumInstructions;
      info.immediates = (uintptr_t) draw->vs.machine.Imms;
      info.num_immediates = draw->vs.machine.ImmLimit / 4;

      emit_state_cmd(cell, CELL_CMD_STATE_BIND_VS, &info, sizeof(info));
   }
#endif
}