/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "pipe/p_inlines.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "cell_context.h"
#include "cell_gen_fragment.h"
#include "cell_state.h"
#include "cell_state_emit.h"
#include "cell_batch.h"
#include "cell_texture.h"
#include "draw/draw_context.h"
#include "draw/draw_private.h"


/**
 * Find/create a cell_command_fragment_ops object corresponding to the
 * current blend/stencil/z/colormask/etc. state.
 */
static struct cell_command_fragment_ops *
lookup_fragment_ops(struct cell_context *cell)
{
   struct cell_fragment_ops_key key;
   struct cell_command_fragment_ops *ops;

   /*
    * Build key
    */
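   /* Zero the whole struct first so any compiler-inserted padding bytes
    * have a known value; the keymap compares keys byte-wise.
    */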
   memset(&key, 0, sizeof(key));
   key.blend = *cell->blend;
   key.blend_color = cell->blend_color;
   key.dsa = *cell->depth_stencil;

   if (cell->framebuffer.cbufs[0])
      key.color_format = cell->framebuffer.cbufs[0]->format;
   else
      key.color_format = PIPE_FORMAT_NONE;

   if (cell->framebuffer.zsbuf)
      key.zs_format = cell->framebuffer.zsbuf->format;
   else
      key.zs_format = PIPE_FORMAT_NONE;

   /*
    * Look up key in cache.
    */
   ops = (struct cell_command_fragment_ops *)
      util_keymap_lookup(cell->fragment_ops_cache, &key);

   /*
    * If not found, create/save new fragment ops command.
    */
   if (!ops) {
      struct spe_function spe_code_front, spe_code_back;
      unsigned int facing_dependent, total_code_size;

      if (0)
         debug_printf("**** Create New Fragment Ops\n");

      /* Prepare the buffer that will hold the generated code.  The
       * "0" passed in for the size means that the SPE code will
       * use a default size.
       */
      spe_init_func(&spe_code_front, 0);
      spe_init_func(&spe_code_back, 0);

      /* Generate new code.  Always generate new code for both front-facing
       * and back-facing fragments, even if it's the same code in both
       * cases.
       */
      cell_gen_fragment_function(cell, CELL_FACING_FRONT, &spe_code_front);
      cell_gen_fragment_function(cell, CELL_FACING_BACK, &spe_code_back);

      /* Make sure the code is a multiple of 8 bytes long; this is
       * required to ensure that the dual pipe instruction alignment
       * is correct.  It's also important for the SPU unpacking,
       * which assumes 8-byte boundaries.
       */
      unsigned int front_code_size = spe_code_size(&spe_code_front);
      while (front_code_size % 8 != 0) {
         spe_lnop(&spe_code_front);
         front_code_size = spe_code_size(&spe_code_front);
      }
      unsigned int back_code_size = spe_code_size(&spe_code_back);
      while (back_code_size % 8 != 0) {
         spe_lnop(&spe_code_back);
         back_code_size = spe_code_size(&spe_code_back);
      }

      /* Determine whether the code we generated is facing-dependent, by
       * determining whether the generated code is different for the front-
       * and back-facing fragments.
       */
      if (front_code_size == back_code_size &&
          memcmp(spe_code_front.store, spe_code_back.store,
                 front_code_size) == 0) {
         /* Code is identical; only need one copy. */
         facing_dependent = 0;
         total_code_size = front_code_size;
      }
      else {
         /* Code is different for front-facing and back-facing fragments.
          * Need to send both copies.
          */
         facing_dependent = 1;
         total_code_size = front_code_size + back_code_size;
      }

      /* Allocate the new fragment ops command.  Note that this structure
       * has a variable length that depends on the total code size required.
       */
      ops = CALLOC_VARIANT_LENGTH_STRUCT(cell_command_fragment_ops,
                                         total_code_size);
      /* populate the new cell_command_fragment_ops object */
      ops->opcode[0] = CELL_CMD_STATE_FRAGMENT_OPS;
      ops->total_code_size = total_code_size;
      ops->front_code_index = 0;
      memcpy(ops->code, spe_code_front.store, front_code_size);
      if (facing_dependent) {
         /* We have separate front- and back-facing code.  Append the
          * back-facing code to the buffer.  Be careful because the code
          * size is in bytes, but the buffer is of unsigned elements.
          */
         ops->back_code_index = front_code_size / sizeof(spe_code_front.store[0]);
         memcpy(ops->code + ops->back_code_index, spe_code_back.store,
                back_code_size);
      }
      else {
         /* Use the same code for front- and back-facing fragments */
         ops->back_code_index = ops->front_code_index;
      }

      /* Set the fields for the fallback case.  Note that these fields
       * (and the whole fallback case) will eventually go away.
       */
      ops->dsa = *cell->depth_stencil;
      ops->blend = *cell->blend;
      ops->blend_color = cell->blend_color;

      /* insert cell_command_fragment_ops object into keymap/cache */
      util_keymap_insert(cell->fragment_ops_cache, &key, ops, NULL);

      /* release rtasm buffer */
      spe_release_func(&spe_code_front);
      spe_release_func(&spe_code_back);
   }
   else {
      if (0)
         debug_printf("**** Re-use Fragment Ops\n");
   }

   return ops;
}


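/**
 * Copy a state object into the batch buffer as a state-update command.
 * The command occupies a 16-byte aligned block: the opcode fills the
 * first 16 bytes (note the "dst + 4" skip below, in 32-bit units),
 * followed by the state data, with the total size rounded up to a
 * multiple of 16.
 */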
static void
emit_state_cmd(struct cell_context *cell, uint cmd,
               const void *state, uint state_size)
{
   uint32_t *dst = (uint32_t *)
      cell_batch_alloc16(cell, ROUNDUP16(sizeof(opcode_t) + state_size));
   *dst = cmd;
   memcpy(dst + 4, state, state_size);
}


/**
 * For state marked as 'dirty', construct a state-update command block
 * and insert it into the current batch buffer.
 */
void
cell_emit_state(struct cell_context *cell)
{
   if (cell->dirty & CELL_NEW_FRAMEBUFFER) {
      struct pipe_surface *cbuf = cell->framebuffer.cbufs[0];
      struct pipe_surface *zbuf = cell->framebuffer.zsbuf;
      STATIC_ASSERT(sizeof(struct cell_command_framebuffer) % 16 == 0);
      struct cell_command_framebuffer *fb
         = cell_batch_alloc16(cell, sizeof(*fb));
      fb->opcode[0] = CELL_CMD_STATE_FRAMEBUFFER;
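      /* Pass the mapped color/depth surface addresses so the SPUs can
       * access the framebuffer directly.
       */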
      fb->color_start = cell->cbuf_map[0];
      fb->color_format = cbuf->format;
      fb->depth_start = cell->zsbuf_map;
      fb->depth_format = zbuf ? zbuf->format : PIPE_FORMAT_NONE;
      fb->width = cell->framebuffer.width;
      fb->height = cell->framebuffer.height;
#if 0
      printf("EMIT color format %s\n", pf_name(fb->color_format));
      printf("EMIT depth format %s\n", pf_name(fb->depth_format));
#endif
   }

   if (cell->dirty & (CELL_NEW_RASTERIZER)) {
      STATIC_ASSERT(sizeof(struct cell_command_rasterizer) % 16 == 0);
      struct cell_command_rasterizer *rast =
         cell_batch_alloc16(cell, sizeof(*rast));
      rast->opcode[0] = CELL_CMD_STATE_RASTERIZER;
      rast->rasterizer = *cell->rasterizer;
   }

   if (cell->dirty & (CELL_NEW_FS)) {
      /* Send new fragment program to SPUs */
      STATIC_ASSERT(sizeof(struct cell_command_fragment_program) % 16 == 0);
      struct cell_command_fragment_program *fp
         = cell_batch_alloc16(cell, sizeof(*fp));
      fp->opcode[0] = CELL_CMD_STATE_FRAGMENT_PROGRAM;
      fp->num_inst = cell->fs->code.num_inst;
      memcpy(&fp->code, cell->fs->code.store,
             SPU_MAX_FRAGMENT_PROGRAM_INSTS * SPE_INST_SIZE);
      if (0) {
         int i;
         printf("PPU Emit CELL_CMD_STATE_FRAGMENT_PROGRAM:\n");
         for (i = 0; i < fp->num_inst; i++) {
            printf("  %3d: 0x%08x\n", i, fp->code[i]);
         }
      }
   }

   if (cell->dirty & (CELL_NEW_FS_CONSTANTS)) {
      const uint shader = PIPE_SHADER_FRAGMENT;
      const uint num_const = cell->constants[shader].buffer->size / sizeof(float);
      uint i, j;
      float *buf = cell_batch_alloc16(cell,
                                      ROUNDUP16(32 + num_const * sizeof(float)));
      uint32_t *ibuf = (uint32_t *) buf;
      const float *constants = pipe_buffer_map(cell->pipe.screen,
                                               cell->constants[shader].buffer,
                                               PIPE_BUFFER_USAGE_CPU_READ);
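      /* Batch layout: the opcode fills the first 16 bytes, the constant
       * count the next 16 (indices 0 and 4 in 32-bit units), and the
       * constants themselves start at the 32-byte mark (float index 8).
       */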
      ibuf[0] = CELL_CMD_STATE_FS_CONSTANTS;
      ibuf[4] = num_const;
      j = 8;
      for (i = 0; i < num_const; i++) {
         buf[j++] = constants[i];
      }
      pipe_buffer_unmap(cell->pipe.screen, cell->constants[shader].buffer);
   }

   if (cell->dirty & (CELL_NEW_FRAMEBUFFER |
                      CELL_NEW_DEPTH_STENCIL |
                      CELL_NEW_BLEND)) {
      struct cell_command_fragment_ops *fops, *fops_cmd;
      /* Note that cell_command_fragment_ops is a variant-sized record */
      fops = lookup_fragment_ops(cell);
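      /* Copy the cached command, including the generated SPE code that
       * follows the fixed-size header, into the batch buffer.
       */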
      fops_cmd = cell_batch_alloc16(cell,
                                    ROUNDUP16(sizeof(*fops_cmd) +
                                              fops->total_code_size));
      memcpy(fops_cmd, fops, sizeof(*fops) + fops->total_code_size);
   }

   if (cell->dirty & CELL_NEW_SAMPLER) {
      uint i;
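      /* Send only the sampler units flagged in the dirty_samplers bitmask */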
      for (i = 0; i < CELL_MAX_SAMPLERS; i++) {
         if (cell->dirty_samplers & (1 << i)) {
            if (cell->sampler[i]) {
               STATIC_ASSERT(sizeof(struct cell_command_sampler) % 16 == 0);
               struct cell_command_sampler *sampler
                  = cell_batch_alloc16(cell, sizeof(*sampler));
               sampler->opcode[0] = CELL_CMD_STATE_SAMPLER;
               sampler->unit = i;
               sampler->state = *cell->sampler[i];
            }
         }
      }
      cell->dirty_samplers = 0x0;
   }

   if (cell->dirty & CELL_NEW_TEXTURE) {
      uint i;
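      /* For each dirty texture unit, send the per-mipmap-level address
       * and size info (or zeros if the unit has no texture bound).
       */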
      for (i = 0; i < CELL_MAX_SAMPLERS; i++) {
         if (cell->dirty_textures & (1 << i)) {
            STATIC_ASSERT(sizeof(struct cell_command_texture) % 16 == 0);
            struct cell_command_texture *texture =
               (struct cell_command_texture *)
               cell_batch_alloc16(cell, sizeof(*texture));

            texture->opcode[0] = CELL_CMD_STATE_TEXTURE;
            texture->unit = i;
            if (cell->texture[i]) {
               struct cell_texture *ct = cell->texture[i];
               uint level;
               for (level = 0; level < CELL_MAX_TEXTURE_LEVELS; level++) {
                  texture->start[level] = (ct->mapped +
                                           ct->level_offset[level]);
                  texture->width[level] = u_minify(ct->base.width0, level);
                  texture->height[level] = u_minify(ct->base.height0, level);
                  texture->depth[level] = u_minify(ct->base.depth0, level);
               }
               texture->target = ct->base.target;
            }
            else {
               uint level;
               for (level = 0; level < CELL_MAX_TEXTURE_LEVELS; level++) {
                  texture->start[level] = NULL;
                  texture->width[level] = 0;
                  texture->height[level] = 0;
                  texture->depth[level] = 0;
               }
               texture->target = 0;
            }
         }
      }
      cell->dirty_textures = 0x0;
   }

   if (cell->dirty & CELL_NEW_VERTEX_INFO) {
      emit_state_cmd(cell, CELL_CMD_STATE_VERTEX_INFO,
                     &cell->vertex_info, sizeof(struct vertex_info));
   }

#if 0
   if (cell->dirty & CELL_NEW_VS) {
      const struct draw_context *const draw = cell->draw;
      struct cell_shader_info info;

      info.num_outputs = draw_num_vs_outputs(draw);
      info.declarations = (uintptr_t) draw->vs.machine.Declarations;
      info.num_declarations = draw->vs.machine.NumDeclarations;
      info.instructions = (uintptr_t) draw->vs.machine.Instructions;
      info.num_instructions = draw->vs.machine.NumInstructions;
      info.immediates = (uintptr_t) draw->vs.machine.Imms;
      info.num_immediates = draw->vs.machine.ImmLimit / 4;

      emit_state_cmd(cell, CELL_CMD_STATE_BIND_VS, &info, sizeof(info));
   }
#endif
}