/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_format.h"
#include "cell_context.h"
#include "cell_gen_fragment.h"
#include "cell_state.h"
#include "cell_state_emit.h"
#include "cell_batch.h"
#include "cell_texture.h"
#include "draw/draw_context.h"
#include "draw/draw_private.h"


/**
 * Find/create a cell_command_fragment_ops object corresponding to the
 * current blend/stencil/z/colormask/etc. state.
 */
static struct cell_command_fragment_ops *
lookup_fragment_ops(struct cell_context *cell)
{
   struct cell_fragment_ops_key key;
   struct cell_command_fragment_ops *ops;

   /*
    * Build key
    */
   memset(&key, 0, sizeof(key));
   key.blend = *cell->blend;
   key.blend_color = cell->blend_color;
   key.dsa = *cell->depth_stencil;

   if (cell->framebuffer.cbufs[0])
      key.color_format = cell->framebuffer.cbufs[0]->format;
   else
      key.color_format = PIPE_FORMAT_NONE;

   if (cell->framebuffer.zsbuf)
      key.zs_format = cell->framebuffer.zsbuf->format;
   else
      key.zs_format = PIPE_FORMAT_NONE;

   /*
    * Look up key in cache.
    */
   ops = (struct cell_command_fragment_ops *)
      util_keymap_lookup(cell->fragment_ops_cache, &key);

   /*
    * If not found, create/save new fragment ops command.
    */
   if (!ops) {
      struct spe_function spe_code_front, spe_code_back;
      unsigned int facing_dependent, total_code_size;

      if (0)
         debug_printf("**** Create New Fragment Ops\n");

      /* Prepare the buffer that will hold the generated code.  The
       * "0" passed in for the size means that the SPE code will
       * use a default size.
       */
      spe_init_func(&spe_code_front, 0);
      spe_init_func(&spe_code_back, 0);

      /* Generate new code.  Always generate new code for both front-facing
       * and back-facing fragments, even if it's the same code in both
       * cases.
       */
      cell_gen_fragment_function(cell, CELL_FACING_FRONT, &spe_code_front);
      cell_gen_fragment_function(cell, CELL_FACING_BACK, &spe_code_back);

      /* Make sure the code is a multiple of 8 bytes long; this is
       * required to ensure that the dual pipe instruction alignment
       * is correct.  It's also important for the SPU unpacking,
       * which assumes 8-byte boundaries.
       */
      unsigned int front_code_size = spe_code_size(&spe_code_front);
      while (front_code_size % 8 != 0) {
         spe_lnop(&spe_code_front);
         front_code_size = spe_code_size(&spe_code_front);
      }
      unsigned int back_code_size = spe_code_size(&spe_code_back);
      while (back_code_size % 8 != 0) {
         spe_lnop(&spe_code_back);
         back_code_size = spe_code_size(&spe_code_back);
      }

      /* Determine whether the generated code is facing-dependent, i.e.
       * whether the code differs for front-facing and back-facing
       * fragments.
       */
      if (front_code_size == back_code_size &&
          memcmp(spe_code_front.store, spe_code_back.store,
                 front_code_size) == 0) {
         /* Code is identical; only need one copy. */
         facing_dependent = 0;
         total_code_size = front_code_size;
      }
      else {
         /* Code is different for front-facing and back-facing fragments.
          * Need to send both copies.
          */
         facing_dependent = 1;
         total_code_size = front_code_size + back_code_size;
      }

      /* Allocate the new fragment ops command.  Note that this structure
       * has variant length based on the total code size required.
       */
      ops = CALLOC_VARIANT_LENGTH_STRUCT(cell_command_fragment_ops,
                                         total_code_size);
      /* populate the new cell_command_fragment_ops object */
      ops->opcode[0] = CELL_CMD_STATE_FRAGMENT_OPS;
      ops->total_code_size = total_code_size;
      ops->front_code_index = 0;
      memcpy(ops->code, spe_code_front.store, front_code_size);
      if (facing_dependent) {
         /* We have separate front- and back-facing code.  Append the
          * back-facing code to the buffer.  Be careful because the code
          * size is in bytes, but the buffer is of unsigned elements.
          */
         ops->back_code_index = front_code_size / sizeof(spe_code_front.store[0]);
         memcpy(ops->code + ops->back_code_index, spe_code_back.store,
                back_code_size);
      }
      else {
         /* Use the same code for front- and back-facing fragments */
         ops->back_code_index = ops->front_code_index;
      }

      /* Set the fields for the fallback case.  Note that these fields
       * (and the whole fallback case) will eventually go away.
       */
      ops->dsa = *cell->depth_stencil;
      ops->blend = *cell->blend;
      ops->blend_color = cell->blend_color;

      /* insert cell_command_fragment_ops object into keymap/cache */
      util_keymap_insert(cell->fragment_ops_cache, &key, ops, NULL);

      /* release the rtasm buffers */
      spe_release_func(&spe_code_front);
      spe_release_func(&spe_code_back);
   }
   else {
      if (0)
         debug_printf("**** Re-use Fragment Ops\n");
   }

   return ops;
}


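/**
 * Write a state-update command into the current batch buffer: the
 * opcode goes in the first 16-byte slot, followed by a copy of the
 * given state object.  The whole command is padded to a multiple of
 * 16 bytes, and the payload starts at dst + 4 (16 bytes in) so the
 * state data itself stays 16-byte aligned.
 */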
static void
emit_state_cmd(struct cell_context *cell, uint cmd,
               const void *state, uint state_size)
{
   uint32_t *dst = (uint32_t *)
      cell_batch_alloc16(cell, ROUNDUP16(sizeof(opcode_t) + state_size));
   *dst = cmd;
   memcpy(dst + 4, state, state_size);
}


/**
 * For state marked as 'dirty', construct a state-update command block
 * and insert it into the current batch buffer.
 */
void
cell_emit_state(struct cell_context *cell)
{
   if (cell->dirty & CELL_NEW_FRAMEBUFFER) {
      struct pipe_surface *cbuf = cell->framebuffer.cbufs[0];
      struct pipe_surface *zbuf = cell->framebuffer.zsbuf;
      STATIC_ASSERT(sizeof(struct cell_command_framebuffer) % 16 == 0);
      struct cell_command_framebuffer *fb
         = cell_batch_alloc16(cell, sizeof(*fb));
      fb->opcode[0] = CELL_CMD_STATE_FRAMEBUFFER;
      fb->color_start = cell->cbuf_map[0];
      fb->color_format = cbuf->format;
      fb->depth_start = cell->zsbuf_map;
      fb->depth_format = zbuf ? zbuf->format : PIPE_FORMAT_NONE;
      fb->width = cell->framebuffer.width;
      fb->height = cell->framebuffer.height;
#if 0
      printf("EMIT color format %s\n", util_format_name(fb->color_format));
      printf("EMIT depth format %s\n", util_format_name(fb->depth_format));
#endif
   }

   if (cell->dirty & CELL_NEW_RASTERIZER) {
      STATIC_ASSERT(sizeof(struct cell_command_rasterizer) % 16 == 0);
      struct cell_command_rasterizer *rast =
         cell_batch_alloc16(cell, sizeof(*rast));
      rast->opcode[0] = CELL_CMD_STATE_RASTERIZER;
      rast->rasterizer = *cell->rasterizer;
   }

   if (cell->dirty & CELL_NEW_FS) {
      /* Send new fragment program to SPUs */
      STATIC_ASSERT(sizeof(struct cell_command_fragment_program) % 16 == 0);
      struct cell_command_fragment_program *fp
         = cell_batch_alloc16(cell, sizeof(*fp));
      fp->opcode[0] = CELL_CMD_STATE_FRAGMENT_PROGRAM;
      fp->num_inst = cell->fs->code.num_inst;
      memcpy(&fp->code, cell->fs->code.store,
             SPU_MAX_FRAGMENT_PROGRAM_INSTS * SPE_INST_SIZE);
      if (0) {
         int i;
         printf("PPU Emit CELL_CMD_STATE_FRAGMENT_PROGRAM:\n");
         for (i = 0; i < fp->num_inst; i++) {
            printf(" %3d: 0x%08x\n", i, fp->code[i]);
         }
      }
   }

   if (cell->dirty & CELL_NEW_FS_CONSTANTS) {
      const uint shader = PIPE_SHADER_FRAGMENT;
      const uint num_const = cell->constants[shader]->width0 / sizeof(float);
      uint i, j;
      float *buf = cell_batch_alloc16(cell,
                                      ROUNDUP16(32 + num_const * sizeof(float)));
      uint32_t *ibuf = (uint32_t *) buf;
      const float *constants = cell->mapped_constants[shader];
      ibuf[0] = CELL_CMD_STATE_FS_CONSTANTS;  /* opcode in first 16-byte slot */
      ibuf[4] = num_const;                    /* constant count at byte 16 */
      j = 8;                                  /* float data starts at byte 32 */
      for (i = 0; i < num_const; i++) {
         buf[j++] = constants[i];
      }
   }

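   /* The fragment-ops code generated by lookup_fragment_ops() depends on
    * the blend, depth/stencil and framebuffer-format state, so it must be
    * re-fetched (and possibly regenerated) whenever any of those change.
    */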
   if (cell->dirty & (CELL_NEW_FRAMEBUFFER |
                      CELL_NEW_DEPTH_STENCIL |
                      CELL_NEW_BLEND)) {
      struct cell_command_fragment_ops *fops, *fops_cmd;
      /* Note that cell_command_fragment_ops is a variant-sized record */
      fops = lookup_fragment_ops(cell);
      fops_cmd = cell_batch_alloc16(cell,
                                    ROUNDUP16(sizeof(*fops_cmd) + fops->total_code_size));
      memcpy(fops_cmd, fops, sizeof(*fops) + fops->total_code_size);
   }

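   /* Send new sampler state for each dirty sampler unit */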
   if (cell->dirty & CELL_NEW_SAMPLER) {
      uint i;
      for (i = 0; i < CELL_MAX_SAMPLERS; i++) {
         if (cell->dirty_samplers & (1 << i)) {
            if (cell->sampler[i]) {
               STATIC_ASSERT(sizeof(struct cell_command_sampler) % 16 == 0);
               struct cell_command_sampler *sampler
                  = cell_batch_alloc16(cell, sizeof(*sampler));
               sampler->opcode[0] = CELL_CMD_STATE_SAMPLER;
               sampler->unit = i;
               sampler->state = *cell->sampler[i];
            }
         }
      }
      cell->dirty_samplers = 0x0;
   }

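   /* Send new texture state: the mapped address and size of each
    * mipmap level for each dirty texture unit.
    */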
   if (cell->dirty & CELL_NEW_TEXTURE) {
      uint i;
      for (i = 0; i < CELL_MAX_SAMPLERS; i++) {
         if (cell->dirty_textures & (1 << i)) {
            STATIC_ASSERT(sizeof(struct cell_command_texture) % 16 == 0);
            struct cell_command_texture *texture =
               (struct cell_command_texture *)
               cell_batch_alloc16(cell, sizeof(*texture));

            texture->opcode[0] = CELL_CMD_STATE_TEXTURE;
            texture->unit = i;
            if (cell->texture[i]) {
               struct cell_resource *ct = cell->texture[i];
               uint level;
               for (level = 0; level < CELL_MAX_TEXTURE_LEVELS; level++) {
                  texture->start[level] = (ct->mapped +
                                           ct->level_offset[level]);
                  texture->width[level] = u_minify(ct->base.width0, level);
                  texture->height[level] = u_minify(ct->base.height0, level);
                  texture->depth[level] = u_minify(ct->base.depth0, level);
               }
               texture->target = ct->base.target;
            }
            else {
               uint level;
               for (level = 0; level < CELL_MAX_TEXTURE_LEVELS; level++) {
                  texture->start[level] = NULL;
                  texture->width[level] = 0;
                  texture->height[level] = 0;
                  texture->depth[level] = 0;
               }
               texture->target = 0;
            }
         }
      }
      cell->dirty_textures = 0x0;
   }

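   /* Send the current vertex layout to the SPUs */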
   if (cell->dirty & CELL_NEW_VERTEX_INFO) {
      emit_state_cmd(cell, CELL_CMD_STATE_VERTEX_INFO,
                     &cell->vertex_info, sizeof(struct vertex_info));
   }

#if 0
   if (cell->dirty & CELL_NEW_VS) {
      const struct draw_context *const draw = cell->draw;
      struct cell_shader_info info;

      info.num_outputs = draw_num_shader_outputs(draw);
      info.declarations = (uintptr_t) draw->vs.machine.Declarations;
      info.num_declarations = draw->vs.machine.NumDeclarations;
      info.instructions = (uintptr_t) draw->vs.machine.Instructions;
      info.num_instructions = draw->vs.machine.NumInstructions;
      info.immediates = (uintptr_t) draw->vs.machine.Imms;
      info.num_immediates = draw->vs.machine.ImmLimit / 4;

      emit_state_cmd(cell, CELL_CMD_STATE_BIND_VS, &info, sizeof(info));
   }
#endif
}