03e432970232ba555c94c66ecf7acbbb3f1405d6
[mesa.git] / src / mesa / drivers / dri / i965 / brw_shader.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 extern "C" {
25 #include "main/macros.h"
26 #include "brw_context.h"
27 #include "brw_vs.h"
28 }
29 #include "brw_fs.h"
30 #include "glsl/ir_optimization.h"
31 #include "glsl/ir_print_visitor.h"
32
33 struct gl_shader *
34 brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
35 {
36 struct brw_shader *shader;
37
38 shader = rzalloc(NULL, struct brw_shader);
39 if (shader) {
40 shader->base.Type = type;
41 shader->base.Name = name;
42 _mesa_init_shader(ctx, &shader->base);
43 }
44
45 return &shader->base;
46 }
47
48 struct gl_shader_program *
49 brw_new_shader_program(struct gl_context *ctx, GLuint name)
50 {
51 struct gl_shader_program *prog = rzalloc(NULL, struct gl_shader_program);
52 if (prog) {
53 prog->Name = name;
54 _mesa_init_shader_program(ctx, prog);
55 }
56 return prog;
57 }
58
59 /**
60 * Performs a compile of the shader stages even when we don't know
61 * what non-orthogonal state will be set, in the hope that it reflects
62 * the eventual NOS used, and thus allows us to produce link failures.
63 */
64 static bool
65 brw_shader_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
66 {
67 struct brw_context *brw = brw_context(ctx);
68
69 if (brw->precompile && !brw_fs_precompile(ctx, prog))
70 return false;
71
72 if (brw->precompile && !brw_vs_precompile(ctx, prog))
73 return false;
74
75 return true;
76 }
77
78 static void
79 brw_lower_packing_builtins(struct brw_context *brw,
80 gl_shader_type shader_type,
81 exec_list *ir)
82 {
83 int ops = LOWER_PACK_SNORM_2x16
84 | LOWER_UNPACK_SNORM_2x16
85 | LOWER_PACK_UNORM_2x16
86 | LOWER_UNPACK_UNORM_2x16
87 | LOWER_PACK_SNORM_4x8
88 | LOWER_UNPACK_SNORM_4x8
89 | LOWER_PACK_UNORM_4x8
90 | LOWER_UNPACK_UNORM_4x8;
91
92 if (brw->intel.gen >= 7) {
93 /* Gen7 introduced the f32to16 and f16to32 instructions, which can be
94 * used to execute packHalf2x16 and unpackHalf2x16. For AOS code, no
95 * lowering is needed. For SOA code, the Half2x16 ops must be
96 * scalarized.
97 */
98 if (shader_type == MESA_SHADER_FRAGMENT) {
99 ops |= LOWER_PACK_HALF_2x16_TO_SPLIT
100 | LOWER_UNPACK_HALF_2x16_TO_SPLIT;
101 }
102 } else {
103 ops |= LOWER_PACK_HALF_2x16
104 | LOWER_UNPACK_HALF_2x16;
105 }
106
107 lower_packing_builtins(ir, ops);
108 }
109
/**
 * Driver hook called by the GLSL linker.
 *
 * For each linked stage: clones the linked IR into driver-private storage,
 * runs the i965 lowering and optimization passes over the clone, creates
 * the stage's gl_program with its parameter list (adding state references
 * for built-in "gl_*" uniforms), and finally attempts a precompile of the
 * whole program.  Returns false on allocation or precompile failure.
 *
 * NOTE(review): the pass ordering below is deliberate and fragile (see the
 * inline comments); do not reorder without checking each dependency.
 */
GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = &brw->intel;
   unsigned int stage;
   static const char *target_strings[]
      = { "vertex", "fragment", "geometry" };

   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      struct brw_shader *shader =
         (struct brw_shader *)shProg->_LinkedShaders[stage];
      /* Indexed by stage; order must match _LinkedShaders. */
      static const GLenum targets[] = {
         GL_VERTEX_PROGRAM_ARB,
         GL_FRAGMENT_PROGRAM_ARB,
         GL_GEOMETRY_PROGRAM_NV
      };

      if (!shader)
         continue;

      struct gl_program *prog =
         ctx->Driver.NewProgram(ctx, targets[stage], shader->base.Name);
      if (!prog)
         return false;
      prog->Parameters = _mesa_new_parameter_list();

      if (stage == 0) {
         /* Stage 0 is the vertex shader; propagate the clip-distance flag. */
         struct gl_vertex_program *vp = (struct gl_vertex_program *) prog;
         vp->UsesClipDistance = shProg->Vert.UsesClipDistance;
      }

      void *mem_ctx = ralloc_context(NULL);
      bool progress;

      /* Drop any IR left from a previous link of this program, then clone
       * the freshly linked IR so the driver passes don't mutate the
       * core-Mesa copy (shader->base.ir).
       */
      if (shader->ir)
         ralloc_free(shader->ir);
      shader->ir = new(shader) exec_list;
      clone_ir_list(mem_ctx, shader->ir, shader->base.ir);

      /* lower_packing_builtins() inserts arithmetic instructions, so it
       * must precede lower_instructions().
       */
      brw_lower_packing_builtins(brw, (gl_shader_type) stage, shader->ir);
      do_mat_op_to_vec(shader->ir);
      /* Gen7+ has BFM/BFI; earlier gens keep bitfieldInsert unlowered. */
      const int bitfield_insert = intel->gen >= 7
                                  ? BITFIELD_INSERT_TO_BFM_BFI
                                  : 0;
      /* Pre-gen6 has no LRP instruction, so lower lrp() to arithmetic. */
      const int lrp_to_arith = intel->gen < 6 ? LRP_TO_ARITH : 0;
      lower_instructions(shader->ir,
                         MOD_TO_FRACT |
                         DIV_TO_MUL_RCP |
                         SUB_TO_ADD_NEG |
                         EXP_TO_EXP2 |
                         LOG_TO_LOG2 |
                         bitfield_insert |
                         lrp_to_arith);

      /* Pre-gen6 HW can only nest if-statements 16 deep.  Beyond this,
       * if-statements need to be flattened.
       */
      if (intel->gen < 6)
         lower_if_to_cond_assign(shader->ir, 16);

      do_lower_texture_projection(shader->ir);
      brw_lower_texture_gradients(intel, shader->ir);
      do_vec_index_to_cond_assign(shader->ir);
      lower_vector_insert(shader->ir, true);
      brw_do_cubemap_normalize(shader->ir);
      lower_noise(shader->ir);
      lower_quadop_vector(shader->ir, false);

      /* Which storage classes get variable-index accesses lowered to
       * conditional assignments: inputs always; outputs and temporaries
       * only in the FS.
       */
      bool input = true;
      bool output = stage == MESA_SHADER_FRAGMENT;
      bool temp = stage == MESA_SHADER_FRAGMENT;
      bool uniform = false;

      bool lowered_variable_indexing =
         lower_variable_index_to_cond_assign(shader->ir,
                                             input, output, temp, uniform);

      if (unlikely((intel->perf_debug) && lowered_variable_indexing)) {
         perf_debug("Unsupported form of variable indexing in FS; falling "
                    "back to very inefficient code generation\n");
      }

      /* FINISHME: Do this before the variable index lowering. */
      lower_ubo_reference(&shader->base, shader->ir);

      /* Iterate the optimization loop to a fixed point. */
      do {
         progress = false;

         if (stage == MESA_SHADER_FRAGMENT) {
            brw_do_channel_expressions(shader->ir);
            brw_do_vector_splitting(shader->ir);
         }

         progress = do_lower_jumps(shader->ir, true, true,
                                   true, /* main return */
                                   false, /* continue */
                                   false /* loops */
                                   ) || progress;

         progress = do_common_optimization(shader->ir, true, true, 32,
                                           &ctx->ShaderCompilerOptions[stage])
            || progress;
      } while (progress);

      /* Make a pass over the IR to add state references for any built-in
       * uniforms that are used.  This has to be done now (during linking).
       * Code generation doesn't happen until the first time this shader is
       * used for rendering.  Waiting until then to generate the parameters is
       * too late.  At that point, the values for the built-in uniforms won't
       * get sent to the shader.
       */
      foreach_list(node, shader->ir) {
         ir_variable *var = ((ir_instruction *) node)->as_variable();

         /* Only uniforms whose names start with "gl_" carry state slots. */
         if ((var == NULL) || (var->mode != ir_var_uniform)
             || (strncmp(var->name, "gl_", 3) != 0))
            continue;

         const ir_state_slot *const slots = var->state_slots;
         assert(var->state_slots != NULL);

         for (unsigned int i = 0; i < var->num_state_slots; i++) {
            _mesa_add_state_reference(prog->Parameters,
                                      (gl_state_index *) slots[i].tokens);
         }
      }

      validate_ir_tree(shader->ir);

      /* Move the surviving IR out of the temporary context so freeing
       * mem_ctx only discards the dead nodes.
       */
      reparent_ir(shader->ir, shader->ir);
      ralloc_free(mem_ctx);

      do_set_program_inouts(shader->ir, prog,
                            shader->base.Type == GL_FRAGMENT_SHADER);

      prog->SamplersUsed = shader->base.active_samplers;
      _mesa_update_shader_textures_used(shProg, prog);

      _mesa_reference_program(ctx, &shader->base.Program, prog);

      brw_add_texrect_params(prog);

      /* This has to be done last.  Any operation that can cause
       * prog->ParameterValues to get reallocated (e.g., anything that adds a
       * program constant) has to happen before creating this linkage.
       */
      _mesa_associate_uniform_storage(ctx, shProg, prog->Parameters);

      /* Drop the local reference; shader->base.Program now owns it. */
      _mesa_reference_program(ctx, &prog, NULL);

      if (ctx->Shader.Flags & GLSL_DUMP) {
         printf("\n");
         printf("GLSL IR for linked %s program %d:\n", target_strings[stage],
                shProg->Name);
         _mesa_print_ir(shader->base.ir, NULL);
         printf("\n");
      }
   }

   /* With MESA_GLSL=dump, also echo the original source of every shader
    * attached to the program.
    */
   if (ctx->Shader.Flags & GLSL_DUMP) {
      for (unsigned i = 0; i < shProg->NumShaders; i++) {
         const struct gl_shader *sh = shProg->Shaders[i];
         if (!sh)
            continue;

         printf("GLSL %s shader %d source for linked program %d:\n",
                target_strings[_mesa_shader_type_to_index(sh->Type)],
                i,
                shProg->Name);
         printf("%s", sh->Source);
         printf("\n");
      }
   }

   if (!brw_shader_precompile(ctx, shProg))
      return false;

   return true;
}
293
294
295 int
296 brw_type_for_base_type(const struct glsl_type *type)
297 {
298 switch (type->base_type) {
299 case GLSL_TYPE_FLOAT:
300 return BRW_REGISTER_TYPE_F;
301 case GLSL_TYPE_INT:
302 case GLSL_TYPE_BOOL:
303 return BRW_REGISTER_TYPE_D;
304 case GLSL_TYPE_UINT:
305 return BRW_REGISTER_TYPE_UD;
306 case GLSL_TYPE_ARRAY:
307 return brw_type_for_base_type(type->fields.array);
308 case GLSL_TYPE_STRUCT:
309 case GLSL_TYPE_SAMPLER:
310 /* These should be overridden with the type of the member when
311 * dereferenced into. BRW_REGISTER_TYPE_UD seems like a likely
312 * way to trip up if we don't.
313 */
314 return BRW_REGISTER_TYPE_UD;
315 case GLSL_TYPE_VOID:
316 case GLSL_TYPE_ERROR:
317 case GLSL_TYPE_INTERFACE:
318 assert(!"not reached");
319 break;
320 }
321
322 return BRW_REGISTER_TYPE_F;
323 }
324
325 uint32_t
326 brw_conditional_for_comparison(unsigned int op)
327 {
328 switch (op) {
329 case ir_binop_less:
330 return BRW_CONDITIONAL_L;
331 case ir_binop_greater:
332 return BRW_CONDITIONAL_G;
333 case ir_binop_lequal:
334 return BRW_CONDITIONAL_LE;
335 case ir_binop_gequal:
336 return BRW_CONDITIONAL_GE;
337 case ir_binop_equal:
338 case ir_binop_all_equal: /* same as equal for scalars */
339 return BRW_CONDITIONAL_Z;
340 case ir_binop_nequal:
341 case ir_binop_any_nequal: /* same as nequal for scalars */
342 return BRW_CONDITIONAL_NZ;
343 default:
344 assert(!"not reached: bad operation for comparison");
345 return BRW_CONDITIONAL_NZ;
346 }
347 }
348
349 uint32_t
350 brw_math_function(enum opcode op)
351 {
352 switch (op) {
353 case SHADER_OPCODE_RCP:
354 return BRW_MATH_FUNCTION_INV;
355 case SHADER_OPCODE_RSQ:
356 return BRW_MATH_FUNCTION_RSQ;
357 case SHADER_OPCODE_SQRT:
358 return BRW_MATH_FUNCTION_SQRT;
359 case SHADER_OPCODE_EXP2:
360 return BRW_MATH_FUNCTION_EXP;
361 case SHADER_OPCODE_LOG2:
362 return BRW_MATH_FUNCTION_LOG;
363 case SHADER_OPCODE_POW:
364 return BRW_MATH_FUNCTION_POW;
365 case SHADER_OPCODE_SIN:
366 return BRW_MATH_FUNCTION_SIN;
367 case SHADER_OPCODE_COS:
368 return BRW_MATH_FUNCTION_COS;
369 case SHADER_OPCODE_INT_QUOTIENT:
370 return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
371 case SHADER_OPCODE_INT_REMAINDER:
372 return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
373 default:
374 assert(!"not reached: unknown math function");
375 return 0;
376 }
377 }
378
379 uint32_t
380 brw_texture_offset(ir_constant *offset)
381 {
382 assert(offset != NULL);
383
384 signed char offsets[3];
385 for (unsigned i = 0; i < offset->type->vector_elements; i++)
386 offsets[i] = (signed char) offset->value.i[i];
387
388 /* Combine all three offsets into a single unsigned dword:
389 *
390 * bits 11:8 - U Offset (X component)
391 * bits 7:4 - V Offset (Y component)
392 * bits 3:0 - R Offset (Z component)
393 */
394 unsigned offset_bits = 0;
395 for (unsigned i = 0; i < offset->type->vector_elements; i++) {
396 const unsigned shift = 4 * (2 - i);
397 offset_bits |= (offsets[i] << shift) & (0xF << shift);
398 }
399 return offset_bits;
400 }
401
402 const char *
403 brw_instruction_name(enum opcode op)
404 {
405 char *fallback;
406
407 if (op < ARRAY_SIZE(opcode_descs) && opcode_descs[op].name)
408 return opcode_descs[op].name;
409
410 switch (op) {
411 case FS_OPCODE_FB_WRITE:
412 return "fb_write";
413
414 case SHADER_OPCODE_RCP:
415 return "rcp";
416 case SHADER_OPCODE_RSQ:
417 return "rsq";
418 case SHADER_OPCODE_SQRT:
419 return "sqrt";
420 case SHADER_OPCODE_EXP2:
421 return "exp2";
422 case SHADER_OPCODE_LOG2:
423 return "log2";
424 case SHADER_OPCODE_POW:
425 return "pow";
426 case SHADER_OPCODE_INT_QUOTIENT:
427 return "int_quot";
428 case SHADER_OPCODE_INT_REMAINDER:
429 return "int_rem";
430 case SHADER_OPCODE_SIN:
431 return "sin";
432 case SHADER_OPCODE_COS:
433 return "cos";
434
435 case SHADER_OPCODE_TEX:
436 return "tex";
437 case SHADER_OPCODE_TXD:
438 return "txd";
439 case SHADER_OPCODE_TXF:
440 return "txf";
441 case SHADER_OPCODE_TXL:
442 return "txl";
443 case SHADER_OPCODE_TXS:
444 return "txs";
445 case FS_OPCODE_TXB:
446 return "txb";
447 case SHADER_OPCODE_TXF_MS:
448 return "txf_ms";
449
450 case FS_OPCODE_DDX:
451 return "ddx";
452 case FS_OPCODE_DDY:
453 return "ddy";
454
455 case FS_OPCODE_PIXEL_X:
456 return "pixel_x";
457 case FS_OPCODE_PIXEL_Y:
458 return "pixel_y";
459
460 case FS_OPCODE_CINTERP:
461 return "cinterp";
462 case FS_OPCODE_LINTERP:
463 return "linterp";
464
465 case FS_OPCODE_SPILL:
466 return "spill";
467 case FS_OPCODE_UNSPILL:
468 return "unspill";
469
470 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
471 return "uniform_pull_const";
472 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
473 return "uniform_pull_const_gen7";
474 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
475 return "varying_pull_const";
476 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
477 return "varying_pull_const_gen7";
478
479 case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
480 return "mov_dispatch_to_flags";
481 case FS_OPCODE_DISCARD_JUMP:
482 return "discard_jump";
483
484 case FS_OPCODE_SET_SIMD4X2_OFFSET:
485 return "set_simd4x2_offset";
486
487 case FS_OPCODE_PACK_HALF_2x16_SPLIT:
488 return "pack_half_2x16_split";
489 case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
490 return "unpack_half_2x16_split_x";
491 case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
492 return "unpack_half_2x16_split_y";
493
494 case FS_OPCODE_PLACEHOLDER_HALT:
495 return "placeholder_halt";
496
497 case VS_OPCODE_URB_WRITE:
498 return "urb_write";
499 case VS_OPCODE_SCRATCH_READ:
500 return "scratch_read";
501 case VS_OPCODE_SCRATCH_WRITE:
502 return "scratch_write";
503 case VS_OPCODE_PULL_CONSTANT_LOAD:
504 return "pull_constant_load";
505 case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
506 return "pull_constant_load_gen7";
507
508 default:
509 /* Yes, this leaks. It's in debug code, it should never occur, and if
510 * it does, you should just add the case to the list above.
511 */
512 asprintf(&fallback, "op%d", op);
513 return fallback;
514 }
515 }
516
517 bool
518 backend_instruction::is_tex()
519 {
520 return (opcode == SHADER_OPCODE_TEX ||
521 opcode == FS_OPCODE_TXB ||
522 opcode == SHADER_OPCODE_TXD ||
523 opcode == SHADER_OPCODE_TXF ||
524 opcode == SHADER_OPCODE_TXF_MS ||
525 opcode == SHADER_OPCODE_TXL ||
526 opcode == SHADER_OPCODE_TXS ||
527 opcode == SHADER_OPCODE_LOD);
528 }
529
530 bool
531 backend_instruction::is_math()
532 {
533 return (opcode == SHADER_OPCODE_RCP ||
534 opcode == SHADER_OPCODE_RSQ ||
535 opcode == SHADER_OPCODE_SQRT ||
536 opcode == SHADER_OPCODE_EXP2 ||
537 opcode == SHADER_OPCODE_LOG2 ||
538 opcode == SHADER_OPCODE_SIN ||
539 opcode == SHADER_OPCODE_COS ||
540 opcode == SHADER_OPCODE_INT_QUOTIENT ||
541 opcode == SHADER_OPCODE_INT_REMAINDER ||
542 opcode == SHADER_OPCODE_POW);
543 }
544
545 bool
546 backend_instruction::is_control_flow()
547 {
548 switch (opcode) {
549 case BRW_OPCODE_DO:
550 case BRW_OPCODE_WHILE:
551 case BRW_OPCODE_IF:
552 case BRW_OPCODE_ELSE:
553 case BRW_OPCODE_ENDIF:
554 case BRW_OPCODE_BREAK:
555 case BRW_OPCODE_CONTINUE:
556 return true;
557 default:
558 return false;
559 }
560 }
561
562 void
563 backend_visitor::dump_instructions()
564 {
565 int ip = 0;
566 foreach_list(node, &this->instructions) {
567 backend_instruction *inst = (backend_instruction *)node;
568 printf("%d: ", ip++);
569 dump_instruction(inst);
570 }
571 }