glsl: Remove ir_print_visitor.h includes and usage
[mesa.git] / src / mesa / drivers / dri / i965 / brw_shader.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 extern "C" {
25 #include "main/macros.h"
26 #include "brw_context.h"
27 #include "brw_vs.h"
28 }
29 #include "brw_fs.h"
30 #include "glsl/ir_optimization.h"
31
32 struct gl_shader *
33 brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
34 {
35 struct brw_shader *shader;
36
37 shader = rzalloc(NULL, struct brw_shader);
38 if (shader) {
39 shader->base.Type = type;
40 shader->base.Name = name;
41 _mesa_init_shader(ctx, &shader->base);
42 }
43
44 return &shader->base;
45 }
46
47 struct gl_shader_program *
48 brw_new_shader_program(struct gl_context *ctx, GLuint name)
49 {
50 struct gl_shader_program *prog = rzalloc(NULL, struct gl_shader_program);
51 if (prog) {
52 prog->Name = name;
53 _mesa_init_shader_program(ctx, prog);
54 }
55 return prog;
56 }
57
58 /**
59 * Performs a compile of the shader stages even when we don't know
60 * what non-orthogonal state will be set, in the hope that it reflects
61 * the eventual NOS used, and thus allows us to produce link failures.
62 */
63 static bool
64 brw_shader_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
65 {
66 struct brw_context *brw = brw_context(ctx);
67
68 if (brw->precompile && !brw_fs_precompile(ctx, prog))
69 return false;
70
71 if (brw->precompile && !brw_vs_precompile(ctx, prog))
72 return false;
73
74 return true;
75 }
76
77 static void
78 brw_lower_packing_builtins(struct brw_context *brw,
79 gl_shader_type shader_type,
80 exec_list *ir)
81 {
82 int ops = LOWER_PACK_SNORM_2x16
83 | LOWER_UNPACK_SNORM_2x16
84 | LOWER_PACK_UNORM_2x16
85 | LOWER_UNPACK_UNORM_2x16
86 | LOWER_PACK_SNORM_4x8
87 | LOWER_UNPACK_SNORM_4x8
88 | LOWER_PACK_UNORM_4x8
89 | LOWER_UNPACK_UNORM_4x8;
90
91 if (brw->intel.gen >= 7) {
92 /* Gen7 introduced the f32to16 and f16to32 instructions, which can be
93 * used to execute packHalf2x16 and unpackHalf2x16. For AOS code, no
94 * lowering is needed. For SOA code, the Half2x16 ops must be
95 * scalarized.
96 */
97 if (shader_type == MESA_SHADER_FRAGMENT) {
98 ops |= LOWER_PACK_HALF_2x16_TO_SPLIT
99 | LOWER_UNPACK_HALF_2x16_TO_SPLIT;
100 }
101 } else {
102 ops |= LOWER_PACK_HALF_2x16
103 | LOWER_UNPACK_HALF_2x16;
104 }
105
106 lower_packing_builtins(ir, ops);
107 }
108
/**
 * Driver link hook: run the i965-specific IR lowering and optimization
 * passes on each linked shader stage, build the stage's gl_program with
 * its parameter list, and (optionally) precompile the stages.
 *
 * Returns false if program creation or precompilation fails.
 *
 * NOTE(review): several passes below are order-sensitive (see the inline
 * comments); do not reorder them without checking the pass requirements.
 */
GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = &brw->intel;
   unsigned int stage;
   static const char *target_strings[]
      = { "vertex", "fragment", "geometry" };

   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      struct brw_shader *shader =
         (struct brw_shader *)shProg->_LinkedShaders[stage];
      /* Indexed by stage; must stay in sync with target_strings above. */
      static const GLenum targets[] = {
         GL_VERTEX_PROGRAM_ARB,
         GL_FRAGMENT_PROGRAM_ARB,
         GL_GEOMETRY_PROGRAM_NV
      };

      if (!shader)
         continue;

      struct gl_program *prog =
         ctx->Driver.NewProgram(ctx, targets[stage], shader->base.Name);
      if (!prog)
         return false;
      prog->Parameters = _mesa_new_parameter_list();

      /* Stage 0 is the vertex stage; propagate the linker's clip-distance
       * usage onto the vertex program.
       */
      if (stage == 0) {
         struct gl_vertex_program *vp = (struct gl_vertex_program *) prog;
         vp->UsesClipDistance = shProg->Vert.UsesClipDistance;
      }

      void *mem_ctx = ralloc_context(NULL);
      bool progress;

      /* Lower a private clone of the linked IR so the core GLSL IR in
       * shader->base.ir stays untouched (it is still dumped below).
       */
      if (shader->ir)
         ralloc_free(shader->ir);
      shader->ir = new(shader) exec_list;
      clone_ir_list(mem_ctx, shader->ir, shader->base.ir);

      /* lower_packing_builtins() inserts arithmetic instructions, so it
       * must precede lower_instructions().
       */
      brw_lower_packing_builtins(brw, (gl_shader_type) stage, shader->ir);
      do_mat_op_to_vec(shader->ir);
      /* Gen7+ has BFM/BFI; Gen6+ has a native LRP instruction. */
      const int bitfield_insert = intel->gen >= 7
                                  ? BITFIELD_INSERT_TO_BFM_BFI
                                  : 0;
      const int lrp_to_arith = intel->gen < 6 ? LRP_TO_ARITH : 0;
      lower_instructions(shader->ir,
                         MOD_TO_FRACT |
                         DIV_TO_MUL_RCP |
                         SUB_TO_ADD_NEG |
                         EXP_TO_EXP2 |
                         LOG_TO_LOG2 |
                         bitfield_insert |
                         lrp_to_arith);

      /* Pre-gen6 HW can only nest if-statements 16 deep.  Beyond this,
       * if-statements need to be flattened.
       */
      if (intel->gen < 6)
         lower_if_to_cond_assign(shader->ir, 16);

      do_lower_texture_projection(shader->ir);
      brw_lower_texture_gradients(intel, shader->ir);
      do_vec_index_to_cond_assign(shader->ir);
      lower_vector_insert(shader->ir, true);
      brw_do_cubemap_normalize(shader->ir);
      lower_noise(shader->ir);
      lower_quadop_vector(shader->ir, false);

      /* Which storage classes need variable-index lowering: inputs always;
       * outputs and temporaries only in the fragment stage; uniforms never
       * (they can use the pull-constant path).
       */
      bool input = true;
      bool output = stage == MESA_SHADER_FRAGMENT;
      bool temp = stage == MESA_SHADER_FRAGMENT;
      bool uniform = false;

      bool lowered_variable_indexing =
         lower_variable_index_to_cond_assign(shader->ir,
                                             input, output, temp, uniform);

      if (unlikely((intel->perf_debug) && lowered_variable_indexing)) {
         perf_debug("Unsupported form of variable indexing in FS; falling "
                    "back to very inefficient code generation\n");
      }

      /* FINISHME: Do this before the variable index lowering. */
      lower_ubo_reference(&shader->base, shader->ir);

      /* Iterate lowering + optimization to a fixed point. */
      do {
         progress = false;

         if (stage == MESA_SHADER_FRAGMENT) {
            brw_do_channel_expressions(shader->ir);
            brw_do_vector_splitting(shader->ir);
         }

         progress = do_lower_jumps(shader->ir, true, true,
                                   true, /* main return */
                                   false, /* continue */
                                   false /* loops */
                                   ) || progress;

         progress = do_common_optimization(shader->ir, true, true, 32,
                                           &ctx->ShaderCompilerOptions[stage])
           || progress;
      } while (progress);

      /* Make a pass over the IR to add state references for any built-in
       * uniforms that are used.  This has to be done now (during linking).
       * Code generation doesn't happen until the first time this shader is
       * used for rendering.  Waiting until then to generate the parameters is
       * too late.  At that point, the values for the built-in uniforms won't
       * get sent to the shader.
       */
      foreach_list(node, shader->ir) {
         ir_variable *var = ((ir_instruction *) node)->as_variable();

         /* Only built-in ("gl_"-prefixed) uniforms carry state slots. */
         if ((var == NULL) || (var->mode != ir_var_uniform)
             || (strncmp(var->name, "gl_", 3) != 0))
            continue;

         const ir_state_slot *const slots = var->state_slots;
         assert(var->state_slots != NULL);

         for (unsigned int i = 0; i < var->num_state_slots; i++) {
            _mesa_add_state_reference(prog->Parameters,
                                      (gl_state_index *) slots[i].tokens);
         }
      }

      validate_ir_tree(shader->ir);

      /* Move the surviving IR out of the temporary context before freeing
       * it, so the lowered IR owns its own storage.
       */
      reparent_ir(shader->ir, shader->ir);
      ralloc_free(mem_ctx);

      do_set_program_inouts(shader->ir, prog,
                            shader->base.Type == GL_FRAGMENT_SHADER);

      prog->SamplersUsed = shader->base.active_samplers;
      _mesa_update_shader_textures_used(shProg, prog);

      _mesa_reference_program(ctx, &shader->base.Program, prog);

      brw_add_texrect_params(prog);

      /* This has to be done last.  Any operation that can cause
       * prog->ParameterValues to get reallocated (e.g., anything that adds a
       * program constant) has to happen before creating this linkage.
       */
      _mesa_associate_uniform_storage(ctx, shProg, prog->Parameters);

      /* Drop the local reference; shader->base.Program now owns it. */
      _mesa_reference_program(ctx, &prog, NULL);

      if (ctx->Shader.Flags & GLSL_DUMP) {
         printf("\n");
         printf("GLSL IR for linked %s program %d:\n", target_strings[stage],
                shProg->Name);
         _mesa_print_ir(shader->base.ir, NULL);
         printf("\n");
      }
   }

   /* With GLSL_DUMP also echo the original source of every attached shader. */
   if (ctx->Shader.Flags & GLSL_DUMP) {
      for (unsigned i = 0; i < shProg->NumShaders; i++) {
         const struct gl_shader *sh = shProg->Shaders[i];
         if (!sh)
            continue;

         printf("GLSL %s shader %d source for linked program %d:\n",
                target_strings[_mesa_shader_type_to_index(sh->Type)],
                i,
                shProg->Name);
         printf("%s", sh->Source);
         printf("\n");
      }
   }

   if (!brw_shader_precompile(ctx, shProg))
      return false;

   return true;
}
292
293
294 int
295 brw_type_for_base_type(const struct glsl_type *type)
296 {
297 switch (type->base_type) {
298 case GLSL_TYPE_FLOAT:
299 return BRW_REGISTER_TYPE_F;
300 case GLSL_TYPE_INT:
301 case GLSL_TYPE_BOOL:
302 return BRW_REGISTER_TYPE_D;
303 case GLSL_TYPE_UINT:
304 return BRW_REGISTER_TYPE_UD;
305 case GLSL_TYPE_ARRAY:
306 return brw_type_for_base_type(type->fields.array);
307 case GLSL_TYPE_STRUCT:
308 case GLSL_TYPE_SAMPLER:
309 /* These should be overridden with the type of the member when
310 * dereferenced into. BRW_REGISTER_TYPE_UD seems like a likely
311 * way to trip up if we don't.
312 */
313 return BRW_REGISTER_TYPE_UD;
314 case GLSL_TYPE_VOID:
315 case GLSL_TYPE_ERROR:
316 case GLSL_TYPE_INTERFACE:
317 assert(!"not reached");
318 break;
319 }
320
321 return BRW_REGISTER_TYPE_F;
322 }
323
324 uint32_t
325 brw_conditional_for_comparison(unsigned int op)
326 {
327 switch (op) {
328 case ir_binop_less:
329 return BRW_CONDITIONAL_L;
330 case ir_binop_greater:
331 return BRW_CONDITIONAL_G;
332 case ir_binop_lequal:
333 return BRW_CONDITIONAL_LE;
334 case ir_binop_gequal:
335 return BRW_CONDITIONAL_GE;
336 case ir_binop_equal:
337 case ir_binop_all_equal: /* same as equal for scalars */
338 return BRW_CONDITIONAL_Z;
339 case ir_binop_nequal:
340 case ir_binop_any_nequal: /* same as nequal for scalars */
341 return BRW_CONDITIONAL_NZ;
342 default:
343 assert(!"not reached: bad operation for comparison");
344 return BRW_CONDITIONAL_NZ;
345 }
346 }
347
348 uint32_t
349 brw_math_function(enum opcode op)
350 {
351 switch (op) {
352 case SHADER_OPCODE_RCP:
353 return BRW_MATH_FUNCTION_INV;
354 case SHADER_OPCODE_RSQ:
355 return BRW_MATH_FUNCTION_RSQ;
356 case SHADER_OPCODE_SQRT:
357 return BRW_MATH_FUNCTION_SQRT;
358 case SHADER_OPCODE_EXP2:
359 return BRW_MATH_FUNCTION_EXP;
360 case SHADER_OPCODE_LOG2:
361 return BRW_MATH_FUNCTION_LOG;
362 case SHADER_OPCODE_POW:
363 return BRW_MATH_FUNCTION_POW;
364 case SHADER_OPCODE_SIN:
365 return BRW_MATH_FUNCTION_SIN;
366 case SHADER_OPCODE_COS:
367 return BRW_MATH_FUNCTION_COS;
368 case SHADER_OPCODE_INT_QUOTIENT:
369 return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
370 case SHADER_OPCODE_INT_REMAINDER:
371 return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
372 default:
373 assert(!"not reached: unknown math function");
374 return 0;
375 }
376 }
377
378 uint32_t
379 brw_texture_offset(ir_constant *offset)
380 {
381 assert(offset != NULL);
382
383 signed char offsets[3];
384 for (unsigned i = 0; i < offset->type->vector_elements; i++)
385 offsets[i] = (signed char) offset->value.i[i];
386
387 /* Combine all three offsets into a single unsigned dword:
388 *
389 * bits 11:8 - U Offset (X component)
390 * bits 7:4 - V Offset (Y component)
391 * bits 3:0 - R Offset (Z component)
392 */
393 unsigned offset_bits = 0;
394 for (unsigned i = 0; i < offset->type->vector_elements; i++) {
395 const unsigned shift = 4 * (2 - i);
396 offset_bits |= (offsets[i] << shift) & (0xF << shift);
397 }
398 return offset_bits;
399 }
400
401 const char *
402 brw_instruction_name(enum opcode op)
403 {
404 char *fallback;
405
406 if (op < ARRAY_SIZE(opcode_descs) && opcode_descs[op].name)
407 return opcode_descs[op].name;
408
409 switch (op) {
410 case FS_OPCODE_FB_WRITE:
411 return "fb_write";
412
413 case SHADER_OPCODE_RCP:
414 return "rcp";
415 case SHADER_OPCODE_RSQ:
416 return "rsq";
417 case SHADER_OPCODE_SQRT:
418 return "sqrt";
419 case SHADER_OPCODE_EXP2:
420 return "exp2";
421 case SHADER_OPCODE_LOG2:
422 return "log2";
423 case SHADER_OPCODE_POW:
424 return "pow";
425 case SHADER_OPCODE_INT_QUOTIENT:
426 return "int_quot";
427 case SHADER_OPCODE_INT_REMAINDER:
428 return "int_rem";
429 case SHADER_OPCODE_SIN:
430 return "sin";
431 case SHADER_OPCODE_COS:
432 return "cos";
433
434 case SHADER_OPCODE_TEX:
435 return "tex";
436 case SHADER_OPCODE_TXD:
437 return "txd";
438 case SHADER_OPCODE_TXF:
439 return "txf";
440 case SHADER_OPCODE_TXL:
441 return "txl";
442 case SHADER_OPCODE_TXS:
443 return "txs";
444 case FS_OPCODE_TXB:
445 return "txb";
446 case SHADER_OPCODE_TXF_MS:
447 return "txf_ms";
448
449 case FS_OPCODE_DDX:
450 return "ddx";
451 case FS_OPCODE_DDY:
452 return "ddy";
453
454 case FS_OPCODE_PIXEL_X:
455 return "pixel_x";
456 case FS_OPCODE_PIXEL_Y:
457 return "pixel_y";
458
459 case FS_OPCODE_CINTERP:
460 return "cinterp";
461 case FS_OPCODE_LINTERP:
462 return "linterp";
463
464 case FS_OPCODE_SPILL:
465 return "spill";
466 case FS_OPCODE_UNSPILL:
467 return "unspill";
468
469 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
470 return "uniform_pull_const";
471 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
472 return "uniform_pull_const_gen7";
473 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
474 return "varying_pull_const";
475 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
476 return "varying_pull_const_gen7";
477
478 case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
479 return "mov_dispatch_to_flags";
480 case FS_OPCODE_DISCARD_JUMP:
481 return "discard_jump";
482
483 case FS_OPCODE_SET_SIMD4X2_OFFSET:
484 return "set_simd4x2_offset";
485
486 case FS_OPCODE_PACK_HALF_2x16_SPLIT:
487 return "pack_half_2x16_split";
488 case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
489 return "unpack_half_2x16_split_x";
490 case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
491 return "unpack_half_2x16_split_y";
492
493 case FS_OPCODE_PLACEHOLDER_HALT:
494 return "placeholder_halt";
495
496 case VS_OPCODE_URB_WRITE:
497 return "urb_write";
498 case VS_OPCODE_SCRATCH_READ:
499 return "scratch_read";
500 case VS_OPCODE_SCRATCH_WRITE:
501 return "scratch_write";
502 case VS_OPCODE_PULL_CONSTANT_LOAD:
503 return "pull_constant_load";
504 case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
505 return "pull_constant_load_gen7";
506
507 default:
508 /* Yes, this leaks. It's in debug code, it should never occur, and if
509 * it does, you should just add the case to the list above.
510 */
511 asprintf(&fallback, "op%d", op);
512 return fallback;
513 }
514 }
515
516 bool
517 backend_instruction::is_tex()
518 {
519 return (opcode == SHADER_OPCODE_TEX ||
520 opcode == FS_OPCODE_TXB ||
521 opcode == SHADER_OPCODE_TXD ||
522 opcode == SHADER_OPCODE_TXF ||
523 opcode == SHADER_OPCODE_TXF_MS ||
524 opcode == SHADER_OPCODE_TXL ||
525 opcode == SHADER_OPCODE_TXS ||
526 opcode == SHADER_OPCODE_LOD);
527 }
528
529 bool
530 backend_instruction::is_math()
531 {
532 return (opcode == SHADER_OPCODE_RCP ||
533 opcode == SHADER_OPCODE_RSQ ||
534 opcode == SHADER_OPCODE_SQRT ||
535 opcode == SHADER_OPCODE_EXP2 ||
536 opcode == SHADER_OPCODE_LOG2 ||
537 opcode == SHADER_OPCODE_SIN ||
538 opcode == SHADER_OPCODE_COS ||
539 opcode == SHADER_OPCODE_INT_QUOTIENT ||
540 opcode == SHADER_OPCODE_INT_REMAINDER ||
541 opcode == SHADER_OPCODE_POW);
542 }
543
544 bool
545 backend_instruction::is_control_flow()
546 {
547 switch (opcode) {
548 case BRW_OPCODE_DO:
549 case BRW_OPCODE_WHILE:
550 case BRW_OPCODE_IF:
551 case BRW_OPCODE_ELSE:
552 case BRW_OPCODE_ENDIF:
553 case BRW_OPCODE_BREAK:
554 case BRW_OPCODE_CONTINUE:
555 return true;
556 default:
557 return false;
558 }
559 }
560
561 void
562 backend_visitor::dump_instructions()
563 {
564 int ip = 0;
565 foreach_list(node, &this->instructions) {
566 backend_instruction *inst = (backend_instruction *)node;
567 printf("%d: ", ip++);
568 dump_instruction(inst);
569 }
570 }