[mesa.git] src/mesa/drivers/dri/i965/brw_shader.cpp
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

extern "C" {
#include "main/macros.h"
#include "brw_context.h"
#include "brw_vs.h"
}
#include "brw_fs.h"
#include "glsl/ir_optimization.h"
#include "glsl/ir_print_visitor.h"

struct gl_shader *
brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
{
   struct brw_shader *shader;

   shader = rzalloc(NULL, struct brw_shader);
   if (!shader)
      return NULL;

   shader->base.Type = type;
   shader->base.Name = name;
   _mesa_init_shader(ctx, &shader->base);

   return &shader->base;
}

struct gl_shader_program *
brw_new_shader_program(struct gl_context *ctx, GLuint name)
{
   struct gl_shader_program *prog = rzalloc(NULL, struct gl_shader_program);
   if (prog) {
      prog->Name = name;
      _mesa_init_shader_program(ctx, prog);
   }
   return prog;
}

/**
 * Compiles the shader stages at link time, even though the non-orthogonal
 * state (NOS) that will eventually be set is not yet known, in the hope
 * that the guessed NOS matches what is eventually used.  This lets compile
 * failures be reported as link failures instead of surfacing at draw time.
 */
static bool
brw_shader_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
{
   struct brw_context *brw = brw_context(ctx);

   if (brw->precompile && !brw_fs_precompile(ctx, prog))
      return false;

   if (brw->precompile && !brw_vs_precompile(ctx, prog))
      return false;

   return true;
}

static void
brw_lower_packing_builtins(struct brw_context *brw,
                           gl_shader_type shader_type,
                           exec_list *ir)
{
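   /* The snorm/unorm pack/unpack builtins below are lowered to plain
    * shift/convert arithmetic on every generation; only the half-float
    * variants further down depend on hardware support (the gen7+
    * f32to16/f16to32 instructions).
    */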
   int ops = LOWER_PACK_SNORM_2x16
           | LOWER_UNPACK_SNORM_2x16
           | LOWER_PACK_UNORM_2x16
           | LOWER_UNPACK_UNORM_2x16
           | LOWER_PACK_SNORM_4x8
           | LOWER_UNPACK_SNORM_4x8
           | LOWER_PACK_UNORM_4x8
           | LOWER_UNPACK_UNORM_4x8;

   if (brw->intel.gen >= 7) {
      /* Gen7 introduced the f32to16 and f16to32 instructions, which can be
       * used to execute packHalf2x16 and unpackHalf2x16.  For AOS code, no
       * lowering is needed.  For SOA code, the Half2x16 ops must be
       * scalarized.
       */
      if (shader_type == MESA_SHADER_FRAGMENT) {
         ops |= LOWER_PACK_HALF_2x16_TO_SPLIT
              | LOWER_UNPACK_HALF_2x16_TO_SPLIT;
      }
   } else {
      ops |= LOWER_PACK_HALF_2x16
           | LOWER_UNPACK_HALF_2x16;
   }

   lower_packing_builtins(ir, ops);
}

GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = &brw->intel;
   unsigned int stage;
   static const char *target_strings[]
      = { "vertex", "fragment", "geometry" };

   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      struct brw_shader *shader =
         (struct brw_shader *)shProg->_LinkedShaders[stage];
      static const GLenum targets[] = {
         GL_VERTEX_PROGRAM_ARB,
         GL_FRAGMENT_PROGRAM_ARB,
         GL_GEOMETRY_PROGRAM_NV
      };

      if (!shader)
         continue;

      struct gl_program *prog =
         ctx->Driver.NewProgram(ctx, targets[stage], shader->base.Name);
      if (!prog)
         return false;
      prog->Parameters = _mesa_new_parameter_list();

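      /* Stage 0 is MESA_SHADER_VERTEX, so this propagates the linked
       * program's gl_ClipDistance usage onto the new vertex program.
       */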
      if (stage == 0) {
         struct gl_vertex_program *vp = (struct gl_vertex_program *) prog;
         vp->UsesClipDistance = shProg->Vert.UsesClipDistance;
      }

      void *mem_ctx = ralloc_context(NULL);
      bool progress;

      if (shader->ir)
         ralloc_free(shader->ir);
      shader->ir = new(shader) exec_list;
      clone_ir_list(mem_ctx, shader->ir, shader->base.ir);

      /* lower_packing_builtins() inserts arithmetic instructions, so it
       * must precede lower_instructions().
       */
      brw_lower_packing_builtins(brw, (gl_shader_type) stage, shader->ir);
      do_mat_op_to_vec(shader->ir);
      const int bitfield_insert = intel->gen >= 7
                                  ? BITFIELD_INSERT_TO_BFM_BFI
                                  : 0;
      const int lrp_to_arith = intel->gen < 6 ? LRP_TO_ARITH : 0;
      lower_instructions(shader->ir,
                         MOD_TO_FRACT |
                         DIV_TO_MUL_RCP |
                         SUB_TO_ADD_NEG |
                         EXP_TO_EXP2 |
                         LOG_TO_LOG2 |
                         bitfield_insert |
                         lrp_to_arith);

      /* Pre-gen6 HW can only nest if-statements 16 deep.  Beyond this,
       * if-statements need to be flattened.
       */
      if (intel->gen < 6)
         lower_if_to_cond_assign(shader->ir, 16);

      do_lower_texture_projection(shader->ir);
      brw_lower_texture_gradients(intel, shader->ir);
      do_vec_index_to_cond_assign(shader->ir);
      brw_do_cubemap_normalize(shader->ir);
      lower_noise(shader->ir);
      lower_quadop_vector(shader->ir, false);

      bool input = true;
      bool output = stage == MESA_SHADER_FRAGMENT;
      bool temp = stage == MESA_SHADER_FRAGMENT;
      bool uniform = false;

      bool lowered_variable_indexing =
         lower_variable_index_to_cond_assign(shader->ir,
                                             input, output, temp, uniform);

      if (unlikely(intel->perf_debug && lowered_variable_indexing)) {
         perf_debug("Unsupported form of variable indexing in FS; falling "
                    "back to very inefficient code generation\n");
      }

      /* FINISHME: Do this before the variable index lowering. */
      lower_ubo_reference(&shader->base, shader->ir);

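      /* Run the cheaper lowering and optimization passes to a fixed point:
       * channel expression and vector splitting (FS only) can expose new
       * opportunities for do_lower_jumps() and do_common_optimization(),
       * and vice versa, so loop until a full pass makes no progress.
       */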
      do {
         progress = false;

         if (stage == MESA_SHADER_FRAGMENT) {
            brw_do_channel_expressions(shader->ir);
            brw_do_vector_splitting(shader->ir);
         }

         progress = do_lower_jumps(shader->ir, true, true,
                                   true, /* main return */
                                   false, /* continue */
                                   false /* loops */
                                   ) || progress;

         progress = do_common_optimization(shader->ir, true, true, 32)
            || progress;
      } while (progress);

      /* Make a pass over the IR to add state references for any built-in
       * uniforms that are used.  This has to be done now (during linking).
       * Code generation doesn't happen until the first time this shader is
       * used for rendering.  Waiting until then to generate the parameters
       * is too late.  At that point, the values for the built-in uniforms
       * won't get sent to the shader.
       */
      foreach_list(node, shader->ir) {
         ir_variable *var = ((ir_instruction *) node)->as_variable();

         if ((var == NULL) || (var->mode != ir_var_uniform)
             || (strncmp(var->name, "gl_", 3) != 0))
            continue;

         const ir_state_slot *const slots = var->state_slots;
         assert(var->state_slots != NULL);

         for (unsigned int i = 0; i < var->num_state_slots; i++) {
            _mesa_add_state_reference(prog->Parameters,
                                      (gl_state_index *) slots[i].tokens);
         }
      }

      validate_ir_tree(shader->ir);

      reparent_ir(shader->ir, shader->ir);
      ralloc_free(mem_ctx);

      do_set_program_inouts(shader->ir, prog,
                            shader->base.Type == GL_FRAGMENT_SHADER);

      prog->SamplersUsed = shader->base.active_samplers;
      _mesa_update_shader_textures_used(shProg, prog);

      _mesa_reference_program(ctx, &shader->base.Program, prog);

      brw_add_texrect_params(prog);

      /* This has to be done last.  Any operation that can cause
       * prog->ParameterValues to get reallocated (e.g., anything that adds
       * a program constant) has to happen before creating this linkage.
       */
      _mesa_associate_uniform_storage(ctx, shProg, prog->Parameters);

      _mesa_reference_program(ctx, &prog, NULL);

      if (ctx->Shader.Flags & GLSL_DUMP) {
         printf("\n");
         printf("GLSL IR for linked %s program %d:\n", target_strings[stage],
                shProg->Name);
         _mesa_print_ir(shader->base.ir, NULL);
         printf("\n");
      }
   }

   if (ctx->Shader.Flags & GLSL_DUMP) {
      for (unsigned i = 0; i < shProg->NumShaders; i++) {
         const struct gl_shader *sh = shProg->Shaders[i];
         if (!sh)
            continue;

         printf("GLSL %s shader %d source for linked program %d:\n",
                target_strings[_mesa_shader_type_to_index(sh->Type)],
                i,
                shProg->Name);
         printf("%s", sh->Source);
         printf("\n");
      }
   }

   if (!brw_shader_precompile(ctx, shProg))
      return false;

   return true;
}

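/**
 * Returns the hardware register type to use for values of the given GLSL
 * base type.  Arrays take the type of their element; struct and sampler
 * dereferences are expected to be overridden with the member's type.
 */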
int
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
      return brw_type_for_base_type(type->fields.array);
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_SAMPLER:
      /* These should be overridden with the type of the member when
       * dereferenced into.  BRW_REGISTER_TYPE_UD seems like a likely
       * way to trip up if we don't.
       */
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_INTERFACE:
      assert(!"not reached");
      break;
   }

   return BRW_REGISTER_TYPE_F;
}

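/**
 * Maps a GLSL IR comparison operation onto the corresponding conditional
 * modifier encoding (BRW_CONDITIONAL_*).  Folding all_equal/any_nequal in
 * with equal/nequal is only correct because the operands are scalars by
 * this point.
 */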
uint32_t
brw_conditional_for_comparison(unsigned int op)
{
   switch (op) {
   case ir_binop_less:
      return BRW_CONDITIONAL_L;
   case ir_binop_greater:
      return BRW_CONDITIONAL_G;
   case ir_binop_lequal:
      return BRW_CONDITIONAL_LE;
   case ir_binop_gequal:
      return BRW_CONDITIONAL_GE;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as equal for scalars */
      return BRW_CONDITIONAL_Z;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      return BRW_CONDITIONAL_NZ;
   default:
      assert(!"not reached: bad operation for comparison");
      return BRW_CONDITIONAL_NZ;
   }
}

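/**
 * Maps a math shader opcode onto the function encoding (BRW_MATH_FUNCTION_*)
 * expected by the hardware's math instruction/message.
 */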
uint32_t
brw_math_function(enum opcode op)
{
   switch (op) {
   case SHADER_OPCODE_RCP:
      return BRW_MATH_FUNCTION_INV;
   case SHADER_OPCODE_RSQ:
      return BRW_MATH_FUNCTION_RSQ;
   case SHADER_OPCODE_SQRT:
      return BRW_MATH_FUNCTION_SQRT;
   case SHADER_OPCODE_EXP2:
      return BRW_MATH_FUNCTION_EXP;
   case SHADER_OPCODE_LOG2:
      return BRW_MATH_FUNCTION_LOG;
   case SHADER_OPCODE_POW:
      return BRW_MATH_FUNCTION_POW;
   case SHADER_OPCODE_SIN:
      return BRW_MATH_FUNCTION_SIN;
   case SHADER_OPCODE_COS:
      return BRW_MATH_FUNCTION_COS;
   case SHADER_OPCODE_INT_QUOTIENT:
      return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
   case SHADER_OPCODE_INT_REMAINDER:
      return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
   default:
      assert(!"not reached: unknown math function");
      return 0;
   }
}

uint32_t
brw_texture_offset(ir_constant *offset)
{
   assert(offset != NULL);

   signed char offsets[3];
   for (unsigned i = 0; i < offset->type->vector_elements; i++)
      offsets[i] = (signed char) offset->value.i[i];

   /* Combine all three offsets into a single unsigned dword:
    *
    *    bits 11:8 - U Offset (X component)
    *    bits  7:4 - V Offset (Y component)
    *    bits  3:0 - R Offset (Z component)
    */
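   /* Worked example (illustrative, not from the original source): an offset
    * of ivec2(1, -2) gives offsets[] = {1, -2}.
    *
    *    i = 0: shift = 8, ( 1 << 8) & 0xF00 = 0x100
    *    i = 1: shift = 4, (-2 << 4) & 0x0F0 = 0x0E0
    *           (-2 is 0xE in 4-bit two's complement)
    *
    * yielding offset_bits = 0x1E0: U = +1, V = -2, R = 0.
    */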
   unsigned offset_bits = 0;
   for (unsigned i = 0; i < offset->type->vector_elements; i++) {
      const unsigned shift = 4 * (2 - i);
      offset_bits |= (offsets[i] << shift) & (0xF << shift);
   }
   return offset_bits;
}

const char *
brw_instruction_name(enum opcode op)
{
   char *fallback;

   if (op < ARRAY_SIZE(opcode_descs) && opcode_descs[op].name)
      return opcode_descs[op].name;

   switch (op) {
   case FS_OPCODE_FB_WRITE:
      return "fb_write";

   case SHADER_OPCODE_RCP:
      return "rcp";
   case SHADER_OPCODE_RSQ:
      return "rsq";
   case SHADER_OPCODE_SQRT:
      return "sqrt";
   case SHADER_OPCODE_EXP2:
      return "exp2";
   case SHADER_OPCODE_LOG2:
      return "log2";
   case SHADER_OPCODE_POW:
      return "pow";
   case SHADER_OPCODE_INT_QUOTIENT:
      return "int_quot";
   case SHADER_OPCODE_INT_REMAINDER:
      return "int_rem";
   case SHADER_OPCODE_SIN:
      return "sin";
   case SHADER_OPCODE_COS:
      return "cos";

   case SHADER_OPCODE_TEX:
      return "tex";
   case SHADER_OPCODE_TXD:
      return "txd";
   case SHADER_OPCODE_TXF:
      return "txf";
   case SHADER_OPCODE_TXL:
      return "txl";
   case SHADER_OPCODE_TXS:
      return "txs";
   case FS_OPCODE_TXB:
      return "txb";
   case SHADER_OPCODE_TXF_MS:
      return "txf_ms";

   case FS_OPCODE_DDX:
      return "ddx";
   case FS_OPCODE_DDY:
      return "ddy";

   case FS_OPCODE_PIXEL_X:
      return "pixel_x";
   case FS_OPCODE_PIXEL_Y:
      return "pixel_y";

   case FS_OPCODE_CINTERP:
      return "cinterp";
   case FS_OPCODE_LINTERP:
      return "linterp";

   case FS_OPCODE_SPILL:
      return "spill";
   case FS_OPCODE_UNSPILL:
      return "unspill";

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      return "uniform_pull_const";
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
      return "uniform_pull_const_gen7";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
      return "varying_pull_const";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
      return "varying_pull_const_gen7";

   case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
      return "mov_dispatch_to_flags";
   case FS_OPCODE_DISCARD_JUMP:
      return "discard_jump";

   case FS_OPCODE_SET_SIMD4X2_OFFSET:
      return "set_simd4x2_offset";

   case FS_OPCODE_PACK_HALF_2x16_SPLIT:
      return "pack_half_2x16_split";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
      return "unpack_half_2x16_split_x";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
      return "unpack_half_2x16_split_y";

   case FS_OPCODE_PLACEHOLDER_HALT:
      return "placeholder_halt";

   case VS_OPCODE_URB_WRITE:
      return "urb_write";
   case VS_OPCODE_SCRATCH_READ:
      return "scratch_read";
   case VS_OPCODE_SCRATCH_WRITE:
      return "scratch_write";
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return "pull_constant_load";
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      return "pull_constant_load_gen7";

   default:
      /* Yes, this leaks.  It's in debug code, it should never occur, and if
       * it does, you should just add the case to the list above.
       */
      asprintf(&fallback, "op%d", op);
      return fallback;
   }
}

bool
backend_instruction::is_tex()
{
   return (opcode == SHADER_OPCODE_TEX ||
           opcode == FS_OPCODE_TXB ||
           opcode == SHADER_OPCODE_TXD ||
           opcode == SHADER_OPCODE_TXF ||
           opcode == SHADER_OPCODE_TXF_MS ||
           opcode == SHADER_OPCODE_TXL ||
           opcode == SHADER_OPCODE_TXS ||
           opcode == SHADER_OPCODE_LOD);
}

bool
backend_instruction::is_math()
{
   return (opcode == SHADER_OPCODE_RCP ||
           opcode == SHADER_OPCODE_RSQ ||
           opcode == SHADER_OPCODE_SQRT ||
           opcode == SHADER_OPCODE_EXP2 ||
           opcode == SHADER_OPCODE_LOG2 ||
           opcode == SHADER_OPCODE_SIN ||
           opcode == SHADER_OPCODE_COS ||
           opcode == SHADER_OPCODE_INT_QUOTIENT ||
           opcode == SHADER_OPCODE_INT_REMAINDER ||
           opcode == SHADER_OPCODE_POW);
}

bool
backend_instruction::is_control_flow()
{
   switch (opcode) {
   case BRW_OPCODE_DO:
   case BRW_OPCODE_WHILE:
   case BRW_OPCODE_IF:
   case BRW_OPCODE_ELSE:
   case BRW_OPCODE_ENDIF:
   case BRW_OPCODE_BREAK:
   case BRW_OPCODE_CONTINUE:
      return true;
   default:
      return false;
   }
}

void
backend_visitor::dump_instructions()
{
   int ip = 0;
   foreach_list(node, &this->instructions) {
      backend_instruction *inst = (backend_instruction *)node;
      printf("%d: ", ip++);
      dump_instruction(inst);
   }
}