src/mesa/drivers/dri/i965/brw_shader.cpp
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "main/macros.h"
#include "brw_context.h"
#include "brw_vs.h"
#include "brw_gs.h"
#include "brw_fs.h"
#include "brw_cfg.h"
#include "brw_nir.h"
#include "glsl/ir_optimization.h"
#include "glsl/glsl_parser_extras.h"
#include "main/shaderapi.h"

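/**
 * Compiler debug-log callback: forward the message to the GL debug output
 * machinery (KHR_debug/ARB_debug_output) as a notification-severity message
 * from the shader compiler.
 */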
static void
shader_debug_log_mesa(void *data, const char *fmt, ...)
{
   struct brw_context *brw = (struct brw_context *)data;
   va_list args;

   va_start(args, fmt);
   GLuint msg_id = 0;
   _mesa_gl_vdebug(&brw->ctx, &msg_id,
                   MESA_DEBUG_SOURCE_SHADER_COMPILER,
                   MESA_DEBUG_TYPE_OTHER,
                   MESA_DEBUG_SEVERITY_NOTIFICATION, fmt, args);
   va_end(args);
}

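/**
 * Compiler performance-log callback: print to stderr when INTEL_DEBUG=perf
 * is set, and also forward the message through the GL debug output
 * machinery when the context has performance debugging enabled.
 */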
static void
shader_perf_log_mesa(void *data, const char *fmt, ...)
{
   struct brw_context *brw = (struct brw_context *)data;

   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      va_list args_copy;
      va_copy(args_copy, args);
      vfprintf(stderr, fmt, args_copy);
      va_end(args_copy);
   }

   if (brw->perf_debug) {
      GLuint msg_id = 0;
      _mesa_gl_vdebug(&brw->ctx, &msg_id,
                      MESA_DEBUG_SOURCE_SHADER_COMPILER,
                      MESA_DEBUG_TYPE_PERFORMANCE,
                      MESA_DEBUG_SEVERITY_MEDIUM, fmt, args);
   }
   va_end(args);
}

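/**
 * Allocate and initialize the per-device compiler state: log callbacks,
 * register-allocation sets for the scalar and vec4 backends, the shared
 * NIR compiler options, and the per-stage GLSL compiler options.
 */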
struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct brw_device_info *devinfo)
{
   struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);

   compiler->devinfo = devinfo;
   compiler->shader_debug_log = shader_debug_log_mesa;
   compiler->shader_perf_log = shader_perf_log_mesa;

   brw_fs_alloc_reg_sets(compiler);
   brw_vec4_alloc_reg_set(compiler);

   if (devinfo->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS))
      compiler->scalar_vs = true;

   nir_shader_compiler_options *nir_options =
      rzalloc(compiler, nir_shader_compiler_options);
   nir_options->native_integers = true;
   /* To allow for better CSE at the NIR level, we tell NIR to split all
    * ffma instructions during opt_algebraic and then re-combine them in a
    * later step.
    */
   nir_options->lower_ffma = true;
   nir_options->lower_sub = true;
   nir_options->lower_fdiv = true;

   /* In the vec4 backend, our dpN instruction replicates its result to all
    * the components of a vec4. We would like NIR to give us replicated fdot
    * instructions because it can optimize better for us.
    *
    * For the FS backend, it should be lowered away by the scalarizing pass
    * so we should never see fdot anyway.
    */
   nir_options->fdot_replicates = true;

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      compiler->glsl_compiler_options[i].MaxUnrollIterations = 32;
      compiler->glsl_compiler_options[i].MaxIfDepth =
         devinfo->gen < 6 ? 16 : UINT_MAX;

      compiler->glsl_compiler_options[i].EmitCondCodes = true;
      compiler->glsl_compiler_options[i].EmitNoNoise = true;
      compiler->glsl_compiler_options[i].EmitNoMainReturn = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;
      compiler->glsl_compiler_options[i].LowerClipDistance = true;

      bool is_scalar;
      switch (i) {
      case MESA_SHADER_FRAGMENT:
      case MESA_SHADER_COMPUTE:
         is_scalar = true;
         break;
      case MESA_SHADER_VERTEX:
         is_scalar = compiler->scalar_vs;
         break;
      default:
         is_scalar = false;
         break;
      }

      compiler->glsl_compiler_options[i].EmitNoIndirectOutput = is_scalar;
      compiler->glsl_compiler_options[i].EmitNoIndirectTemp = is_scalar;
      compiler->glsl_compiler_options[i].OptimizeForAOS = !is_scalar;

      /* !ARB_gpu_shader5 */
      if (devinfo->gen < 7)
         compiler->glsl_compiler_options[i].EmitNoIndirectSampler = true;

      if (is_scalar || brw_env_var_as_boolean("INTEL_USE_NIR", true))
         compiler->glsl_compiler_options[i].NirOptions = nir_options;
   }

   return compiler;
}

struct gl_shader *
brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
{
   struct brw_shader *shader;

   shader = rzalloc(NULL, struct brw_shader);
   if (shader) {
      shader->base.Type = type;
      shader->base.Stage = _mesa_shader_enum_to_shader_stage(type);
      shader->base.Name = name;
      _mesa_init_shader(ctx, &shader->base);
   }

   return &shader->base;
}

/**
 * Performs a compile of the shader stages even when we don't know
 * what non-orthogonal state will be set, in the hope that it reflects
 * the eventual NOS used, and thus allows us to produce link failures.
 */
static bool
brw_shader_precompile(struct gl_context *ctx,
                      struct gl_shader_program *sh_prog)
{
   struct gl_shader *vs = sh_prog->_LinkedShaders[MESA_SHADER_VERTEX];
   struct gl_shader *gs = sh_prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
   struct gl_shader *fs = sh_prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
   struct gl_shader *cs = sh_prog->_LinkedShaders[MESA_SHADER_COMPUTE];

   if (fs && !brw_fs_precompile(ctx, sh_prog, fs->Program))
      return false;

   if (gs && !brw_gs_precompile(ctx, sh_prog, gs->Program))
      return false;

   if (vs && !brw_vs_precompile(ctx, sh_prog, vs->Program))
      return false;

   if (cs && !brw_cs_precompile(ctx, sh_prog, cs->Program))
      return false;

   return true;
}

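/**
 * Return true if the given stage is compiled with the scalar (FS-style)
 * backend rather than the vec4 backend.
 */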
static inline bool
is_scalar_shader_stage(struct brw_context *brw, int stage)
{
   switch (stage) {
   case MESA_SHADER_FRAGMENT:
   case MESA_SHADER_COMPUTE:
      return true;
   case MESA_SHADER_VERTEX:
      return brw->intelScreen->compiler->scalar_vs;
   default:
      return false;
   }
}

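/**
 * Select which GLSL pack/unpack built-ins have to be lowered to arithmetic
 * for this stage and hardware generation, then lower them.
 */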
static void
brw_lower_packing_builtins(struct brw_context *brw,
                           gl_shader_stage shader_type,
                           exec_list *ir)
{
   int ops = LOWER_PACK_SNORM_2x16
           | LOWER_UNPACK_SNORM_2x16
           | LOWER_PACK_UNORM_2x16
           | LOWER_UNPACK_UNORM_2x16;

   if (is_scalar_shader_stage(brw, shader_type)) {
      ops |= LOWER_UNPACK_UNORM_4x8
           | LOWER_UNPACK_SNORM_4x8
           | LOWER_PACK_UNORM_4x8
           | LOWER_PACK_SNORM_4x8;
   }

   if (brw->gen >= 7) {
      /* Gen7 introduced the f32to16 and f16to32 instructions, which can be
       * used to execute packHalf2x16 and unpackHalf2x16. For AOS code, no
       * lowering is needed. For SOA code, the Half2x16 ops must be
       * scalarized.
       */
      if (is_scalar_shader_stage(brw, shader_type)) {
         ops |= LOWER_PACK_HALF_2x16_TO_SPLIT
              | LOWER_UNPACK_HALF_2x16_TO_SPLIT;
      }
   } else {
      ops |= LOWER_PACK_HALF_2x16
           | LOWER_UNPACK_HALF_2x16;
   }

   lower_packing_builtins(ir, ops);
}

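/**
 * Lower and optimize the linked GLSL IR of one shader into the subset the
 * i965 backends can consume, then validate it and optionally dump it.
 */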
static void
process_glsl_ir(gl_shader_stage stage,
                struct brw_context *brw,
                struct gl_shader_program *shader_prog,
                struct gl_shader *shader)
{
   struct gl_context *ctx = &brw->ctx;
   const struct gl_shader_compiler_options *options =
      &ctx->Const.ShaderCompilerOptions[shader->Stage];

   /* Temporary memory context for any new IR. */
   void *mem_ctx = ralloc_context(NULL);

   ralloc_adopt(mem_ctx, shader->ir);

   /* lower_packing_builtins() inserts arithmetic instructions, so it
    * must precede lower_instructions().
    */
   brw_lower_packing_builtins(brw, shader->Stage, shader->ir);
   do_mat_op_to_vec(shader->ir);
   const int bitfield_insert = brw->gen >= 7 ? BITFIELD_INSERT_TO_BFM_BFI : 0;
   lower_instructions(shader->ir,
                      MOD_TO_FLOOR |
                      DIV_TO_MUL_RCP |
                      SUB_TO_ADD_NEG |
                      EXP_TO_EXP2 |
                      LOG_TO_LOG2 |
                      bitfield_insert |
                      LDEXP_TO_ARITH |
                      CARRY_TO_ARITH |
                      BORROW_TO_ARITH);

   /* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
    * if-statements need to be flattened.
    */
   if (brw->gen < 6)
      lower_if_to_cond_assign(shader->ir, 16);

   do_lower_texture_projection(shader->ir);
   brw_lower_texture_gradients(brw, shader->ir);
   do_vec_index_to_cond_assign(shader->ir);
   lower_vector_insert(shader->ir, true);
   if (options->NirOptions == NULL)
      brw_do_cubemap_normalize(shader->ir);
   lower_offset_arrays(shader->ir);
   brw_do_lower_unnormalized_offset(shader->ir);
   lower_noise(shader->ir);
   lower_quadop_vector(shader->ir, false);

   bool lowered_variable_indexing =
      lower_variable_index_to_cond_assign((gl_shader_stage)stage,
                                          shader->ir,
                                          options->EmitNoIndirectInput,
                                          options->EmitNoIndirectOutput,
                                          options->EmitNoIndirectTemp,
                                          options->EmitNoIndirectUniform);

   if (unlikely(brw->perf_debug && lowered_variable_indexing)) {
      perf_debug("Unsupported form of variable indexing in %s; falling "
                 "back to very inefficient code generation\n",
                 _mesa_shader_stage_to_abbrev(shader->Stage));
   }

   lower_ubo_reference(shader, shader->ir);

   bool progress;
   do {
      progress = false;

      if (is_scalar_shader_stage(brw, shader->Stage)) {
         brw_do_channel_expressions(shader->ir);
         brw_do_vector_splitting(shader->ir);
      }

      progress = do_lower_jumps(shader->ir, true, true,
                                true, /* main return */
                                false, /* continue */
                                false /* loops */
                                ) || progress;

      progress = do_common_optimization(shader->ir, true, true,
                                        options, ctx->Const.NativeIntegers) || progress;
   } while (progress);

   validate_ir_tree(shader->ir);

   /* Now that we've finished altering the linked IR, reparent any live IR
    * back to the permanent memory context, and free the temporary one
    * (discarding any junk we optimized away).
    */
   reparent_ir(shader->ir, shader->ir);
   ralloc_free(mem_ctx);

   if (ctx->_Shader->Flags & GLSL_DUMP) {
      fprintf(stderr, "\n");
      fprintf(stderr, "GLSL IR for linked %s program %d:\n",
              _mesa_shader_stage_to_string(shader->Stage),
              shader_prog->Name);
      _mesa_print_ir(stderr, shader->ir, NULL);
      fprintf(stderr, "\n");
   }
}

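/**
 * The i965 LinkShader() driver hook: for each linked stage, create a
 * gl_program, lower its IR, record state references and sampler usage,
 * optionally translate to NIR, and finally precompile the stages if
 * shader precompilation is enabled.
 */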
GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
   struct brw_context *brw = brw_context(ctx);
   unsigned int stage;

   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      struct gl_shader *shader = shProg->_LinkedShaders[stage];
      const struct gl_shader_compiler_options *options =
         &ctx->Const.ShaderCompilerOptions[stage];

      if (!shader)
         continue;

      struct gl_program *prog =
         ctx->Driver.NewProgram(ctx, _mesa_shader_stage_to_program(stage),
                                shader->Name);
      if (!prog)
         return false;
      prog->Parameters = _mesa_new_parameter_list();

      _mesa_copy_linked_program_data((gl_shader_stage) stage, shProg, prog);

      process_glsl_ir((gl_shader_stage) stage, brw, shProg, shader);

      /* Make a pass over the IR to add state references for any built-in
       * uniforms that are used. This has to be done now (during linking).
       * Code generation doesn't happen until the first time this shader is
       * used for rendering. Waiting until then to generate the parameters
       * is too late. At that point, the values for the built-in uniforms
       * won't get sent to the shader.
       */
      foreach_in_list(ir_instruction, node, shader->ir) {
         ir_variable *var = node->as_variable();

         if ((var == NULL) || (var->data.mode != ir_var_uniform)
             || (strncmp(var->name, "gl_", 3) != 0))
            continue;

         const ir_state_slot *const slots = var->get_state_slots();
         assert(slots != NULL);

         for (unsigned int i = 0; i < var->get_num_state_slots(); i++) {
            _mesa_add_state_reference(prog->Parameters,
                                      (gl_state_index *) slots[i].tokens);
         }
      }

      do_set_program_inouts(shader->ir, prog, shader->Stage);

      prog->SamplersUsed = shader->active_samplers;
      prog->ShadowSamplers = shader->shadow_samplers;
      _mesa_update_shader_textures_used(shProg, prog);

      _mesa_reference_program(ctx, &shader->Program, prog);

      brw_add_texrect_params(prog);

      if (options->NirOptions) {
         prog->nir = brw_create_nir(brw, shProg, prog, (gl_shader_stage) stage,
                                    is_scalar_shader_stage(brw, stage));
      }

      _mesa_reference_program(ctx, &prog, NULL);
   }

   if ((ctx->_Shader->Flags & GLSL_DUMP) && shProg->Name != 0) {
      for (unsigned i = 0; i < shProg->NumShaders; i++) {
         const struct gl_shader *sh = shProg->Shaders[i];
         if (!sh)
            continue;

         fprintf(stderr, "GLSL %s shader %d source for linked program %d:\n",
                 _mesa_shader_stage_to_string(sh->Stage),
                 i, shProg->Name);
         fprintf(stderr, "%s", sh->Source);
         fprintf(stderr, "\n");
      }
   }

   if (brw->precompile && !brw_shader_precompile(ctx, shProg))
      return false;

   return true;
}

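/**
 * Map a GLSL base type to the hardware register type used to hold it.
 */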
enum brw_reg_type
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_SUBROUTINE:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
      return brw_type_for_base_type(type->fields.array);
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_SAMPLER:
   case GLSL_TYPE_ATOMIC_UINT:
      /* These should be overridden with the type of the member when
       * dereferenced into. BRW_REGISTER_TYPE_UD seems like a likely
       * way to trip up if we don't.
       */
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_IMAGE:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_FUNCTION:
      unreachable("not reached");
   }

   return BRW_REGISTER_TYPE_F;
}

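/**
 * Map a GLSL IR comparison operation to the EU's conditional modifier.
 */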
enum brw_conditional_mod
brw_conditional_for_comparison(unsigned int op)
{
   switch (op) {
   case ir_binop_less:
      return BRW_CONDITIONAL_L;
   case ir_binop_greater:
      return BRW_CONDITIONAL_G;
   case ir_binop_lequal:
      return BRW_CONDITIONAL_LE;
   case ir_binop_gequal:
      return BRW_CONDITIONAL_GE;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as equal for scalars */
      return BRW_CONDITIONAL_Z;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      return BRW_CONDITIONAL_NZ;
   default:
      unreachable("not reached: bad operation for comparison");
   }
}

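/**
 * Map a math shader opcode to the extended math unit's function encoding.
 */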
uint32_t
brw_math_function(enum opcode op)
{
   switch (op) {
   case SHADER_OPCODE_RCP:
      return BRW_MATH_FUNCTION_INV;
   case SHADER_OPCODE_RSQ:
      return BRW_MATH_FUNCTION_RSQ;
   case SHADER_OPCODE_SQRT:
      return BRW_MATH_FUNCTION_SQRT;
   case SHADER_OPCODE_EXP2:
      return BRW_MATH_FUNCTION_EXP;
   case SHADER_OPCODE_LOG2:
      return BRW_MATH_FUNCTION_LOG;
   case SHADER_OPCODE_POW:
      return BRW_MATH_FUNCTION_POW;
   case SHADER_OPCODE_SIN:
      return BRW_MATH_FUNCTION_SIN;
   case SHADER_OPCODE_COS:
      return BRW_MATH_FUNCTION_COS;
   case SHADER_OPCODE_INT_QUOTIENT:
      return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
   case SHADER_OPCODE_INT_REMAINDER:
      return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
   default:
      unreachable("not reached: unknown math function");
   }
}

uint32_t
brw_texture_offset(int *offsets, unsigned num_components)
{
   if (!offsets) return 0;  /* nonconstant offset; caller will handle it. */

   /* Combine all three offsets into a single unsigned dword:
    *
    *    bits 11:8 - U Offset (X component)
    *    bits  7:4 - V Offset (Y component)
    *    bits  3:0 - R Offset (Z component)
    */
   unsigned offset_bits = 0;
   for (unsigned i = 0; i < num_components; i++) {
      const unsigned shift = 4 * (2 - i);
      offset_bits |= (offsets[i] << shift) & (0xF << shift);
   }
   return offset_bits;
}

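/**
 * Return a human-readable name for an opcode, for use in debug dumps.
 */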
const char *
brw_instruction_name(enum opcode op)
{
   switch (op) {
   case BRW_OPCODE_MOV ... BRW_OPCODE_NOP:
      assert(opcode_descs[op].name);
      return opcode_descs[op].name;
   case FS_OPCODE_FB_WRITE:
      return "fb_write";
   case FS_OPCODE_FB_WRITE_LOGICAL:
      return "fb_write_logical";
   case FS_OPCODE_BLORP_FB_WRITE:
      return "blorp_fb_write";
   case FS_OPCODE_REP_FB_WRITE:
      return "rep_fb_write";

   case SHADER_OPCODE_RCP:
      return "rcp";
   case SHADER_OPCODE_RSQ:
      return "rsq";
   case SHADER_OPCODE_SQRT:
      return "sqrt";
   case SHADER_OPCODE_EXP2:
      return "exp2";
   case SHADER_OPCODE_LOG2:
      return "log2";
   case SHADER_OPCODE_POW:
      return "pow";
   case SHADER_OPCODE_INT_QUOTIENT:
      return "int_quot";
   case SHADER_OPCODE_INT_REMAINDER:
      return "int_rem";
   case SHADER_OPCODE_SIN:
      return "sin";
   case SHADER_OPCODE_COS:
      return "cos";

   case SHADER_OPCODE_TEX:
      return "tex";
   case SHADER_OPCODE_TEX_LOGICAL:
      return "tex_logical";
   case SHADER_OPCODE_TXD:
      return "txd";
   case SHADER_OPCODE_TXD_LOGICAL:
      return "txd_logical";
   case SHADER_OPCODE_TXF:
      return "txf";
   case SHADER_OPCODE_TXF_LOGICAL:
      return "txf_logical";
   case SHADER_OPCODE_TXL:
      return "txl";
   case SHADER_OPCODE_TXL_LOGICAL:
      return "txl_logical";
   case SHADER_OPCODE_TXS:
      return "txs";
   case SHADER_OPCODE_TXS_LOGICAL:
      return "txs_logical";
   case FS_OPCODE_TXB:
      return "txb";
   case FS_OPCODE_TXB_LOGICAL:
      return "txb_logical";
   case SHADER_OPCODE_TXF_CMS:
      return "txf_cms";
   case SHADER_OPCODE_TXF_CMS_LOGICAL:
      return "txf_cms_logical";
   case SHADER_OPCODE_TXF_UMS:
      return "txf_ums";
   case SHADER_OPCODE_TXF_UMS_LOGICAL:
      return "txf_ums_logical";
   case SHADER_OPCODE_TXF_MCS:
      return "txf_mcs";
   case SHADER_OPCODE_TXF_MCS_LOGICAL:
      return "txf_mcs_logical";
   case SHADER_OPCODE_LOD:
      return "lod";
   case SHADER_OPCODE_LOD_LOGICAL:
      return "lod_logical";
   case SHADER_OPCODE_TG4:
      return "tg4";
   case SHADER_OPCODE_TG4_LOGICAL:
      return "tg4_logical";
   case SHADER_OPCODE_TG4_OFFSET:
      return "tg4_offset";
   case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
      return "tg4_offset_logical";
   case SHADER_OPCODE_SAMPLEINFO:
      return "sampleinfo";

   case SHADER_OPCODE_SHADER_TIME_ADD:
      return "shader_time_add";

   case SHADER_OPCODE_UNTYPED_ATOMIC:
      return "untyped_atomic";
   case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
      return "untyped_atomic_logical";
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
      return "untyped_surface_read";
   case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
      return "untyped_surface_read_logical";
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
      return "untyped_surface_write";
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
      return "untyped_surface_write_logical";
   case SHADER_OPCODE_TYPED_ATOMIC:
      return "typed_atomic";
   case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
      return "typed_atomic_logical";
   case SHADER_OPCODE_TYPED_SURFACE_READ:
      return "typed_surface_read";
   case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      return "typed_surface_read_logical";
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
      return "typed_surface_write";
   case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
      return "typed_surface_write_logical";
   case SHADER_OPCODE_MEMORY_FENCE:
      return "memory_fence";

   case SHADER_OPCODE_LOAD_PAYLOAD:
      return "load_payload";

   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return "gen4_scratch_read";
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return "gen4_scratch_write";
   case SHADER_OPCODE_GEN7_SCRATCH_READ:
      return "gen7_scratch_read";
   case SHADER_OPCODE_URB_WRITE_SIMD8:
      return "gen8_urb_write_simd8";

   case SHADER_OPCODE_FIND_LIVE_CHANNEL:
      return "find_live_channel";
   case SHADER_OPCODE_BROADCAST:
      return "broadcast";

   case VEC4_OPCODE_MOV_BYTES:
      return "mov_bytes";
   case VEC4_OPCODE_PACK_BYTES:
      return "pack_bytes";
   case VEC4_OPCODE_UNPACK_UNIFORM:
      return "unpack_uniform";

   case FS_OPCODE_DDX_COARSE:
      return "ddx_coarse";
   case FS_OPCODE_DDX_FINE:
      return "ddx_fine";
   case FS_OPCODE_DDY_COARSE:
      return "ddy_coarse";
   case FS_OPCODE_DDY_FINE:
      return "ddy_fine";

   case FS_OPCODE_CINTERP:
      return "cinterp";
   case FS_OPCODE_LINTERP:
      return "linterp";

   case FS_OPCODE_PIXEL_X:
      return "pixel_x";
   case FS_OPCODE_PIXEL_Y:
      return "pixel_y";

   case FS_OPCODE_GET_BUFFER_SIZE:
      return "fs_get_buffer_size";

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      return "uniform_pull_const";
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
      return "uniform_pull_const_gen7";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
      return "varying_pull_const";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
      return "varying_pull_const_gen7";

   case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
      return "mov_dispatch_to_flags";
   case FS_OPCODE_DISCARD_JUMP:
      return "discard_jump";

   case FS_OPCODE_SET_SAMPLE_ID:
      return "set_sample_id";
   case FS_OPCODE_SET_SIMD4X2_OFFSET:
      return "set_simd4x2_offset";

   case FS_OPCODE_PACK_HALF_2x16_SPLIT:
      return "pack_half_2x16_split";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
      return "unpack_half_2x16_split_x";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
      return "unpack_half_2x16_split_y";

   case FS_OPCODE_PLACEHOLDER_HALT:
      return "placeholder_halt";

   case FS_OPCODE_INTERPOLATE_AT_CENTROID:
      return "interp_centroid";
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
      return "interp_sample";
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
      return "interp_shared_offset";
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
      return "interp_per_slot_offset";

   case VS_OPCODE_URB_WRITE:
      return "vs_urb_write";
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return "pull_constant_load";
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      return "pull_constant_load_gen7";

   case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
      return "set_simd4x2_header_gen9";

   case VS_OPCODE_GET_BUFFER_SIZE:
      return "vs_get_buffer_size";

   case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
      return "unpack_flags_simd4x2";

   case GS_OPCODE_URB_WRITE:
      return "gs_urb_write";
   case GS_OPCODE_URB_WRITE_ALLOCATE:
      return "gs_urb_write_allocate";
   case GS_OPCODE_THREAD_END:
      return "gs_thread_end";
   case GS_OPCODE_SET_WRITE_OFFSET:
      return "set_write_offset";
   case GS_OPCODE_SET_VERTEX_COUNT:
      return "set_vertex_count";
   case GS_OPCODE_SET_DWORD_2:
      return "set_dword_2";
   case GS_OPCODE_PREPARE_CHANNEL_MASKS:
      return "prepare_channel_masks";
   case GS_OPCODE_SET_CHANNEL_MASKS:
      return "set_channel_masks";
   case GS_OPCODE_GET_INSTANCE_ID:
      return "get_instance_id";
   case GS_OPCODE_FF_SYNC:
      return "ff_sync";
   case GS_OPCODE_SET_PRIMITIVE_ID:
      return "set_primitive_id";
   case GS_OPCODE_SVB_WRITE:
      return "gs_svb_write";
   case GS_OPCODE_SVB_SET_DST_INDEX:
      return "gs_svb_set_dst_index";
   case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
      return "gs_ff_sync_set_primitives";
   case CS_OPCODE_CS_TERMINATE:
      return "cs_terminate";
   case SHADER_OPCODE_BARRIER:
      return "barrier";
   case SHADER_OPCODE_MULH:
      return "mulh";
   }

   unreachable("not reached");
}

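/**
 * Apply the saturate modifier to an immediate in place. Returns true if
 * the value was changed.
 */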
bool
brw_saturate_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   union {
      unsigned ud;
      int d;
      float f;
   } imm = { reg->dw1.ud }, sat_imm = { 0 };

   switch (type) {
   case BRW_REGISTER_TYPE_UD:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
      /* Nothing to do. */
      return false;
   case BRW_REGISTER_TYPE_UW:
      sat_imm.ud = CLAMP(imm.ud, 0, USHRT_MAX);
      break;
   case BRW_REGISTER_TYPE_W:
      sat_imm.d = CLAMP(imm.d, SHRT_MIN, SHRT_MAX);
      break;
   case BRW_REGISTER_TYPE_F:
      sat_imm.f = CLAMP(imm.f, 0.0f, 1.0f);
      break;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_V:
   case BRW_REGISTER_TYPE_UV:
   case BRW_REGISTER_TYPE_VF:
      unreachable("unimplemented: saturate vector immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      unreachable("unimplemented: saturate DF/HF immediate");
   }

   if (imm.ud != sat_imm.ud) {
      reg->dw1.ud = sat_imm.ud;
      return true;
   }
   return false;
}

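/**
 * Negate an immediate in place, so a negate source modifier can be
 * dropped. Returns true on success.
 */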
bool
brw_negate_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   switch (type) {
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      reg->dw1.d = -reg->dw1.d;
      return true;
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
      reg->dw1.d = -(int16_t)reg->dw1.ud;
      return true;
   case BRW_REGISTER_TYPE_F:
      reg->dw1.f = -reg->dw1.f;
      return true;
   case BRW_REGISTER_TYPE_VF:
      reg->dw1.ud ^= 0x80808080;
      return true;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_UV:
   case BRW_REGISTER_TYPE_V:
      assert(!"unimplemented: negate UV/V immediate");
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
      assert(!"unimplemented: negate UQ/Q immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      assert(!"unimplemented: negate DF/HF immediate");
   }

   return false;
}

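/**
 * Take the absolute value of an immediate in place, so an abs source
 * modifier can be dropped. Returns true on success.
 */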
bool
brw_abs_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   switch (type) {
   case BRW_REGISTER_TYPE_D:
      reg->dw1.d = abs(reg->dw1.d);
      return true;
   case BRW_REGISTER_TYPE_W:
      reg->dw1.d = abs((int16_t)reg->dw1.ud);
      return true;
   case BRW_REGISTER_TYPE_F:
      reg->dw1.f = fabsf(reg->dw1.f);
      return true;
   case BRW_REGISTER_TYPE_VF:
      reg->dw1.ud &= ~0x80808080;
      return true;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_UD:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_UV:
      /* Presumably the absolute value modifier on an unsigned source is a
       * nop, but it would be nice to confirm.
       */
      assert(!"unimplemented: abs unsigned immediate");
   case BRW_REGISTER_TYPE_V:
      assert(!"unimplemented: abs V immediate");
   case BRW_REGISTER_TYPE_Q:
      assert(!"unimplemented: abs Q immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      assert(!"unimplemented: abs DF/HF immediate");
   }

   return false;
}

backend_shader::backend_shader(const struct brw_compiler *compiler,
                               void *log_data,
                               void *mem_ctx,
                               struct gl_shader_program *shader_prog,
                               struct gl_program *prog,
                               struct brw_stage_prog_data *stage_prog_data,
                               gl_shader_stage stage)
   : compiler(compiler),
     log_data(log_data),
     devinfo(compiler->devinfo),
     shader(shader_prog ?
        (struct brw_shader *)shader_prog->_LinkedShaders[stage] : NULL),
     shader_prog(shader_prog),
     prog(prog),
     stage_prog_data(stage_prog_data),
     mem_ctx(mem_ctx),
     cfg(NULL),
     stage(stage)
{
   debug_enabled = INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage);
   stage_name = _mesa_shader_stage_to_string(stage);
   stage_abbrev = _mesa_shader_stage_to_abbrev(stage);
}

bool
backend_reg::is_zero() const
{
   if (file != IMM)
      return false;

   return fixed_hw_reg.dw1.d == 0;
}

bool
backend_reg::is_one() const
{
   if (file != IMM)
      return false;

   return type == BRW_REGISTER_TYPE_F
          ? fixed_hw_reg.dw1.f == 1.0
          : fixed_hw_reg.dw1.d == 1;
}

bool
backend_reg::is_negative_one() const
{
   if (file != IMM)
      return false;

   switch (type) {
   case BRW_REGISTER_TYPE_F:
      return fixed_hw_reg.dw1.f == -1.0;
   case BRW_REGISTER_TYPE_D:
      return fixed_hw_reg.dw1.d == -1;
   default:
      return false;
   }
}

bool
backend_reg::is_null() const
{
   return file == HW_REG &&
          fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
          fixed_hw_reg.nr == BRW_ARF_NULL;
}


bool
backend_reg::is_accumulator() const
{
   return file == HW_REG &&
          fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
          fixed_hw_reg.nr == BRW_ARF_ACCUMULATOR;
}

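/**
 * Return true if this register falls within the region that starts at
 * register r and spans n registers.
 */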
bool
backend_reg::in_range(const backend_reg &r, unsigned n) const
{
   return (file == r.file &&
           reg == r.reg &&
           reg_offset >= r.reg_offset &&
           reg_offset < r.reg_offset + n);
}

bool
backend_instruction::is_commutative() const
{
   switch (opcode) {
   case BRW_OPCODE_AND:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_XOR:
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_MUL:
   case SHADER_OPCODE_MULH:
      return true;
   case BRW_OPCODE_SEL:
      /* MIN and MAX are commutative. */
      if (conditional_mod == BRW_CONDITIONAL_GE ||
          conditional_mod == BRW_CONDITIONAL_L) {
         return true;
      }
      /* fallthrough */
   default:
      return false;
   }
}

bool
backend_instruction::is_3src() const
{
   return opcode < ARRAY_SIZE(opcode_descs) && opcode_descs[opcode].nsrc == 3;
}

bool
backend_instruction::is_tex() const
{
   return (opcode == SHADER_OPCODE_TEX ||
           opcode == FS_OPCODE_TXB ||
           opcode == SHADER_OPCODE_TXD ||
           opcode == SHADER_OPCODE_TXF ||
           opcode == SHADER_OPCODE_TXF_CMS ||
           opcode == SHADER_OPCODE_TXF_UMS ||
           opcode == SHADER_OPCODE_TXF_MCS ||
           opcode == SHADER_OPCODE_TXL ||
           opcode == SHADER_OPCODE_TXS ||
           opcode == SHADER_OPCODE_LOD ||
           opcode == SHADER_OPCODE_TG4 ||
           opcode == SHADER_OPCODE_TG4_OFFSET);
}

bool
backend_instruction::is_math() const
{
   return (opcode == SHADER_OPCODE_RCP ||
           opcode == SHADER_OPCODE_RSQ ||
           opcode == SHADER_OPCODE_SQRT ||
           opcode == SHADER_OPCODE_EXP2 ||
           opcode == SHADER_OPCODE_LOG2 ||
           opcode == SHADER_OPCODE_SIN ||
           opcode == SHADER_OPCODE_COS ||
           opcode == SHADER_OPCODE_INT_QUOTIENT ||
           opcode == SHADER_OPCODE_INT_REMAINDER ||
           opcode == SHADER_OPCODE_POW);
}

bool
backend_instruction::is_control_flow() const
{
   switch (opcode) {
   case BRW_OPCODE_DO:
   case BRW_OPCODE_WHILE:
   case BRW_OPCODE_IF:
   case BRW_OPCODE_ELSE:
   case BRW_OPCODE_ENDIF:
   case BRW_OPCODE_BREAK:
   case BRW_OPCODE_CONTINUE:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::can_do_source_mods() const
{
   switch (opcode) {
   case BRW_OPCODE_ADDC:
   case BRW_OPCODE_BFE:
   case BRW_OPCODE_BFI1:
   case BRW_OPCODE_BFI2:
   case BRW_OPCODE_BFREV:
   case BRW_OPCODE_CBIT:
   case BRW_OPCODE_FBH:
   case BRW_OPCODE_FBL:
   case BRW_OPCODE_SUBB:
      return false;
   default:
      return true;
   }
}

bool
backend_instruction::can_do_saturate() const
{
   switch (opcode) {
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_ASR:
   case BRW_OPCODE_AVG:
   case BRW_OPCODE_DP2:
   case BRW_OPCODE_DP3:
   case BRW_OPCODE_DP4:
   case BRW_OPCODE_DPH:
   case BRW_OPCODE_F16TO32:
   case BRW_OPCODE_F32TO16:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_LRP:
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MAD:
   case BRW_OPCODE_MATH:
   case BRW_OPCODE_MOV:
   case BRW_OPCODE_MUL:
   case SHADER_OPCODE_MULH:
   case BRW_OPCODE_PLN:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDZ:
   case BRW_OPCODE_SEL:
   case BRW_OPCODE_SHL:
   case BRW_OPCODE_SHR:
   case FS_OPCODE_LINTERP:
   case SHADER_OPCODE_COS:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_SQRT:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::can_do_cmod() const
{
   switch (opcode) {
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_ADDC:
   case BRW_OPCODE_AND:
   case BRW_OPCODE_ASR:
   case BRW_OPCODE_AVG:
   case BRW_OPCODE_CMP:
   case BRW_OPCODE_CMPN:
   case BRW_OPCODE_DP2:
   case BRW_OPCODE_DP3:
   case BRW_OPCODE_DP4:
   case BRW_OPCODE_DPH:
   case BRW_OPCODE_F16TO32:
   case BRW_OPCODE_F32TO16:
   case BRW_OPCODE_FRC:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_LRP:
   case BRW_OPCODE_LZD:
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MACH:
   case BRW_OPCODE_MAD:
   case BRW_OPCODE_MOV:
   case BRW_OPCODE_MUL:
   case BRW_OPCODE_NOT:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_PLN:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDZ:
   case BRW_OPCODE_SAD2:
   case BRW_OPCODE_SADA2:
   case BRW_OPCODE_SHL:
   case BRW_OPCODE_SHR:
   case BRW_OPCODE_SUBB:
   case BRW_OPCODE_XOR:
   case FS_OPCODE_CINTERP:
   case FS_OPCODE_LINTERP:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::reads_accumulator_implicitly() const
{
   switch (opcode) {
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MACH:
   case BRW_OPCODE_SADA2:
      return true;
   default:
      return false;
   }
}

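/**
 * Return true if the instruction updates the accumulator as a side effect:
 * either explicitly flagged, or implicitly on Gen4-5 where most arithmetic
 * instructions write it.
 */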
bool
backend_instruction::writes_accumulator_implicitly(const struct brw_device_info *devinfo) const
{
   return writes_accumulator ||
          (devinfo->gen < 6 &&
           ((opcode >= BRW_OPCODE_ADD && opcode < BRW_OPCODE_NOP) ||
            (opcode >= FS_OPCODE_DDX_COARSE && opcode <= FS_OPCODE_LINTERP &&
             opcode != FS_OPCODE_CINTERP)));
}

bool
backend_instruction::has_side_effects() const
{
   switch (opcode) {
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
   case SHADER_OPCODE_MEMORY_FENCE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case FS_OPCODE_FB_WRITE:
   case SHADER_OPCODE_BARRIER:
      return true;
   default:
      return false;
   }
}

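/* Debug-build helper: verify that an instruction actually belongs to the
 * basic block it claims to be in.
 */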
#ifndef NDEBUG
static bool
inst_is_in_block(const bblock_t *block, const backend_instruction *inst)
{
   bool found = false;
   foreach_inst_in_block (backend_instruction, i, block) {
      if (inst == i) {
         found = true;
      }
   }
   return found;
}
#endif

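/**
 * Shift the start/end instruction counts of every block after start_block
 * by ip_adjustment, after an instruction has been added or removed.
 */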
static void
adjust_later_block_ips(bblock_t *start_block, int ip_adjustment)
{
   for (bblock_t *block_iter = start_block->next();
        !block_iter->link.is_tail_sentinel();
        block_iter = block_iter->next()) {
      block_iter->start_ip += ip_adjustment;
      block_iter->end_ip += ip_adjustment;
   }
}

void
backend_instruction::insert_after(bblock_t *block, backend_instruction *inst)
{
   if (!this->is_head_sentinel())
      assert(inst_is_in_block(block, this) || !"Instruction not in block");

   block->end_ip++;

   adjust_later_block_ips(block, 1);

   exec_node::insert_after(inst);
}

void
backend_instruction::insert_before(bblock_t *block, backend_instruction *inst)
{
   if (!this->is_tail_sentinel())
      assert(inst_is_in_block(block, this) || !"Instruction not in block");

   block->end_ip++;

   adjust_later_block_ips(block, 1);

   exec_node::insert_before(inst);
}

void
backend_instruction::insert_before(bblock_t *block, exec_list *list)
{
   assert(inst_is_in_block(block, this) || !"Instruction not in block");

   unsigned num_inst = list->length();

   block->end_ip += num_inst;

   adjust_later_block_ips(block, num_inst);

   exec_node::insert_before(list);
}

void
backend_instruction::remove(bblock_t *block)
{
   assert(inst_is_in_block(block, this) || !"Instruction not in block");

   adjust_later_block_ips(block, -1);

   if (block->start_ip == block->end_ip) {
      block->cfg->remove_block(block);
   } else {
      block->end_ip--;
   }

   exec_node::remove();
}

void
backend_shader::dump_instructions()
{
   dump_instructions(NULL);
}

void
backend_shader::dump_instructions(const char *name)
{
   FILE *file = stderr;
   if (name && geteuid() != 0) {
      file = fopen(name, "w");
      if (!file)
         file = stderr;
   }

   if (cfg) {
      int ip = 0;
      foreach_block_and_inst(block, backend_instruction, inst, cfg) {
         fprintf(file, "%4d: ", ip++);
         dump_instruction(inst, file);
      }
   } else {
      int ip = 0;
      foreach_in_list(backend_instruction, inst, &instructions) {
         fprintf(file, "%4d: ", ip++);
         dump_instruction(inst, file);
      }
   }

   if (file != stderr) {
      fclose(file);
   }
}

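/**
 * Build the control flow graph from the instruction list, if it hasn't
 * been built already.
 */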
void
backend_shader::calculate_cfg()
{
   if (this->cfg)
      return;
   cfg = new(mem_ctx) cfg_t(&this->instructions);
}

void
backend_shader::invalidate_cfg()
{
   ralloc_free(this->cfg);
   this->cfg = NULL;
}

/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that
 * they're unused, but also to make sure that adding small offsets to them
 * will trigger some of our asserts that surface indices are
 * < BRW_MAX_SURFACES.
 */
void
backend_shader::assign_common_binding_table_offsets(uint32_t next_binding_table_offset)
{
   int num_textures = _mesa_fls(prog->SamplersUsed);

   stage_prog_data->binding_table.texture_start = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   if (shader) {
      stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += shader->base.NumUniformBlocks;
   } else {
      stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
      next_binding_table_offset++;
   } else {
      stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
   }

   if (prog->UsesGather) {
      if (devinfo->gen >= 8) {
         stage_prog_data->binding_table.gather_texture_start =
            stage_prog_data->binding_table.texture_start;
      } else {
         stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
         next_binding_table_offset += num_textures;
      }
   } else {
      stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (shader_prog && shader_prog->NumAtomicBuffers) {
      stage_prog_data->binding_table.abo_start = next_binding_table_offset;
      next_binding_table_offset += shader_prog->NumAtomicBuffers;
   } else {
      stage_prog_data->binding_table.abo_start = 0xd0d0d0d0;
   }

   if (shader && shader->base.NumImages) {
      stage_prog_data->binding_table.image_start = next_binding_table_offset;
      next_binding_table_offset += shader->base.NumImages;
   } else {
      stage_prog_data->binding_table.image_start = 0xd0d0d0d0;
   }

   /* This may or may not be used depending on how the compile goes. */
   stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
   next_binding_table_offset++;

   assert(next_binding_table_offset <= BRW_MAX_SURFACES);

   /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */
}

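/**
 * Upload the brw_image_param metadata for each element of an image
 * uniform as vec4 uniform values, and mark the corresponding surfaces
 * as used.
 */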
void
backend_shader::setup_image_uniform_values(unsigned param_offset,
                                           const gl_uniform_storage *storage)
{
   const unsigned stage = _mesa_program_enum_to_shader_stage(prog->Target);

   for (unsigned i = 0; i < MAX2(storage->array_elements, 1); i++) {
      const unsigned image_idx = storage->image[stage].index + i;
      const brw_image_param *param = &stage_prog_data->image_param[image_idx];

      /* Upload the brw_image_param structure. The order is expected to match
       * the BRW_IMAGE_PARAM_*_OFFSET defines.
       */
      setup_vec4_uniform_value(param_offset + BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET,
                               (const gl_constant_value *)&param->surface_idx, 1);
      setup_vec4_uniform_value(param_offset + BRW_IMAGE_PARAM_OFFSET_OFFSET,
                               (const gl_constant_value *)param->offset, 2);
      setup_vec4_uniform_value(param_offset + BRW_IMAGE_PARAM_SIZE_OFFSET,
                               (const gl_constant_value *)param->size, 3);
      setup_vec4_uniform_value(param_offset + BRW_IMAGE_PARAM_STRIDE_OFFSET,
                               (const gl_constant_value *)param->stride, 4);
      setup_vec4_uniform_value(param_offset + BRW_IMAGE_PARAM_TILING_OFFSET,
                               (const gl_constant_value *)param->tiling, 3);
      setup_vec4_uniform_value(param_offset + BRW_IMAGE_PARAM_SWIZZLING_OFFSET,
                               (const gl_constant_value *)param->swizzling, 2);
      param_offset += BRW_IMAGE_PARAM_SIZE;

      brw_mark_surface_used(
         stage_prog_data,
         stage_prog_data->binding_table.image_start + image_idx);
   }
}