/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "main/macros.h"
#include "brw_context.h"
#include "glsl/ir_optimization.h"
#include "glsl/glsl_parser_extras.h"
#include "main/shaderapi.h"

static void
shader_debug_log_mesa(void *data, const char *fmt, ...)
{
   struct brw_context *brw = (struct brw_context *)data;
   va_list args;

   va_start(args, fmt);
   GLuint msg_id = 0;
   _mesa_gl_vdebug(&brw->ctx, &msg_id,
                   MESA_DEBUG_SOURCE_SHADER_COMPILER,
                   MESA_DEBUG_TYPE_OTHER,
                   MESA_DEBUG_SEVERITY_NOTIFICATION, fmt, args);
   va_end(args);
}

static void
shader_perf_log_mesa(void *data, const char *fmt, ...)
{
   struct brw_context *brw = (struct brw_context *)data;

   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      va_list args_copy;
      va_copy(args_copy, args);
      vfprintf(stderr, fmt, args_copy);
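      /* Editor's note: vfprintf() consumes the va_list it is handed, so the
       * message is printed from args_copy and the original args stays valid
       * for the _mesa_gl_vdebug() call below.
       */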
      va_end(args_copy);
   }

   if (brw->perf_debug) {
      GLuint msg_id = 0;
      _mesa_gl_vdebug(&brw->ctx, &msg_id,
                      MESA_DEBUG_SOURCE_SHADER_COMPILER,
                      MESA_DEBUG_TYPE_PERFORMANCE,
                      MESA_DEBUG_SEVERITY_MEDIUM, fmt, args);
   }

   va_end(args);
}

extern "C" struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct brw_device_info *devinfo)
{
   struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);

   compiler->devinfo = devinfo;
   compiler->shader_debug_log = shader_debug_log_mesa;
   compiler->shader_perf_log = shader_perf_log_mesa;

   brw_fs_alloc_reg_sets(compiler);
   brw_vec4_alloc_reg_set(compiler);

   if (devinfo->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS))
      compiler->scalar_vs = true;

   nir_shader_compiler_options *nir_options =
      rzalloc(compiler, nir_shader_compiler_options);
   nir_options->native_integers = true;
   /* In order to help allow for better CSE at the NIR level we tell NIR
    * to split all ffma instructions during opt_algebraic and we then
    * re-combine them as a later step.
    */
   nir_options->lower_ffma = true;
   nir_options->lower_sub = true;
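
   /* Illustrative sketch (not the passes' literal output): with lower_ffma
    * set, ffma(a, b, c) and ffma(a, b, d) are split into fmul/fadd pairs so
    * CSE can share the common a*b multiply; a later fusing step may turn
    * the surviving fmul+fadd pairs back into ffma.
    */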

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      compiler->glsl_compiler_options[i].MaxUnrollIterations = 32;
      compiler->glsl_compiler_options[i].MaxIfDepth =
         devinfo->gen < 6 ? 16 : UINT_MAX;

      compiler->glsl_compiler_options[i].EmitCondCodes = true;
      compiler->glsl_compiler_options[i].EmitNoNoise = true;
      compiler->glsl_compiler_options[i].EmitNoMainReturn = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectOutput =
         (i == MESA_SHADER_FRAGMENT);
      compiler->glsl_compiler_options[i].EmitNoIndirectTemp =
         (i == MESA_SHADER_FRAGMENT);
      compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;
      compiler->glsl_compiler_options[i].LowerClipDistance = true;
   }

   compiler->glsl_compiler_options[MESA_SHADER_VERTEX].OptimizeForAOS = true;
   compiler->glsl_compiler_options[MESA_SHADER_GEOMETRY].OptimizeForAOS = true;

   if (compiler->scalar_vs) {
      /* If we're using the scalar backend for vertex shaders, we need to
       * configure these accordingly.
       */
      compiler->glsl_compiler_options[MESA_SHADER_VERTEX].EmitNoIndirectOutput = true;
      compiler->glsl_compiler_options[MESA_SHADER_VERTEX].EmitNoIndirectTemp = true;
      compiler->glsl_compiler_options[MESA_SHADER_VERTEX].OptimizeForAOS = false;

      compiler->glsl_compiler_options[MESA_SHADER_VERTEX].NirOptions = nir_options;
   }

   compiler->glsl_compiler_options[MESA_SHADER_FRAGMENT].NirOptions = nir_options;
   compiler->glsl_compiler_options[MESA_SHADER_COMPUTE].NirOptions = nir_options;

   return compiler;
}
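
/* Usage sketch (assumed from how brw->intelScreen->compiler is referenced
 * below, not confirmed by this file): the screen creates one compiler per
 * device at startup, e.g.
 *
 *    screen->compiler = brw_compiler_create(screen, devinfo);
 *
 * Everything is ralloc'd under mem_ctx, so freeing that context also frees
 * the compiler, its register sets, and the NIR options.
 */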

struct gl_shader *
brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
{
   struct brw_shader *shader;

   shader = rzalloc(NULL, struct brw_shader);
   if (shader) {
      shader->base.Type = type;
      shader->base.Stage = _mesa_shader_enum_to_shader_stage(type);
      shader->base.Name = name;
      _mesa_init_shader(ctx, &shader->base);
   }

   return &shader->base;
}

/**
 * Performs a compile of the shader stages even when we don't know
 * what non-orthogonal state will be set, in the hope that it reflects
 * the eventual NOS used, and thus allows us to produce link failures.
 */
static bool
brw_shader_precompile(struct gl_context *ctx,
                      struct gl_shader_program *sh_prog)
{
   struct gl_shader *vs = sh_prog->_LinkedShaders[MESA_SHADER_VERTEX];
   struct gl_shader *gs = sh_prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
   struct gl_shader *fs = sh_prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
   struct gl_shader *cs = sh_prog->_LinkedShaders[MESA_SHADER_COMPUTE];

   if (fs && !brw_fs_precompile(ctx, sh_prog, fs->Program))
      return false;

   if (gs && !brw_gs_precompile(ctx, sh_prog, gs->Program))
      return false;

   if (vs && !brw_vs_precompile(ctx, sh_prog, vs->Program))
      return false;

   if (cs && !brw_cs_precompile(ctx, sh_prog, cs->Program))
      return false;

   return true;
}

static bool
is_scalar_shader_stage(struct brw_context *brw, int stage)
{
   switch (stage) {
   case MESA_SHADER_FRAGMENT:
      return true;
   case MESA_SHADER_VERTEX:
      return brw->intelScreen->compiler->scalar_vs;
   default:
      return false;
   }
}

static void
brw_lower_packing_builtins(struct brw_context *brw,
                           gl_shader_stage shader_type,
                           exec_list *ir)
{
   int ops = LOWER_PACK_SNORM_2x16
           | LOWER_UNPACK_SNORM_2x16
           | LOWER_PACK_UNORM_2x16
           | LOWER_UNPACK_UNORM_2x16;

   if (is_scalar_shader_stage(brw, shader_type)) {
      ops |= LOWER_UNPACK_UNORM_4x8
           | LOWER_UNPACK_SNORM_4x8
           | LOWER_PACK_UNORM_4x8
           | LOWER_PACK_SNORM_4x8;
   }

   if (brw->gen >= 7) {
      /* Gen7 introduced the f32to16 and f16to32 instructions, which can be
       * used to execute packHalf2x16 and unpackHalf2x16. For AOS code, no
       * lowering is needed. For SOA code, the Half2x16 ops must be
       * scalarized.
       */
      if (is_scalar_shader_stage(brw, shader_type)) {
         ops |= LOWER_PACK_HALF_2x16_TO_SPLIT
              | LOWER_UNPACK_HALF_2x16_TO_SPLIT;
      }
   } else {
      ops |= LOWER_PACK_HALF_2x16
           | LOWER_UNPACK_HALF_2x16;
   }

   lower_packing_builtins(ir, ops);
}
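
/* Illustrative sketch of the split lowering above (names as used elsewhere
 * in this file): on a scalar Gen7+ stage, packHalf2x16(v) is rewritten in
 * terms of pack_half_2x16_split(v.x, v.y), which maps onto f32to16 without
 * needing a vec2 source; pre-Gen7, the whole builtin is lowered to plain
 * arithmetic instead.
 */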

static void
process_glsl_ir(struct brw_context *brw,
                struct gl_shader_program *shader_prog,
                struct gl_shader *shader)
{
   struct gl_context *ctx = &brw->ctx;
   const struct gl_shader_compiler_options *options =
      &ctx->Const.ShaderCompilerOptions[shader->Stage];

   /* Temporary memory context for any new IR. */
   void *mem_ctx = ralloc_context(NULL);

   ralloc_adopt(mem_ctx, shader->ir);

   /* lower_packing_builtins() inserts arithmetic instructions, so it
    * must precede lower_instructions().
    */
   brw_lower_packing_builtins(brw, shader->Stage, shader->ir);
   do_mat_op_to_vec(shader->ir);
   const int bitfield_insert = brw->gen >= 7 ? BITFIELD_INSERT_TO_BFM_BFI : 0;
   lower_instructions(shader->ir,
                      MOD_TO_FLOOR |
                      DIV_TO_MUL_RCP |
                      SUB_TO_ADD_NEG |
                      EXP_TO_EXP2 |
                      LOG_TO_LOG2 |
                      bitfield_insert |
                      LDEXP_TO_ARITH);

   /* Pre-gen6 HW can only nest if-statements 16 deep.  Beyond this,
    * if-statements need to be flattened.
    */
   if (brw->gen < 6)
      lower_if_to_cond_assign(shader->ir, 16);

   do_lower_texture_projection(shader->ir);
   brw_lower_texture_gradients(brw, shader->ir);
   do_vec_index_to_cond_assign(shader->ir);
   lower_vector_insert(shader->ir, true);
   if (options->NirOptions == NULL)
      brw_do_cubemap_normalize(shader->ir);
   lower_offset_arrays(shader->ir);
   brw_do_lower_unnormalized_offset(shader->ir);
   lower_noise(shader->ir);
   lower_quadop_vector(shader->ir, false);

   bool lowered_variable_indexing =
      lower_variable_index_to_cond_assign(shader->ir,
                                          options->EmitNoIndirectInput,
                                          options->EmitNoIndirectOutput,
                                          options->EmitNoIndirectTemp,
                                          options->EmitNoIndirectUniform);

   if (unlikely(brw->perf_debug && lowered_variable_indexing)) {
      perf_debug("Unsupported form of variable indexing in FS; falling "
                 "back to very inefficient code generation\n");
   }

   lower_ubo_reference(shader, shader->ir);

   bool progress;
   do {
      progress = false;

      if (is_scalar_shader_stage(brw, shader->Stage)) {
         brw_do_channel_expressions(shader->ir);
         brw_do_vector_splitting(shader->ir);
      }

      progress = do_lower_jumps(shader->ir, true, true,
                                true, /* main return */
                                false, /* continue */
                                false /* loops */
                                ) || progress;

      progress = do_common_optimization(shader->ir, true, true,
                                        options, ctx->Const.NativeIntegers) || progress;
   } while (progress);

   if (options->NirOptions != NULL)
      lower_output_reads(shader->ir);

   validate_ir_tree(shader->ir);

   /* Now that we've finished altering the linked IR, reparent any live IR back
    * to the permanent memory context, and free the temporary one (discarding any
    * junk we optimized away).
    */
   reparent_ir(shader->ir, shader->ir);
   ralloc_free(mem_ctx);
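
   /* Editor's note on the pattern above: shader->ir's allocations were
    * adopted into the temporary mem_ctx at the top of this function, so any
    * nodes the optimization loop orphaned still hang off mem_ctx;
    * reparent_ir() steals the IR that is still reachable back onto
    * shader->ir itself, and the ralloc_free() then releases only the dead
    * nodes.
    */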

   if (ctx->_Shader->Flags & GLSL_DUMP) {
      fprintf(stderr, "\n");
      fprintf(stderr, "GLSL IR for linked %s program %d:\n",
              _mesa_shader_stage_to_string(shader->Stage),
              shader_prog->Name);
      _mesa_print_ir(stderr, shader->ir, NULL);
      fprintf(stderr, "\n");
   }
}

extern "C" GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
   struct brw_context *brw = brw_context(ctx);
   unsigned int stage;

   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      struct gl_shader *shader = shProg->_LinkedShaders[stage];
      const struct gl_shader_compiler_options *options =
         &ctx->Const.ShaderCompilerOptions[stage];

      if (!shader)
         continue;

      struct gl_program *prog =
         ctx->Driver.NewProgram(ctx, _mesa_shader_stage_to_program(stage),
                                shader->Name);
      if (!prog)
         return false;
      prog->Parameters = _mesa_new_parameter_list();

      _mesa_copy_linked_program_data((gl_shader_stage) stage, shProg, prog);

      process_glsl_ir(brw, shProg, shader);

      /* Make a pass over the IR to add state references for any built-in
       * uniforms that are used.  This has to be done now (during linking).
       * Code generation doesn't happen until the first time this shader is
       * used for rendering.  Waiting until then to generate the parameters is
       * too late.  At that point, the values for the built-in uniforms won't
       * get sent to the shader.
       */
      foreach_in_list(ir_instruction, node, shader->ir) {
         ir_variable *var = node->as_variable();

         if ((var == NULL) || (var->data.mode != ir_var_uniform)
             || (strncmp(var->name, "gl_", 3) != 0))
            continue;

         const ir_state_slot *const slots = var->get_state_slots();
         assert(slots != NULL);

         for (unsigned int i = 0; i < var->get_num_state_slots(); i++) {
            _mesa_add_state_reference(prog->Parameters,
                                      (gl_state_index *) slots[i].tokens);
         }
      }

      do_set_program_inouts(shader->ir, prog, shader->Stage);

      prog->SamplersUsed = shader->active_samplers;
      prog->ShadowSamplers = shader->shadow_samplers;
      _mesa_update_shader_textures_used(shProg, prog);

      _mesa_reference_program(ctx, &shader->Program, prog);

      brw_add_texrect_params(prog);

      if (options->NirOptions)
         prog->nir = brw_create_nir(brw, shProg, prog, (gl_shader_stage) stage);

      _mesa_reference_program(ctx, &prog, NULL);
   }

   if ((ctx->_Shader->Flags & GLSL_DUMP) && shProg->Name != 0) {
      for (unsigned i = 0; i < shProg->NumShaders; i++) {
         const struct gl_shader *sh = shProg->Shaders[i];
         if (!sh)
            continue;

         fprintf(stderr, "GLSL %s shader %d source for linked program %d:\n",
                 _mesa_shader_stage_to_string(sh->Stage),
                 i, shProg->Name);
         fprintf(stderr, "%s", sh->Source);
         fprintf(stderr, "\n");
      }
   }

   if (brw->precompile && !brw_shader_precompile(ctx, shProg))
      return false;

   return true;
}

enum brw_reg_type
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
      return brw_type_for_base_type(type->fields.array);
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_SAMPLER:
   case GLSL_TYPE_ATOMIC_UINT:
      /* These should be overridden with the type of the member when
       * dereferenced into.  BRW_REGISTER_TYPE_UD seems like a likely
       * way to trip up if we don't.
       */
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_IMAGE:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_DOUBLE:
      unreachable("not reached");
   }

   return BRW_REGISTER_TYPE_F;
}

enum brw_conditional_mod
brw_conditional_for_comparison(unsigned int op)
{
   switch (op) {
   case ir_binop_less:
      return BRW_CONDITIONAL_L;
   case ir_binop_greater:
      return BRW_CONDITIONAL_G;
   case ir_binop_lequal:
      return BRW_CONDITIONAL_LE;
   case ir_binop_gequal:
      return BRW_CONDITIONAL_GE;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as equal for scalars */
      return BRW_CONDITIONAL_Z;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      return BRW_CONDITIONAL_NZ;
   default:
      unreachable("not reached: bad operation for comparison");
   }
}

uint32_t
brw_math_function(enum opcode op)
{
   switch (op) {
   case SHADER_OPCODE_RCP:
      return BRW_MATH_FUNCTION_INV;
   case SHADER_OPCODE_RSQ:
      return BRW_MATH_FUNCTION_RSQ;
   case SHADER_OPCODE_SQRT:
      return BRW_MATH_FUNCTION_SQRT;
   case SHADER_OPCODE_EXP2:
      return BRW_MATH_FUNCTION_EXP;
   case SHADER_OPCODE_LOG2:
      return BRW_MATH_FUNCTION_LOG;
   case SHADER_OPCODE_POW:
      return BRW_MATH_FUNCTION_POW;
   case SHADER_OPCODE_SIN:
      return BRW_MATH_FUNCTION_SIN;
   case SHADER_OPCODE_COS:
      return BRW_MATH_FUNCTION_COS;
   case SHADER_OPCODE_INT_QUOTIENT:
      return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
   case SHADER_OPCODE_INT_REMAINDER:
      return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
   default:
      unreachable("not reached: unknown math function");
   }
}

uint32_t
brw_texture_offset(int *offsets, unsigned num_components)
{
   if (!offsets) return 0;  /* nonconstant offset; caller will handle it. */

   /* Combine all three offsets into a single unsigned dword:
    *
    *    bits 11:8 - U Offset (X component)
    *    bits  7:4 - V Offset (Y component)
    *    bits  3:0 - R Offset (Z component)
    */
   unsigned offset_bits = 0;
   for (unsigned i = 0; i < num_components; i++) {
      const unsigned shift = 4 * (2 - i);
      offset_bits |= (offsets[i] << shift) & (0xF << shift);
   }
   return offset_bits;
}
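
/* Worked example (editor's sketch): offsets = {1, -2, 3} packs as
 *    U =  1 -> 0x100 (bits 11:8)
 *    V = -2 -> 0x0E0 (bits  7:4, nibble 0xE in two's complement)
 *    R =  3 -> 0x003 (bits  3:0)
 * i.e. brw_texture_offset(offsets, 3) == 0x1E3.  Each component is a
 * signed 4-bit field; the mask above truncates anything wider.
 */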

const char *
brw_instruction_name(enum opcode op)
{
   switch (op) {
   case BRW_OPCODE_MOV ... BRW_OPCODE_NOP:
      assert(opcode_descs[op].name);
      return opcode_descs[op].name;
   case FS_OPCODE_FB_WRITE:
      return "fb_write";
   case FS_OPCODE_BLORP_FB_WRITE:
      return "blorp_fb_write";
   case FS_OPCODE_REP_FB_WRITE:
      return "rep_fb_write";

   case SHADER_OPCODE_RCP:
      return "rcp";
   case SHADER_OPCODE_RSQ:
      return "rsq";
   case SHADER_OPCODE_SQRT:
      return "sqrt";
   case SHADER_OPCODE_EXP2:
      return "exp2";
   case SHADER_OPCODE_LOG2:
      return "log2";
   case SHADER_OPCODE_POW:
      return "pow";
   case SHADER_OPCODE_INT_QUOTIENT:
      return "int_quot";
   case SHADER_OPCODE_INT_REMAINDER:
      return "int_rem";
   case SHADER_OPCODE_SIN:
      return "sin";
   case SHADER_OPCODE_COS:
      return "cos";

   case SHADER_OPCODE_TEX:
      return "tex";
   case SHADER_OPCODE_TXD:
      return "txd";
   case SHADER_OPCODE_TXF:
      return "txf";
   case SHADER_OPCODE_TXL:
      return "txl";
   case SHADER_OPCODE_TXS:
      return "txs";
   case FS_OPCODE_TXB:
      return "txb";
   case SHADER_OPCODE_TXF_CMS:
      return "txf_cms";
   case SHADER_OPCODE_TXF_UMS:
      return "txf_ums";
   case SHADER_OPCODE_TXF_MCS:
      return "txf_mcs";
   case SHADER_OPCODE_LOD:
      return "lod";
   case SHADER_OPCODE_TG4:
      return "tg4";
   case SHADER_OPCODE_TG4_OFFSET:
      return "tg4_offset";

   case SHADER_OPCODE_SHADER_TIME_ADD:
      return "shader_time_add";

   case SHADER_OPCODE_UNTYPED_ATOMIC:
      return "untyped_atomic";
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
      return "untyped_surface_read";
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
      return "untyped_surface_write";
   case SHADER_OPCODE_TYPED_ATOMIC:
      return "typed_atomic";
   case SHADER_OPCODE_TYPED_SURFACE_READ:
      return "typed_surface_read";
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
      return "typed_surface_write";
   case SHADER_OPCODE_MEMORY_FENCE:
      return "memory_fence";

   case SHADER_OPCODE_LOAD_PAYLOAD:
      return "load_payload";

   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return "gen4_scratch_read";
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return "gen4_scratch_write";
   case SHADER_OPCODE_GEN7_SCRATCH_READ:
      return "gen7_scratch_read";
   case SHADER_OPCODE_URB_WRITE_SIMD8:
      return "gen8_urb_write_simd8";

   case SHADER_OPCODE_FIND_LIVE_CHANNEL:
      return "find_live_channel";
   case SHADER_OPCODE_BROADCAST:
      return "broadcast";

   case VEC4_OPCODE_MOV_BYTES:
      return "mov_bytes";
   case VEC4_OPCODE_PACK_BYTES:
      return "pack_bytes";
   case VEC4_OPCODE_UNPACK_UNIFORM:
      return "unpack_uniform";

   case FS_OPCODE_DDX_COARSE:
      return "ddx_coarse";
   case FS_OPCODE_DDX_FINE:
      return "ddx_fine";
   case FS_OPCODE_DDY_COARSE:
      return "ddy_coarse";
   case FS_OPCODE_DDY_FINE:
      return "ddy_fine";

   case FS_OPCODE_CINTERP:
      return "cinterp";
   case FS_OPCODE_LINTERP:
      return "linterp";

   case FS_OPCODE_PIXEL_X:
      return "pixel_x";
   case FS_OPCODE_PIXEL_Y:
      return "pixel_y";

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      return "uniform_pull_const";
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
      return "uniform_pull_const_gen7";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
      return "varying_pull_const";
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
      return "varying_pull_const_gen7";

   case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
      return "mov_dispatch_to_flags";
   case FS_OPCODE_DISCARD_JUMP:
      return "discard_jump";

   case FS_OPCODE_SET_OMASK:
      return "set_omask";
   case FS_OPCODE_SET_SAMPLE_ID:
      return "set_sample_id";
   case FS_OPCODE_SET_SIMD4X2_OFFSET:
      return "set_simd4x2_offset";

   case FS_OPCODE_PACK_HALF_2x16_SPLIT:
      return "pack_half_2x16_split";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
      return "unpack_half_2x16_split_x";
   case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
      return "unpack_half_2x16_split_y";

   case FS_OPCODE_PLACEHOLDER_HALT:
      return "placeholder_halt";

   case FS_OPCODE_INTERPOLATE_AT_CENTROID:
      return "interp_centroid";
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
      return "interp_sample";
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
      return "interp_shared_offset";
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
      return "interp_per_slot_offset";

   case VS_OPCODE_URB_WRITE:
      return "vs_urb_write";
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return "pull_constant_load";
   case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
      return "pull_constant_load_gen7";

   case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
      return "set_simd4x2_header_gen9";

   case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
      return "unpack_flags_simd4x2";

   case GS_OPCODE_URB_WRITE:
      return "gs_urb_write";
   case GS_OPCODE_URB_WRITE_ALLOCATE:
      return "gs_urb_write_allocate";
   case GS_OPCODE_THREAD_END:
      return "gs_thread_end";
   case GS_OPCODE_SET_WRITE_OFFSET:
      return "set_write_offset";
   case GS_OPCODE_SET_VERTEX_COUNT:
      return "set_vertex_count";
   case GS_OPCODE_SET_DWORD_2:
      return "set_dword_2";
   case GS_OPCODE_PREPARE_CHANNEL_MASKS:
      return "prepare_channel_masks";
   case GS_OPCODE_SET_CHANNEL_MASKS:
      return "set_channel_masks";
   case GS_OPCODE_GET_INSTANCE_ID:
      return "get_instance_id";
   case GS_OPCODE_FF_SYNC:
      return "ff_sync";
   case GS_OPCODE_SET_PRIMITIVE_ID:
      return "set_primitive_id";
   case GS_OPCODE_SVB_WRITE:
      return "gs_svb_write";
   case GS_OPCODE_SVB_SET_DST_INDEX:
      return "gs_svb_set_dst_index";
   case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
      return "gs_ff_sync_set_primitives";
   case CS_OPCODE_CS_TERMINATE:
      return "cs_terminate";
   case SHADER_OPCODE_BARRIER:
      return "barrier";
   }

   unreachable("not reached");
}

bool
brw_saturate_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   union {
      unsigned ud;
      int d;
      float f;
   } imm = { reg->dw1.ud }, sat_imm = { 0 };
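
   /* Editor's note: the union lets the same 32-bit immediate payload be
    * viewed as unsigned, signed, or float, so each case below can CLAMP in
    * the source type while the final comparison and write-back go through
    * the raw .ud bits.
    */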

   switch (type) {
   case BRW_REGISTER_TYPE_UD:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
      /* Nothing to do. */
      return false;
   case BRW_REGISTER_TYPE_UW:
      sat_imm.ud = CLAMP(imm.ud, 0, USHRT_MAX);
      break;
   case BRW_REGISTER_TYPE_W:
      sat_imm.d = CLAMP(imm.d, SHRT_MIN, SHRT_MAX);
      break;
   case BRW_REGISTER_TYPE_F:
      sat_imm.f = CLAMP(imm.f, 0.0f, 1.0f);
      break;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_V:
   case BRW_REGISTER_TYPE_UV:
   case BRW_REGISTER_TYPE_VF:
      unreachable("unimplemented: saturate vector immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      unreachable("unimplemented: saturate DF/HF immediate");
   }

   if (imm.ud != sat_imm.ud) {
      reg->dw1.ud = sat_imm.ud;
      return true;
   }
   return false;
}

bool
brw_negate_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   switch (type) {
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      reg->dw1.d = -reg->dw1.d;
      return true;
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
      reg->dw1.d = -(int16_t)reg->dw1.ud;
      return true;
   case BRW_REGISTER_TYPE_F:
      reg->dw1.f = -reg->dw1.f;
      return true;
   case BRW_REGISTER_TYPE_VF:
      reg->dw1.ud ^= 0x80808080;
      return true;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_UV:
   case BRW_REGISTER_TYPE_V:
      assert(!"unimplemented: negate UV/V immediate");
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_Q:
      assert(!"unimplemented: negate UQ/Q immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      assert(!"unimplemented: negate DF/HF immediate");
   }

   return false;
}

bool
brw_abs_immediate(enum brw_reg_type type, struct brw_reg *reg)
{
   switch (type) {
   case BRW_REGISTER_TYPE_D:
      reg->dw1.d = abs(reg->dw1.d);
      return true;
   case BRW_REGISTER_TYPE_W:
      reg->dw1.d = abs((int16_t)reg->dw1.ud);
      return true;
   case BRW_REGISTER_TYPE_F:
      reg->dw1.f = fabsf(reg->dw1.f);
      return true;
   case BRW_REGISTER_TYPE_VF:
      reg->dw1.ud &= ~0x80808080;
      return true;
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_B:
      unreachable("no UB/B immediates");
   case BRW_REGISTER_TYPE_UQ:
   case BRW_REGISTER_TYPE_UD:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_UV:
      /* Presumably the absolute value modifier on an unsigned source is a
       * nop, but it would be nice to confirm.
       */
      assert(!"unimplemented: abs unsigned immediate");
   case BRW_REGISTER_TYPE_V:
      assert(!"unimplemented: abs V immediate");
   case BRW_REGISTER_TYPE_Q:
      assert(!"unimplemented: abs Q immediate");
   case BRW_REGISTER_TYPE_DF:
   case BRW_REGISTER_TYPE_HF:
      assert(!"unimplemented: abs DF/HF immediate");
   }

   return false;
}

backend_shader::backend_shader(const struct brw_compiler *compiler,
                               void *log_data,
                               void *mem_ctx,
                               struct gl_shader_program *shader_prog,
                               struct gl_program *prog,
                               struct brw_stage_prog_data *stage_prog_data,
                               gl_shader_stage stage)
   : compiler(compiler),
     log_data(log_data),
     devinfo(compiler->devinfo),
     shader(shader_prog ?
        (struct brw_shader *)shader_prog->_LinkedShaders[stage] : NULL),
     shader_prog(shader_prog),
     prog(prog),
     stage_prog_data(stage_prog_data),
     mem_ctx(mem_ctx),
     cfg(NULL),
     stage(stage)
{
   debug_enabled = INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage);
   stage_name = _mesa_shader_stage_to_string(stage);
   stage_abbrev = _mesa_shader_stage_to_abbrev(stage);
}

bool
backend_reg::is_zero() const
{
   if (file != IMM)
      return false;

   return fixed_hw_reg.dw1.d == 0;
}

bool
backend_reg::is_one() const
{
   if (file != IMM)
      return false;

   return type == BRW_REGISTER_TYPE_F
          ? fixed_hw_reg.dw1.f == 1.0
          : fixed_hw_reg.dw1.d == 1;
}

bool
backend_reg::is_negative_one() const
{
   if (file != IMM)
      return false;

   switch (type) {
   case BRW_REGISTER_TYPE_F:
      return fixed_hw_reg.dw1.f == -1.0;
   case BRW_REGISTER_TYPE_D:
      return fixed_hw_reg.dw1.d == -1;
   default:
      return false;
   }
}

bool
backend_reg::is_null() const
{
   return file == HW_REG &&
          fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
          fixed_hw_reg.nr == BRW_ARF_NULL;
}

bool
backend_reg::is_accumulator() const
{
   return file == HW_REG &&
          fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
          fixed_hw_reg.nr == BRW_ARF_ACCUMULATOR;
}

bool
backend_reg::in_range(const backend_reg &r, unsigned n) const
{
   return (file == r.file &&
           reg == r.reg &&
           reg_offset >= r.reg_offset &&
           reg_offset < r.reg_offset + n);
}

bool
backend_instruction::is_commutative() const
{
   switch (opcode) {
   case BRW_OPCODE_AND:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_XOR:
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_MUL:
      return true;
   case BRW_OPCODE_SEL:
      /* MIN and MAX are commutative. */
      if (conditional_mod == BRW_CONDITIONAL_GE ||
          conditional_mod == BRW_CONDITIONAL_L) {
         return true;
      }
      /* fallthrough */
   default:
      return false;
   }
}

bool
backend_instruction::is_3src() const
{
   return opcode < ARRAY_SIZE(opcode_descs) && opcode_descs[opcode].nsrc == 3;
}

bool
backend_instruction::is_tex() const
{
   return (opcode == SHADER_OPCODE_TEX ||
           opcode == FS_OPCODE_TXB ||
           opcode == SHADER_OPCODE_TXD ||
           opcode == SHADER_OPCODE_TXF ||
           opcode == SHADER_OPCODE_TXF_CMS ||
           opcode == SHADER_OPCODE_TXF_UMS ||
           opcode == SHADER_OPCODE_TXF_MCS ||
           opcode == SHADER_OPCODE_TXL ||
           opcode == SHADER_OPCODE_TXS ||
           opcode == SHADER_OPCODE_LOD ||
           opcode == SHADER_OPCODE_TG4 ||
           opcode == SHADER_OPCODE_TG4_OFFSET);
}

bool
backend_instruction::is_math() const
{
   return (opcode == SHADER_OPCODE_RCP ||
           opcode == SHADER_OPCODE_RSQ ||
           opcode == SHADER_OPCODE_SQRT ||
           opcode == SHADER_OPCODE_EXP2 ||
           opcode == SHADER_OPCODE_LOG2 ||
           opcode == SHADER_OPCODE_SIN ||
           opcode == SHADER_OPCODE_COS ||
           opcode == SHADER_OPCODE_INT_QUOTIENT ||
           opcode == SHADER_OPCODE_INT_REMAINDER ||
           opcode == SHADER_OPCODE_POW);
}

bool
backend_instruction::is_control_flow() const
{
   switch (opcode) {
   case BRW_OPCODE_DO:
   case BRW_OPCODE_WHILE:
   case BRW_OPCODE_IF:
   case BRW_OPCODE_ELSE:
   case BRW_OPCODE_ENDIF:
   case BRW_OPCODE_BREAK:
   case BRW_OPCODE_CONTINUE:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::can_do_source_mods() const
{
   switch (opcode) {
   case BRW_OPCODE_ADDC:
   case BRW_OPCODE_BFE:
   case BRW_OPCODE_BFI1:
   case BRW_OPCODE_BFI2:
   case BRW_OPCODE_BFREV:
   case BRW_OPCODE_CBIT:
   case BRW_OPCODE_FBH:
   case BRW_OPCODE_FBL:
   case BRW_OPCODE_SUBB:
      return false;
   default:
      return true;
   }
}

bool
backend_instruction::can_do_saturate() const
{
   switch (opcode) {
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_ASR:
   case BRW_OPCODE_AVG:
   case BRW_OPCODE_DP2:
   case BRW_OPCODE_DP3:
   case BRW_OPCODE_DP4:
   case BRW_OPCODE_DPH:
   case BRW_OPCODE_F16TO32:
   case BRW_OPCODE_F32TO16:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_LRP:
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MAD:
   case BRW_OPCODE_MATH:
   case BRW_OPCODE_MOV:
   case BRW_OPCODE_MUL:
   case BRW_OPCODE_PLN:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDZ:
   case BRW_OPCODE_SEL:
   case BRW_OPCODE_SHL:
   case BRW_OPCODE_SHR:
   case FS_OPCODE_LINTERP:
   case SHADER_OPCODE_COS:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_SQRT:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::can_do_cmod() const
{
   switch (opcode) {
   case BRW_OPCODE_ADD:
   case BRW_OPCODE_ADDC:
   case BRW_OPCODE_AND:
   case BRW_OPCODE_ASR:
   case BRW_OPCODE_AVG:
   case BRW_OPCODE_CMP:
   case BRW_OPCODE_CMPN:
   case BRW_OPCODE_DP2:
   case BRW_OPCODE_DP3:
   case BRW_OPCODE_DP4:
   case BRW_OPCODE_DPH:
   case BRW_OPCODE_F16TO32:
   case BRW_OPCODE_F32TO16:
   case BRW_OPCODE_FRC:
   case BRW_OPCODE_LINE:
   case BRW_OPCODE_LRP:
   case BRW_OPCODE_LZD:
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MACH:
   case BRW_OPCODE_MAD:
   case BRW_OPCODE_MOV:
   case BRW_OPCODE_MUL:
   case BRW_OPCODE_NOT:
   case BRW_OPCODE_OR:
   case BRW_OPCODE_PLN:
   case BRW_OPCODE_RNDD:
   case BRW_OPCODE_RNDE:
   case BRW_OPCODE_RNDU:
   case BRW_OPCODE_RNDZ:
   case BRW_OPCODE_SAD2:
   case BRW_OPCODE_SADA2:
   case BRW_OPCODE_SHL:
   case BRW_OPCODE_SHR:
   case BRW_OPCODE_SUBB:
   case BRW_OPCODE_XOR:
   case FS_OPCODE_CINTERP:
   case FS_OPCODE_LINTERP:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::reads_accumulator_implicitly() const
{
   switch (opcode) {
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MACH:
   case BRW_OPCODE_SADA2:
      return true;
   default:
      return false;
   }
}

bool
backend_instruction::writes_accumulator_implicitly(const struct brw_device_info *devinfo) const
{
   return writes_accumulator ||
          (devinfo->gen < 6 &&
           ((opcode >= BRW_OPCODE_ADD && opcode < BRW_OPCODE_NOP) ||
            (opcode >= FS_OPCODE_DDX_COARSE && opcode <= FS_OPCODE_LINTERP &&
             opcode != FS_OPCODE_CINTERP)));
}

bool
backend_instruction::has_side_effects() const
{
   switch (opcode) {
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case SHADER_OPCODE_MEMORY_FENCE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case FS_OPCODE_FB_WRITE:
   case SHADER_OPCODE_BARRIER:
      return true;
   default:
      return false;
   }
}
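
/* Editor's note: passes such as dead-code elimination consult this to avoid
 * deleting instructions whose effects are visible outside the shader
 * invocation (memory writes, atomics, fences, framebuffer/URB writes).
 */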

#ifndef NDEBUG
static bool
inst_is_in_block(const bblock_t *block, const backend_instruction *inst)
{
   bool found = false;
   foreach_inst_in_block (backend_instruction, i, block) {
      if (inst == i) {
         found = true;
      }
   }
   return found;
}
#endif

static void
adjust_later_block_ips(bblock_t *start_block, int ip_adjustment)
{
   for (bblock_t *block_iter = start_block->next();
        !block_iter->link.is_tail_sentinel();
        block_iter = block_iter->next()) {
      block_iter->start_ip += ip_adjustment;
      block_iter->end_ip += ip_adjustment;
   }
}
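
/* Editor's note: each basic block caches the instruction indices (IPs) of
 * its first and last instruction; inserting or removing an instruction in
 * one block shifts every later block's range by a constant, which the
 * helper above applies so the cached IPs stay consistent.
 */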

void
backend_instruction::insert_after(bblock_t *block, backend_instruction *inst)
{
   if (!this->is_head_sentinel())
      assert(inst_is_in_block(block, this) || !"Instruction not in block");

   block->end_ip++;

   adjust_later_block_ips(block, 1);

   exec_node::insert_after(inst);
}

void
backend_instruction::insert_before(bblock_t *block, backend_instruction *inst)
{
   if (!this->is_tail_sentinel())
      assert(inst_is_in_block(block, this) || !"Instruction not in block");

   block->end_ip++;

   adjust_later_block_ips(block, 1);

   exec_node::insert_before(inst);
}

void
backend_instruction::insert_before(bblock_t *block, exec_list *list)
{
   assert(inst_is_in_block(block, this) || !"Instruction not in block");

   unsigned num_inst = list->length();

   block->end_ip += num_inst;

   adjust_later_block_ips(block, num_inst);

   exec_node::insert_before(list);
}

void
backend_instruction::remove(bblock_t *block)
{
   assert(inst_is_in_block(block, this) || !"Instruction not in block");

   adjust_later_block_ips(block, -1);

   if (block->start_ip == block->end_ip) {
      block->cfg->remove_block(block);
   } else {
      block->end_ip--;
   }

   exec_node::remove();
}

void
backend_shader::dump_instructions()
{
   dump_instructions(NULL);
}

void
backend_shader::dump_instructions(const char *name)
{
   FILE *file = stderr;
   if (name && geteuid() != 0) {
      file = fopen(name, "w");
      if (!file)
         file = stderr;
   }

   if (cfg) {
      int ip = 0;
      foreach_block_and_inst(block, backend_instruction, inst, cfg) {
         fprintf(file, "%4d: ", ip++);
         dump_instruction(inst, file);
      }
   } else {
      int ip = 0;
      foreach_in_list(backend_instruction, inst, &instructions) {
         fprintf(file, "%4d: ", ip++);
         dump_instruction(inst, file);
      }
   }

   if (file != stderr) {
      fclose(file);
   }
}

void
backend_shader::calculate_cfg()
{
   if (this->cfg)
      return;
   cfg = new(mem_ctx) cfg_t(&this->instructions);
}

void
backend_shader::invalidate_cfg()
{
   ralloc_free(this->cfg);
   this->cfg = NULL;
}

/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
 * unused but also make sure that addition of small offsets to them will
 * trigger some of our asserts that surface indices are < BRW_MAX_SURFACES.
 */
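/* Example layout (editor's sketch, for a stage with 4 textures, 2 UBOs, and
 * no gather/atomics/images/shader-time): texture_start = 0, ubo_start = 4,
 * pull_constants_start = 6, leaving next_binding_table_offset = 7; every
 * unused group reads back as 0xd0d0d0d0.
 */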
void
backend_shader::assign_common_binding_table_offsets(uint32_t next_binding_table_offset)
{
   int num_textures = _mesa_fls(prog->SamplersUsed);

   stage_prog_data->binding_table.texture_start = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   if (shader) {
      stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += shader->base.NumUniformBlocks;
   } else {
      stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
      next_binding_table_offset++;
   } else {
      stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
   }

   if (prog->UsesGather) {
      if (devinfo->gen >= 8) {
         stage_prog_data->binding_table.gather_texture_start =
            stage_prog_data->binding_table.texture_start;
      } else {
         stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
         next_binding_table_offset += num_textures;
      }
   } else {
      stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (shader_prog && shader_prog->NumAtomicBuffers) {
      stage_prog_data->binding_table.abo_start = next_binding_table_offset;
      next_binding_table_offset += shader_prog->NumAtomicBuffers;
   } else {
      stage_prog_data->binding_table.abo_start = 0xd0d0d0d0;
   }

   if (shader && shader->base.NumImages) {
      stage_prog_data->binding_table.image_start = next_binding_table_offset;
      next_binding_table_offset += shader->base.NumImages;
   } else {
      stage_prog_data->binding_table.image_start = 0xd0d0d0d0;
   }

   /* This may or may not be used depending on how the compile goes. */
   stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
   next_binding_table_offset++;

   assert(next_binding_table_offset <= BRW_MAX_SURFACES);

   /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */
}