/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_shader.h"
#include "glsl/nir/glsl_to_nir.h"
#include "glsl/nir/nir_builder.h"
#include "program/prog_to_nir.h"

static bool
is_input(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input;
}

static bool
is_output(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}

/**
 * In many cases, we just add the base and offset together, so there's no
 * reason to keep them separate.  Sometimes, combining them is essential:
 * if a shader only accesses part of a compound variable (such as a matrix
 * or array), the variable's base may not actually exist in the VUE map.
 *
 * This pass adds constant offsets to instr->const_index[0], and resets
 * the offset source to 0.  Non-constant offsets remain unchanged - since
 * we don't know what part of a compound variable is accessed, we allocate
 * storage for the entire thing.
 */
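/* Illustrative example (not from the original comments): a load_input with
 * const_index[0] == 2 and a constant offset source of 3 becomes a load_input
 * with const_index[0] == 5 and an offset source of 0, while a load_input
 * with a non-constant offset source is left alone.
 */
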
struct add_const_offset_to_base_params {
   nir_builder b;
   nir_variable_mode mode;
};

static bool
add_const_offset_to_base(nir_block *block, void *closure)
{
   struct add_const_offset_to_base_params *params = closure;
   nir_builder *b = &params->b;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if ((params->mode == nir_var_shader_in && is_input(intrin)) ||
          (params->mode == nir_var_shader_out && is_output(intrin))) {
         nir_src *offset = nir_get_io_offset_src(intrin);
         nir_const_value *const_offset = nir_src_as_const_value(*offset);

         if (const_offset) {
            intrin->const_index[0] += const_offset->u[0];
            b->cursor = nir_before_instr(&intrin->instr);
            nir_instr_rewrite_src(&intrin->instr, offset,
                                  nir_src_for_ssa(nir_imm_int(b, 0)));
         }
      }
   }
   return true;
}

static bool
remap_vs_attrs(nir_block *block, void *closure)
{
   GLbitfield64 inputs_read = *((GLbitfield64 *) closure);

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if (intrin->intrinsic == nir_intrinsic_load_input) {
         /* Attributes come in a contiguous block, ordered by their
          * gl_vert_attrib value.  That means we can compute the slot
          * number for an attribute by masking out the enabled attributes
          * before it and counting the bits.
          */
         int attr = intrin->const_index[0];
         int slot = _mesa_bitcount_64(inputs_read & BITFIELD64_MASK(attr));

         intrin->const_index[0] = 4 * slot;
      }
   }
   return true;
}

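/* Worked example of the remapping above (illustrative, not from the original
 * comments): if inputs_read has bits 0, 2, and 5 set, then the attribute at
 * VERT_ATTRIB slot 5 masks out everything below bit 5, leaving bits 0 and 2,
 * so _mesa_bitcount_64() yields slot 2 and const_index[0] becomes 8.
 */
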
static bool
remap_inputs_with_vue_map(nir_block *block, void *closure)
{
   const struct brw_vue_map *vue_map = closure;

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if (intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
         assert(vue_slot != -1);
         intrin->const_index[0] = vue_slot;
      }
   }
   return true;
}

struct remap_patch_urb_offsets_state {
   nir_builder b;
   struct brw_vue_map vue_map;
};

static bool
remap_patch_urb_offsets(nir_block *block, void *closure)
{
   struct remap_patch_urb_offsets_state *state = closure;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      gl_shader_stage stage = state->b.shader->stage;

      if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
          (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
         int vue_slot = state->vue_map.varying_to_slot[intrin->const_index[0]];
         assert(vue_slot != -1);
         intrin->const_index[0] = vue_slot;

         nir_src *vertex = nir_get_io_vertex_index_src(intrin);
         if (vertex) {
            nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
            if (const_vertex) {
               intrin->const_index[0] += const_vertex->u[0] *
                                         state->vue_map.num_per_vertex_slots;
            } else {
               state->b.cursor = nir_before_instr(&intrin->instr);

               /* Multiply by the number of per-vertex slots. */
               nir_ssa_def *vertex_offset =
                  nir_imul(&state->b,
                           nir_ssa_for_src(&state->b, *vertex, 1),
                           nir_imm_int(&state->b,
                                       state->vue_map.num_per_vertex_slots));

               /* Add it to the existing offset */
               nir_src *offset = nir_get_io_offset_src(intrin);
               nir_ssa_def *total_offset =
                  nir_iadd(&state->b, vertex_offset,
                           nir_ssa_for_src(&state->b, *offset, 1));

               nir_instr_rewrite_src(&intrin->instr, offset,
                                     nir_src_for_ssa(total_offset));
            }
         }
      }
   }
   return true;
}

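/* Illustrative summary of the offset computed above (not from the original
 * comments): for a per-vertex access, the final URB offset works out to
 *
 *    vue_slot + vertex_index * num_per_vertex_slots + offset
 *
 * with the vertex_index term folded into const_index[0] when it is a
 * compile-time constant, and emitted as imul/iadd instructions otherwise.
 */
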
static void
brw_nir_lower_inputs(nir_shader *nir,
                     const struct brw_device_info *devinfo,
                     bool is_scalar)
{
   struct add_const_offset_to_base_params params = {
      .mode = nir_var_shader_in
   };

   switch (nir->stage) {
   case MESA_SHADER_VERTEX:
      /* Start with the location of the variable's base. */
      foreach_list_typed(nir_variable, var, node, &nir->inputs) {
         var->data.driver_location = var->data.location;
      }

      /* Now use nir_lower_io to walk dereference chains.  Attribute arrays
       * are loaded as one vec4 per element (or matrix column), so we use
       * type_size_vec4 here.
       */
      nir_lower_io(nir, nir_var_shader_in, type_size_vec4);

      if (is_scalar) {
         /* Finally, translate VERT_ATTRIB_* values into the actual registers.
          *
          * Note that we can use nir->info.inputs_read instead of
          * key->inputs_read since the two are identical aside from Gen4-5
          * edge flag differences.
          */
         GLbitfield64 inputs_read = nir->info.inputs_read;

         /* This pass needs actual constants */
         nir_opt_constant_folding(nir);

         nir_foreach_overload(nir, overload) {
            if (overload->impl) {
               nir_builder_init(&params.b, overload->impl);
               nir_foreach_block(overload->impl, add_const_offset_to_base,
                                 &params);
               nir_foreach_block(overload->impl, remap_vs_attrs, &inputs_read);
            }
         }
      }
      break;
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_GEOMETRY: {
      if (!is_scalar && nir->stage == MESA_SHADER_GEOMETRY) {
         foreach_list_typed(nir_variable, var, node, &nir->inputs) {
            var->data.driver_location = var->data.location;
         }
      } else {
         /* The GLSL linker will have already matched up GS inputs and
          * the outputs of prior stages.  The driver does extend VS outputs
          * in some cases, but only for legacy OpenGL or Gen4-5 hardware,
          * neither of which offer geometry shader support.  So we can
          * safely ignore that.
          *
          * For SSO pipelines, we use a fixed VUE map layout based on variable
          * locations, so we can rely on rendezvous-by-location to make this
          * work.
          *
          * However, we need to ignore VARYING_SLOT_PRIMITIVE_ID, as it's not
          * written by previous stages and shows up via payload magic.
          */
         struct brw_vue_map input_vue_map;
         GLbitfield64 inputs_read =
            nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID;
         brw_compute_vue_map(devinfo, &input_vue_map, inputs_read,
                             nir->info.separate_shader ||
                             nir->stage == MESA_SHADER_TESS_CTRL);

         foreach_list_typed(nir_variable, var, node, &nir->inputs) {
            var->data.driver_location = var->data.location;
         }

         /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
         nir_lower_io(nir, nir_var_shader_in, type_size_vec4);

         /* This pass needs actual constants */
         nir_opt_constant_folding(nir);

         nir_foreach_overload(nir, overload) {
            if (overload->impl) {
               nir_builder_init(&params.b, overload->impl);
               nir_foreach_block(overload->impl, add_const_offset_to_base,
                                 &params);
               nir_foreach_block(overload->impl, remap_inputs_with_vue_map,
                                 &input_vue_map);
            }
         }
      }
      break;
   }
   case MESA_SHADER_TESS_EVAL: {
      struct remap_patch_urb_offsets_state state;
      brw_compute_tess_vue_map(&state.vue_map,
                               nir->info.inputs_read &
                                  ~VARYING_BIT_PRIMITIVE_ID,
                               nir->info.patch_inputs_read);

      foreach_list_typed(nir_variable, var, node, &nir->inputs) {
         var->data.driver_location = var->data.location;
      }

      nir_lower_io(nir, nir_var_shader_in, type_size_vec4);

      /* This pass needs actual constants */
      nir_opt_constant_folding(nir);

      nir_foreach_overload(nir, overload) {
         if (overload->impl) {
            nir_builder_init(&params.b, overload->impl);
            nir_foreach_block(overload->impl, add_const_offset_to_base,
                              &params);
            nir_builder_init(&state.b, overload->impl);
            nir_foreach_block(overload->impl, remap_patch_urb_offsets, &state);
         }
      }
      break;
   }
   case MESA_SHADER_FRAGMENT:
      nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
                               type_size_scalar);
      break;
   case MESA_SHADER_COMPUTE:
      /* Compute shaders have no inputs. */
      assert(exec_list_is_empty(&nir->inputs));
      break;
   default:
      unreachable("unsupported shader stage");
   }
}

static void
brw_nir_lower_outputs(nir_shader *nir,
                      const struct brw_device_info *devinfo,
                      bool is_scalar)
{
   switch (nir->stage) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_GEOMETRY:
      if (is_scalar) {
         nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                                  type_size_vec4_times_4);
         nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4);
      } else {
         nir_foreach_variable(var, &nir->outputs)
            var->data.driver_location = var->data.location;
      }
      break;
   case MESA_SHADER_TESS_CTRL: {
      struct add_const_offset_to_base_params params = {
         .mode = nir_var_shader_out
      };

      struct remap_patch_urb_offsets_state state;
      brw_compute_tess_vue_map(&state.vue_map, nir->info.outputs_written,
                               nir->info.patch_outputs_written);

      nir_foreach_variable(var, &nir->outputs) {
         var->data.driver_location = var->data.location;
      }

      nir_lower_io(nir, nir_var_shader_out, type_size_vec4);

      /* This pass needs actual constants */
      nir_opt_constant_folding(nir);

      nir_foreach_overload(nir, overload) {
         if (overload->impl) {
            nir_builder_init(&params.b, overload->impl);
            nir_foreach_block(overload->impl, add_const_offset_to_base,
                              &params);
            nir_builder_init(&state.b, overload->impl);
            nir_foreach_block(overload->impl, remap_patch_urb_offsets, &state);
         }
      }
      break;
   }
   case MESA_SHADER_FRAGMENT:
      nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                               type_size_scalar);
      break;
   case MESA_SHADER_COMPUTE:
      /* Compute shaders have no outputs. */
      assert(exec_list_is_empty(&nir->outputs));
      break;
   default:
      unreachable("unsupported shader stage");
   }
}

static int
type_size_scalar_bytes(const struct glsl_type *type)
{
   return type_size_scalar(type) * 4;
}

static int
type_size_vec4_bytes(const struct glsl_type *type)
{
   return type_size_vec4(type) * 16;
}

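/* Illustrative sizes (not from the original comments): assuming
 * type_size_scalar() counts scalar components and type_size_vec4() counts
 * vec4 slots, a mat3 uniform is 9 components, so 9 * 4 = 36 bytes in scalar
 * mode, but occupies 3 vec4 slots, so 3 * 16 = 48 bytes in vec4 mode.
 */
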
void
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_scalar_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_vec4_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes);
   }
}

#include "util/debug.h"

static bool
should_clone_nir()
{
   static int should_clone = -1;
   if (should_clone < 0)
      should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);

   return should_clone;
}

#define _OPT(do_pass) (({                                            \
   bool this_progress = true;                                        \
   do_pass                                                           \
   nir_validate_shader(nir);                                         \
   if (should_clone_nir()) {                                         \
      nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
      ralloc_free(nir);                                              \
      nir = clone;                                                   \
   }                                                                 \
   this_progress;                                                    \
}))

#define OPT(pass, ...) _OPT(                      \
   nir_metadata_set_validation_flag(nir);         \
   this_progress = pass(nir, ##__VA_ARGS__);      \
   if (this_progress) {                           \
      progress = true;                            \
      nir_metadata_check_validation_flag(nir);    \
   }                                              \
)

#define OPT_V(pass, ...) _OPT(                    \
   pass(nir, ##__VA_ARGS__);                      \
)

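/* Usage note (illustrative, not part of the original file): callers declare a
 * local "bool progress;" which OPT() sets to true whenever the wrapped pass
 * reports progress, e.g.
 *
 *    bool progress;
 *    OPT(nir_opt_algebraic);
 *
 * OPT_V() wraps passes whose result is ignored.  Both variants go through
 * _OPT(), which re-validates the shader (and optionally swaps it for a clone
 * when NIR_TEST_CLONE is set) after every pass.
 */
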
static nir_shader *
nir_optimize(nir_shader *nir, bool is_scalar)
{
   bool progress;
   do {
      progress = false;

      OPT_V(nir_lower_vars_to_ssa);

      if (is_scalar) {
         OPT_V(nir_lower_alu_to_scalar);
      }

      if (is_scalar) {
         OPT_V(nir_lower_phis_to_scalar);
      }

      OPT(nir_opt_peephole_select);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);
      OPT(nir_opt_dead_cf);
      OPT(nir_opt_remove_phis);
   } while (progress);

   return nir;
}

/* Does some simple lowering and runs the standard suite of optimizations
 *
 * This is intended to be called more-or-less directly after you get the
 * shader out of GLSL or some other source.  While it is geared towards i965,
 * it is not at all generator-specific except for the is_scalar flag.  Even
 * there, it is safe to call with is_scalar = false for a shader that is
 * intended for the FS backend as long as nir_optimize is called again with
 * is_scalar = true to scalarize everything prior to code gen.
 */
nir_shader *
brw_preprocess_nir(nir_shader *nir, bool is_scalar)
{
   bool progress; /* Written by OPT and OPT_V */
   (void)progress;

   if (nir->stage == MESA_SHADER_GEOMETRY)
      OPT(nir_lower_gs_intrinsics);

   static const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
   };

   OPT(nir_lower_tex, &tex_options);
   OPT(nir_normalize_cubemap_coords);

   OPT(nir_lower_global_vars_to_local);

   OPT(nir_split_var_copies);

   nir = nir_optimize(nir, is_scalar);

   /* Lower a bunch of stuff */
   OPT_V(nir_lower_var_copies);

   /* Get rid of split copies */
   nir = nir_optimize(nir, is_scalar);

   OPT(nir_remove_dead_variables);

   return nir;
}

/** Lower input and output loads and stores for i965. */
nir_shader *
brw_nir_lower_io(nir_shader *nir,
                 const struct brw_device_info *devinfo,
                 bool is_scalar)
{
   bool progress; /* Written by OPT and OPT_V */
   (void)progress;

   OPT_V(brw_nir_lower_inputs, devinfo, is_scalar);
   OPT_V(brw_nir_lower_outputs, devinfo, is_scalar);
   OPT_V(nir_lower_io, nir_var_all,
         is_scalar ? type_size_scalar : type_size_vec4);

   return nir_optimize(nir, is_scalar);
}

/* Prepare the given shader for codegen
 *
 * This function is intended to be called right before going into the actual
 * backend and is highly backend-specific.  Also, once this function has been
 * called on a shader, it will no longer be in SSA form so most optimizations
 * will not work.
 */
nir_shader *
brw_postprocess_nir(nir_shader *nir,
                    const struct brw_device_info *devinfo,
                    bool is_scalar)
{
   bool debug_enabled =
      (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->stage));

   bool progress; /* Written by OPT and OPT_V */
   (void)progress;

   if (devinfo->gen >= 6) {
      /* Try and fuse multiply-adds */
      OPT(brw_nir_opt_peephole_ffma);
   }

   OPT(nir_opt_algebraic_late);

   OPT(nir_lower_locals_to_regs);

   OPT_V(nir_lower_to_source_mods);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_overload(nir, overload) {
         if (overload->impl)
            nir_index_ssa_defs(overload->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->stage));
      nir_print_shader(nir, stderr);
   }

   OPT_V(nir_convert_from_ssa, true);

   if (!is_scalar) {
      OPT_V(nir_move_vec_src_uses_to_dest);
      OPT(nir_lower_vec_to_movs);
   }

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (devinfo->gen <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->stage));
      nir_print_shader(nir, stderr);
   }

   return nir;
}

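/* Rough sketch of how these entry points fit together (based on the comments
 * above, not a comment from the original file): brw_create_nir() below runs
 * brw_preprocess_nir() and, for most stages, brw_nir_lower_io(); the backend
 * then does its own lowering and presumably calls brw_postprocess_nir() right
 * before code generation, e.g.
 *
 *    nir = brw_preprocess_nir(nir, is_scalar);
 *    nir = brw_nir_lower_io(nir, devinfo, is_scalar);
 *    ...backend-specific lowering...
 *    nir = brw_postprocess_nir(nir, devinfo, is_scalar);
 */
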
nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               const struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   bool progress; /* Written by OPT and OPT_V */
   (void)progress;
   nir_shader *nir;

   /* First, lower the GLSL IR or Mesa IR to NIR */
   if (shader_prog) {
      nir = glsl_to_nir(shader_prog, stage, options);

      if (nir->stage == MESA_SHADER_TESS_EVAL &&
          shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]) {
         const struct gl_program *tcs =
            shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]->Program;
         /* Work around the TCS having bonus outputs used as shared memory
          * segments, which makes OutputsWritten not match InputsRead
          */
         nir->info.inputs_read = tcs->OutputsWritten;
         nir->info.patch_inputs_read = tcs->PatchOutputsWritten;
      }
   } else {
      nir = prog_to_nir(prog, options);
      OPT_V(nir_convert_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   nir = brw_preprocess_nir(nir, is_scalar);

   OPT(nir_lower_system_values);
   OPT_V(brw_nir_lower_uniforms, is_scalar);

   if (shader_prog) {
      OPT_V(nir_lower_samplers, shader_prog);
      OPT_V(nir_lower_atomics, shader_prog);
   }

   if (nir->stage != MESA_SHADER_TESS_CTRL &&
       nir->stage != MESA_SHADER_TESS_EVAL) {
      nir = brw_nir_lower_io(nir, devinfo, is_scalar);
   }

   return nir;
}

nir_shader *
brw_nir_apply_sampler_key(nir_shader *nir,
                          const struct brw_device_info *devinfo,
                          const struct brw_sampler_prog_key_data *key_tex,
                          bool is_scalar)
{
   nir_lower_tex_options tex_options = { 0 };

   /* Iron Lake and prior require lowering of all rectangle textures */
   if (devinfo->gen < 6)
      tex_options.lower_rect = true;

   /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
   if (devinfo->gen < 8) {
      tex_options.saturate_s = key_tex->gl_clamp_mask[0];
      tex_options.saturate_t = key_tex->gl_clamp_mask[1];
      tex_options.saturate_r = key_tex->gl_clamp_mask[2];
   }

   /* Prior to Haswell, we have to fake texture swizzle */
   for (unsigned s = 0; s < MAX_SAMPLERS; s++) {
      if (key_tex->swizzles[s] == SWIZZLE_NOOP)
         continue;

      tex_options.swizzle_result |= (1 << s);
      for (unsigned c = 0; c < 4; c++)
         tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
   }
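
   /* Illustrative example (not from the original comments): for a sampler s
    * whose key swizzle is MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X,
    * SWIZZLE_ONE) - e.g. a luminance-style view of a single-channel texture -
    * the loop above sets bit s in swizzle_result and fills
    * tex_options.swizzles[s] with {SWIZZLE_X, SWIZZLE_X, SWIZZLE_X,
    * SWIZZLE_ONE}, since GET_SWZ() extracts one channel selector at a time.
    */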
   if (nir_lower_tex(nir, &tex_options)) {
      nir_validate_shader(nir);
      nir = nir_optimize(nir, is_scalar);
   }

   return nir;
}

enum brw_reg_type
brw_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_uint:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}

/* Returns the glsl_base_type corresponding to a nir_alu_type.
 * This is used by both brw_vec4_nir and brw_fs_nir.
 */
enum glsl_base_type
brw_glsl_base_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_float:
      return GLSL_TYPE_FLOAT;

   case nir_type_int:
      return GLSL_TYPE_INT;

   case nir_type_uint:
      return GLSL_TYPE_UINT;

   default:
      unreachable("bad type");
   }
}