/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_nir.h"
#include "brw_shader.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
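
/* Helpers for classifying the NIR intrinsics that read or write shader
 * inputs and outputs, used by the remapping passes below.
 */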
static bool
is_input(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input;
}
static bool
is_output(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}
/**
 * In many cases, we just add the base and offset together, so there's no
 * reason to keep them separate.  Sometimes, combining them is essential:
 * if a shader only accesses part of a compound variable (such as a matrix
 * or array), the variable's base may not actually exist in the VUE map.
 *
 * This pass adds constant offsets to instr->const_index[0], and resets
 * the offset source to 0.  Non-constant offsets remain unchanged - since
 * we don't know what part of a compound variable is accessed, we allocate
 * storage for the entire thing.
 */
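/* For example (illustrative values only): a load_input with
 * const_index[0] == 7 and a constant offset source of 2 becomes a
 * load_input with const_index[0] == 9 and an offset source of 0.
 */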
static bool
add_const_offset_to_base_block(nir_block *block, nir_builder *b,
                               nir_variable_mode mode)
{
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if ((mode == nir_var_shader_in && is_input(intrin)) ||
          (mode == nir_var_shader_out && is_output(intrin))) {
         nir_src *offset = nir_get_io_offset_src(intrin);
         nir_const_value *const_offset = nir_src_as_const_value(*offset);

         if (const_offset) {
            intrin->const_index[0] += const_offset->u32[0];
            b->cursor = nir_before_instr(&intrin->instr);
            nir_instr_rewrite_src(&intrin->instr, offset,
                                  nir_src_for_ssa(nir_imm_int(b, 0)));
         }
      }
   }
   return true;
}
static void
add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
{
   nir_foreach_function(f, nir) {
      if (f->impl) {
         nir_builder b;
         nir_builder_init(&b, f->impl);
         nir_foreach_block(block, f->impl) {
            add_const_offset_to_base_block(block, &b, mode);
         }
      }
   }
}
static bool
remap_vs_attrs(nir_block *block, struct nir_shader_info *nir_info)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if (intrin->intrinsic == nir_intrinsic_load_input) {
         /* Attributes come in a contiguous block, ordered by their
          * gl_vert_attrib value.  That means we can compute the slot
          * number for an attribute by masking out the enabled attributes
          * before it and counting the bits.
          */
         int attr = intrin->const_index[0];
         int slot = _mesa_bitcount_64(nir_info->inputs_read &
                                      BITFIELD64_MASK(attr));
         int dslot = _mesa_bitcount_64(nir_info->double_inputs_read &
                                       BITFIELD64_MASK(attr));
         intrin->const_index[0] = 4 * (slot + dslot);
      }
   }
   return true;
}
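/* Rewrites input load intrinsics so that const_index[0] holds a VUE map
 * slot rather than a varying location, using vue_map->varying_to_slot[].
 */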
static bool
remap_inputs_with_vue_map(nir_block *block, const struct brw_vue_map *vue_map)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if (intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
         assert(vue_slot != -1);
         intrin->const_index[0] = vue_slot;
      }
   }
   return true;
}
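/* Remaps tessellation patch URB accesses from varying locations to VUE map
 * slots.  Per-vertex data is laid out vertex after vertex, so a per-vertex
 * access ends up at roughly vertex_index * num_per_vertex_slots + slot:
 * folded into const_index[0] when the vertex index is a constant, and
 * built with ALU instructions otherwise.
 */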
static bool
remap_patch_urb_offsets(nir_block *block, nir_builder *b,
                        const struct brw_vue_map *vue_map)
{
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      gl_shader_stage stage = b->shader->stage;

      if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
          (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
         assert(vue_slot != -1);
         intrin->const_index[0] = vue_slot;

         nir_src *vertex = nir_get_io_vertex_index_src(intrin);
         if (vertex) {
            nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
            if (const_vertex) {
               intrin->const_index[0] += const_vertex->u32[0] *
                                         vue_map->num_per_vertex_slots;
            } else {
               b->cursor = nir_before_instr(&intrin->instr);

               /* Multiply by the number of per-vertex slots. */
               nir_ssa_def *vertex_offset =
                  nir_imul(b,
                           nir_ssa_for_src(b, *vertex, 1),
                           nir_imm_int(b,
                                       vue_map->num_per_vertex_slots));

               /* Add it to the existing offset */
               nir_src *offset = nir_get_io_offset_src(intrin);
               nir_ssa_def *total_offset =
                  nir_iadd(b, vertex_offset,
                           nir_ssa_for_src(b, *offset, 1));

               nir_instr_rewrite_src(&intrin->instr, offset,
                                     nir_src_for_ssa(total_offset));
            }
         }
      }
   }
   return true;
}
void
brw_nir_lower_vs_inputs(nir_shader *nir,
                        const struct brw_device_info *devinfo,
                        bool is_scalar,
                        bool use_legacy_snorm_formula,
                        const uint8_t *vs_attrib_wa_flags)
{
   /* Start with the location of the variable's base. */
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   /* Now use nir_lower_io to walk dereference chains.  Attribute arrays are
    * loaded as one vec4 or dvec4 per element (or matrix column), depending on
    * whether it is a double-precision type or not.
    */
   nir_lower_io(nir, nir_var_shader_in, type_size_vs_input);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);

   brw_nir_apply_attribute_workarounds(nir, use_legacy_snorm_formula,
                                       vs_attrib_wa_flags);

   /* Finally, translate VERT_ATTRIB_* values into the actual registers. */
   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_foreach_block(block, function->impl) {
            remap_vs_attrs(block, &nir->info);
         }
      }
   }
}
void
brw_nir_lower_vue_inputs(nir_shader *nir, bool is_scalar,
                         const struct brw_vue_map *vue_map)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4);

   if (is_scalar || nir->stage != MESA_SHADER_GEOMETRY) {
      /* This pass needs actual constants */
      nir_opt_constant_folding(nir);

      add_const_offset_to_base(nir, nir_var_shader_in);

      nir_foreach_function(function, nir) {
         if (function->impl) {
            nir_foreach_block(block, function->impl) {
               remap_inputs_with_vue_map(block, vue_map);
            }
         }
      }
   }
}
void
brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map);
         }
      }
   }
}
void
brw_nir_lower_fs_inputs(nir_shader *nir)
{
   nir_assign_var_locations(&nir->inputs, &nir->num_inputs, type_size_scalar);
   nir_lower_io(nir, nir_var_shader_in, type_size_scalar);
}
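/* Lowers VUE (vertex/geometry/tessellation) outputs.  The scalar back-end
 * assigns locations at component granularity (type_size_vec4_times_4),
 * while the vec4 back-end keeps outputs in vec4 slots addressed by their
 * varying location.
 */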
void
brw_nir_lower_vue_outputs(nir_shader *nir,
                          bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                               type_size_vec4_times_4);
      nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4);
   } else {
      nir_foreach_variable(var, &nir->outputs)
         var->data.driver_location = var->data.location;
      nir_lower_io(nir, nir_var_shader_out, type_size_vec4);
   }
}
void
brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_out);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map);
         }
      }
   }
}
void
brw_nir_lower_fs_outputs(nir_shader *nir)
{
   nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                            type_size_scalar);
   nir_lower_io(nir, nir_var_shader_out, type_size_scalar);
}
void
brw_nir_lower_cs_shared(nir_shader *nir)
{
   nir_assign_var_locations(&nir->shared, &nir->num_shared,
                            type_size_scalar_bytes);
   nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes);
}
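/* OPT() runs a NIR pass and accumulates whether it reported progress into
 * the local "progress" variable declared by the calling function; OPT_V()
 * runs a pass that does not report progress.  Both expect a nir_shader
 * named "nir" to be in scope.
 */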
#define OPT(pass, ...) ({                                  \
   bool this_progress = false;                             \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
   if (this_progress)                                      \
      progress = true;                                     \
   this_progress;                                          \
})

#define OPT_V(pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)
static nir_shader *
nir_optimize(nir_shader *nir, bool is_scalar)
{
   bool progress;
   do {
      progress = false;
      OPT_V(nir_lower_vars_to_ssa);

      if (is_scalar) {
         OPT_V(nir_lower_alu_to_scalar);
      }

      OPT(nir_copy_prop);

      if (is_scalar) {
         OPT_V(nir_lower_phis_to_scalar);
      }

      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
      OPT(nir_opt_peephole_select);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);
      OPT(nir_opt_dead_cf);
      OPT(nir_opt_remove_phis);
      OPT(nir_opt_undef);
      OPT_V(nir_lower_doubles, nir_lower_drcp |
                               nir_lower_dsqrt |
                               nir_lower_drsq |
                               nir_lower_dtrunc |
                               nir_lower_dfloor |
                               nir_lower_dceil |
                               nir_lower_dfract |
                               nir_lower_dround_even |
                               nir_lower_dmod);
      OPT_V(nir_lower_double_pack);
   } while (progress);

   return nir;
}
/* Does some simple lowering and runs the standard suite of optimizations
 *
 * This is intended to be called more-or-less directly after you get the
 * shader out of GLSL or some other source.  While it is geared towards i965,
 * it is not at all generator-specific except for the is_scalar flag.  Even
 * there, it is safe to call with is_scalar = false for a shader that is
 * intended for the FS backend as long as nir_optimize is called again with
 * is_scalar = true to scalarize everything prior to code gen.
 */
nir_shader *
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
{
   bool progress; /* Written by OPT and OPT_V */
   (void)progress;

   const bool is_scalar = compiler->scalar_stage[nir->stage];

   if (nir->stage == MESA_SHADER_GEOMETRY)
      OPT(nir_lower_gs_intrinsics);

   if (compiler->precise_trig)
      OPT(brw_nir_apply_trig_workarounds);

   static const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
   };

   OPT(nir_lower_tex, &tex_options);
   OPT(nir_normalize_cubemap_coords);

   OPT(nir_lower_global_vars_to_local);

   OPT(nir_split_var_copies);

   nir = nir_optimize(nir, is_scalar);

   if (is_scalar) {
      OPT_V(nir_lower_load_const_to_scalar);
   }

   /* Lower a bunch of stuff */
   OPT_V(nir_lower_var_copies);

   /* Get rid of split copies */
   nir = nir_optimize(nir, is_scalar);

   OPT(nir_remove_dead_variables, nir_var_local);

   return nir;
}
/* Prepare the given shader for codegen
 *
 * This function is intended to be called right before going into the actual
 * backend and is highly backend-specific.  Also, once this function has been
 * called on a shader, it will no longer be in SSA form so most optimizations
 * will not work.
 */
nir_shader *
brw_postprocess_nir(nir_shader *nir,
                    const struct brw_device_info *devinfo,
                    bool is_scalar)
{
   bool debug_enabled =
      (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->stage));

   bool progress; /* Written by OPT and OPT_V */
   (void)progress;

   nir = nir_optimize(nir, is_scalar);

   if (devinfo->gen >= 6) {
      /* Try and fuse multiply-adds */
      OPT(brw_nir_opt_peephole_ffma);
   }

   OPT(nir_opt_algebraic_late);

   OPT(nir_lower_locals_to_regs);

   OPT_V(nir_lower_to_source_mods);
   OPT(nir_copy_prop);
   OPT(nir_opt_dce);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_function(function, nir) {
         if (function->impl)
            nir_index_ssa_defs(function->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->stage));
      nir_print_shader(nir, stderr);
   }

   OPT_V(nir_convert_from_ssa, true);

   if (!is_scalar) {
      OPT_V(nir_move_vec_src_uses_to_dest);
      OPT(nir_lower_vec_to_movs);
   }

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (devinfo->gen <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   nir_sweep(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->stage));
      nir_print_shader(nir, stderr);
   }

   return nir;
}
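/* Bakes sampler-related workarounds from the program key into the NIR:
 * rectangle texture lowering on Gen < 6, GL_CLAMP emulation on Gen < 8,
 * faked texture swizzles, and YUV lowering, all via nir_lower_tex().
 */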
nir_shader *
brw_nir_apply_sampler_key(nir_shader *nir,
                          const struct brw_device_info *devinfo,
                          const struct brw_sampler_prog_key_data *key_tex,
                          bool is_scalar)
{
   nir_lower_tex_options tex_options = { 0 };

   /* Iron Lake and prior require lowering of all rectangle textures */
   if (devinfo->gen < 6)
      tex_options.lower_rect = true;

   /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
   if (devinfo->gen < 8) {
      tex_options.saturate_s = key_tex->gl_clamp_mask[0];
      tex_options.saturate_t = key_tex->gl_clamp_mask[1];
      tex_options.saturate_r = key_tex->gl_clamp_mask[2];
   }

   /* Prior to Haswell, we have to fake texture swizzle */
   for (unsigned s = 0; s < MAX_SAMPLERS; s++) {
      if (key_tex->swizzles[s] == SWIZZLE_NOOP)
         continue;

      tex_options.swizzle_result |= (1 << s);
      for (unsigned c = 0; c < 4; c++)
         tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
   }

   tex_options.lower_y_uv_external = key_tex->y_uv_image_mask;
   tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
   tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;

   if (nir_lower_tex(nir, &tex_options)) {
      nir_validate_shader(nir);
      nir = nir_optimize(nir, is_scalar);
   }

   return nir;
}
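/* Returns the BRW register type corresponding to a nir_alu_type. */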
enum brw_reg_type
brw_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_uint:
   case nir_type_uint32:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
   case nir_type_bool32:
   case nir_type_int32:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
   case nir_type_float32:
      return BRW_REGISTER_TYPE_F;
   case nir_type_float64:
      return BRW_REGISTER_TYPE_DF;
   case nir_type_int64:
   case nir_type_uint64:
      /* TODO we should only see these in moves, so for now it's ok, but when
       * we add actual 64-bit integer support we should fix this.
       */
      return BRW_REGISTER_TYPE_DF;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}
/* Returns the glsl_base_type corresponding to a nir_alu_type.
 * This is used by both brw_vec4_nir and brw_fs_nir.
 */
enum glsl_base_type
brw_glsl_base_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_float:
   case nir_type_float32:
      return GLSL_TYPE_FLOAT;

   case nir_type_float64:
      return GLSL_TYPE_DOUBLE;

   case nir_type_int:
   case nir_type_int32:
      return GLSL_TYPE_INT;

   case nir_type_uint:
   case nir_type_uint32:
      return GLSL_TYPE_UINT;

   default:
      unreachable("bad type");
   }
}