/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_nir.h"
#include "brw_shader.h"
#include "common/gen_debug.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"
static bool
is_input(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
          intrin->intrinsic == nir_intrinsic_load_interpolated_input;
}
static bool
is_output(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}
/**
 * In many cases, we just add the base and offset together, so there's no
 * reason to keep them separate.  Sometimes, combining them is essential:
 * if a shader only accesses part of a compound variable (such as a matrix
 * or array), the variable's base may not actually exist in the VUE map.
 *
 * This pass adds constant offsets to instr->const_index[0], and resets
 * the offset source to 0.  Non-constant offsets remain unchanged - since
 * we don't know what part of a compound variable is accessed, we allocate
 * storage for the entire thing.
 */
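/* For example (illustrative numbers only, not from a real shader): an
 * access such as
 *
 *    load_input(base=5, offset=<constant 2>)
 *
 * is rewritten by this pass to
 *
 *    load_input(base=7, offset=0)
 *
 * while an access with a non-constant offset keeps base=5 and its original
 * offset source.
 */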
static void
add_const_offset_to_base_block(nir_block *block, nir_builder *b,
                               nir_variable_mode mode)
{
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if ((mode == nir_var_shader_in && is_input(intrin)) ||
          (mode == nir_var_shader_out && is_output(intrin))) {
         nir_src *offset = nir_get_io_offset_src(intrin);
         nir_const_value *const_offset = nir_src_as_const_value(*offset);

         if (const_offset) {
            intrin->const_index[0] += const_offset->u32[0];
            b->cursor = nir_before_instr(&intrin->instr);
            nir_instr_rewrite_src(&intrin->instr, offset,
                                  nir_src_for_ssa(nir_imm_int(b, 0)));
         }
      }
   }
}
static void
add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
{
   nir_foreach_function(f, nir) {
      if (f->impl) {
         nir_builder b;
         nir_builder_init(&b, f->impl);
         nir_foreach_block(block, f->impl) {
            add_const_offset_to_base_block(block, &b, mode);
         }
      }
   }
}
static bool
remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
                  GLenum primitive_mode)
{
   const int location = nir_intrinsic_base(intr);
   const unsigned component = nir_intrinsic_component(intr);
   bool out_of_bounds;

   if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
      switch (primitive_mode) {
      case GL_QUADS:
         /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
         nir_intrinsic_set_base(intr, 0);
         nir_intrinsic_set_component(intr, 3 - component);
         out_of_bounds = false;
         break;
      case GL_TRIANGLES:
         /* gl_TessLevelInner[0] lives at DWord 4. */
         nir_intrinsic_set_base(intr, 1);
         out_of_bounds = component > 0;
         break;
      case GL_ISOLINES:
         out_of_bounds = true;
         break;
      default:
         unreachable("Bogus tessellation domain");
      }
   } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
      if (primitive_mode == GL_ISOLINES) {
         /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
         nir_intrinsic_set_base(intr, 1);
         nir_intrinsic_set_component(intr, 2 + nir_intrinsic_component(intr));
         out_of_bounds = component > 1;
      } else {
         /* Triangles use DWords 7-5 (reversed); Quads use 7-4 (reversed) */
         nir_intrinsic_set_base(intr, 1);
         nir_intrinsic_set_component(intr, 3 - nir_intrinsic_component(intr));
         out_of_bounds = component == 3 && primitive_mode == GL_TRIANGLES;
      }
   } else {
      return false;
   }

   if (out_of_bounds) {
      if (nir_intrinsic_infos[intr->intrinsic].has_dest) {
         b->cursor = nir_before_instr(&intr->instr);
         nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(undef));
      }
      nir_instr_remove(&intr->instr);
   }

   return true;
}
static void
remap_patch_urb_offsets(nir_block *block, nir_builder *b,
                        const struct brw_vue_map *vue_map,
                        GLenum tes_primitive_mode)
{
   const bool is_passthrough_tcs = b->shader->info.name &&
      strcmp(b->shader->info.name, "passthrough") == 0;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      gl_shader_stage stage = b->shader->info.stage;

      if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
          (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {

         if (!is_passthrough_tcs &&
             remap_tess_levels(b, intrin, tes_primitive_mode))
            continue;

         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
         assert(vue_slot != -1);
         intrin->const_index[0] = vue_slot;

         nir_src *vertex = nir_get_io_vertex_index_src(intrin);
         if (vertex) {
            nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
            if (const_vertex) {
               intrin->const_index[0] += const_vertex->u32[0] *
                                         vue_map->num_per_vertex_slots;
            } else {
               b->cursor = nir_before_instr(&intrin->instr);

               /* Multiply by the number of per-vertex slots. */
               nir_ssa_def *vertex_offset =
                  nir_imul(b,
                           nir_ssa_for_src(b, *vertex, 1),
                           nir_imm_int(b, vue_map->num_per_vertex_slots));

               /* Add it to the existing offset */
               nir_src *offset = nir_get_io_offset_src(intrin);
               nir_ssa_def *total_offset =
                  nir_iadd(b, vertex_offset,
                           nir_ssa_for_src(b, *offset, 1));

               nir_instr_rewrite_src(&intrin->instr, offset,
                                     nir_src_for_ssa(total_offset));
            }
         }
      }
   }
}
void
brw_nir_lower_vs_inputs(nir_shader *nir,
                        const uint8_t *vs_attrib_wa_flags)
{
   /* Start with the location of the variable's base. */
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   /* Now use nir_lower_io to walk dereference chains.  Attribute arrays are
    * loaded as one vec4 or dvec4 per element (or matrix column), depending on
    * whether it is a double-precision type or not.
    */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);

   brw_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);

   /* The last step is to remap VERT_ATTRIB_* to actual registers */

   /* Whether or not we have any system generated values.  gl_DrawID is not
    * included here as it lives in its own vec4.
    */
   const bool has_sgvs =
      nir->info.system_values_read &
      (BITFIELD64_BIT(SYSTEM_VALUE_FIRST_VERTEX) |
       BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
       BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
       BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID));
   const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);

   nir_foreach_function(function, nir) {
      if (!function->impl)
         continue;

      nir_builder b;
      nir_builder_init(&b, function->impl);

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

            switch (intrin->intrinsic) {
            case nir_intrinsic_load_first_vertex:
            case nir_intrinsic_load_base_instance:
            case nir_intrinsic_load_vertex_id_zero_base:
            case nir_intrinsic_load_instance_id:
            case nir_intrinsic_load_is_indexed_draw:
            case nir_intrinsic_load_draw_id: {
               b.cursor = nir_after_instr(&intrin->instr);

               /* gl_VertexID and friends are stored by the VF as the last
                * vertex element.  We convert them to load_input intrinsics at
                * the right location.
                */
               nir_intrinsic_instr *load =
                  nir_intrinsic_instr_create(nir, nir_intrinsic_load_input);
               load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));

               nir_intrinsic_set_base(load, num_inputs);
               switch (intrin->intrinsic) {
               case nir_intrinsic_load_first_vertex:
                  nir_intrinsic_set_component(load, 0);
                  break;
               case nir_intrinsic_load_base_instance:
                  nir_intrinsic_set_component(load, 1);
                  break;
               case nir_intrinsic_load_vertex_id_zero_base:
                  nir_intrinsic_set_component(load, 2);
                  break;
               case nir_intrinsic_load_instance_id:
                  nir_intrinsic_set_component(load, 3);
                  break;
               case nir_intrinsic_load_draw_id:
               case nir_intrinsic_load_is_indexed_draw:
                  /* gl_DrawID and IsIndexedDraw are stored right after
                   * gl_VertexID and friends if any of them exist.
                   */
                  nir_intrinsic_set_base(load, num_inputs + has_sgvs);
                  if (intrin->intrinsic == nir_intrinsic_load_draw_id)
                     nir_intrinsic_set_component(load, 0);
                  else
                     nir_intrinsic_set_component(load, 1);
                  break;
               default:
                  unreachable("Invalid system value intrinsic");
               }

               load->num_components = 1;
               nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
               nir_builder_instr_insert(&b, &load->instr);

               nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                        nir_src_for_ssa(&load->dest.ssa));
               nir_instr_remove(&intrin->instr);
               break;
            }

            case nir_intrinsic_load_input: {
               /* Attributes come in a contiguous block, ordered by their
                * gl_vert_attrib value.  That means we can compute the slot
                * number for an attribute by masking out the enabled attributes
                * before it and counting the bits.
                */
               int attr = nir_intrinsic_base(intrin);
               int slot = util_bitcount64(nir->info.inputs_read &
                                          BITFIELD64_MASK(attr));
               nir_intrinsic_set_base(intrin, slot);
               break;
            }

            default:
               break; /* Nothing to do */
            }
         }
      }
   }
}
void
brw_nir_lower_vue_inputs(nir_shader *nir,
                         const struct brw_vue_map *vue_map)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_foreach_block(block, function->impl) {
            nir_foreach_instr(instr, block) {
               if (instr->type != nir_instr_type_intrinsic)
                  continue;

               nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

               if (intrin->intrinsic == nir_intrinsic_load_input ||
                   intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
                  /* Offset 0 is the VUE header, which contains
                   * VARYING_SLOT_LAYER [.y], VARYING_SLOT_VIEWPORT [.z], and
                   * VARYING_SLOT_PSIZ [.w].
                   */
                  int varying = nir_intrinsic_base(intrin);
                  int vue_slot;
                  switch (varying) {
                  case VARYING_SLOT_PSIZ:
                     nir_intrinsic_set_base(intrin, 0);
                     nir_intrinsic_set_component(intrin, 3);
                     break;

                  default:
                     vue_slot = vue_map->varying_to_slot[varying];
                     assert(vue_slot != -1);
                     nir_intrinsic_set_base(intrin, vue_slot);
                     break;
                  }
               }
            }
         }
      }
   }
}
void
brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map,
                                    nir->info.tess.primitive_mode);
         }
      }
   }
}
void
brw_nir_lower_fs_inputs(nir_shader *nir,
                        const struct gen_device_info *devinfo,
                        const struct brw_wm_prog_key *key)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;

      /* Apply default interpolation mode.
       *
       * Everything defaults to smooth except for the legacy GL color
       * built-in variables, which might be flat depending on API state.
       */
      if (var->data.interpolation == INTERP_MODE_NONE) {
         const bool flat = key->flat_shade &&
            (var->data.location == VARYING_SLOT_COL0 ||
             var->data.location == VARYING_SLOT_COL1);

         var->data.interpolation = flat ? INTERP_MODE_FLAT
                                        : INTERP_MODE_SMOOTH;
      }

      /* On Ironlake and below, there is only one interpolation mode.
       * Centroid interpolation doesn't mean anything on this hardware --
       * there is no multisampling.
       */
      if (devinfo->gen < 6) {
         var->data.centroid = false;
         var->data.sample = false;
      }
   }

   nir_lower_io_options lower_io_options = 0;
   if (key->persample_interp)
      lower_io_options |= nir_lower_io_force_sample_interpolation;

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);
}
void
brw_nir_lower_vue_outputs(nir_shader *nir)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
}
void
brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map,
                          GLenum tes_primitive_mode)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_out);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
         }
      }
   }
}
void
brw_nir_lower_fs_outputs(nir_shader *nir)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location =
         SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
         SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
}
#define OPT(pass, ...) ({                                    \
   bool this_progress = false;                               \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);        \
   if (this_progress)                                        \
      progress = true;                                       \
   this_progress;                                            \
})
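/* Note that OPT() both accumulates into the local "progress" flag and
 * evaluates to whether this particular pass made progress, so it can be
 * used either as a plain statement or in a condition, e.g.:
 *
 *    if (OPT(nir_opt_trivial_continues)) { ... }
 */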
static nir_variable_mode
brw_nir_no_indirect_mask(const struct brw_compiler *compiler,
                         gl_shader_stage stage)
{
   nir_variable_mode indirect_mask = 0;

   if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
      indirect_mask |= nir_var_shader_in;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectOutput)
      indirect_mask |= nir_var_shader_out;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
      indirect_mask |= nir_var_local;

   return indirect_mask;
}
static nir_shader *
brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
                 bool is_scalar, bool allow_copies)
{
   nir_variable_mode indirect_mask =
      brw_nir_no_indirect_mask(compiler, nir->info.stage);

   bool progress;
   do {
      progress = false;
      OPT(nir_split_array_vars, nir_var_local);
      OPT(nir_shrink_vec_array_vars, nir_var_local);
      OPT(nir_lower_vars_to_ssa);
      if (allow_copies) {
         /* Only run this pass in the first call to brw_nir_optimize.  Later
          * calls assume that we've lowered away any copy_deref instructions
          * and we don't want to introduce any more.
          */
         OPT(nir_opt_find_array_copies);
      }
      OPT(nir_opt_copy_prop_vars);
      OPT(nir_opt_dead_write_vars);

      if (is_scalar)
         OPT(nir_lower_alu_to_scalar);

      if (is_scalar)
         OPT(nir_lower_phis_to_scalar);

      OPT(nir_opt_peephole_select, 0);
      OPT(nir_opt_intrinsics);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);
      OPT(nir_opt_dead_cf);
      if (OPT(nir_opt_trivial_continues)) {
         /* If nir_opt_trivial_continues makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(nir_copy_prop);
         OPT(nir_opt_dce);
      }
      OPT(nir_opt_if);
      if (nir->options->max_unroll_iterations != 0) {
         OPT(nir_opt_loop_unroll, indirect_mask);
      }
      OPT(nir_opt_remove_phis);
      OPT(nir_lower_doubles, nir_lower_drcp |
                             nir_lower_dsqrt |
                             nir_lower_drsq |
                             nir_lower_dtrunc |
                             nir_lower_dfloor |
                             nir_lower_dceil |
                             nir_lower_dfract |
                             nir_lower_dround_even |
                             nir_lower_dmod);
   } while (progress);

   /* Workaround Gfxbench unused local sampler variable which will trigger an
    * assert in the opt_large_constants pass.
    */
   OPT(nir_remove_dead_variables, nir_var_local);

   return nir;
}
static unsigned
lower_bit_size_callback(const nir_alu_instr *alu, UNUSED void *data)
{
   assert(alu->dest.dest.is_ssa);
   if (alu->dest.dest.ssa.bit_size != 16)
      return 0;

   switch (alu->op) {
   case nir_op_idiv:
   case nir_op_imod:
   case nir_op_irem:
   case nir_op_udiv:
   case nir_op_umod:
      /* Lower 16-bit integer division ops to 32 bits. */
      return 32;
   default:
      return 0;
   }
}
/* Does some simple lowering and runs the standard suite of optimizations
 *
 * This is intended to be called more-or-less directly after you get the
 * shader out of GLSL or some other source.  While it is geared towards i965,
 * it is not at all generator-specific except for the is_scalar flag.  Even
 * there, it is safe to call with is_scalar = false for a shader that is
 * intended for the FS backend as long as nir_optimize is called again with
 * is_scalar = true to scalarize everything prior to code gen.
 */
nir_shader *
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   UNUSED bool progress; /* Written by OPT */

   const bool is_scalar = compiler->scalar_stage[nir->info.stage];

   if (nir->info.stage == MESA_SHADER_GEOMETRY)
      OPT(nir_lower_gs_intrinsics);

   /* See also brw_nir_trig_workarounds.py */
   if (compiler->precise_trig &&
       !(devinfo->gen >= 10 || devinfo->is_kabylake))
      OPT(brw_nir_apply_trig_workarounds);

   static const nir_lower_tex_options tex_options = {
      .lower_txf_offset = true,
      .lower_rect_offset = true,
      .lower_txd_cube_map = true,
      .lower_txb_shadow_clamp = true,
      .lower_txd_shadow_clamp = true,
      .lower_txd_offset_clamp = true,
   };

   OPT(nir_lower_tex, &tex_options);
   OPT(nir_normalize_cubemap_coords);

   OPT(nir_lower_global_vars_to_local);

   OPT(nir_split_var_copies);
   OPT(nir_split_struct_vars, nir_var_local);

   /* Run opt_algebraic before int64 lowering so we can hopefully get rid
    * of some int64 instructions.
    */
   OPT(nir_opt_algebraic);

   /* Lower int64 instructions before nir_optimize so that loop unrolling
    * sees their actual cost.
    */
   OPT(nir_lower_int64, nir_lower_imul64 |
                        nir_lower_isign64 |
                        nir_lower_divmod64);

   nir = brw_nir_optimize(nir, compiler, is_scalar, true);

   /* This needs to be run after the first optimization pass but before we
    * lower indirect derefs away
    */
   if (compiler->supports_shader_constants) {
      OPT(nir_opt_large_constants, NULL, 32);
   }

   OPT(nir_lower_bit_size, lower_bit_size_callback, NULL);

   if (is_scalar)
      OPT(nir_lower_load_const_to_scalar);

   /* Lower a bunch of stuff */
   OPT(nir_lower_var_copies);

   OPT(nir_lower_system_values);

   const nir_lower_subgroups_options subgroups_options = {
      .subgroup_size = BRW_SUBGROUP_SIZE,
      .ballot_bit_size = 32,
      .lower_to_scalar = true,
      .lower_subgroup_masks = true,
      .lower_vote_trivial = !is_scalar,
      .lower_shuffle = true,
   };
   OPT(nir_lower_subgroups, &subgroups_options);

   OPT(nir_lower_clip_cull_distance_arrays);

   nir_variable_mode indirect_mask =
      brw_nir_no_indirect_mask(compiler, nir->info.stage);
   OPT(nir_lower_indirect_derefs, indirect_mask);

   OPT(brw_nir_lower_mem_access_bit_sizes);

   /* Get rid of split copies */
   nir = brw_nir_optimize(nir, compiler, is_scalar, false);

   return nir;
}
void
brw_nir_link_shaders(const struct brw_compiler *compiler,
                     nir_shader **producer, nir_shader **consumer)
{
   nir_lower_io_arrays_to_elements(*producer, *consumer);
   nir_validate_shader(*producer, "after nir_lower_io_arrays_to_elements");
   nir_validate_shader(*consumer, "after nir_lower_io_arrays_to_elements");

   const bool p_is_scalar =
      compiler->scalar_stage[(*producer)->info.stage];
   const bool c_is_scalar =
      compiler->scalar_stage[(*consumer)->info.stage];

   if (p_is_scalar && c_is_scalar) {
      NIR_PASS_V(*producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
      NIR_PASS_V(*consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
      *producer = brw_nir_optimize(*producer, compiler, p_is_scalar, false);
      *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
   }

   if (nir_link_constant_varyings(*producer, *consumer))
      *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);

   NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
   NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);

   if (nir_remove_unused_varyings(*producer, *consumer)) {
      NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(*consumer, nir_lower_global_vars_to_local);

      /* The backend might not be able to handle indirects on
       * temporaries so we need to lower indirects on any of the
       * varyings we have demoted here.
       */
      NIR_PASS_V(*producer, nir_lower_indirect_derefs,
                 brw_nir_no_indirect_mask(compiler, (*producer)->info.stage));
      NIR_PASS_V(*consumer, nir_lower_indirect_derefs,
                 brw_nir_no_indirect_mask(compiler, (*consumer)->info.stage));

      *producer = brw_nir_optimize(*producer, compiler, p_is_scalar, false);
      *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
   }
}
/* Prepare the given shader for codegen
 *
 * This function is intended to be called right before going into the actual
 * backend and is highly backend-specific.  Also, once this function has been
 * called on a shader, it will no longer be in SSA form so most optimizations
 * will not work.
 */
void
brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
                    bool is_scalar)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   bool debug_enabled =
      (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->info.stage));

   UNUSED bool progress; /* Written by OPT */

   OPT(nir_opt_algebraic_before_ffma);

   nir = brw_nir_optimize(nir, compiler, is_scalar, false);

   if (devinfo->gen >= 6) {
      /* Try and fuse multiply-adds */
      OPT(brw_nir_opt_peephole_ffma);
   }

   OPT(nir_opt_algebraic_late);

   OPT(nir_lower_to_source_mods, nir_lower_all_source_mods);
   OPT(nir_opt_move_comparisons);

   OPT(nir_lower_locals_to_regs);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_function(function, nir) {
         if (function->impl)
            nir_index_ssa_defs(function->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->info.stage));
      nir_print_shader(nir, stderr);
   }

   OPT(nir_convert_from_ssa, true);

   if (!is_scalar) {
      OPT(nir_move_vec_src_uses_to_dest);
      OPT(nir_lower_vec_to_movs);
   }

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (devinfo->gen <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->info.stage));
      nir_print_shader(nir, stderr);
   }
}
nir_shader *
brw_nir_apply_sampler_key(nir_shader *nir,
                          const struct brw_compiler *compiler,
                          const struct brw_sampler_prog_key_data *key_tex,
                          bool is_scalar)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   nir_lower_tex_options tex_options = { 0 };

   /* Iron Lake and prior require lowering of all rectangle textures */
   if (devinfo->gen < 6)
      tex_options.lower_rect = true;

   /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
   if (devinfo->gen < 8) {
      tex_options.saturate_s = key_tex->gl_clamp_mask[0];
      tex_options.saturate_t = key_tex->gl_clamp_mask[1];
      tex_options.saturate_r = key_tex->gl_clamp_mask[2];
   }

   /* Prior to Haswell, we have to fake texture swizzle */
   for (unsigned s = 0; s < MAX_SAMPLERS; s++) {
      if (key_tex->swizzles[s] == SWIZZLE_NOOP)
         continue;

      tex_options.swizzle_result |= (1 << s);
      for (unsigned c = 0; c < 4; c++)
         tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
   }

   /* Prior to Haswell, we have to lower gradients on shadow samplers */
   tex_options.lower_txd_shadow = devinfo->gen < 8 && !devinfo->is_haswell;

   tex_options.lower_y_uv_external = key_tex->y_uv_image_mask;
   tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
   tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;
   tex_options.lower_xy_uxvx_external = key_tex->xy_uxvx_image_mask;
   tex_options.lower_ayuv_external = key_tex->ayuv_image_mask;

   if (nir_lower_tex(nir, &tex_options)) {
      nir_validate_shader(nir, "after nir_lower_tex");
      nir = brw_nir_optimize(nir, compiler, is_scalar, false);
   }

   return nir;
}
enum brw_reg_type
brw_type_for_nir_type(const struct gen_device_info *devinfo, nir_alu_type type)
{
   switch (type) {
   case nir_type_uint32:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool32:
   case nir_type_int32:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float32:
      return BRW_REGISTER_TYPE_F;
   case nir_type_float16:
      return BRW_REGISTER_TYPE_HF;
   case nir_type_float64:
      return BRW_REGISTER_TYPE_DF;
   case nir_type_int64:
      return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_Q;
   case nir_type_uint64:
      return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_UQ;
   case nir_type_int16:
      return BRW_REGISTER_TYPE_W;
   case nir_type_uint16:
      return BRW_REGISTER_TYPE_UW;
   case nir_type_int8:
      return BRW_REGISTER_TYPE_B;
   case nir_type_uint8:
      return BRW_REGISTER_TYPE_UB;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}
/* Returns the glsl_base_type corresponding to a nir_alu_type.
 * This is used by both brw_vec4_nir and brw_fs_nir.
 */
enum glsl_base_type
brw_glsl_base_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_float32:
      return GLSL_TYPE_FLOAT;

   case nir_type_float16:
      return GLSL_TYPE_FLOAT16;

   case nir_type_float64:
      return GLSL_TYPE_DOUBLE;

   case nir_type_int32:
      return GLSL_TYPE_INT;

   case nir_type_uint32:
      return GLSL_TYPE_UINT;

   case nir_type_int16:
      return GLSL_TYPE_INT16;

   case nir_type_uint16:
      return GLSL_TYPE_UINT16;

   default:
      unreachable("bad type");
   }
}
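/* Builds a passthrough tessellation control shader.
 *
 * The generated shader loads the patch URB header (tessellation levels)
 * from 8 dwords of uniform data and writes it to the output, then copies
 * each per-vertex input named by the key straight through to the
 * corresponding per-vertex output.
 */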
nir_shader *
brw_nir_create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compiler,
                               const nir_shader_compiler_options *options,
                               const struct brw_tcs_prog_key *key)
{
   nir_builder b;
   nir_builder_init_simple_shader(&b, mem_ctx, MESA_SHADER_TESS_CTRL,
                                  options);
   nir_shader *nir = b.shader;
   nir_variable *var;
   nir_intrinsic_instr *load;
   nir_intrinsic_instr *store;
   nir_ssa_def *zero = nir_imm_int(&b, 0);
   nir_ssa_def *invoc_id =
      nir_load_system_value(&b, nir_intrinsic_load_invocation_id, 0);

   nir->info.inputs_read = key->outputs_written &
      ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
   nir->info.outputs_written = key->outputs_written;
   nir->info.tess.tcs_vertices_out = key->input_vertices;
   nir->info.name = ralloc_strdup(nir, "passthrough");
   nir->num_uniforms = 8 * sizeof(uint32_t);

   var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_0");
   var->data.location = 0;
   var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_1");
   var->data.location = 1;

   /* Write the patch URB header. */
   for (int i = 0; i <= 1; i++) {
      load = nir_intrinsic_instr_create(nir, nir_intrinsic_load_uniform);
      load->num_components = 4;
      load->src[0] = nir_src_for_ssa(zero);
      nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
      nir_intrinsic_set_base(load, i * 4 * sizeof(uint32_t));
      nir_builder_instr_insert(&b, &load->instr);

      store = nir_intrinsic_instr_create(nir, nir_intrinsic_store_output);
      store->num_components = 4;
      store->src[0] = nir_src_for_ssa(&load->dest.ssa);
      store->src[1] = nir_src_for_ssa(zero);
      nir_intrinsic_set_base(store, VARYING_SLOT_TESS_LEVEL_INNER - i);
      nir_intrinsic_set_write_mask(store, WRITEMASK_XYZW);
      nir_builder_instr_insert(&b, &store->instr);
   }

   /* Copy inputs to outputs. */
   uint64_t varyings = nir->info.inputs_read;

   while (varyings != 0) {
      const int varying = ffsll(varyings) - 1;

      load = nir_intrinsic_instr_create(nir,
                                        nir_intrinsic_load_per_vertex_input);
      load->num_components = 4;
      load->src[0] = nir_src_for_ssa(invoc_id);
      load->src[1] = nir_src_for_ssa(zero);
      nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
      nir_intrinsic_set_base(load, varying);
      nir_builder_instr_insert(&b, &load->instr);

      store = nir_intrinsic_instr_create(nir,
                                         nir_intrinsic_store_per_vertex_output);
      store->num_components = 4;
      store->src[0] = nir_src_for_ssa(&load->dest.ssa);
      store->src[1] = nir_src_for_ssa(invoc_id);
      store->src[2] = nir_src_for_ssa(zero);
      nir_intrinsic_set_base(store, varying);
      nir_intrinsic_set_write_mask(store, WRITEMASK_XYZW);
      nir_builder_instr_insert(&b, &store->instr);

      varyings &= ~BITFIELD64_BIT(varying);
   }

   nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");

   nir = brw_preprocess_nir(compiler, nir);

   return nir;
}