/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_shader.h"
#include "common/gen_debug.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
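
/* Helpers to classify NIR I/O intrinsics: "inputs" cover direct, per-vertex,
 * and interpolated loads; "outputs" cover both the load and store forms.
 */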
static bool
is_input(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
          intrin->intrinsic == nir_intrinsic_load_interpolated_input;
}

static bool
is_output(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}

/**
 * In many cases, we just add the base and offset together, so there's no
 * reason to keep them separate.  Sometimes, combining them is essential:
 * if a shader only accesses part of a compound variable (such as a matrix
 * or array), the variable's base may not actually exist in the VUE map.
 *
 * This pass adds constant offsets to instr->const_index[0], and resets
 * the offset source to 0.  Non-constant offsets remain unchanged - since
 * we don't know what part of a compound variable is accessed, we allocate
 * storage for the entire thing.
 */

static bool
add_const_offset_to_base_block(nir_block *block, nir_builder *b,
                               nir_variable_mode mode)
{
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if ((mode == nir_var_shader_in && is_input(intrin)) ||
          (mode == nir_var_shader_out && is_output(intrin))) {
         nir_src *offset = nir_get_io_offset_src(intrin);
         nir_const_value *const_offset = nir_src_as_const_value(*offset);

         if (const_offset) {
            intrin->const_index[0] += const_offset->u32[0];
            b->cursor = nir_before_instr(&intrin->instr);
            nir_instr_rewrite_src(&intrin->instr, offset,
                                  nir_src_for_ssa(nir_imm_int(b, 0)));
         }
      }
   }
   return true;
}

static void
add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
{
   nir_foreach_function(f, nir) {
      if (f->impl) {
         nir_builder b;
         nir_builder_init(&b, f->impl);
         nir_foreach_block(block, f->impl) {
            add_const_offset_to_base_block(block, &b, mode);
         }
      }
   }
}
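
/* Remap load/store intrinsics for gl_TessLevelInner/Outer[] onto the patch
 * URB header, whose layout depends on the tessellation domain (see the
 * per-case comments below for the exact DWord placement).  Accesses to
 * components that don't exist in a given domain are out of bounds: loads
 * are rewritten to undef and the intrinsic is removed.  Returns false for
 * intrinsics that aren't tessellation level accesses.
 */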
static bool
remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
                  GLenum primitive_mode)
{
   const int location = nir_intrinsic_base(intr);
   const unsigned component = nir_intrinsic_component(intr);
   bool out_of_bounds;

   if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
      switch (primitive_mode) {
      case GL_QUADS:
         /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
         nir_intrinsic_set_base(intr, 0);
         nir_intrinsic_set_component(intr, 3 - component);
         out_of_bounds = false;
         break;
      case GL_TRIANGLES:
         /* gl_TessLevelInner[0] lives at DWord 4. */
         nir_intrinsic_set_base(intr, 1);
         out_of_bounds = component > 0;
         break;
      case GL_ISOLINES:
         out_of_bounds = true;
         break;
      default:
         unreachable("Bogus tessellation domain");
      }
   } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
      if (primitive_mode == GL_ISOLINES) {
         /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
         nir_intrinsic_set_base(intr, 1);
         nir_intrinsic_set_component(intr, 2 + nir_intrinsic_component(intr));
         out_of_bounds = component > 1;
      } else {
         /* Triangles use DWords 7-5 (reversed); Quads use 7-4 (reversed) */
         nir_intrinsic_set_base(intr, 1);
         nir_intrinsic_set_component(intr, 3 - nir_intrinsic_component(intr));
         out_of_bounds = component == 3 && primitive_mode == GL_TRIANGLES;
      }
   } else {
      return false;
   }

   if (out_of_bounds) {
      if (nir_intrinsic_infos[intr->intrinsic].has_dest) {
         b->cursor = nir_before_instr(&intr->instr);
         nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(undef));
      }
      nir_instr_remove(&intr->instr);
   }

   return true;
}
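
/* Remap TCS outputs and TES inputs from VARYING_SLOT_* bases to VUE map
 * slots.  A constant vertex index is folded straight into the base; a
 * dynamic one is multiplied by the per-vertex slot count and added to the
 * indirect offset source instead.
 */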
static bool
remap_patch_urb_offsets(nir_block *block, nir_builder *b,
                        const struct brw_vue_map *vue_map,
                        GLenum tes_primitive_mode)
{
   const bool is_passthrough_tcs = b->shader->info.name &&
      strcmp(b->shader->info.name, "passthrough") == 0;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      gl_shader_stage stage = b->shader->info.stage;

      if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
          (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {

         if (!is_passthrough_tcs &&
             remap_tess_levels(b, intrin, tes_primitive_mode))
            continue;

         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
         assert(vue_slot != -1);
         intrin->const_index[0] = vue_slot;

         nir_src *vertex = nir_get_io_vertex_index_src(intrin);
         if (vertex) {
            nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
            if (const_vertex) {
               intrin->const_index[0] += const_vertex->u32[0] *
                                         vue_map->num_per_vertex_slots;
            } else {
               b->cursor = nir_before_instr(&intrin->instr);

               /* Multiply by the number of per-vertex slots. */
               nir_ssa_def *vertex_offset =
                  nir_imul(b,
                           nir_ssa_for_src(b, *vertex, 1),
                           nir_imm_int(b,
                                       vue_map->num_per_vertex_slots));

               /* Add it to the existing offset */
               nir_src *offset = nir_get_io_offset_src(intrin);
               nir_ssa_def *total_offset =
                  nir_iadd(b, vertex_offset,
                           nir_ssa_for_src(b, *offset, 1));

               nir_instr_rewrite_src(&intrin->instr, offset,
                                     nir_src_for_ssa(total_offset));
            }
         }
      }
   }
   return true;
}
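
/* Lower VS inputs: remap enabled attributes to a contiguous block of input
 * slots, and rewrite gl_VertexID and friends as load_input intrinsics
 * reading the trailing vertex elements that the VF provides for them.
 */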
void
brw_nir_lower_vs_inputs(nir_shader *nir,
                        const uint8_t *vs_attrib_wa_flags)
{
   /* Start with the location of the variable's base. */
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   /* Now use nir_lower_io to walk dereference chains.  Attribute arrays are
    * loaded as one vec4 or dvec4 per element (or matrix column), depending on
    * whether it is a double-precision type or not.
    */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);

   brw_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);

   /* The last step is to remap VERT_ATTRIB_* to actual registers */

   /* Whether or not we have any system generated values.  gl_DrawID is not
    * included here as it lives in its own vec4.
    */
   const bool has_sgvs =
      nir->info.system_values_read &
      (BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
       BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
       BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
       BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID));

   const unsigned num_inputs = _mesa_bitcount_64(nir->info.inputs_read);

   nir_foreach_function(function, nir) {
      if (!function->impl)
         continue;

      nir_builder b;
      nir_builder_init(&b, function->impl);

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

            switch (intrin->intrinsic) {
            case nir_intrinsic_load_base_vertex:
            case nir_intrinsic_load_base_instance:
            case nir_intrinsic_load_vertex_id_zero_base:
            case nir_intrinsic_load_instance_id:
            case nir_intrinsic_load_draw_id: {
               b.cursor = nir_after_instr(&intrin->instr);

               /* gl_VertexID and friends are stored by the VF as the last
                * vertex element.  We convert them to load_input intrinsics at
                * the right location.
                */
               nir_intrinsic_instr *load =
                  nir_intrinsic_instr_create(nir, nir_intrinsic_load_input);
               load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));

               nir_intrinsic_set_base(load, num_inputs);
               switch (intrin->intrinsic) {
               case nir_intrinsic_load_base_vertex:
                  nir_intrinsic_set_component(load, 0);
                  break;
               case nir_intrinsic_load_base_instance:
                  nir_intrinsic_set_component(load, 1);
                  break;
               case nir_intrinsic_load_vertex_id_zero_base:
                  nir_intrinsic_set_component(load, 2);
                  break;
               case nir_intrinsic_load_instance_id:
                  nir_intrinsic_set_component(load, 3);
                  break;
               case nir_intrinsic_load_draw_id:
                  /* gl_DrawID is stored right after gl_VertexID and friends
                   * if any of them exist.
                   */
                  nir_intrinsic_set_base(load, num_inputs + has_sgvs);
                  nir_intrinsic_set_component(load, 0);
                  break;
               default:
                  unreachable("Invalid system value intrinsic");
               }

               load->num_components = 1;
               nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
               nir_builder_instr_insert(&b, &load->instr);

               nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                        nir_src_for_ssa(&load->dest.ssa));
               nir_instr_remove(&intrin->instr);
               break;
            }

            case nir_intrinsic_load_input: {
               /* Attributes come in a contiguous block, ordered by their
                * gl_vert_attrib value.  That means we can compute the slot
                * number for an attribute by masking out the enabled attributes
                * before it and counting the bits.
                */
               int attr = nir_intrinsic_base(intrin);
               int slot = _mesa_bitcount_64(nir->info.inputs_read &
                                            BITFIELD64_MASK(attr));
               nir_intrinsic_set_base(intrin, slot);
               break;
            }

            default:
               break; /* Nothing to do */
            }
         }
      }
   }
}
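
/* Lower inputs for stages that read a VUE map (e.g. the GS): remap
 * VARYING_SLOT_* bases to slots in the incoming VUE map, special-casing
 * gl_PointSize, which lives in the VUE header.
 */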
void
brw_nir_lower_vue_inputs(nir_shader *nir,
                         const struct brw_vue_map *vue_map)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);

   nir_foreach_function(function, nir) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

            if (intrin->intrinsic == nir_intrinsic_load_input ||
                intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
               /* Offset 0 is the VUE header, which contains
                * VARYING_SLOT_LAYER [.y], VARYING_SLOT_VIEWPORT [.z], and
                * VARYING_SLOT_PSIZ [.w].
                */
               int varying = nir_intrinsic_base(intrin);
               int vue_slot;
               switch (varying) {
               case VARYING_SLOT_PSIZ:
                  nir_intrinsic_set_base(intrin, 0);
                  nir_intrinsic_set_component(intrin, 3);
                  break;

               default:
                  vue_slot = vue_map->varying_to_slot[varying];
                  assert(vue_slot != -1);
                  nir_intrinsic_set_base(intrin, vue_slot);
                  break;
               }
            }
         }
      }
   }
}

void
brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map,
                                    nir->info.tess.primitive_mode);
         }
      }
   }
}

void
brw_nir_lower_fs_inputs(nir_shader *nir,
                        const struct gen_device_info *devinfo,
                        const struct brw_wm_prog_key *key)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;

      /* Apply default interpolation mode.
       *
       * Everything defaults to smooth except for the legacy GL color
       * built-in variables, which might be flat depending on API state.
       */
      if (var->data.interpolation == INTERP_MODE_NONE) {
         const bool flat = key->flat_shade &&
            (var->data.location == VARYING_SLOT_COL0 ||
             var->data.location == VARYING_SLOT_COL1);

         var->data.interpolation = flat ? INTERP_MODE_FLAT
                                        : INTERP_MODE_SMOOTH;
      }

      /* On Ironlake and below, there is only one interpolation mode.
       * Centroid interpolation doesn't mean anything on this hardware --
       * there is no multisampling.
       */
      if (devinfo->gen < 6) {
         var->data.centroid = false;
         var->data.sample = false;
      }
   }

   nir_lower_io_options lower_io_options = 0;
   if (key->persample_interp)
      lower_io_options |= nir_lower_io_force_sample_interpolation;

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);
}

void
brw_nir_lower_vue_outputs(nir_shader *nir,
                          bool is_scalar)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
}

void
brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map,
                          GLenum tes_primitive_mode)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_out);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
         }
      }
   }
}

void
brw_nir_lower_fs_outputs(nir_shader *nir)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location =
         SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
         SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
}

void
brw_nir_lower_cs_shared(nir_shader *nir)
{
   nir_assign_var_locations(&nir->shared, &nir->num_shared,
                            type_size_scalar_bytes);
   nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes, 0);
}
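
/* OPT wraps NIR_PASS for the passes below: it runs the pass on the local
 * 'nir' shader, folds the result into the caller's 'progress' flag, and
 * evaluates to whether this particular invocation made progress.
 */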
#define OPT(pass, ...) ({                                  \
   bool this_progress = false;                             \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
   if (this_progress)                                      \
      progress = true;                                     \
   this_progress;                                          \
})

static nir_variable_mode
brw_nir_no_indirect_mask(const struct brw_compiler *compiler,
                         gl_shader_stage stage)
{
   nir_variable_mode indirect_mask = 0;

   if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
      indirect_mask |= nir_var_shader_in;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectOutput)
      indirect_mask |= nir_var_shader_out;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
      indirect_mask |= nir_var_local;

   return indirect_mask;
}
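
/* The core optimization loop: run the pass list repeatedly until no pass
 * reports progress.  The scalarizing passes only run for stages using the
 * scalar backend.
 */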
nir_shader *
brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
                 bool is_scalar)
{
   nir_variable_mode indirect_mask =
      brw_nir_no_indirect_mask(compiler, nir->info.stage);

   bool progress;
   do {
      progress = false;
      OPT(nir_lower_vars_to_ssa);
      OPT(nir_opt_copy_prop_vars);

      if (is_scalar) {
         OPT(nir_lower_alu_to_scalar);
      }

      OPT(nir_copy_prop);

      if (is_scalar) {
         OPT(nir_lower_phis_to_scalar);
      }

      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
      OPT(nir_opt_peephole_select, 0);
      OPT(nir_opt_intrinsics);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);
      OPT(nir_opt_dead_cf);
      if (OPT(nir_opt_trivial_continues)) {
         /* If nir_opt_trivial_continues makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(nir_copy_prop);
         OPT(nir_opt_dce);
      }
      OPT(nir_opt_if);
      if (nir->options->max_unroll_iterations != 0) {
         OPT(nir_opt_loop_unroll, indirect_mask);
      }
      OPT(nir_opt_remove_phis);
      OPT(nir_opt_undef);
      OPT(nir_lower_doubles, nir_lower_drcp |
                             nir_lower_dsqrt |
                             nir_lower_drsq |
                             nir_lower_dtrunc |
                             nir_lower_dfloor |
                             nir_lower_dceil |
                             nir_lower_dfract |
                             nir_lower_dround_even |
                             nir_lower_dmod);
      OPT(nir_lower_64bit_pack);
   } while (progress);

   return nir;
}

/* Does some simple lowering and runs the standard suite of optimizations
 *
 * This is intended to be called more-or-less directly after you get the
 * shader out of GLSL or some other source.  While it is geared towards i965,
 * it is not at all generator-specific except for the is_scalar flag.  Even
 * there, it is safe to call with is_scalar = false for a shader that is
 * intended for the FS backend as long as nir_optimize is called again with
 * is_scalar = true to scalarize everything prior to code gen.
 */
nir_shader *
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   UNUSED bool progress; /* Written by OPT */

   const bool is_scalar = compiler->scalar_stage[nir->info.stage];

   if (nir->info.stage == MESA_SHADER_GEOMETRY)
      OPT(nir_lower_gs_intrinsics);

   /* See also brw_nir_trig_workarounds.py */
   if (compiler->precise_trig &&
       !(devinfo->gen >= 10 || devinfo->is_kabylake))
      OPT(brw_nir_apply_trig_workarounds);

   static const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
      .lower_txf_offset = true,
      .lower_rect_offset = true,
      .lower_txd_cube_map = true,
   };

   OPT(nir_lower_tex, &tex_options);
   OPT(nir_normalize_cubemap_coords);

   OPT(nir_lower_global_vars_to_local);

   OPT(nir_split_var_copies);

   /* Run opt_algebraic before int64 lowering so we can hopefully get rid
    * of some int64 instructions.
    */
   OPT(nir_opt_algebraic);

   /* Lower int64 instructions before nir_optimize so that loop unrolling
    * sees their actual cost.
    */
   nir_lower_int64(nir, nir_lower_imul64 |
                        nir_lower_isign64 |
                        nir_lower_divmod64);

   nir = brw_nir_optimize(nir, compiler, is_scalar);

   if (is_scalar) {
      OPT(nir_lower_load_const_to_scalar);
   }

   /* Lower a bunch of stuff */
   OPT(nir_lower_var_copies);

   OPT(nir_lower_system_values);

   const nir_lower_subgroups_options subgroups_options = {
      .subgroup_size = nir->info.stage == MESA_SHADER_COMPUTE ? 32 :
                       nir->info.stage == MESA_SHADER_FRAGMENT ? 16 : 8,
      .ballot_bit_size = 32,
      .lower_to_scalar = true,
      .lower_subgroup_masks = true,
      .lower_vote_trivial = !is_scalar,
   };
   OPT(nir_lower_subgroups, &subgroups_options);

   OPT(nir_lower_clip_cull_distance_arrays);

   nir_variable_mode indirect_mask =
      brw_nir_no_indirect_mask(compiler, nir->info.stage);
   nir_lower_indirect_derefs(nir, indirect_mask);

   /* Get rid of split copies */
   nir = brw_nir_optimize(nir, compiler, is_scalar);

   OPT(nir_remove_dead_variables, nir_var_local);

   return nir;
}

void
brw_nir_link_shaders(const struct brw_compiler *compiler,
                     nir_shader **producer, nir_shader **consumer)
{
   NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
   NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);

   if (nir_remove_unused_varyings(*producer, *consumer)) {
      NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(*consumer, nir_lower_global_vars_to_local);

      /* The backend might not be able to handle indirects on
       * temporaries so we need to lower indirects on any of the
       * varyings we have demoted here.
       */
      NIR_PASS_V(*producer, nir_lower_indirect_derefs,
                 brw_nir_no_indirect_mask(compiler, (*producer)->info.stage));
      NIR_PASS_V(*consumer, nir_lower_indirect_derefs,
                 brw_nir_no_indirect_mask(compiler, (*consumer)->info.stage));

      const bool p_is_scalar =
         compiler->scalar_stage[(*producer)->info.stage];
      *producer = brw_nir_optimize(*producer, compiler, p_is_scalar);

      const bool c_is_scalar =
         compiler->scalar_stage[(*consumer)->info.stage];
      *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar);
   }
}

/* Prepare the given shader for codegen
 *
 * This function is intended to be called right before going into the actual
 * backend and is highly backend-specific.  Also, once this function has been
 * called on a shader, it will no longer be in SSA form so most optimizations
 * will not work.
 */
nir_shader *
brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
                    bool is_scalar)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   bool debug_enabled =
      (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->info.stage));

   UNUSED bool progress; /* Written by OPT */

   OPT(nir_opt_algebraic_before_ffma);

   nir = brw_nir_optimize(nir, compiler, is_scalar);

   if (devinfo->gen >= 6) {
      /* Try and fuse multiply-adds */
      OPT(brw_nir_opt_peephole_ffma);
   }

   OPT(nir_opt_algebraic_late);

   OPT(nir_lower_to_source_mods);
   OPT(nir_copy_prop);
   OPT(nir_opt_dce);
   OPT(nir_opt_move_comparisons);

   OPT(nir_lower_locals_to_regs);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_function(function, nir) {
         if (function->impl)
            nir_index_ssa_defs(function->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->info.stage));
      nir_print_shader(nir, stderr);
   }

   OPT(nir_convert_from_ssa, true);

   if (!is_scalar) {
      OPT(nir_move_vec_src_uses_to_dest);
      OPT(nir_lower_vec_to_movs);
   }

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (devinfo->gen <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   nir_sweep(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->info.stage));
      nir_print_shader(nir, stderr);
   }

   return nir;
}
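
/* Apply the texture lowering the sampler key demands: rectangle lowering,
 * GL_CLAMP saturation, swizzle faking, shadow txd lowering, and YUV
 * external image lowering.  If nir_lower_tex changed anything, the shader
 * is re-optimized.
 */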
nir_shader *
brw_nir_apply_sampler_key(nir_shader *nir,
                          const struct brw_compiler *compiler,
                          const struct brw_sampler_prog_key_data *key_tex,
                          bool is_scalar)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   nir_lower_tex_options tex_options = { 0 };

   /* Iron Lake and prior require lowering of all rectangle textures */
   if (devinfo->gen < 6)
      tex_options.lower_rect = true;

   /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
   if (devinfo->gen < 8) {
      tex_options.saturate_s = key_tex->gl_clamp_mask[0];
      tex_options.saturate_t = key_tex->gl_clamp_mask[1];
      tex_options.saturate_r = key_tex->gl_clamp_mask[2];
   }

   /* Prior to Haswell, we have to fake texture swizzle */
   for (unsigned s = 0; s < MAX_SAMPLERS; s++) {
      if (key_tex->swizzles[s] == SWIZZLE_NOOP)
         continue;

      tex_options.swizzle_result |= (1 << s);
      for (unsigned c = 0; c < 4; c++)
         tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
   }

   /* Prior to Haswell, we have to lower gradients on shadow samplers */
   tex_options.lower_txd_shadow = devinfo->gen < 8 && !devinfo->is_haswell;

   tex_options.lower_y_uv_external = key_tex->y_uv_image_mask;
   tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
   tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;
   tex_options.lower_xy_uxvx_external = key_tex->xy_uxvx_image_mask;

   if (nir_lower_tex(nir, &tex_options)) {
      nir_validate_shader(nir);
      nir = brw_nir_optimize(nir, compiler, is_scalar);
   }

   return nir;
}
enum brw_reg_type
brw_type_for_nir_type(const struct gen_device_info *devinfo, nir_alu_type type)
{
   switch (type) {
   case nir_type_uint:
   case nir_type_uint32:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
   case nir_type_bool32:
   case nir_type_int32:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
   case nir_type_float32:
      return BRW_REGISTER_TYPE_F;
   case nir_type_float16:
      return BRW_REGISTER_TYPE_HF;
   case nir_type_float64:
      return BRW_REGISTER_TYPE_DF;
   case nir_type_int64:
      return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_Q;
   case nir_type_uint64:
      return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_UQ;
   case nir_type_int16:
      return BRW_REGISTER_TYPE_W;
   case nir_type_uint16:
      return BRW_REGISTER_TYPE_UW;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}

/* Returns the glsl_base_type corresponding to a nir_alu_type.
 * This is used by both brw_vec4_nir and brw_fs_nir.
 */
enum glsl_base_type
brw_glsl_base_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_float:
   case nir_type_float32:
      return GLSL_TYPE_FLOAT;

   case nir_type_float16:
      return GLSL_TYPE_FLOAT16;

   case nir_type_float64:
      return GLSL_TYPE_DOUBLE;

   case nir_type_int:
   case nir_type_int32:
      return GLSL_TYPE_INT;

   case nir_type_uint:
   case nir_type_uint32:
      return GLSL_TYPE_UINT;

   case nir_type_int16:
      return GLSL_TYPE_INT16;

   case nir_type_uint16:
      return GLSL_TYPE_UINT16;

   default:
      unreachable("bad type");
   }
}