/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_nir.h"
#include "brw_shader.h"
#include "dev/gen_debug.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"
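/* remap_tess_levels() moves loads/stores of gl_TessLevelInner/Outer from
 * their varying locations into the fixed tessellation-factor DWords of the
 * patch URB header.  Summarizing the per-case comments below: inner levels
 * live in DWords 3-2 (quads, reversed) or DWord 4 (triangles), and outer
 * levels in DWords 7-4/7-5 (quads/triangles, reversed) or DWords 6-7
 * (isolines, in order).  Components that do not exist for the current
 * domain are treated as out of bounds and replaced with undefs or removed.
 */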
static bool
remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
                  GLenum primitive_mode)
{
   const int location = nir_intrinsic_base(intr);
   const unsigned component = nir_intrinsic_component(intr);
   bool out_of_bounds;

   if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
      switch (primitive_mode) {
      case GL_QUADS:
         /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
         nir_intrinsic_set_base(intr, 0);
         nir_intrinsic_set_component(intr, 3 - component);
         out_of_bounds = false;
         break;
      case GL_TRIANGLES:
         /* gl_TessLevelInner[0] lives at DWord 4. */
         nir_intrinsic_set_base(intr, 1);
         out_of_bounds = component > 0;
         break;
      case GL_ISOLINES:
         out_of_bounds = true;
         break;
      default:
         unreachable("Bogus tessellation domain");
      }
   } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
      if (primitive_mode == GL_ISOLINES) {
         /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
         nir_intrinsic_set_base(intr, 1);
         nir_intrinsic_set_component(intr, 2 + nir_intrinsic_component(intr));
         out_of_bounds = component > 1;
      } else {
         /* Triangles use DWords 7-5 (reversed); Quads use 7-4 (reversed) */
         nir_intrinsic_set_base(intr, 1);
         nir_intrinsic_set_component(intr, 3 - nir_intrinsic_component(intr));
         out_of_bounds = component == 3 && primitive_mode == GL_TRIANGLES;
      }
   } else {
      return false;
   }

   if (out_of_bounds) {
      if (nir_intrinsic_infos[intr->intrinsic].has_dest) {
         b->cursor = nir_before_instr(&intr->instr);
         nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(undef));
      }
      nir_instr_remove(&intr->instr);
   }

   return true;
}
static bool
is_input(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
          intrin->intrinsic == nir_intrinsic_load_interpolated_input;
}

static bool
is_output(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}
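/* Remap load/store intrinsics for TCS outputs and TES inputs from
 * gl_varying_slot numbering to the patch URB layout described by the VUE
 * map: the intrinsic base becomes the VUE slot, and for per-vertex accesses
 * the vertex index is folded into the offset (statically when it is a
 * constant, otherwise via an imul/iadd emitted in front of the intrinsic).
 */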
static void
remap_patch_urb_offsets(nir_block *block, nir_builder *b,
                        const struct brw_vue_map *vue_map,
                        GLenum tes_primitive_mode)
{
   const bool is_passthrough_tcs = b->shader->info.name &&
      strcmp(b->shader->info.name, "passthrough") == 0;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      gl_shader_stage stage = b->shader->info.stage;

      if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
          (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {

         if (!is_passthrough_tcs &&
             remap_tess_levels(b, intrin, tes_primitive_mode))
            continue;

         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
         assert(vue_slot != -1);
         intrin->const_index[0] = vue_slot;

         nir_src *vertex = nir_get_io_vertex_index_src(intrin);
         if (vertex) {
            if (nir_src_is_const(*vertex)) {
               intrin->const_index[0] += nir_src_as_uint(*vertex) *
                                         vue_map->num_per_vertex_slots;
            } else {
               b->cursor = nir_before_instr(&intrin->instr);

               /* Multiply by the number of per-vertex slots. */
               nir_ssa_def *vertex_offset =
                  nir_imul(b,
                           nir_ssa_for_src(b, *vertex, 1),
                           nir_imm_int(b,
                                       vue_map->num_per_vertex_slots));

               /* Add it to the existing offset */
               nir_src *offset = nir_get_io_offset_src(intrin);
               nir_ssa_def *total_offset =
                  nir_iadd(b, vertex_offset,
                           nir_ssa_for_src(b, *offset, 1));

               nir_instr_rewrite_src(&intrin->instr, offset,
                                     nir_src_for_ssa(total_offset));
            }
         }
      }
   }
}
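/* Lower VS inputs to the layout the hardware vertex fetcher provides.  As a
 * worked example (a shader reading N user attributes plus gl_VertexID and
 * gl_DrawID): the attributes are packed into slots 0..N-1 in gl_vert_attrib
 * order, the SGV vec4 holding FirstVertex/BaseInstance/VertexIDZeroBase/
 * InstanceID becomes slot N (components 0-3), and gl_DrawID/IsIndexedDraw
 * land in the following slot (components 0-1).  The loop below rewrites the
 * corresponding system-value intrinsics into load_input at those locations.
 */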
void
brw_nir_lower_vs_inputs(nir_shader *nir,
                        const uint8_t *vs_attrib_wa_flags)
{
   /* Start with the location of the variable's base. */
   nir_foreach_shader_in_variable(var, nir)
      var->data.driver_location = var->data.location;

   /* Now use nir_lower_io to walk dereference chains.  Attribute arrays are
    * loaded as one vec4 or dvec4 per element (or matrix column), depending on
    * whether it is a double-precision type or not.
    */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);

   brw_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);

   /* The last step is to remap VERT_ATTRIB_* to actual registers */

   /* Whether or not we have any system generated values.  gl_DrawID is not
    * included here as it lives in its own vec4.
    */
   const bool has_sgvs =
      nir->info.system_values_read &
      (BITFIELD64_BIT(SYSTEM_VALUE_FIRST_VERTEX) |
       BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
       BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
       BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID));

   const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);

   nir_foreach_function(function, nir) {
      if (!function->impl)
         continue;

      nir_builder b;
      nir_builder_init(&b, function->impl);

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

            switch (intrin->intrinsic) {
            case nir_intrinsic_load_first_vertex:
            case nir_intrinsic_load_base_instance:
            case nir_intrinsic_load_vertex_id_zero_base:
            case nir_intrinsic_load_instance_id:
            case nir_intrinsic_load_is_indexed_draw:
            case nir_intrinsic_load_draw_id: {
               b.cursor = nir_after_instr(&intrin->instr);

               /* gl_VertexID and friends are stored by the VF as the last
                * vertex element.  We convert them to load_input intrinsics at
                * the right location.
                */
               nir_intrinsic_instr *load =
                  nir_intrinsic_instr_create(nir, nir_intrinsic_load_input);
               load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));

               nir_intrinsic_set_base(load, num_inputs);
               switch (intrin->intrinsic) {
               case nir_intrinsic_load_first_vertex:
                  nir_intrinsic_set_component(load, 0);
                  break;
               case nir_intrinsic_load_base_instance:
                  nir_intrinsic_set_component(load, 1);
                  break;
               case nir_intrinsic_load_vertex_id_zero_base:
                  nir_intrinsic_set_component(load, 2);
                  break;
               case nir_intrinsic_load_instance_id:
                  nir_intrinsic_set_component(load, 3);
                  break;
               case nir_intrinsic_load_draw_id:
               case nir_intrinsic_load_is_indexed_draw:
                  /* gl_DrawID and IsIndexedDraw are stored right after
                   * gl_VertexID and friends if any of them exist.
                   */
                  nir_intrinsic_set_base(load, num_inputs + has_sgvs);
                  if (intrin->intrinsic == nir_intrinsic_load_draw_id)
                     nir_intrinsic_set_component(load, 0);
                  else
                     nir_intrinsic_set_component(load, 1);
                  break;
               default:
                  unreachable("Invalid system value intrinsic");
               }

               load->num_components = 1;
               nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
               nir_builder_instr_insert(&b, &load->instr);

               nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                        nir_src_for_ssa(&load->dest.ssa));
               nir_instr_remove(&intrin->instr);
               break;
            }

            case nir_intrinsic_load_input: {
               /* Attributes come in a contiguous block, ordered by their
                * gl_vert_attrib value.  That means we can compute the slot
                * number for an attribute by masking out the enabled attributes
                * before it and counting the bits.
                */
               int attr = nir_intrinsic_base(intrin);
               int slot = util_bitcount64(nir->info.inputs_read &
                                          BITFIELD64_MASK(attr));
               nir_intrinsic_set_base(intrin, slot);
               break;
            }

            default:
               break; /* Nothing to do */
            }
         }
      }
   }
}
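/* Stages that read a VUE written by an earlier stage (e.g. a GS reading VS
 * outputs) have their varying locations remapped to VUE slots here.
 * gl_PointSize is special-cased because it lives in the .w component of the
 * VUE header (slot 0) rather than in a slot of its own.
 */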
void
brw_nir_lower_vue_inputs(nir_shader *nir,
                         const struct brw_vue_map *vue_map)
{
   nir_foreach_shader_in_variable(var, nir)
      var->data.driver_location = var->data.location;

   /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);

   nir_foreach_function(function, nir) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

            if (intrin->intrinsic == nir_intrinsic_load_input ||
                intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
               /* Offset 0 is the VUE header, which contains
                * VARYING_SLOT_LAYER [.y], VARYING_SLOT_VIEWPORT [.z], and
                * VARYING_SLOT_PSIZ [.w].
                */
               int varying = nir_intrinsic_base(intrin);
               int vue_slot;
               switch (varying) {
               case VARYING_SLOT_PSIZ:
                  nir_intrinsic_set_base(intrin, 0);
                  nir_intrinsic_set_component(intrin, 3);
                  break;

               default:
                  vue_slot = vue_map->varying_to_slot[varying];
                  assert(vue_slot != -1);
                  nir_intrinsic_set_base(intrin, vue_slot);
                  break;
               }
            }
         }
      }
   }
}
void
brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
   nir_foreach_shader_in_variable(var, nir)
      var->data.driver_location = var->data.location;

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map,
                                    nir->info.tess.primitive_mode);
         }
      }
   }
}
void
brw_nir_lower_fs_inputs(nir_shader *nir,
                        const struct gen_device_info *devinfo,
                        const struct brw_wm_prog_key *key)
{
   nir_foreach_shader_in_variable(var, nir) {
      var->data.driver_location = var->data.location;

      /* Apply default interpolation mode.
       *
       * Everything defaults to smooth except for the legacy GL color
       * built-in variables, which might be flat depending on API state.
       */
      if (var->data.interpolation == INTERP_MODE_NONE) {
         const bool flat = key->flat_shade &&
            (var->data.location == VARYING_SLOT_COL0 ||
             var->data.location == VARYING_SLOT_COL1);

         var->data.interpolation = flat ? INTERP_MODE_FLAT
                                        : INTERP_MODE_SMOOTH;
      }

      /* On Ironlake and below, there is only one interpolation mode.
       * Centroid interpolation doesn't mean anything on this hardware --
       * there is no multisampling.
       */
      if (devinfo->gen < 6) {
         var->data.centroid = false;
         var->data.sample = false;
      }
   }

   nir_lower_io_options lower_io_options = nir_lower_io_lower_64bit_to_32;
   if (key->persample_interp)
      lower_io_options |= nir_lower_io_force_sample_interpolation;

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options);
   if (devinfo->gen >= 11)
      nir_lower_interpolation(nir, ~0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
}
void
brw_nir_lower_vue_outputs(nir_shader *nir)
{
   nir_foreach_shader_out_variable(var, nir) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);
}
void
brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map,
                          GLenum tes_primitive_mode)
{
   nir_foreach_shader_out_variable(var, nir) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   nir_io_add_const_offset_to_base(nir, nir_var_shader_out);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
         }
      }
   }
}
void
brw_nir_lower_fs_outputs(nir_shader *nir)
{
   nir_foreach_shader_out_variable(var, nir) {
      var->data.driver_location =
         SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
         SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
}
#define OPT(pass, ...) ({                                  \
   bool this_progress = false;                             \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
   if (this_progress)                                      \
      progress = true;                                     \
   this_progress;                                          \
})
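/* Usage sketch: OPT() both runs a pass on the local `nir` shader and
 * evaluates to whether that pass made progress, so callers can either branch
 * on a single pass or loop a pass list to a fixed point, e.g.:
 *
 *    do {
 *       progress = false;
 *       OPT(nir_opt_algebraic);
 *       OPT(nir_opt_constant_folding);
 *    } while (progress);
 *
 * (The pass pairing above is only illustrative; see brw_nir_optimize() for
 * the real list.)
 */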
static nir_variable_mode
brw_nir_no_indirect_mask(const struct brw_compiler *compiler,
                         gl_shader_stage stage)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   const bool is_scalar = compiler->scalar_stage[stage];
   nir_variable_mode indirect_mask = 0;

   switch (stage) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_FRAGMENT:
      indirect_mask |= nir_var_shader_in;
      break;

   case MESA_SHADER_GEOMETRY:
      if (!is_scalar)
         indirect_mask |= nir_var_shader_in;
      break;

   default:
      /* Everything else can handle indirect inputs */
      break;
   }

   if (is_scalar && stage != MESA_SHADER_TESS_CTRL)
      indirect_mask |= nir_var_shader_out;

   /* On HSW+, we allow indirects in scalar shaders.  They get implemented
    * using nir_lower_vars_to_explicit_types and nir_lower_explicit_io in
    * brw_postprocess_nir.
    *
    * We haven't plumbed through the indirect scratch messages on gen6 or
    * earlier so doing indirects via scratch doesn't work there.  On gen7 and
    * earlier the scratch space size is limited to 12kB.  If we allowed
    * indirects as scratch all the time, we may easily exceed this limit
    * without having any fallback.
    */
   if (is_scalar && devinfo->gen <= 7 && !devinfo->is_haswell)
      indirect_mask |= nir_var_function_temp;

   return indirect_mask;
}
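/* Core NIR optimization loop shared by the pre- and post-processing entry
 * points below: every OPT() call records whether its pass changed anything,
 * and the do/while keeps re-running the whole list until no pass reports
 * progress.  is_scalar selects between the scalar (FS) and vec4 backends;
 * allow_copies is only true for the first invocation, before copy_deref
 * instructions have been lowered away.
 */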
void
brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
                 bool is_scalar, bool allow_copies)
{
   nir_variable_mode loop_indirect_mask =
      brw_nir_no_indirect_mask(compiler, nir->info.stage);

   /* We can handle indirects via scratch messages.  However, they are
    * expensive so we'd rather not if we can avoid it.  Have loop unrolling
    * try to get rid of them.
    */
   if (is_scalar)
      loop_indirect_mask |= nir_var_function_temp;

   bool progress;
   unsigned lower_flrp =
      (nir->options->lower_flrp16 ? 16 : 0) |
      (nir->options->lower_flrp32 ? 32 : 0) |
      (nir->options->lower_flrp64 ? 64 : 0);

   do {
      progress = false;
      OPT(nir_split_array_vars, nir_var_function_temp);
      OPT(nir_shrink_vec_array_vars, nir_var_function_temp);

      OPT(nir_lower_vars_to_ssa);
      if (allow_copies) {
         /* Only run this pass in the first call to brw_nir_optimize.  Later
          * calls assume that we've lowered away any copy_deref instructions
          * and we don't want to introduce any more.
          */
         OPT(nir_opt_find_array_copies);
      }
      OPT(nir_opt_copy_prop_vars);
      OPT(nir_opt_dead_write_vars);
      OPT(nir_opt_combine_stores, nir_var_all);

      if (is_scalar) {
         OPT(nir_lower_alu_to_scalar, NULL, NULL);
      } else {
         OPT(nir_opt_shrink_vectors);
      }

      OPT(nir_copy_prop);

      if (is_scalar) {
         OPT(nir_lower_phis_to_scalar);
      }

      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
      OPT(nir_opt_combine_stores, nir_var_all);

      /* Passing 0 to the peephole select pass causes it to convert
       * if-statements that contain only move instructions in the branches
       * regardless of the count.
       *
       * Passing 1 to the peephole select pass causes it to convert
       * if-statements that contain at most a single ALU instruction (total)
       * in both branches.  Before Gen6, some math instructions were
       * prohibitively expensive and the results of compare operations need an
       * extra resolve step.  For these reasons, this pass is more harmful
       * than good on those platforms.
       *
       * For indirect loads of uniforms (push constants), we assume that array
       * indices will nearly always be in bounds and the cost of the load is
       * low.  Therefore there shouldn't be a performance benefit to avoid it.
       * However, in vec4 tessellation shaders, these loads operate by
       * actually pulling from memory.
       */
      const bool is_vec4_tessellation = !is_scalar &&
         (nir->info.stage == MESA_SHADER_TESS_CTRL ||
          nir->info.stage == MESA_SHADER_TESS_EVAL);
      OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
      OPT(nir_opt_peephole_select, 8, !is_vec4_tessellation,
          compiler->devinfo->gen >= 6);

      OPT(nir_opt_intrinsics);
      OPT(nir_opt_idiv_const, 32);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);

      if (lower_flrp != 0) {
         if (OPT(nir_lower_flrp,
                 lower_flrp,
                 false /* always_precise */,
                 compiler->devinfo->gen >= 6)) {
            OPT(nir_opt_constant_folding);
         }

         /* Nothing should rematerialize any flrps, so we only need to do this
          * lowering once.
          */
         lower_flrp = 0;
      }

      OPT(nir_opt_dead_cf);
      if (OPT(nir_opt_trivial_continues)) {
         /* If nir_opt_trivial_continues makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(nir_copy_prop);
         OPT(nir_opt_dce);
      }
      OPT(nir_opt_if, false);
      OPT(nir_opt_conditional_discard);
      if (nir->options->max_unroll_iterations != 0) {
         OPT(nir_opt_loop_unroll, loop_indirect_mask);
      }
      OPT(nir_opt_remove_phis);
      OPT(nir_opt_undef);
      OPT(nir_lower_pack);
   } while (progress);

   /* Workaround Gfxbench unused local sampler variable which will trigger an
    * assert in the opt_large_constants pass.
    */
   OPT(nir_remove_dead_variables, nir_var_function_temp, NULL);
}
static unsigned
lower_bit_size_callback(const nir_alu_instr *alu, UNUSED void *data)
{
   assert(alu->dest.dest.is_ssa);
   if (alu->dest.dest.ssa.bit_size >= 32)
      return 0;

   const struct brw_compiler *compiler = (const struct brw_compiler *) data;

   switch (alu->op) {
   case nir_op_fround_even:
      return compiler->devinfo->gen < 9 ? 32 : 0;

   default:
      return 0;
   }
}
/* Does some simple lowering and runs the standard suite of optimizations
 *
 * This is intended to be called more-or-less directly after you get the
 * shader out of GLSL or some other source.  While it is geared towards i965,
 * it is not at all generator-specific except for the is_scalar flag.  Even
 * there, it is safe to call with is_scalar = false for a shader that is
 * intended for the FS backend as long as nir_optimize is called again with
 * is_scalar = true to scalarize everything prior to code gen.
 */
void
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
                   const nir_shader *softfp64)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   UNUSED bool progress; /* Written by OPT */

   const bool is_scalar = compiler->scalar_stage[nir->info.stage];

   if (is_scalar) {
      OPT(nir_lower_alu_to_scalar, NULL, NULL);
   }

   if (nir->info.stage == MESA_SHADER_GEOMETRY)
      OPT(nir_lower_gs_intrinsics, false);

   /* See also brw_nir_trig_workarounds.py */
   if (compiler->precise_trig &&
       !(devinfo->gen >= 10 || devinfo->is_kabylake))
      OPT(brw_nir_apply_trig_workarounds);

   if (devinfo->gen >= 12)
      OPT(brw_nir_clamp_image_1d_2d_array_sizes);

   static const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
      .lower_txf_offset = true,
      .lower_rect_offset = true,
      .lower_tex_without_implicit_lod = true,
      .lower_txd_cube_map = true,
      .lower_txb_shadow_clamp = true,
      .lower_txd_shadow_clamp = true,
      .lower_txd_offset_clamp = true,
      .lower_tg4_offsets = true,
   };

   OPT(nir_lower_tex, &tex_options);
   OPT(nir_normalize_cubemap_coords);

   OPT(nir_lower_global_vars_to_local);

   OPT(nir_split_var_copies);
   OPT(nir_split_struct_vars, nir_var_function_temp);

   brw_nir_optimize(nir, compiler, is_scalar, true);

   OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
   OPT(nir_lower_int64);

   OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);

   if (is_scalar) {
      OPT(nir_lower_load_const_to_scalar);
   }

   /* Lower a bunch of stuff */
   OPT(nir_lower_var_copies);

   /* This needs to be run after the first optimization pass but before we
    * lower indirect derefs away
    */
   if (compiler->supports_shader_constants) {
      OPT(nir_opt_large_constants, NULL, 32);
   }

   OPT(nir_lower_system_values);
   OPT(nir_lower_compute_system_values, NULL);

   const nir_lower_subgroups_options subgroups_options = {
      .ballot_bit_size = 32,
      .lower_to_scalar = true,
      .lower_vote_trivial = !is_scalar,
      .lower_shuffle = true,
      .lower_quad_broadcast_dynamic = true,
   };
   OPT(nir_lower_subgroups, &subgroups_options);

   OPT(nir_lower_clip_cull_distance_arrays);

   nir_variable_mode indirect_mask =
      brw_nir_no_indirect_mask(compiler, nir->info.stage);
   OPT(nir_lower_indirect_derefs, indirect_mask, UINT32_MAX);

   /* Even in cases where we can handle indirect temporaries via scratch, it
    * can still be expensive.  Lower indirects on small arrays to
    * conditional load/stores.
    *
    * The threshold of 16 was chosen semi-arbitrarily.  The idea is that an
    * indirect on an array of 16 elements is about 30 instructions at which
    * point, you may be better off doing a send.  With a SIMD8 program, 16
    * floats is 1/8 of the entire register file.  Any array larger than that
    * is likely to cause pressure issues.  Also, this value is sufficiently
    * high that the benchmarks known to suffer from large temporary array
    * issues are helped but nothing else in shader-db is hurt except for maybe
    * that one kerbal space program shader.
    */
   if (is_scalar && !(indirect_mask & nir_var_function_temp))
      OPT(nir_lower_indirect_derefs, nir_var_function_temp, 16);

   /* Lower array derefs of vectors for SSBO and UBO loads.  For both UBOs and
    * SSBOs, our back-end is capable of loading an entire vec4 at a time and
    * we would like to take advantage of that whenever possible regardless of
    * whether or not the app gives us full loads.  This should allow the
    * optimizer to combine UBO and SSBO load operations and save us some send
    * messages.
    */
   OPT(nir_lower_array_deref_of_vec,
       nir_var_mem_ubo | nir_var_mem_ssbo,
       nir_lower_direct_array_deref_of_vec_load);

   /* Get rid of split copies */
   brw_nir_optimize(nir, compiler, is_scalar, false);
}
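/* Link-time optimization across a producer/consumer shader pair: compact
 * I/O arrays, let nir_link_opt_varyings and nir_remove_unused_varyings trim
 * varyings the consumer never reads, then re-vectorize the surviving I/O.
 * Each time a linking pass reports progress, the affected shader is
 * re-optimized.
 */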
void
brw_nir_link_shaders(const struct brw_compiler *compiler,
                     nir_shader *producer, nir_shader *consumer)
{
   nir_lower_io_arrays_to_elements(producer, consumer);
   nir_validate_shader(producer, "after nir_lower_io_arrays_to_elements");
   nir_validate_shader(consumer, "after nir_lower_io_arrays_to_elements");

   const bool p_is_scalar = compiler->scalar_stage[producer->info.stage];
   const bool c_is_scalar = compiler->scalar_stage[consumer->info.stage];

   if (p_is_scalar && c_is_scalar) {
      NIR_PASS_V(producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
      NIR_PASS_V(consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
      brw_nir_optimize(producer, compiler, p_is_scalar, false);
      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
   }

   if (nir_link_opt_varyings(producer, consumer))
      brw_nir_optimize(consumer, compiler, c_is_scalar, false);

   NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
   NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);

   if (nir_remove_unused_varyings(producer, consumer)) {
      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(consumer, nir_lower_global_vars_to_local);

      /* The backend might not be able to handle indirects on
       * temporaries so we need to lower indirects on any of the
       * varyings we have demoted here.
       */
      NIR_PASS_V(producer, nir_lower_indirect_derefs,
                 brw_nir_no_indirect_mask(compiler, producer->info.stage),
                 UINT32_MAX);
      NIR_PASS_V(consumer, nir_lower_indirect_derefs,
                 brw_nir_no_indirect_mask(compiler, consumer->info.stage),
                 UINT32_MAX);

      brw_nir_optimize(producer, compiler, p_is_scalar, false);
      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
   }

   NIR_PASS_V(producer, nir_lower_io_to_vector, nir_var_shader_out);
   NIR_PASS_V(producer, nir_opt_combine_stores, nir_var_shader_out);
   NIR_PASS_V(consumer, nir_lower_io_to_vector, nir_var_shader_in);

   if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
      /* Calling lower_io_to_vector creates output variable writes with
       * write-masks.  On non-TCS outputs, the back-end can't handle it and we
       * need to call nir_lower_io_to_temporaries to get rid of them.  This,
       * in turn, creates temporary variables and extra copy_deref intrinsics
       * that we need to clean up.
       */
      NIR_PASS_V(producer, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(producer), true, false);
      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(producer, nir_split_var_copies);
      NIR_PASS_V(producer, nir_lower_var_copies);
   }
}
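/* Callback handed to nir_opt_load_store_vectorize (see
 * brw_vectorize_lower_mem_access below); it decides whether two adjacent
 * memory accesses may be merged into a single wider one.
 */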
static bool
brw_nir_should_vectorize_mem(unsigned align, unsigned bit_size,
                             unsigned num_components, unsigned high_offset,
                             nir_intrinsic_instr *low,
                             nir_intrinsic_instr *high)
{
   /* Don't combine things to generate 64-bit loads/stores.  We have to split
    * those back into 32-bit ones anyway and UBO loads aren't split in NIR so
    * we don't want to make a mess for the back-end.
    */
   if (bit_size > 32)
      return false;

   /* We can handle at most a vec4 right now.  Anything bigger would get
    * immediately split by brw_nir_lower_mem_access_bit_sizes anyway.
    */
   if (num_components > 4)
      return false;

   if (align < bit_size / 8)
      return false;

   return true;
}
static
bool combine_all_barriers(nir_intrinsic_instr *a,
                          nir_intrinsic_instr *b,
                          void *data)
{
   /* Translation to backend IR will get rid of modes we don't care about, so
    * no harm in always combining them.
    *
    * TODO: While HW has only ACQUIRE|RELEASE fences, we could improve the
    * scheduling so that it can take advantage of the different semantics.
    */
   nir_intrinsic_set_memory_modes(a, nir_intrinsic_memory_modes(a) |
                                     nir_intrinsic_memory_modes(b));
   nir_intrinsic_set_memory_semantics(a, nir_intrinsic_memory_semantics(a) |
                                         nir_intrinsic_memory_semantics(b));
   nir_intrinsic_set_memory_scope(a, MAX2(nir_intrinsic_memory_scope(a),
                                          nir_intrinsic_memory_scope(b)));
   return true;
}
static void
brw_vectorize_lower_mem_access(nir_shader *nir,
                               const struct brw_compiler *compiler,
                               bool is_scalar)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   bool progress = false;

   if (is_scalar) {
      OPT(nir_opt_load_store_vectorize,
          nir_var_mem_ubo | nir_var_mem_ssbo |
          nir_var_mem_global | nir_var_mem_shared,
          brw_nir_should_vectorize_mem,
          (nir_variable_mode)0);
   }

   OPT(brw_nir_lower_mem_access_bit_sizes, devinfo);

   while (progress) {
      progress = false;

      OPT(nir_lower_pack);
      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);
   }
}
static bool
nir_shader_has_local_variables(const nir_shader *nir)
{
   nir_foreach_function(func, nir) {
      if (func->impl && !exec_list_is_empty(&func->impl->locals))
         return true;
   }

   return false;
}
/* Prepare the given shader for codegen
 *
 * This function is intended to be called right before going into the actual
 * backend and is highly backend-specific.  Also, once this function has been
 * called on a shader, it will no longer be in SSA form so most optimizations
 * will not work.
 */
void
brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
                    bool is_scalar)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   const bool debug_enabled =
      (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->info.stage));

   UNUSED bool progress; /* Written by OPT */

   OPT(brw_nir_lower_scoped_barriers);
   OPT(nir_opt_combine_memory_barriers, combine_all_barriers, NULL);

   do {
      progress = false;
      OPT(nir_opt_algebraic_before_ffma);
   } while (progress);

   brw_nir_optimize(nir, compiler, is_scalar, false);

   if (is_scalar && nir_shader_has_local_variables(nir)) {
      OPT(nir_lower_vars_to_explicit_types, nir_var_function_temp,
          glsl_get_natural_size_align_bytes);
      OPT(nir_lower_explicit_io, nir_var_function_temp,
          nir_address_format_32bit_offset);
      brw_nir_optimize(nir, compiler, is_scalar, false);
   }

   brw_vectorize_lower_mem_access(nir, compiler, is_scalar);

   if (OPT(nir_lower_int64))
      brw_nir_optimize(nir, compiler, is_scalar, false);

   if (devinfo->gen >= 6) {
      /* Try and fuse multiply-adds */
      OPT(brw_nir_opt_peephole_ffma);
   }

   if (OPT(nir_opt_comparison_pre)) {
      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);

      /* Do the select peepehole again.  nir_opt_comparison_pre (combined with
       * the other optimization passes) will have removed at least one
       * instruction from one of the branches of the if-statement, so now it
       * might be under the threshold of conversion to bcsel.
       *
       * See brw_nir_optimize for the explanation of is_vec4_tessellation.
       */
      const bool is_vec4_tessellation = !is_scalar &&
         (nir->info.stage == MESA_SHADER_TESS_CTRL ||
          nir->info.stage == MESA_SHADER_TESS_EVAL);
      OPT(nir_opt_peephole_select, 0, is_vec4_tessellation, false);
      OPT(nir_opt_peephole_select, 1, is_vec4_tessellation,
          compiler->devinfo->gen >= 6);
   }

   do {
      progress = false;
      if (OPT(nir_opt_algebraic_late)) {
         /* At this late stage, anything that makes more constants will wreak
          * havok on the vec4 backend.  The handling of constants in the vec4
          * backend is not good.
          */
         OPT(nir_opt_constant_folding);

         OPT(nir_copy_prop);
         OPT(nir_opt_dce);
         OPT(nir_opt_cse);
      }
   } while (progress);

   OPT(brw_nir_lower_conversions);

   if (is_scalar)
      OPT(nir_lower_alu_to_scalar, NULL, NULL);

   while (OPT(nir_opt_algebraic_distribute_src_mods)) {
      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
   }

   OPT(nir_copy_prop);
   OPT(nir_opt_dce);
   OPT(nir_opt_move, nir_move_comparisons);

   OPT(nir_lower_bool_to_int32);
   OPT(nir_copy_prop);
   OPT(nir_opt_dce);

   OPT(nir_lower_locals_to_regs);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_function(function, nir) {
         if (function->impl)
            nir_index_ssa_defs(function->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->info.stage));
      nir_print_shader(nir, stderr);
   }

   OPT(nir_convert_from_ssa, true);

   if (!is_scalar) {
      OPT(nir_move_vec_src_uses_to_dest);
      OPT(nir_lower_vec_to_movs);
   }

   OPT(nir_opt_dce);

   if (OPT(nir_opt_rematerialize_compares))
      OPT(nir_opt_dce);

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (devinfo->gen <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   nir_sweep(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->info.stage));
      nir_print_shader(nir, stderr);
   }
}
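/* Fold the sampler-related bits of the program key into nir_lower_tex
 * options.  Which fixups are needed depends on the hardware generation:
 * rectangle-texture lowering before Gen6, GL_CLAMP emulation before Gen8,
 * faked texture swizzles and shadow-gradient lowering on older parts, plus
 * the YUV external-image lowerings requested by the key.
 */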
static bool
brw_nir_apply_sampler_key(nir_shader *nir,
                          const struct brw_compiler *compiler,
                          const struct brw_sampler_prog_key_data *key_tex)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   nir_lower_tex_options tex_options = {
      .lower_txd_clamp_bindless_sampler = true,
      .lower_txd_clamp_if_sampler_index_not_lt_16 = true,
   };

   /* Iron Lake and prior require lowering of all rectangle textures */
   if (devinfo->gen < 6)
      tex_options.lower_rect = true;

   /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
   if (devinfo->gen < 8) {
      tex_options.saturate_s = key_tex->gl_clamp_mask[0];
      tex_options.saturate_t = key_tex->gl_clamp_mask[1];
      tex_options.saturate_r = key_tex->gl_clamp_mask[2];
   }

   /* Prior to Haswell, we have to fake texture swizzle */
   for (unsigned s = 0; s < MAX_SAMPLERS; s++) {
      if (key_tex->swizzles[s] == SWIZZLE_NOOP)
         continue;

      tex_options.swizzle_result |= BITFIELD_BIT(s);
      for (unsigned c = 0; c < 4; c++)
         tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
   }

   /* Prior to Haswell, we have to lower gradients on shadow samplers */
   tex_options.lower_txd_shadow = devinfo->gen < 8 && !devinfo->is_haswell;

   tex_options.lower_y_uv_external = key_tex->y_uv_image_mask;
   tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
   tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;
   tex_options.lower_xy_uxvx_external = key_tex->xy_uxvx_image_mask;
   tex_options.lower_ayuv_external = key_tex->ayuv_image_mask;
   tex_options.lower_xyuv_external = key_tex->xyuv_image_mask;
   tex_options.bt709_external = key_tex->bt709_mask;
   tex_options.bt2020_external = key_tex->bt2020_mask;

   /* Setup array of scaling factors for each texture. */
   memcpy(&tex_options.scale_factors, &key_tex->scale_factors,
          sizeof(tex_options.scale_factors));

   return nir_lower_tex(nir, &tex_options);
}
static unsigned
get_subgroup_size(gl_shader_stage stage,
                  const struct brw_base_prog_key *key,
                  unsigned max_subgroup_size)
{
   switch (key->subgroup_size_type) {
   case BRW_SUBGROUP_SIZE_API_CONSTANT:
      /* We have to use the global constant size. */
      return BRW_SUBGROUP_SIZE;

   case BRW_SUBGROUP_SIZE_UNIFORM:
      /* It has to be uniform across all invocations but can vary per stage
       * if we want.  This gives us a bit more freedom.
       *
       * For compute, brw_nir_apply_key is called per-dispatch-width so this
       * is the actual subgroup size and not a maximum.  However, we only
       * invoke one size of any given compute shader so it's still guaranteed
       * to be uniform across invocations.
       */
      return max_subgroup_size;

   case BRW_SUBGROUP_SIZE_VARYING:
      /* The subgroup size is allowed to be fully varying.  For geometry
       * stages, we know it's always 8 which is max_subgroup_size so we can
       * return that.  For compute, brw_nir_apply_key is called once per
       * dispatch-width so max_subgroup_size is the real subgroup size.
       *
       * For fragment, we return 0 and let it fall through to the back-end
       * compiler.  This means we can't optimize based on subgroup size but
       * that's a risk the client took when it asked for a varying subgroup
       * size.
       */
      return stage == MESA_SHADER_FRAGMENT ? 0 : max_subgroup_size;

   case BRW_SUBGROUP_SIZE_REQUIRE_8:
   case BRW_SUBGROUP_SIZE_REQUIRE_16:
   case BRW_SUBGROUP_SIZE_REQUIRE_32:
      assert(stage == MESA_SHADER_COMPUTE);
      /* These enum values are expressly chosen to be equal to the subgroup
       * size that they require.
       */
      return key->subgroup_size_type;
   }

   unreachable("Invalid subgroup size type");
}
void
brw_nir_apply_key(nir_shader *nir,
                  const struct brw_compiler *compiler,
                  const struct brw_base_prog_key *key,
                  unsigned max_subgroup_size,
                  bool is_scalar)
{
   bool progress = false;

   OPT(brw_nir_apply_sampler_key, compiler, &key->tex);

   const nir_lower_subgroups_options subgroups_options = {
      .subgroup_size = get_subgroup_size(nir->info.stage, key,
                                         max_subgroup_size),
      .ballot_bit_size = 32,
      .lower_subgroup_masks = true,
   };
   OPT(nir_lower_subgroups, &subgroups_options);

   if (progress)
      brw_nir_optimize(nir, compiler, is_scalar, false);
}
1208 brw_cmod_for_nir_comparison(nir_op op
)
1217 return BRW_CONDITIONAL_L
;
1225 return BRW_CONDITIONAL_GE
;
1231 case nir_op_b32all_fequal2
:
1232 case nir_op_b32all_iequal2
:
1233 case nir_op_b32all_fequal3
:
1234 case nir_op_b32all_iequal3
:
1235 case nir_op_b32all_fequal4
:
1236 case nir_op_b32all_iequal4
:
1237 return BRW_CONDITIONAL_Z
;
1243 case nir_op_b32any_fnequal2
:
1244 case nir_op_b32any_inequal2
:
1245 case nir_op_b32any_fnequal3
:
1246 case nir_op_b32any_inequal3
:
1247 case nir_op_b32any_fnequal4
:
1248 case nir_op_b32any_inequal4
:
1249 return BRW_CONDITIONAL_NZ
;
1252 unreachable("Unsupported NIR comparison op");
uint32_t
brw_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
{
   switch (atomic->intrinsic) {
#define AOP_CASE(atom)                                       \
   case nir_intrinsic_image_atomic_##atom:                   \
   case nir_intrinsic_bindless_image_atomic_##atom:          \
   case nir_intrinsic_ssbo_atomic_##atom:                    \
   case nir_intrinsic_shared_atomic_##atom:                  \
   case nir_intrinsic_global_atomic_##atom

   AOP_CASE(add): {
      unsigned src_idx;
      switch (atomic->intrinsic) {
      case nir_intrinsic_image_atomic_add:
      case nir_intrinsic_bindless_image_atomic_add:
         src_idx = 3;
         break;
      case nir_intrinsic_ssbo_atomic_add:
         src_idx = 2;
         break;
      case nir_intrinsic_shared_atomic_add:
      case nir_intrinsic_global_atomic_add:
         src_idx = 1;
         break;
      default:
         unreachable("Invalid add atomic opcode");
      }

      if (nir_src_is_const(atomic->src[src_idx])) {
         int64_t add_val = nir_src_as_int(atomic->src[src_idx]);
         if (add_val == 1)
            return BRW_AOP_INC;
         else if (add_val == -1)
            return BRW_AOP_DEC;
      }

      return BRW_AOP_ADD;
   }

   AOP_CASE(imin):      return BRW_AOP_IMIN;
   AOP_CASE(umin):      return BRW_AOP_UMIN;
   AOP_CASE(imax):      return BRW_AOP_IMAX;
   AOP_CASE(umax):      return BRW_AOP_UMAX;
   AOP_CASE(and):       return BRW_AOP_AND;
   AOP_CASE(or):        return BRW_AOP_OR;
   AOP_CASE(xor):       return BRW_AOP_XOR;
   AOP_CASE(exchange):  return BRW_AOP_MOV;
   AOP_CASE(comp_swap): return BRW_AOP_CMPWR;

#undef AOP_CASE
#define AOP_CASE(atom)                                       \
   case nir_intrinsic_ssbo_atomic_##atom:                    \
   case nir_intrinsic_shared_atomic_##atom:                  \
   case nir_intrinsic_global_atomic_##atom

   AOP_CASE(fmin):       return BRW_AOP_FMIN;
   AOP_CASE(fmax):       return BRW_AOP_FMAX;
   AOP_CASE(fcomp_swap): return BRW_AOP_FCMPWR;

#undef AOP_CASE

   default:
      unreachable("Unsupported NIR atomic intrinsic");
   }
}
enum brw_reg_type
brw_type_for_nir_type(const struct gen_device_info *devinfo, nir_alu_type type)
{
   switch (type) {
   case nir_type_uint:
   case nir_type_uint32:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
   case nir_type_bool32:
   case nir_type_int32:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
   case nir_type_float32:
      return BRW_REGISTER_TYPE_F;
   case nir_type_float16:
      return BRW_REGISTER_TYPE_HF;
   case nir_type_float64:
      return BRW_REGISTER_TYPE_DF;
   case nir_type_int64:
      return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_Q;
   case nir_type_uint64:
      return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_UQ;
   case nir_type_int16:
      return BRW_REGISTER_TYPE_W;
   case nir_type_uint16:
      return BRW_REGISTER_TYPE_UW;
   case nir_type_int8:
      return BRW_REGISTER_TYPE_B;
   case nir_type_uint8:
      return BRW_REGISTER_TYPE_UB;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}
/* Returns the glsl_base_type corresponding to a nir_alu_type.
 * This is used by both brw_vec4_nir and brw_fs_nir.
 */
enum glsl_base_type
brw_glsl_base_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_float:
   case nir_type_float32:
      return GLSL_TYPE_FLOAT;

   case nir_type_float16:
      return GLSL_TYPE_FLOAT16;

   case nir_type_float64:
      return GLSL_TYPE_DOUBLE;

   case nir_type_int:
   case nir_type_int32:
      return GLSL_TYPE_INT;

   case nir_type_uint:
   case nir_type_uint32:
      return GLSL_TYPE_UINT;

   case nir_type_int16:
      return GLSL_TYPE_INT16;

   case nir_type_uint16:
      return GLSL_TYPE_UINT16;

   default:
      unreachable("bad type");
   }
}
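/* Build a minimal "passthrough" TCS for the case where no user TCS is
 * provided: the eight tessellation-factor DWords of the patch header are
 * read straight from push constants (the hdr_0/hdr_1 uniforms below) and
 * every per-vertex input is copied unchanged to the matching output for the
 * current invocation.  The resulting shader then goes through
 * brw_preprocess_nir like any other.
 */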
nir_shader *
brw_nir_create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compiler,
                               const nir_shader_compiler_options *options,
                               const struct brw_tcs_prog_key *key)
{
   nir_builder b;
   nir_builder_init_simple_shader(&b, mem_ctx, MESA_SHADER_TESS_CTRL,
                                  options);
   nir_shader *nir = b.shader;
   nir_variable *var;
   nir_intrinsic_instr *load;
   nir_intrinsic_instr *store;
   nir_ssa_def *zero = nir_imm_int(&b, 0);
   nir_ssa_def *invoc_id = nir_load_invocation_id(&b);

   nir->info.inputs_read = key->outputs_written &
      ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
   nir->info.outputs_written = key->outputs_written;
   nir->info.tess.tcs_vertices_out = key->input_vertices;
   nir->info.name = ralloc_strdup(nir, "passthrough");
   nir->num_uniforms = 8 * sizeof(uint32_t);

   var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_0");
   var->data.location = 0;
   var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_1");
   var->data.location = 1;

   /* Write the patch URB header. */
   for (int i = 0; i <= 1; i++) {
      load = nir_intrinsic_instr_create(nir, nir_intrinsic_load_uniform);
      load->num_components = 4;
      load->src[0] = nir_src_for_ssa(zero);
      nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
      nir_intrinsic_set_base(load, i * 4 * sizeof(uint32_t));
      nir_builder_instr_insert(&b, &load->instr);

      store = nir_intrinsic_instr_create(nir, nir_intrinsic_store_output);
      store->num_components = 4;
      store->src[0] = nir_src_for_ssa(&load->dest.ssa);
      store->src[1] = nir_src_for_ssa(zero);
      nir_intrinsic_set_base(store, VARYING_SLOT_TESS_LEVEL_INNER - i);
      nir_intrinsic_set_write_mask(store, WRITEMASK_XYZW);
      nir_builder_instr_insert(&b, &store->instr);
   }

   /* Copy inputs to outputs. */
   uint64_t varyings = nir->info.inputs_read;

   while (varyings != 0) {
      const int varying = ffsll(varyings) - 1;

      load = nir_intrinsic_instr_create(nir,
                                        nir_intrinsic_load_per_vertex_input);
      load->num_components = 4;
      load->src[0] = nir_src_for_ssa(invoc_id);
      load->src[1] = nir_src_for_ssa(zero);
      nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
      nir_intrinsic_set_base(load, varying);
      nir_builder_instr_insert(&b, &load->instr);

      store = nir_intrinsic_instr_create(nir,
                                         nir_intrinsic_store_per_vertex_output);
      store->num_components = 4;
      store->src[0] = nir_src_for_ssa(&load->dest.ssa);
      store->src[1] = nir_src_for_ssa(invoc_id);
      store->src[2] = nir_src_for_ssa(zero);
      nir_intrinsic_set_base(store, varying);
      nir_intrinsic_set_write_mask(store, WRITEMASK_XYZW);
      nir_builder_instr_insert(&b, &store->instr);

      varyings &= ~BITFIELD64_BIT(varying);
   }

   nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");

   brw_preprocess_nir(compiler, nir, NULL);

   return nir;
}