#include "common/gen_debug.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
+#include "util/u_math.h"
static bool
is_input(nir_intrinsic_instr *intrin)
void
brw_nir_lower_vs_inputs(nir_shader *nir,
- bool use_legacy_snorm_formula,
const uint8_t *vs_attrib_wa_flags)
{
/* Start with the location of the variable's base. */
add_const_offset_to_base(nir, nir_var_shader_in);
- brw_nir_apply_attribute_workarounds(nir, use_legacy_snorm_formula,
- vs_attrib_wa_flags);
+ brw_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);
/* The last step is to remap VERT_ATTRIB_* to actual registers */
*/
const bool has_sgvs =
nir->info.system_values_read &
- (BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX) |
+ (BITFIELD64_BIT(SYSTEM_VALUE_FIRST_VERTEX) |
BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE) |
BITFIELD64_BIT(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) |
BITFIELD64_BIT(SYSTEM_VALUE_INSTANCE_ID));
- const unsigned num_inputs = _mesa_bitcount_64(nir->info.inputs_read);
+ const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);
nir_foreach_function(function, nir) {
if (!function->impl)
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
switch (intrin->intrinsic) {
- case nir_intrinsic_load_base_vertex:
+ case nir_intrinsic_load_first_vertex:
case nir_intrinsic_load_base_instance:
case nir_intrinsic_load_vertex_id_zero_base:
case nir_intrinsic_load_instance_id:
+ case nir_intrinsic_load_is_indexed_draw:
case nir_intrinsic_load_draw_id: {
b.cursor = nir_after_instr(&intrin->instr);
nir_intrinsic_set_base(load, num_inputs);
switch (intrin->intrinsic) {
- case nir_intrinsic_load_base_vertex:
+ case nir_intrinsic_load_first_vertex:
nir_intrinsic_set_component(load, 0);
break;
case nir_intrinsic_load_base_instance:
nir_intrinsic_set_component(load, 3);
break;
case nir_intrinsic_load_draw_id:
- /* gl_DrawID is stored right after gl_VertexID and friends
- * if any of them exist.
+ case nir_intrinsic_load_is_indexed_draw:
+ /* gl_DrawID and IsIndexedDraw are stored right after
+ * gl_VertexID and friends if any of them exist.
*/
nir_intrinsic_set_base(load, num_inputs + has_sgvs);
- nir_intrinsic_set_component(load, 0);
+ if (intrin->intrinsic == nir_intrinsic_load_draw_id)
+ nir_intrinsic_set_component(load, 0);
+ else
+ nir_intrinsic_set_component(load, 1);
break;
default:
unreachable("Invalid system value intrinsic");
* before it and counting the bits.
*/
int attr = nir_intrinsic_base(intrin);
- int slot = _mesa_bitcount_64(nir->info.inputs_read &
+ int slot = util_bitcount64(nir->info.inputs_read &
BITFIELD64_MASK(attr));
nir_intrinsic_set_base(intrin, slot);
break;
}
void
-brw_nir_lower_vue_outputs(nir_shader *nir,
- bool is_scalar)
+brw_nir_lower_vue_outputs(nir_shader *nir)
{
nir_foreach_variable(var, &nir->outputs) {
var->data.driver_location = var->data.location;
nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
}
-void
-brw_nir_lower_cs_shared(nir_shader *nir)
-{
- nir_assign_var_locations(&nir->shared, &nir->num_shared,
- type_size_scalar_bytes);
- nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes, 0);
-}
-
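+/* Run a NIR pass on the function-local "nir" shader. The statement
+ * expression evaluates to whether the pass reported progress; NIR_PASS also
+ * re-validates the shader afterwards in debug builds.
+ */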
#define OPT(pass, ...) ({ \
bool this_progress = false; \
NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
this_progress; \
})
-nir_shader *
-brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
- bool is_scalar)
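+/* Build the mask of variable modes for which the given stage cannot handle
+ * indirect addressing. The mask is used both to lower indirect derefs and
+ * to tell nir_opt_loop_unroll which loops must be unrolled.
+ */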
+static nir_variable_mode
+brw_nir_no_indirect_mask(const struct brw_compiler *compiler,
+ gl_shader_stage stage)
{
nir_variable_mode indirect_mask = 0;
- if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectInput)
+
+ if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
indirect_mask |= nir_var_shader_in;
- if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectOutput)
+ if (compiler->glsl_compiler_options[stage].EmitNoIndirectOutput)
indirect_mask |= nir_var_shader_out;
- if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectTemp)
- indirect_mask |= nir_var_local;
+ if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
+ indirect_mask |= nir_var_function_temp;
+
+ return indirect_mask;
+}
+
+nir_shader *
+brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
+ bool is_scalar, bool allow_copies)
+{
+ nir_variable_mode indirect_mask =
+ brw_nir_no_indirect_mask(compiler, nir->info.stage);
bool progress;
do {
progress = false;
+ OPT(nir_split_array_vars, nir_var_function_temp);
+ OPT(nir_shrink_vec_array_vars, nir_var_function_temp);
+ OPT(nir_opt_deref);
OPT(nir_lower_vars_to_ssa);
+ if (allow_copies) {
+ /* Only run this pass in the first call to brw_nir_optimize. Later
+ * calls assume that we've lowered away any copy_deref instructions
+ * and we don't want to introduce any more.
+ */
+ OPT(nir_opt_find_array_copies);
+ }
OPT(nir_opt_copy_prop_vars);
+ OPT(nir_opt_dead_write_vars);
+ OPT(nir_opt_combine_stores, nir_var_all);
if (is_scalar) {
OPT(nir_lower_alu_to_scalar);
OPT(nir_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
- OPT(nir_opt_peephole_select, 0);
+ OPT(nir_opt_combine_stores, nir_var_all);
+
+ /* Passing 0 to the peephole select pass causes it to convert
+ * if-statements that contain only move instructions in the branches,
+ * regardless of the count.
+ *
+ * Passing 1 to the peephole select pass causes it to convert
+ * if-statements that contain at most a single ALU instruction (total)
+ * in both branches. Before Gen6, some math instructions were
+ * prohibitively expensive and the results of compare operations needed
+ * an extra resolve step. For these reasons, letting the pass speculate
+ * such instructions is more harmful than good on those platforms.
+ *
+ * For indirect loads of uniforms (push constants), we assume that array
+ * indices will nearly always be in bounds and the cost of the load is
+ * low, so there shouldn't be a performance benefit to avoiding them.
+ * However, in vec4 tessellation shaders these loads actually pull from
+ * memory, so we disallow them there.
+ */
+ const bool is_vec4_tessellation = !is_scalar &&
+ (nir->info.stage == MESA_SHADER_TESS_CTRL ||
+ nir->info.stage == MESA_SHADER_TESS_EVAL);
+ OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
+ OPT(nir_opt_peephole_select, 1, !is_vec4_tessellation,
+ compiler->devinfo->gen >= 6);
+
OPT(nir_opt_intrinsics);
+ OPT(nir_opt_idiv_const, 32);
OPT(nir_opt_algebraic);
OPT(nir_opt_constant_folding);
OPT(nir_opt_dead_cf);
OPT(nir_copy_prop);
OPT(nir_opt_dce);
}
- OPT(nir_opt_if);
+ OPT(nir_opt_if, false);
if (nir->options->max_unroll_iterations != 0) {
OPT(nir_opt_loop_unroll, indirect_mask);
}
OPT(nir_opt_remove_phis);
OPT(nir_opt_undef);
- OPT(nir_lower_doubles, nir_lower_drcp |
- nir_lower_dsqrt |
- nir_lower_drsq |
- nir_lower_dtrunc |
- nir_lower_dfloor |
- nir_lower_dceil |
- nir_lower_dfract |
- nir_lower_dround_even |
- nir_lower_dmod);
- OPT(nir_lower_64bit_pack);
+ OPT(nir_lower_pack);
} while (progress);
+ /* Work around Gfxbench's unused local sampler variable, which would
+ * otherwise trigger an assert in the opt_large_constants pass.
+ */
+ OPT(nir_remove_dead_variables, nir_var_function_temp);
+
return nir;
}
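+
+/* Callback for nir_lower_bit_size below: 16-bit integer division and
+ * modulus are widened to 32 bits; the pass inserts the conversions around
+ * the widened operation.
+ */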
+static unsigned
+lower_bit_size_callback(const nir_alu_instr *alu, UNUSED void *data)
+{
+ assert(alu->dest.dest.is_ssa);
+ if (alu->dest.dest.ssa.bit_size != 16)
+ return 0;
+
+ switch (alu->op) {
+ case nir_op_idiv:
+ case nir_op_imod:
+ case nir_op_irem:
+ case nir_op_udiv:
+ case nir_op_umod:
+ return 32;
+ default:
+ return 0;
+ }
+}
+
/* Does some simple lowering and runs the standard suite of optimizations
*
* This is intended to be called more-or-less directly after you get the
* is_scalar = true to scalarize everything prior to code gen.
*/
nir_shader *
-brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
+brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
+ const nir_shader *softfp64)
{
const struct gen_device_info *devinfo = compiler->devinfo;
UNUSED bool progress; /* Written by OPT */
const bool is_scalar = compiler->scalar_stage[nir->info.stage];
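+ /* For stages compiled by the scalar back-end, scalarize ALU operations up
+ * front so the passes below work on the same form the back-end consumes.
+ */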
+ if (is_scalar) {
+ OPT(nir_lower_alu_to_scalar);
+ }
+
if (nir->info.stage == MESA_SHADER_GEOMETRY)
OPT(nir_lower_gs_intrinsics);
.lower_txf_offset = true,
.lower_rect_offset = true,
.lower_txd_cube_map = true,
+ .lower_txb_shadow_clamp = true,
+ .lower_txd_shadow_clamp = true,
+ .lower_txd_offset_clamp = true,
+ .lower_tg4_offsets = true,
};
OPT(nir_lower_tex, &tex_options);
OPT(nir_lower_global_vars_to_local);
OPT(nir_split_var_copies);
+ OPT(nir_split_struct_vars, nir_var_function_temp);
+
+ nir = brw_nir_optimize(nir, compiler, is_scalar, true);
+
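+ /* Iterate 64-bit lowering to a fixed point: each round of int64/double
+ * lowering (plus the algebraic pass) can expose new 64-bit operations for
+ * the next round to handle.
+ */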
+ bool lowered_64bit_ops = false;
+ do {
+ progress = false;
- nir = brw_nir_optimize(nir, compiler, is_scalar);
+ OPT(nir_lower_int64, nir->options->lower_int64_options);
+ OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
+
+ /* Necessary to lower add -> sub and div -> mul/rcp */
+ OPT(nir_opt_algebraic);
+
+ lowered_64bit_ops |= progress;
+ } while (progress);
+
+ /* This needs to be run after the first optimization pass but before we
+ * lower indirect derefs away
+ */
+ if (compiler->supports_shader_constants) {
+ OPT(nir_opt_large_constants, NULL, 32);
+ }
+
+ OPT(nir_lower_bit_size, lower_bit_size_callback, NULL);
if (is_scalar) {
OPT(nir_lower_load_const_to_scalar);
OPT(nir_lower_system_values);
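+ /* The back-end exposes a fixed logical subgroup size of 32
+ * (BRW_SUBGROUP_SIZE) regardless of dispatch width, so 32-bit ballot
+ * masks are sufficient.
+ */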
const nir_lower_subgroups_options subgroups_options = {
+ .subgroup_size = BRW_SUBGROUP_SIZE,
+ .ballot_bit_size = 32,
.lower_to_scalar = true,
.lower_subgroup_masks = true,
.lower_vote_trivial = !is_scalar,
+ .lower_shuffle = true,
};
OPT(nir_lower_subgroups, &subgroups_options);
OPT(nir_lower_clip_cull_distance_arrays);
- nir_variable_mode indirect_mask = 0;
- if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectInput)
- indirect_mask |= nir_var_shader_in;
- if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectOutput)
- indirect_mask |= nir_var_shader_out;
- if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectTemp)
- indirect_mask |= nir_var_local;
+ nir_variable_mode indirect_mask =
+ brw_nir_no_indirect_mask(compiler, nir->info.stage);
+ OPT(nir_lower_indirect_derefs, indirect_mask);
- nir_lower_indirect_derefs(nir, indirect_mask);
-
- nir_lower_int64(nir, nir_lower_imul64 |
- nir_lower_isign64 |
- nir_lower_divmod64);
+ /* Lower array derefs of vectors for SSBO and UBO loads. For both UBOs and
+ * SSBOs, our back-end is capable of loading an entire vec4 at a time and
+ * we would like to take advantage of that whenever possible regardless of
+ * whether or not the app gives us full loads. This should allow the
+ * optimizer to combine UBO and SSBO load operations and save us some send
+ * messages.
+ */
+ OPT(nir_lower_array_deref_of_vec,
+ nir_var_mem_ubo | nir_var_mem_ssbo,
+ nir_lower_direct_array_deref_of_vec_load);
/* Get rid of split copies */
- nir = brw_nir_optimize(nir, compiler, is_scalar);
-
- OPT(nir_remove_dead_variables, nir_var_local);
+ nir = brw_nir_optimize(nir, compiler, is_scalar, false);
return nir;
}
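+
+/* Cross-stage linking optimizations, run on a producer/consumer pair before
+ * either shader is compiled: unused and dead varyings are removed, constant
+ * outputs are folded into the consumer, and the remaining I/O is vectorized
+ * for the back-end.
+ */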
+void
+brw_nir_link_shaders(const struct brw_compiler *compiler,
+ nir_shader **producer, nir_shader **consumer)
+{
+ nir_lower_io_arrays_to_elements(*producer, *consumer);
+ nir_validate_shader(*producer, "after nir_lower_io_arrays_to_elements");
+ nir_validate_shader(*consumer, "after nir_lower_io_arrays_to_elements");
+
+ const bool p_is_scalar =
+ compiler->scalar_stage[(*producer)->info.stage];
+ const bool c_is_scalar =
+ compiler->scalar_stage[(*consumer)->info.stage];
+
+ if (p_is_scalar && c_is_scalar) {
+ NIR_PASS_V(*producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
+ NIR_PASS_V(*consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
+ *producer = brw_nir_optimize(*producer, compiler, p_is_scalar, false);
+ *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
+ }
+
+ if (nir_link_opt_varyings(*producer, *consumer))
+ *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
+
+ NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
+ NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);
+
+ if (nir_remove_unused_varyings(*producer, *consumer)) {
+ NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
+ NIR_PASS_V(*consumer, nir_lower_global_vars_to_local);
+
+ /* The backend might not be able to handle indirects on
+ * temporaries so we need to lower indirects on any of the
+ * varyings we have demoted here.
+ */
+ NIR_PASS_V(*producer, nir_lower_indirect_derefs,
+ brw_nir_no_indirect_mask(compiler, (*producer)->info.stage));
+ NIR_PASS_V(*consumer, nir_lower_indirect_derefs,
+ brw_nir_no_indirect_mask(compiler, (*consumer)->info.stage));
+
+ *producer = brw_nir_optimize(*producer, compiler, p_is_scalar, false);
+ *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar, false);
+ }
+
+ NIR_PASS_V(*producer, nir_lower_io_to_vector, nir_var_shader_out);
+ NIR_PASS_V(*producer, nir_opt_combine_stores, nir_var_shader_out);
+ NIR_PASS_V(*consumer, nir_lower_io_to_vector, nir_var_shader_in);
+
+ if ((*producer)->info.stage != MESA_SHADER_TESS_CTRL) {
+ /* Calling lower_io_to_vector creates output variable writes with
+ * write-masks. On non-TCS outputs the back-end can't handle these, so we
+ * call nir_lower_io_to_temporaries to get rid of them. This, in turn,
+ * creates temporary variables and extra copy_deref intrinsics that we
+ * need to clean up.
+ */
+ NIR_PASS_V(*producer, nir_lower_io_to_temporaries,
+ nir_shader_get_entrypoint(*producer), true, false);
+ NIR_PASS_V(*producer, nir_lower_global_vars_to_local);
+ NIR_PASS_V(*producer, nir_split_var_copies);
+ NIR_PASS_V(*producer, nir_lower_var_copies);
+ }
+}
+
/* Prepare the given shader for codegen
*
* This function is intended to be called right before going into the actual
UNUSED bool progress; /* Written by OPT */
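+ /* Split or widen memory accesses so that every load/store uses a bit size
+ * and vector width the back-end's messages can handle.
+ */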
+ OPT(brw_nir_lower_mem_access_bit_sizes);
do {
progress = false;
OPT(nir_opt_algebraic_before_ffma);
} while (progress);
- nir = brw_nir_optimize(nir, compiler, is_scalar);
+ nir = brw_nir_optimize(nir, compiler, is_scalar, false);
if (devinfo->gen >= 6) {
/* Try and fuse multiply-adds */
OPT(brw_nir_opt_peephole_ffma);
}
+ if (OPT(nir_opt_comparison_pre)) {
+ OPT(nir_copy_prop);
+ OPT(nir_opt_dce);
+ OPT(nir_opt_cse);
+
+ /* Do the select peephole again. nir_opt_comparison_pre (combined with
+ * the other optimization passes) will have removed at least one
+ * instruction from one of the branches of the if-statement, so now it
+ * might be under the threshold of conversion to bcsel.
+ *
+ * See brw_nir_optimize for the explanation of is_vec4_tessellation.
+ */
+ const bool is_vec4_tessellation = !is_scalar &&
+ (nir->info.stage == MESA_SHADER_TESS_CTRL ||
+ nir->info.stage == MESA_SHADER_TESS_EVAL);
+ OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
+ OPT(nir_opt_peephole_select, 1, !is_vec4_tessellation,
+ compiler->devinfo->gen >= 6);
+ }
+
OPT(nir_opt_algebraic_late);
- OPT(nir_lower_to_source_mods);
+ OPT(nir_lower_to_source_mods, nir_lower_all_source_mods);
OPT(nir_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_move_comparisons);
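+ /* NIR booleans are 1-bit; the back-end expects 32-bit 0/~0 values, so
+ * lower them before leaving SSA form.
+ */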
+ OPT(nir_lower_bool_to_int32);
+
OPT(nir_lower_locals_to_regs);
if (unlikely(debug_enabled)) {
OPT(nir_lower_vec_to_movs);
}
+ OPT(nir_opt_dce);
+
/* This is the last pass we run before we start emitting stuff. It
* determines when we need to insert boolean resolves on Gen <= 5. We
* run it last because it stashes data in instr->pass_flags and we don't
bool is_scalar)
{
const struct gen_device_info *devinfo = compiler->devinfo;
- nir_lower_tex_options tex_options = { 0 };
+ nir_lower_tex_options tex_options = {
+ .lower_txd_clamp_if_sampler_index_not_lt_16 = true,
+ };
/* Iron Lake and prior require lowering of all rectangle textures */
if (devinfo->gen < 6)
tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;
tex_options.lower_xy_uxvx_external = key_tex->xy_uxvx_image_mask;
+ tex_options.lower_ayuv_external = key_tex->ayuv_image_mask;
+ tex_options.lower_xyuv_external = key_tex->xyuv_image_mask;
+
+ /* Set up the array of scaling factors for each texture. */
+ memcpy(&tex_options.scale_factors, &key_tex->scale_factors,
+ sizeof(tex_options.scale_factors));
if (nir_lower_tex(nir, &tex_options)) {
- nir_validate_shader(nir);
- nir = brw_nir_optimize(nir, compiler, is_scalar);
+ nir_validate_shader(nir, "after nir_lower_tex");
+ nir = brw_nir_optimize(nir, compiler, is_scalar, false);
}
return nir;
case nir_type_float:
case nir_type_float32:
return BRW_REGISTER_TYPE_F;
+ case nir_type_float16:
+ return BRW_REGISTER_TYPE_HF;
case nir_type_float64:
return BRW_REGISTER_TYPE_DF;
case nir_type_int64:
return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_Q;
case nir_type_uint64:
return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_UQ;
+ case nir_type_int16:
+ return BRW_REGISTER_TYPE_W;
+ case nir_type_uint16:
+ return BRW_REGISTER_TYPE_UW;
+ case nir_type_int8:
+ return BRW_REGISTER_TYPE_B;
+ case nir_type_uint8:
+ return BRW_REGISTER_TYPE_UB;
default:
unreachable("unknown type");
}
case nir_type_float32:
return GLSL_TYPE_FLOAT;
+ case nir_type_float16:
+ return GLSL_TYPE_FLOAT16;
+
case nir_type_float64:
return GLSL_TYPE_DOUBLE;
case nir_type_uint32:
return GLSL_TYPE_UINT;
+ case nir_type_int16:
+ return GLSL_TYPE_INT16;
+
+ case nir_type_uint16:
+ return GLSL_TYPE_UINT16;
+
default:
unreachable("bad type");
}
}
+
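+/* Build a pass-through tessellation control shader: the patch URB header
+ * (which carries the tessellation levels) is filled from the eight uniform
+ * dwords declared below as hdr_0/hdr_1, and every per-vertex input is
+ * copied unchanged to the matching output.
+ */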
+nir_shader *
+brw_nir_create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compiler,
+ const nir_shader_compiler_options *options,
+ const struct brw_tcs_prog_key *key)
+{
+ nir_builder b;
+ nir_builder_init_simple_shader(&b, mem_ctx, MESA_SHADER_TESS_CTRL,
+ options);
+ nir_shader *nir = b.shader;
+ nir_variable *var;
+ nir_intrinsic_instr *load;
+ nir_intrinsic_instr *store;
+ nir_ssa_def *zero = nir_imm_int(&b, 0);
+ nir_ssa_def *invoc_id = nir_load_invocation_id(&b);
+
+ nir->info.inputs_read = key->outputs_written &
+ ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
+ nir->info.outputs_written = key->outputs_written;
+ nir->info.tess.tcs_vertices_out = key->input_vertices;
+ nir->info.name = ralloc_strdup(nir, "passthrough");
+ nir->num_uniforms = 8 * sizeof(uint32_t);
+
+ var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_0");
+ var->data.location = 0;
+ var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_1");
+ var->data.location = 1;
+
+ /* Write the patch URB header. */
+ for (int i = 0; i <= 1; i++) {
+ load = nir_intrinsic_instr_create(nir, nir_intrinsic_load_uniform);
+ load->num_components = 4;
+ load->src[0] = nir_src_for_ssa(zero);
+ nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
+ nir_intrinsic_set_base(load, i * 4 * sizeof(uint32_t));
+ nir_builder_instr_insert(&b, &load->instr);
+
+ store = nir_intrinsic_instr_create(nir, nir_intrinsic_store_output);
+ store->num_components = 4;
+ store->src[0] = nir_src_for_ssa(&load->dest.ssa);
+ store->src[1] = nir_src_for_ssa(zero);
+ nir_intrinsic_set_base(store, VARYING_SLOT_TESS_LEVEL_INNER - i);
+ nir_intrinsic_set_write_mask(store, WRITEMASK_XYZW);
+ nir_builder_instr_insert(&b, &store->instr);
+ }
+
+ /* Copy inputs to outputs. */
+ uint64_t varyings = nir->info.inputs_read;
+
+ while (varyings != 0) {
+ const int varying = ffsll(varyings) - 1;
+
+ load = nir_intrinsic_instr_create(nir,
+ nir_intrinsic_load_per_vertex_input);
+ load->num_components = 4;
+ load->src[0] = nir_src_for_ssa(invoc_id);
+ load->src[1] = nir_src_for_ssa(zero);
+ nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
+ nir_intrinsic_set_base(load, varying);
+ nir_builder_instr_insert(&b, &load->instr);
+
+ store = nir_intrinsic_instr_create(nir,
+ nir_intrinsic_store_per_vertex_output);
+ store->num_components = 4;
+ store->src[0] = nir_src_for_ssa(&load->dest.ssa);
+ store->src[1] = nir_src_for_ssa(invoc_id);
+ store->src[2] = nir_src_for_ssa(zero);
+ nir_intrinsic_set_base(store, varying);
+ nir_intrinsic_set_write_mask(store, WRITEMASK_XYZW);
+ nir_builder_instr_insert(&b, &store->instr);
+
+ varyings &= ~BITFIELD64_BIT(varying);
+ }
+
+ nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");
+
+ nir = brw_preprocess_nir(compiler, nir, NULL);
+
+ return nir;
+}