struct remap_patch_urb_offsets_state {
   nir_builder b;
-   struct brw_vue_map vue_map;
+   const struct brw_vue_map *vue_map;
};
static bool
if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
(stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
- int vue_slot = state->vue_map.varying_to_slot[intrin->const_index[0]];
+ int vue_slot = state->vue_map->varying_to_slot[intrin->const_index[0]];
assert(vue_slot != -1);
intrin->const_index[0] = vue_slot;
nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
if (const_vertex) {
intrin->const_index[0] += const_vertex->u[0] *
- state->vue_map.num_per_vertex_slots;
+ state->vue_map->num_per_vertex_slots;
} else {
state->b.cursor = nir_before_instr(&intrin->instr);
nir_imul(&state->b,
nir_ssa_for_src(&state->b, *vertex, 1),
nir_imm_int(&state->b,
- state->vue_map.num_per_vertex_slots));
+ state->vue_map->num_per_vertex_slots));
/* Add it to the existing offset */
nir_src *offset = nir_get_io_offset_src(intrin);
}
void
-brw_nir_lower_tes_inputs(nir_shader *nir)
+brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
   struct remap_patch_urb_offsets_state state;
-   brw_compute_tess_vue_map(&state.vue_map,
-                            nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
-                            nir->info.patch_inputs_read);
+   state.vue_map = vue_map;
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }
void
-brw_nir_lower_tcs_outputs(nir_shader *nir)
+brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
   struct remap_patch_urb_offsets_state state;
-   brw_compute_tess_vue_map(&state.vue_map, nir->info.outputs_written,
-                            nir->info.patch_outputs_written);
+   state.vue_map = vue_map;
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location = var->data.location;
void brw_nir_lower_vue_inputs(nir_shader *nir,
                              const struct brw_device_info *devinfo,
                              bool is_scalar);
-void brw_nir_lower_tes_inputs(nir_shader *nir);
+void brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue);
void brw_nir_lower_fs_inputs(nir_shader *nir);
void brw_nir_lower_vue_outputs(nir_shader *nir, bool is_scalar);
-void brw_nir_lower_tcs_outputs(nir_shader *nir);
+void brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue);
void brw_nir_lower_fs_outputs(nir_shader *nir);
nir_shader *brw_postprocess_nir(nir_shader *nir,
   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_EVAL];
   nir_shader *nir = nir_shader_clone(mem_ctx, src_shader);
-   nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
   nir->info.inputs_read = key->inputs_read;
   nir->info.patch_inputs_read = key->patch_inputs_read;
-   brw_nir_lower_tes_inputs(nir);
+
+   struct brw_vue_map input_vue_map;
+   brw_compute_tess_vue_map(&input_vue_map,
+                            nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
+                            nir->info.patch_inputs_read);
+
+   nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
+   brw_nir_lower_tes_inputs(nir, &input_vue_map);
   brw_nir_lower_vue_outputs(nir, is_scalar);
   nir = brw_postprocess_nir(nir, compiler->devinfo, is_scalar);
   /* URB entry sizes are stored as a multiple of 64 bytes. */
   prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
-   struct brw_vue_map input_vue_map;
-   brw_compute_tess_vue_map(&input_vue_map,
-                            nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
-                            nir->info.patch_inputs_read);
-
   bool need_patch_header = nir->info.system_values_read &
      (BITFIELD64_BIT(SYSTEM_VALUE_TESS_LEVEL_OUTER) |
       BITFIELD64_BIT(SYSTEM_VALUE_TESS_LEVEL_INNER));
   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_CTRL];
   nir_shader *nir = nir_shader_clone(mem_ctx, src_shader);
-   nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
   nir->info.outputs_written = key->outputs_written;
   nir->info.patch_outputs_written = key->patch_outputs_written;
-   brw_nir_lower_vue_inputs(nir, compiler->devinfo, is_scalar);
-   brw_nir_lower_tcs_outputs(nir);
-   nir = brw_postprocess_nir(nir, compiler->devinfo, is_scalar);
-
-   prog_data->instances = DIV_ROUND_UP(nir->info.tcs.vertices_out, 2);
   brw_compute_tess_vue_map(&vue_prog_data->vue_map,
                            nir->info.outputs_written,
                            nir->info.patch_outputs_written);
+   nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
+   brw_nir_lower_vue_inputs(nir, compiler->devinfo, is_scalar);
+   brw_nir_lower_tcs_outputs(nir, &vue_prog_data->vue_map);
+   nir = brw_postprocess_nir(nir, compiler->devinfo, is_scalar);
+
+   prog_data->instances = DIV_ROUND_UP(nir->info.tcs.vertices_out, 2);
+
   /* Compute URB entry size. The maximum allowed URB entry size is 32k.
    * That divides up as follows:
    *