i965: Avoid recalculating the tessellation VUE map for IO lowering.
author     Kenneth Graunke <kenneth@whitecape.org>
           Thu, 25 Feb 2016 06:34:51 +0000 (22:34 -0800)
committer  Kenneth Graunke <kenneth@whitecape.org>
           Fri, 26 Feb 2016 23:55:59 +0000 (15:55 -0800)
The caller already computes it.  Now that we have stage-specific
lowering functions, it's easy to pass it in rather than recompute it.
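
For reference, the resulting flow in brw_compile_tes, condensed from the
diff below (the brw_nir_apply_sampler_key and brw_postprocess_nir calls
are omitted here): the caller computes the input VUE map once and hands
the lowering pass a pointer to it.

   struct brw_vue_map input_vue_map;
   brw_compute_tess_vue_map(&input_vue_map,
                            nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
                            nir->info.patch_inputs_read);

   /* The pass now borrows the caller's map instead of rebuilding it. */
   brw_nir_lower_tes_inputs(nir, &input_vue_map);

The TCS side follows the same pattern, passing &vue_prog_data->vue_map
to brw_nir_lower_tcs_outputs.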

Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Iago Toral Quiroga <itoral@igalia.com>
src/mesa/drivers/dri/i965/brw_nir.c
src/mesa/drivers/dri/i965/brw_nir.h
src/mesa/drivers/dri/i965/brw_shader.cpp
src/mesa/drivers/dri/i965/brw_vec4_tcs.cpp

index 2bd6c4ed57c6c1b86aaa986772d95fe24b18674a..90c4f6687673e214d5310be1cb9050bc8831a2fe 100644 (file)
@@ -149,7 +149,7 @@ remap_inputs_with_vue_map(nir_block *block, void *closure)
 
 struct remap_patch_urb_offsets_state {
    nir_builder b;
-   struct brw_vue_map vue_map;
+   const struct brw_vue_map *vue_map;
 };
 
 static bool
@@ -167,7 +167,7 @@ remap_patch_urb_offsets(nir_block *block, void *closure)
 
       if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
           (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
-         int vue_slot = state->vue_map.varying_to_slot[intrin->const_index[0]];
+         int vue_slot = state->vue_map->varying_to_slot[intrin->const_index[0]];
          assert(vue_slot != -1);
          intrin->const_index[0] = vue_slot;
 
@@ -176,7 +176,7 @@ remap_patch_urb_offsets(nir_block *block, void *closure)
             nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
             if (const_vertex) {
                intrin->const_index[0] += const_vertex->u[0] *
-                                         state->vue_map.num_per_vertex_slots;
+                                         state->vue_map->num_per_vertex_slots;
             } else {
                state->b.cursor = nir_before_instr(&intrin->instr);
 
@@ -185,7 +185,7 @@ remap_patch_urb_offsets(nir_block *block, void *closure)
                   nir_imul(&state->b,
                            nir_ssa_for_src(&state->b, *vertex, 1),
                            nir_imm_int(&state->b,
-                                       state->vue_map.num_per_vertex_slots));
+                                       state->vue_map->num_per_vertex_slots));
 
                /* Add it to the existing offset */
                nir_src *offset = nir_get_io_offset_src(intrin);
@@ -298,12 +298,10 @@ brw_nir_lower_vue_inputs(nir_shader *nir,
 }
 
 void
-brw_nir_lower_tes_inputs(nir_shader *nir)
+brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
 {
    struct remap_patch_urb_offsets_state state;
-   brw_compute_tess_vue_map(&state.vue_map,
-                            nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
-                            nir->info.patch_inputs_read);
+   state.vue_map = vue_map;
 
    foreach_list_typed(nir_variable, var, node, &nir->inputs) {
       var->data.driver_location = var->data.location;
@@ -347,11 +345,10 @@ brw_nir_lower_vue_outputs(nir_shader *nir,
 }
 
 void
-brw_nir_lower_tcs_outputs(nir_shader *nir)
+brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map)
 {
    struct remap_patch_urb_offsets_state state;
-   brw_compute_tess_vue_map(&state.vue_map, nir->info.outputs_written,
-                            nir->info.patch_outputs_written);
+   state.vue_map = vue_map;
 
    nir_foreach_variable(var, &nir->outputs) {
       var->data.driver_location = var->data.location;
index 0140f3a80bec2cd3e4f32392f40587bb5722f85b..0fbdc5fa625636d23d206cc4866e8ecedcc0d7d3 100644 (file)
@@ -91,10 +91,10 @@ void brw_nir_lower_vs_inputs(nir_shader *nir,
 void brw_nir_lower_vue_inputs(nir_shader *nir,
                               const struct brw_device_info *devinfo,
                               bool is_scalar);
-void brw_nir_lower_tes_inputs(nir_shader *nir);
+void brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue);
 void brw_nir_lower_fs_inputs(nir_shader *nir);
 void brw_nir_lower_vue_outputs(nir_shader *nir, bool is_scalar);
-void brw_nir_lower_tcs_outputs(nir_shader *nir);
+void brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue);
 void brw_nir_lower_fs_outputs(nir_shader *nir);
 
 nir_shader *brw_postprocess_nir(nir_shader *nir,
index 857a079c67b01233dac907be3726bca1b42dc1f0..dfe6afcf6d06f9c0cd39eda104632ed8576dad5e 100644 (file)
@@ -1227,10 +1227,16 @@ brw_compile_tes(const struct brw_compiler *compiler,
    const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_EVAL];
 
    nir_shader *nir = nir_shader_clone(mem_ctx, src_shader);
-   nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
    nir->info.inputs_read = key->inputs_read;
    nir->info.patch_inputs_read = key->patch_inputs_read;
-   brw_nir_lower_tes_inputs(nir);
+
+   struct brw_vue_map input_vue_map;
+   brw_compute_tess_vue_map(&input_vue_map,
+                            nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
+                            nir->info.patch_inputs_read);
+
+   nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
+   brw_nir_lower_tes_inputs(nir, &input_vue_map);
    brw_nir_lower_vue_outputs(nir, is_scalar);
    nir = brw_postprocess_nir(nir, compiler->devinfo, is_scalar);
 
@@ -1250,11 +1256,6 @@ brw_compile_tes(const struct brw_compiler *compiler,
    /* URB entry sizes are stored as a multiple of 64 bytes. */
    prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
 
-   struct brw_vue_map input_vue_map;
-   brw_compute_tess_vue_map(&input_vue_map,
-                            nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
-                            nir->info.patch_inputs_read);
-
    bool need_patch_header = nir->info.system_values_read &
       (BITFIELD64_BIT(SYSTEM_VALUE_TESS_LEVEL_OUTER) |
        BITFIELD64_BIT(SYSTEM_VALUE_TESS_LEVEL_INNER));
index b6a759b73d142d9aa2a2299a71944113c4f5b433..53e7aef37f2ac766a0fce0da3174bffe354dd97b 100644 (file)
@@ -513,19 +513,20 @@ brw_compile_tcs(const struct brw_compiler *compiler,
    const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_CTRL];
 
    nir_shader *nir = nir_shader_clone(mem_ctx, src_shader);
-   nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
    nir->info.outputs_written = key->outputs_written;
    nir->info.patch_outputs_written = key->patch_outputs_written;
-   brw_nir_lower_vue_inputs(nir, compiler->devinfo, is_scalar);
-   brw_nir_lower_tcs_outputs(nir);
-   nir = brw_postprocess_nir(nir, compiler->devinfo, is_scalar);
-
-   prog_data->instances = DIV_ROUND_UP(nir->info.tcs.vertices_out, 2);
 
    brw_compute_tess_vue_map(&vue_prog_data->vue_map,
                             nir->info.outputs_written,
                             nir->info.patch_outputs_written);
 
+   nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
+   brw_nir_lower_vue_inputs(nir, compiler->devinfo, is_scalar);
+   brw_nir_lower_tcs_outputs(nir, &vue_prog_data->vue_map);
+   nir = brw_postprocess_nir(nir, compiler->devinfo, is_scalar);
+
+   prog_data->instances = DIV_ROUND_UP(nir->info.tcs.vertices_out, 2);
+
    /* Compute URB entry size.  The maximum allowed URB entry size is 32k.
     * That divides up as follows:
     *