anv: Get rid of backend compiler hacks for descriptor sets
author	Jason Ekstrand <jason.ekstrand@intel.com>
Thu, 15 Oct 2015 00:02:55 +0000 (17:02 -0700)
committer	Jason Ekstrand <jason.ekstrand@intel.com>
Thu, 15 Oct 2015 01:38:33 +0000 (18:38 -0700)
Now that we have anv_nir_apply_pipeline_layout, we can hand the backend
compiler intrinsics and texture instructions that use flat binding table
and sampler indices, just as it expects.  There's no longer any reason for
any of these hacks.
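
For context, that pass folds a (set, binding) pair into a single flat index
before the backend compiler ever sees the instruction; the backend then only
has to add the appropriate binding_table.*_start bias, as the hunks below
show.  The sketch that follows is illustrative only -- the struct and helper
names (flat_map, lower_ubo_index) are hypothetical and not taken from
anv_nir_apply_pipeline_layout -- but it shows the index arithmetic such a
lowering performs, assuming each descriptor set's surfaces are laid out
contiguously after a per-stage bias.

/* Illustrative sketch only: hypothetical types, not the real
 * anv_nir_apply_pipeline_layout implementation.
 */
#include <stdint.h>

struct flat_map {
   /* First flat surface index of each descriptor set, already offset by
    * the per-stage bias (e.g. MAX_RTS for the fragment stage).
    * Hypothetical layout; the array size mirrors the old bind_map[8].
    */
   uint32_t set_start[8];
};

static uint32_t
lower_ubo_index(const struct flat_map *map, uint32_t set, uint32_t binding)
{
   /* The lowering rewrites load_ubo's (set, binding) const indices into
    * this one flat value; the backend then just emits
    *    surf_index = binding_table.ubo_start + flat_index
    * exactly as the new brw_fs_nir.cpp and brw_vec4_nir.cpp hunks do.
    */
   return map->set_start[set] + binding;
}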

src/mesa/drivers/dri/i965/brw_context.h
src/mesa/drivers/dri/i965/brw_fs_nir.cpp
src/mesa/drivers/dri/i965/brw_vec4_nir.cpp
src/vulkan/anv_compiler.cpp

src/mesa/drivers/dri/i965/brw_context.h
index a66c61e58f351e00a649cada2ca32ab30c7a381b..aa1284db3ce5bab9772c5f683c26ba97032faf85 100644
@@ -358,12 +358,6 @@ struct brw_stage_prog_data {
       /** @} */
    } binding_table;
 
-   uint32_t *map_entries;
-   struct {
-      uint32_t index_count;
-      uint32_t *index;
-   } bind_map[8]; /* MAX_SETS from vulkan/private.h */
-
    GLuint nr_params;       /**< number of float params/constants */
    GLuint nr_pull_params;
    unsigned nr_image_params;
src/mesa/drivers/dri/i965/brw_fs_nir.cpp
index 883b8cbf3e725b71ca7377786fe52f9e7004bbcb..45c3f4ef3b452dd0352ec712cd2b41f7cd6ea781 100644
@@ -1419,22 +1419,13 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       has_indirect = true;
       /* fallthrough */
    case nir_intrinsic_load_ubo: {
-      uint32_t set = instr->const_index[0];
       nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
       fs_reg surf_index;
 
       if (const_index) {
-         uint32_t binding = const_index->u[0];
-
-         /* FIXME: We should probably assert here, but dota2 seems to hit
-          * it and we'd like to keep going.
-          */
-         if (binding >= stage_prog_data->bind_map[set].index_count)
-            binding = 0;
-
-         surf_index = fs_reg(stage_prog_data->bind_map[set].index[binding]);
+         surf_index = fs_reg(stage_prog_data->binding_table.ubo_start +
+                             const_index->u[0]);
       } else {
-         assert(0 && "need more info from the ir for this.");
          /* The block index is not a constant. Evaluate the index expression
           * per-channel and add the base UBO index; we have to select a value
           * from any live channel.
@@ -1459,7 +1450,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
                                      BRW_REGISTER_TYPE_D),
                  fs_reg(2));
 
-         unsigned vec4_offset = instr->const_index[1] / 4;
+         unsigned vec4_offset = instr->const_index[0] / 4;
          for (int i = 0; i < instr->num_components; i++)
             VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
                                        base_offset, vec4_offset + i);
@@ -1467,7 +1458,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
          fs_reg packed_consts = vgrf(glsl_type::float_type);
          packed_consts.type = dest.type;
 
-         fs_reg const_offset_reg((unsigned) instr->const_index[1] & ~15);
+         fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
          bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
                   surf_index, const_offset_reg);
 
@@ -1921,13 +1912,7 @@ fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
 void
 fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
 {
-   uint32_t set = instr->sampler_set;
-   uint32_t binding = instr->sampler_index;
-
-   assert(binding < stage_prog_data->bind_map[set].index_count);
-   assert(stage_prog_data->bind_map[set].index[binding] < 1000);
-
-   unsigned sampler = stage_prog_data->bind_map[set].index[binding];
+   unsigned sampler = instr->sampler_index;
    fs_reg sampler_reg(sampler);
 
    int gather_component = instr->component;
src/mesa/drivers/dri/i965/brw_vec4_nir.cpp
index 37a74df6d7174c0a159f06b74f972bf64a5b7cd2..41bd80df37701e073ea2c1e253345cfe90438c60 100644
@@ -738,19 +738,17 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
       has_indirect = true;
       /* fallthrough */
    case nir_intrinsic_load_ubo: {
-      const uint32_t set = instr->const_index[0];
       nir_const_value *const_block_index = nir_src_as_const_value(instr->src[0]);
       src_reg surf_index;
 
       dest = get_nir_dest(instr->dest);
 
       if (const_block_index) {
-         uint32_t binding = const_block_index->u[0];
-
          /* The block index is a constant, so just emit the binding table entry
           * as an immediate.
           */
-         surf_index = src_reg(stage_prog_data->bind_map[set].index[binding]);
+         surf_index = src_reg(prog_data->base.binding_table.ubo_start +
+                              const_block_index->u[0]);
       } else {
          /* The block index is not a constant. Evaluate the index expression
           * per-channel and add the base UBO index; we have to select a value
@@ -770,7 +768,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
                                nir->info.num_ssbos - 1);
       }
 
-      unsigned const_offset = instr->const_index[1];
+      unsigned const_offset = instr->const_index[0];
       src_reg offset;
 
       if (!has_indirect)  {
src/vulkan/anv_compiler.cpp
index 34023a7369f45fad4e5685206892b086fdc759a5..a3b8d1cc80cd9b2f7bc67e6932e13f9e899667ed 100644
@@ -72,41 +72,16 @@ static VkResult
 set_binding_table_layout(struct brw_stage_prog_data *prog_data,
                          struct anv_pipeline *pipeline, uint32_t stage)
 {
-   uint32_t bias, count, k, *map;
-   struct anv_pipeline_layout *layout = pipeline->layout;
-
-   /* No layout is valid for shaders that don't bind any resources. */
-   if (pipeline->layout == NULL)
-      return VK_SUCCESS;
-
+   unsigned bias;
    if (stage == VK_SHADER_STAGE_FRAGMENT)
       bias = MAX_RTS;
    else
       bias = 0;
 
-   count = layout->stage[stage].surface_count;
-   prog_data->map_entries =
-      (uint32_t *) malloc(count * sizeof(prog_data->map_entries[0]));
-   if (prog_data->map_entries == NULL)
-      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
-   k = bias;
-   map = prog_data->map_entries;
-   for (uint32_t set = 0; set < layout->num_sets; set++) {
-      prog_data->bind_map[set].index = map;
-      unsigned index_count = 0;
-      for (uint32_t b = 0; b < layout->set[set].layout->binding_count; b++) {
-         if (layout->set[set].layout->binding[b].stage[stage].surface_index < 0)
-            continue;
-
-         unsigned array_size = layout->set[set].layout->binding[b].array_size;
-         for (uint32_t i = 0; i < array_size; i++)
-            *map++ = k++;
-         index_count += array_size;
-      }
-
-      prog_data->bind_map[set].index_count = index_count;
-   }
+   prog_data->binding_table.size_bytes = 0;
+   prog_data->binding_table.texture_start = bias;
+   prog_data->binding_table.ubo_start = bias;
+   prog_data->binding_table.image_start = bias;
 
    return VK_SUCCESS;
 }
@@ -1400,7 +1375,6 @@ anv_compiler_free(struct anv_pipeline *pipeline)
 {
    for (uint32_t stage = 0; stage < VK_SHADER_STAGE_NUM; stage++) {
       if (pipeline->prog_data[stage]) {
-         free(pipeline->prog_data[stage]->map_entries);
          /* We only ever set up the params array because we don't do
           * non-UBO pull constants
           */