turnip: rework format helpers
[mesa.git] / src / freedreno / vulkan / tu_shader.c
index 8f09fbb03b914a8513dc2b60f9a65b5c37fee44c..4a396ced9e7283afd47cb955cdef26054f506d59 100644
@@ -25,6 +25,7 @@
 
 #include "spirv/nir_spirv.h"
 #include "util/mesa-sha1.h"
+#include "nir/nir_xfb_info.h"
 
 #include "ir3/ir3_nir.h"
 
@@ -40,7 +41,9 @@ tu_spirv_to_nir(struct ir3_compiler *compiler,
    const struct spirv_to_nir_options spirv_options = {
       .frag_coord_is_sysval = true,
       .lower_ubo_ssbo_access_to_offsets = true,
-      .caps = { false },
+      .caps = {
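+         /* transform feedback is only implemented on a6xx (gpu_id >= 600) so far */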
+         .transform_feedback = compiler->gpu_id >= 600,
+      },
    };
    const nir_shader_compiler_options *nir_options =
       ir3_get_compiler_options(compiler);
@@ -80,54 +83,36 @@ tu_spirv_to_nir(struct ir3_compiler *compiler,
    return nir;
 }
 
-static void
-tu_sort_variables_by_location(struct exec_list *variables)
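+/* Look up (set, binding) in the map, adding it if missing.  The return
+ * value is a flat descriptor index: arrayed bindings occupy array_size
+ * consecutive slots, so num counts bindings while num_desc counts
+ * descriptors.
+ */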
+static unsigned
+map_add(struct tu_descriptor_map *map, int set, int binding, int value,
+        int array_size)
 {
-   struct exec_list sorted;
-   exec_list_make_empty(&sorted);
-
-   nir_foreach_variable_safe(var, variables)
-   {
-      exec_node_remove(&var->node);
-
-      /* insert the variable into the sorted list */
-      nir_variable *next = NULL;
-      nir_foreach_variable(tmp, &sorted)
-      {
-         if (var->data.location < tmp->data.location) {
-            next = tmp;
-            break;
-         }
+   unsigned index = 0;
+   for (unsigned i = 0; i < map->num; i++) {
+      if (set == map->set[i] && binding == map->binding[i]) {
+         assert(value == map->value[i]);
+         assert(array_size == map->array_size[i]);
+         return index;
       }
-      if (next)
-         exec_node_insert_node_before(&next->node, &var->node);
-      else
-         exec_list_push_tail(&sorted, &var->node);
+      index += map->array_size[i];
    }
 
-   exec_list_move_nodes_to(&sorted, variables);
-}
-
-static unsigned
-map_add(struct tu_descriptor_map *map, int set, int binding)
-{
-   unsigned index;
-   for (index = 0; index < map->num; index++) {
-      if (set == map->set[index] && binding == map->binding[index])
-         break;
-   }
+   assert(index == map->num_desc);
 
-   assert(index < ARRAY_SIZE(map->set));
+   map->set[map->num] = set;
+   map->binding[map->num] = binding;
+   map->value[map->num] = value;
+   map->array_size[map->num] = array_size;
+   map->num++;
+   map->num_desc += array_size;
 
-   map->set[index] = set;
-   map->binding[index] = binding;
-   map->num = MAX2(map->num, index + 1);
    return index;
 }
 
 static void
 lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
-                        struct tu_shader *shader)
+                        struct tu_shader *shader,
+                        const struct tu_pipeline_layout *layout)
 {
    nir_ssa_def *index = NULL;
    unsigned base_index = 0;
@@ -177,43 +162,43 @@ lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
       src->src_type = is_sampler ?
          nir_tex_src_sampler_offset :
          nir_tex_src_texture_offset;
-
-      instr->texture_array_size = array_elements;
    } else {
       nir_tex_instr_remove_src(instr, src_idx);
    }
 
-   if (array_elements > 1)
-      tu_finishme("texture/sampler array");
-
-   if (is_sampler) {
-      instr->sampler_index = map_add(&shader->sampler_map,
-                                     deref->var->data.descriptor_set,
-                                     deref->var->data.binding);
-      instr->sampler_index += base_index;
-   } else {
-      instr->texture_index = map_add(&shader->texture_map,
-                                     deref->var->data.descriptor_set,
-                                     deref->var->data.binding);
-      instr->texture_index += base_index;
-      instr->texture_array_size = array_elements;
-   }
+   uint32_t set = deref->var->data.descriptor_set;
+   uint32_t binding = deref->var->data.binding;
+   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
+   struct tu_descriptor_set_binding_layout *binding_layout =
+      &set_layout->binding[binding];
+
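+   /* The map yields the base descriptor slot for this binding; base_index
+    * adds any constant part of the array deref chain walked above.
+    */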
+   int desc_index = map_add(is_sampler ?
+                            &shader->sampler_map : &shader->texture_map,
+                            deref->var->data.descriptor_set,
+                            deref->var->data.binding,
+                            deref->var->data.index,
+                            binding_layout->array_size) + base_index;
+   if (is_sampler)
+      instr->sampler_index = desc_index;
+   else
+      instr->texture_index = desc_index;
 }
 
 static bool
-lower_sampler(nir_builder *b, nir_tex_instr *instr, struct tu_shader *shader)
+lower_sampler(nir_builder *b, nir_tex_instr *instr, struct tu_shader *shader,
+              const struct tu_pipeline_layout *layout)
 {
    int texture_idx =
       nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);
 
    if (texture_idx >= 0)
-      lower_tex_src_to_offset(b, instr, texture_idx, shader);
+      lower_tex_src_to_offset(b, instr, texture_idx, shader, layout);
 
    int sampler_idx =
       nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);
 
    if (sampler_idx >= 0)
-      lower_tex_src_to_offset(b, instr, sampler_idx, shader);
+      lower_tex_src_to_offset(b, instr, sampler_idx, shader, layout);
 
    if (texture_idx < 0 && sampler_idx < 0)
       return false;
@@ -221,59 +206,146 @@ lower_sampler(nir_builder *b, nir_tex_instr *instr, struct tu_shader *shader)
    return true;
 }
 
-static bool
-lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
-                struct tu_shader *shader)
+static void
+lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *instr,
+                         struct tu_shader *shader)
 {
-   if (instr->intrinsic == nir_intrinsic_load_push_constant) {
-      /* note: ir3 wants load_ubo, not load_uniform */
-      assert(nir_intrinsic_base(instr) == 0);
-
-      nir_intrinsic_instr *load =
-         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
-      load->num_components = instr->num_components;
-      load->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
-      load->src[1] = instr->src[0];
-      nir_ssa_dest_init(&load->instr, &load->dest,
-                        load->num_components, instr->dest.ssa.bit_size,
-                        instr->dest.ssa.name);
-      nir_builder_instr_insert(b, &load->instr);
-      nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));
+   /* note: ir3 wants load_ubo, not load_uniform */
+   assert(nir_intrinsic_base(instr) == 0);
 
-      nir_instr_remove(&instr->instr);
+   nir_intrinsic_instr *load =
+      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
 
-      return true;
-   }
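+   /* Vulkan requires push constant offsets to be multiples of 4, so a
+    * 4-byte alignment is safe to assume for the resulting load_ubo.
+    */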
+   nir_intrinsic_set_align(load, 4, 0);
 
-   if (instr->intrinsic != nir_intrinsic_vulkan_resource_index)
-      return false;
+   load->num_components = instr->num_components;
+   load->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
+   load->src[1] = instr->src[0];
+   nir_ssa_dest_init(&load->instr, &load->dest,
+                     load->num_components, instr->dest.ssa.bit_size,
+                     instr->dest.ssa.name);
+   nir_builder_instr_insert(b, &load->instr);
+   nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));
+
+   nir_instr_remove(&instr->instr);
+}
 
+static void
+lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr,
+                            struct tu_shader *shader,
+                            const struct tu_pipeline_layout *layout)
+{
    nir_const_value *const_val = nir_src_as_const_value(instr->src[0]);
-   if (!const_val || const_val->u32 != 0) {
-      tu_finishme("non-zero vulkan_resource_index array index");
-      return false;
-   }
 
-   if (nir_intrinsic_desc_type(instr) != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
-      tu_finishme("non-ubo vulkan_resource_index");
-      return false;
+   unsigned set = nir_intrinsic_desc_set(instr);
+   unsigned binding = nir_intrinsic_binding(instr);
+   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
+   struct tu_descriptor_set_binding_layout *binding_layout =
+      &set_layout->binding[binding];
+   unsigned index = 0;
+
+   switch (nir_intrinsic_desc_type(instr)) {
+   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+      if (!const_val)
+         tu_finishme("non-constant vulkan_resource_index array index");
+      /* skip index 0 which is used for push constants */
+      index = map_add(&shader->ubo_map, set, binding, 0,
+                      binding_layout->array_size) + 1;
+      index += const_val->u32;
+      break;
+   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+      if (!const_val)
+         tu_finishme("non-constant vulkan_resource_index array index");
+      index = map_add(&shader->ssbo_map, set, binding, 0,
+                      binding_layout->array_size);
+      index += const_val->u32;
+      break;
+   default:
+      tu_finishme("unsupported desc_type for vulkan_resource_index");
+      break;
    }
 
-   unsigned index = map_add(&shader->ubo_map,
-                            nir_intrinsic_desc_set(instr),
-                            nir_intrinsic_binding(instr));
-
-   b->cursor = nir_before_instr(&instr->instr);
-   /* skip index 0 because ir3 treats it differently */
    nir_ssa_def_rewrite_uses(&instr->dest.ssa,
-                            nir_src_for_ssa(nir_imm_int(b, index + 1)));
+                            nir_src_for_ssa(nir_imm_int(b, index)));
    nir_instr_remove(&instr->instr);
+}
 
-   return true;
+static void
+lower_image_deref(nir_builder *b,
+                  nir_intrinsic_instr *instr, struct tu_shader *shader,
+                  const struct tu_pipeline_layout *layout)
+{
+   nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
+   nir_variable *var = nir_deref_instr_get_variable(deref);
+
+   uint32_t set = var->data.descriptor_set;
+   uint32_t binding = var->data.binding;
+   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
+   struct tu_descriptor_set_binding_layout *binding_layout =
+      &set_layout->binding[binding];
+
+   nir_ssa_def *index = nir_imm_int(b,
+                                    map_add(&shader->image_map,
+                                            set, binding, var->data.index,
+                                            binding_layout->array_size));
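+   /* for image arrays, add the (possibly dynamic) deref index on top of
+    * the base slot */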
+   if (deref->deref_type != nir_deref_type_var) {
+      assert(deref->deref_type == nir_deref_type_array);
+      index = nir_iadd(b, index, nir_ssa_for_src(b, deref->arr.index, 1));
+   }
+   nir_rewrite_image_intrinsic(instr, index, false);
+}
+
+static bool
+lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
+                struct tu_shader *shader,
+                const struct tu_pipeline_layout *layout)
+{
+   switch (instr->intrinsic) {
+   case nir_intrinsic_load_layer_id:
+      /* TODO: remove this when layered rendering is implemented */
+      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
+                               nir_src_for_ssa(nir_imm_int(b, 0)));
+      nir_instr_remove(&instr->instr);
+      return true;
+
+   case nir_intrinsic_load_push_constant:
+      lower_load_push_constant(b, instr, shader);
+      return true;
+
+   case nir_intrinsic_vulkan_resource_index:
+      lower_vulkan_resource_index(b, instr, shader, layout);
+      return true;
+
+   case nir_intrinsic_image_deref_load:
+   case nir_intrinsic_image_deref_store:
+   case nir_intrinsic_image_deref_atomic_add:
+   case nir_intrinsic_image_deref_atomic_imin:
+   case nir_intrinsic_image_deref_atomic_umin:
+   case nir_intrinsic_image_deref_atomic_imax:
+   case nir_intrinsic_image_deref_atomic_umax:
+   case nir_intrinsic_image_deref_atomic_and:
+   case nir_intrinsic_image_deref_atomic_or:
+   case nir_intrinsic_image_deref_atomic_xor:
+   case nir_intrinsic_image_deref_atomic_exchange:
+   case nir_intrinsic_image_deref_atomic_comp_swap:
+   case nir_intrinsic_image_deref_size:
+   case nir_intrinsic_image_deref_samples:
+   case nir_intrinsic_image_deref_load_param_intel:
+   case nir_intrinsic_image_deref_load_raw_intel:
+   case nir_intrinsic_image_deref_store_raw_intel:
+      lower_image_deref(b, instr, shader, layout);
+      return true;
+
+   default:
+      return false;
+   }
 }
 
 static bool
-lower_impl(nir_function_impl *impl, struct tu_shader *shader)
+lower_impl(nir_function_impl *impl, struct tu_shader *shader,
+           const struct tu_pipeline_layout *layout)
 {
    nir_builder b;
    nir_builder_init(&b, impl);
@@ -284,10 +356,10 @@ lower_impl(nir_function_impl *impl, struct tu_shader *shader)
          b.cursor = nir_before_instr(instr);
          switch (instr->type) {
          case nir_instr_type_tex:
-            progress |= lower_sampler(&b, nir_instr_as_tex(instr), shader);
+            progress |= lower_sampler(&b, nir_instr_as_tex(instr), shader, layout);
             break;
          case nir_instr_type_intrinsic:
-            progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader);
+            progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader, layout);
             break;
          default:
             break;
@@ -299,22 +371,72 @@ lower_impl(nir_function_impl *impl, struct tu_shader *shader)
 }
 
 static bool
-tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader)
+tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader,
+            const struct tu_pipeline_layout *layout)
 {
    bool progress = false;
 
    nir_foreach_function(function, shader) {
       if (function->impl)
-         progress |= lower_impl(function->impl, tu_shader);
+         progress |= lower_impl(function->impl, tu_shader, layout);
    }
 
+   /* spirv_to_nir produces num_ssbos equal to the number of SSBO-containing
+    * variables, while ir3 wants the number of descriptors (like the gallium
+    * path).
+    */
+   shader->info.num_ssbos = tu_shader->ssbo_map.num_desc;
+
    return progress;
 }
 
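+/* Translate nir_xfb_info into the ir3_stream_output_info that ir3 consumes.
+ * nir identifies outputs by VARYING_SLOT_*, while ir3 wants compact
+ * register indices, hence the output_map built below.
+ */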
+static void
+tu_gather_xfb_info(nir_shader *nir, struct tu_shader *shader)
+{
+   struct ir3_stream_output_info *info = &shader->ir3_shader.stream_output;
+   nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);
+
+   if (!xfb)
+      return;
+
+   /* create a map from the VARYING_SLOT_* enums to consecutive indices */
+   uint8_t num_outputs = 0;
+   uint64_t outputs_written = 0;
+   for (int i = 0; i < xfb->output_count; i++)
+      outputs_written |= BITFIELD64_BIT(xfb->outputs[i].location);
+
+   uint8_t output_map[VARYING_SLOT_TESS_MAX];
+   memset(output_map, 0, sizeof(output_map));
+
+   for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
+      if (outputs_written & BITFIELD64_BIT(attr))
+         output_map[attr] = num_outputs++;
+   }
+
+   assert(xfb->output_count < IR3_MAX_SO_OUTPUTS);
+   info->num_outputs = xfb->output_count;
+
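+   /* nir_xfb_info stores strides and offsets in bytes; ir3 wants dwords */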
+   for (int i = 0; i < IR3_MAX_SO_BUFFERS; i++)
+      info->stride[i] = xfb->buffers[i].stride / 4;
+
+   for (int i = 0; i < xfb->output_count; i++) {
+      info->output[i].register_index = output_map[xfb->outputs[i].location];
+      info->output[i].start_component = xfb->outputs[i].component_offset;
+      info->output[i].num_components =
+                           util_bitcount(xfb->outputs[i].component_mask);
+      info->output[i].output_buffer = xfb->outputs[i].buffer;
+      info->output[i].dst_offset = xfb->outputs[i].offset / 4;
+      info->output[i].stream = xfb->buffer_to_stream[xfb->outputs[i].buffer];
+   }
+
+   ralloc_free(xfb);
+}
+
 struct tu_shader *
 tu_shader_create(struct tu_device *dev,
                  gl_shader_stage stage,
                  const VkPipelineShaderStageCreateInfo *stage_info,
+                 struct tu_pipeline_layout *layout,
                  const VkAllocationCallbacks *alloc)
 {
    const struct tu_shader_module *module =
@@ -345,7 +467,7 @@ tu_shader_create(struct tu_device *dev,
    }
 
    /* multi step inlining procedure */
-   NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
+   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
    NIR_PASS_V(nir, nir_lower_returns);
    NIR_PASS_V(nir, nir_inline_functions);
    NIR_PASS_V(nir, nir_opt_deref);
@@ -354,7 +476,7 @@ tu_shader_create(struct tu_device *dev,
          exec_node_remove(&func->node);
    }
    assert(exec_list_length(&nir->functions) == 1);
-   NIR_PASS_V(nir, nir_lower_constant_initializers, ~nir_var_function_temp);
+   NIR_PASS_V(nir, nir_lower_variable_initializers, ~nir_var_function_temp);
 
    /* Split member structs.  We do this before lower_io_to_temporaries so that
     * it doesn't lower system values to temporaries by accident.
@@ -365,6 +487,16 @@ tu_shader_create(struct tu_device *dev,
    NIR_PASS_V(nir, nir_remove_dead_variables,
               nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);
 
+   /* Gather information for transform feedback.
+    * This should be called after nir_split_per_member_structs.
+    * It also needs to run after nir_remove_dead_variables with varyings,
+    * so that the stream outputs are gathered correctly.
+    */
+   if (nir->info.stage == MESA_SHADER_VERTEX ||
+         nir->info.stage == MESA_SHADER_TESS_EVAL ||
+         nir->info.stage == MESA_SHADER_GEOMETRY)
+      tu_gather_xfb_info(nir, shader);
+
    NIR_PASS_V(nir, nir_propagate_invariant);
 
    NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
@@ -379,25 +511,7 @@ tu_shader_create(struct tu_device *dev,
    /* ir3 doesn't support indirect input/output */
    NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out);
 
-   switch (stage) {
-   case MESA_SHADER_VERTEX:
-      tu_sort_variables_by_location(&nir->outputs);
-      break;
-   case MESA_SHADER_TESS_CTRL:
-   case MESA_SHADER_TESS_EVAL:
-   case MESA_SHADER_GEOMETRY:
-      tu_sort_variables_by_location(&nir->inputs);
-      tu_sort_variables_by_location(&nir->outputs);
-      break;
-   case MESA_SHADER_FRAGMENT:
-      tu_sort_variables_by_location(&nir->inputs);
-      break;
-   case MESA_SHADER_COMPUTE:
-      break;
-   default:
-      unreachable("invalid gl_shader_stage");
-      break;
-   }
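+   /* split I/O arrays into scalar elements before locations are assigned
+    * below, so each element gets its own slot */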
+   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
 
    nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs, stage);
    nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs, stage);
@@ -405,7 +519,10 @@ tu_shader_create(struct tu_device *dev,
    NIR_PASS_V(nir, nir_lower_system_values);
    NIR_PASS_V(nir, nir_lower_frexp);
 
-   NIR_PASS_V(nir, tu_lower_io, shader);
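+   /* lower input attachment loads to texture ops; the boolean asks the
+    * pass to address them with the gl_FragCoord system value */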
+   if (stage == MESA_SHADER_FRAGMENT)
+      NIR_PASS_V(nir, nir_lower_input_attachments, true);
+
+   NIR_PASS_V(nir, tu_lower_io, shader, layout);
 
    NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);
 
@@ -419,8 +536,6 @@ tu_shader_create(struct tu_device *dev,
       NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
    }
 
-   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
-
    nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
 
    /* num_uniforms only used by ir3 for size of ubo 0 (push constants) */
@@ -461,9 +576,21 @@ tu_shader_compile_options_init(
    struct tu_shader_compile_options *options,
    const VkGraphicsPipelineCreateInfo *pipeline_info)
 {
-   *options = (struct tu_shader_compile_options) {
-      /* TODO ir3_key */
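+   /* ir3_shader_key::has_gs records whether the pipeline includes a
+    * geometry stage, which can affect how the other stages are compiled.
+    */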
+   bool has_gs = false;
+   if (pipeline_info) {
+      for (uint32_t i = 0; i < pipeline_info->stageCount; i++) {
+         if (pipeline_info->pStages[i].stage == VK_SHADER_STAGE_GEOMETRY_BIT) {
+            has_gs = true;
+            break;
+         }
+      }
+   }
 
+   *options = (struct tu_shader_compile_options) {
+      /* TODO: Populate the remaining fields of ir3_shader_key. */
+      .key = {
+         .has_gs = has_gs,
+      },
       /* TODO: VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT
       * some optimizations need to happen, otherwise the shader might not compile
        */