turnip: basic msaa working
[mesa.git] / src / freedreno / vulkan / tu_shader.c
index 62f8f91c7f3320d93cf82a37139a9b7d4e3aade7..7a05a115269f8e2641d6eba864cdc1a6fd54ff56 100644 (file)
@@ -28,7 +28,7 @@
 
 #include "ir3/ir3_nir.h"
 
-static nir_function *
+static nir_shader *
 tu_spirv_to_nir(struct ir3_compiler *compiler,
                 const uint32_t *words,
                 size_t word_count,
@@ -38,7 +38,7 @@ tu_spirv_to_nir(struct ir3_compiler *compiler,
 {
    /* TODO these are made-up */
    const struct spirv_to_nir_options spirv_options = {
-      .lower_workgroup_access_to_offsets = true,
+      .frag_coord_is_sysval = true,
       .lower_ubo_ssbo_access_to_offsets = true,
       .caps = { false },
    };
@@ -68,16 +68,16 @@ tu_spirv_to_nir(struct ir3_compiler *compiler,
       num_spec = spec_info->mapEntryCount;
    }
 
-   nir_function *entry_point =
+   nir_shader *nir =
       spirv_to_nir(words, word_count, spec, num_spec, stage, entry_point_name,
                    &spirv_options, nir_options);
 
    free(spec);
 
-   assert(entry_point->shader->info.stage == stage);
-   nir_validate_shader(entry_point->shader, "after spirv_to_nir");
+   assert(nir->info.stage == stage);
+   nir_validate_shader(nir, "after spirv_to_nir");
 
-   return entry_point;
+   return nir;
 }
 
 static void
@@ -108,6 +108,188 @@ tu_sort_variables_by_location(struct exec_list *variables)
    exec_list_move_nodes_to(&sorted, variables);
 }
 
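+/* Add a (set, binding) pair to a descriptor map, reusing the existing
+ * entry if the pair has been seen before, and return its index.
+ */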
+static unsigned
+map_add(struct tu_descriptor_map *map, int set, int binding)
+{
+   unsigned index;
+   for (index = 0; index < map->num; index++) {
+      if (set == map->set[index] && binding == map->binding[index])
+         break;
+   }
+
+   assert(index < ARRAY_SIZE(map->set));
+
+   map->set[index] = set;
+   map->binding[index] = binding;
+   map->num = MAX2(map->num, index + 1);
+   return index;
+}
+
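+/* Rewrite a texture/sampler deref source into a flat offset: walk the
+ * deref chain to accumulate constant and dynamic array indices, then
+ * record the variable's (set, binding) pair in the shader's descriptor
+ * map.
+ */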
+static void
+lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
+                        struct tu_shader *shader)
+{
+   nir_ssa_def *index = NULL;
+   unsigned base_index = 0;
+   unsigned array_elements = 1;
+   nir_tex_src *src = &instr->src[src_idx];
+   bool is_sampler = src->src_type == nir_tex_src_sampler_deref;
+
+   /* First, compute the offsets. */
+   nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
+   while (deref->deref_type != nir_deref_type_var) {
+      assert(deref->parent.is_ssa);
+      nir_deref_instr *parent =
+         nir_instr_as_deref(deref->parent.ssa->parent_instr);
+
+      assert(deref->deref_type == nir_deref_type_array);
+
+      if (nir_src_is_const(deref->arr.index) && index == NULL) {
+         /* We're still building a direct index */
+         base_index += nir_src_as_uint(deref->arr.index) * array_elements;
+      } else {
+         if (index == NULL) {
+            /* We used to be direct but not anymore */
+            index = nir_imm_int(b, base_index);
+            base_index = 0;
+         }
+
+         index = nir_iadd(b, index,
+                          nir_imul(b, nir_imm_int(b, array_elements),
+                                   nir_ssa_for_src(b, deref->arr.index, 1)));
+      }
+
+      array_elements *= glsl_get_length(parent->type);
+
+      deref = parent;
+   }
+
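+   /* Clamp a dynamic index so it cannot address past the end of the array. */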
+   if (index)
+      index = nir_umin(b, index, nir_imm_int(b, array_elements - 1));
+
+   /* Now that we have the offsets, apply them by rewriting the source or
+    * removing the instruction if needed.
+    */
+   if (index) {
+      nir_instr_rewrite_src(&instr->instr, &src->src,
+                            nir_src_for_ssa(index));
+
+      src->src_type = is_sampler ?
+         nir_tex_src_sampler_offset :
+         nir_tex_src_texture_offset;
+
+      instr->texture_array_size = array_elements;
+   } else {
+      nir_tex_instr_remove_src(instr, src_idx);
+   }
+
+   if (array_elements > 1)
+      tu_finishme("texture/sampler array");
+
+   if (is_sampler) {
+      instr->sampler_index = map_add(&shader->sampler_map,
+                                     deref->var->data.descriptor_set,
+                                     deref->var->data.binding);
+      instr->sampler_index += base_index;
+   } else {
+      instr->texture_index = map_add(&shader->texture_map,
+                                     deref->var->data.descriptor_set,
+                                     deref->var->data.binding);
+      instr->texture_index += base_index;
+      instr->texture_array_size = array_elements;
+   }
+}
+
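+/* Lower the texture and sampler deref sources of a tex instruction to
+ * offsets into the descriptor maps. Returns true if anything was lowered.
+ */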
+static bool
+lower_sampler(nir_builder *b, nir_tex_instr *instr, struct tu_shader *shader)
+{
+   int texture_idx =
+      nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);
+
+   if (texture_idx >= 0)
+      lower_tex_src_to_offset(b, instr, texture_idx, shader);
+
+   int sampler_idx =
+      nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);
+
+   if (sampler_idx >= 0)
+      lower_tex_src_to_offset(b, instr, sampler_idx, shader);
+
+   if (texture_idx < 0 && sampler_idx < 0)
+      return false;
+
+   return true;
+}
+
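+/* Lower vulkan_resource_index for UBOs to an immediate index into the
+ * UBO map, biased by one since ir3 treats UBO index 0 specially.
+ */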
+static bool
+lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
+                struct tu_shader *shader)
+{
+   if (instr->intrinsic != nir_intrinsic_vulkan_resource_index)
+      return false;
+
+   nir_const_value *const_val = nir_src_as_const_value(instr->src[0]);
+   if (!const_val || const_val->u32 != 0) {
+      tu_finishme("non-zero vulkan_resource_index array index");
+      return false;
+   }
+
+   if (nir_intrinsic_desc_type(instr) != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
+      tu_finishme("non-ubo vulkan_resource_index");
+      return false;
+   }
+
+   unsigned index = map_add(&shader->ubo_map,
+                            nir_intrinsic_desc_set(instr),
+                            nir_intrinsic_binding(instr));
+
+   b->cursor = nir_before_instr(&instr->instr);
+   /* skip index 0 because ir3 treats it differently */
+   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
+                            nir_src_for_ssa(nir_imm_int(b, index + 1)));
+   nir_instr_remove(&instr->instr);
+
+   return true;
+}
+
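+/* Apply the tex/intrinsic lowering to every instruction in an impl. */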
+static bool
+lower_impl(nir_function_impl *impl, struct tu_shader *shader)
+{
+   nir_builder b;
+   nir_builder_init(&b, impl);
+   bool progress = false;
+
+   nir_foreach_block(block, impl) {
+      nir_foreach_instr_safe(instr, block) {
+         switch (instr->type) {
+         case nir_instr_type_tex:
+            progress |= lower_sampler(&b, nir_instr_as_tex(instr), shader);
+            break;
+         case nir_instr_type_intrinsic:
+            progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader);
+            break;
+         default:
+            break;
+         }
+      }
+   }
+
+   return progress;
+}
+
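+/* Walk every function and lower descriptor-based texture, sampler and
+ * UBO accesses to the flat indices ir3 expects.
+ */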
+static bool
+tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader)
+{
+   bool progress = false;
+
+   nir_foreach_function(function, shader) {
+      if (function->impl)
+         progress |= lower_impl(function->impl, tu_shader);
+   }
+
+   return progress;
+}
+
 struct tu_shader *
 tu_shader_create(struct tu_device *dev,
                  gl_shader_stage stage,
@@ -128,22 +310,53 @@ tu_shader_create(struct tu_device *dev,
 
    /* translate SPIR-V to NIR */
    assert(module->code_size % 4 == 0);
-   nir_function *entry_point = tu_spirv_to_nir(
+   nir_shader *nir = tu_spirv_to_nir(
       dev->compiler, (const uint32_t *) module->code, module->code_size / 4,
       stage, stage_info->pName, stage_info->pSpecializationInfo);
-   if (!entry_point) {
+   if (!nir) {
       vk_free2(&dev->alloc, alloc, shader);
       return NULL;
    }
 
-   nir_shader *nir = entry_point->shader;
-
    if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_NIR)) {
       fprintf(stderr, "translated nir:\n");
       nir_print_shader(nir, stderr);
    }
 
-   /* TODO what needs to happen? */
+   /* multi-step inlining procedure */
+   NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
+   NIR_PASS_V(nir, nir_lower_returns);
+   NIR_PASS_V(nir, nir_inline_functions);
+   NIR_PASS_V(nir, nir_opt_deref);
+   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
+      if (!func->is_entrypoint)
+         exec_node_remove(&func->node);
+   }
+   assert(exec_list_length(&nir->functions) == 1);
+   NIR_PASS_V(nir, nir_lower_constant_initializers, ~nir_var_function_temp);
+
+   /* Split member structs.  We do this before lower_io_to_temporaries so that
+    * it doesn't lower system values to temporaries by accident.
+    */
+   NIR_PASS_V(nir, nir_split_var_copies);
+   NIR_PASS_V(nir, nir_split_per_member_structs);
+
+   NIR_PASS_V(nir, nir_remove_dead_variables,
+              nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);
+
+   NIR_PASS_V(nir, nir_propagate_invariant);
+
+   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
+
+   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
+   NIR_PASS_V(nir, nir_split_var_copies);
+   NIR_PASS_V(nir, nir_lower_var_copies);
+
+   NIR_PASS_V(nir, nir_opt_copy_prop_vars);
+   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);
+
+   /* ir3 doesn't support indirect input/output */
+   NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out);
 
    switch (stage) {
    case MESA_SHADER_VERTEX:
@@ -165,18 +378,29 @@ tu_shader_create(struct tu_device *dev,
       break;
    }
 
-   nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
-                            ir3_glsl_type_size);
-   nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
-                            ir3_glsl_type_size);
-   nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
-                            ir3_glsl_type_size);
+   nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs, stage);
+   nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs, stage);
 
    NIR_PASS_V(nir, nir_lower_system_values);
    NIR_PASS_V(nir, nir_lower_frexp);
+
+   NIR_PASS_V(nir, tu_lower_io, shader);
+
    NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);
 
-   nir_shader_gather_info(nir, entry_point->impl);
+   if (stage == MESA_SHADER_FRAGMENT) {
+      /* NOTE: lower load_barycentric_at_sample first, since it
+       * produces load_barycentric_at_offset:
+       */
+      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
+      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);
+
+      NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
+   }
+
+   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
+
+   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
 
    shader->ir3_shader.compiler = dev->compiler;
    shader->ir3_shader.type = stage;
@@ -196,10 +420,10 @@ tu_shader_destroy(struct tu_device *dev,
    for (uint32_t i = 0; i < 1 + shader->has_binning_pass; i++) {
       if (shader->variants[i].ir)
          ir3_destroy(shader->variants[i].ir);
-      if (shader->variants[i].const_state.immediates)
-         free(shader->variants[i].const_state.immediates);
    }
 
+   if (shader->ir3_shader.const_state.immediates)
+      free(shader->ir3_shader.const_state.immediates);
    if (shader->binary)
       free(shader->binary);
    if (shader->binning_binary)
@@ -216,8 +440,10 @@ tu_shader_compile_options_init(
    *options = (struct tu_shader_compile_options) {
       /* TODO ir3_key */
 
-      .optimize = !(pipeline_info->flags &
-                    VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT),
+      /* TODO: VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT
+       * some optimizations need to happen, otherwise the shader might not
+       * compile
+       */
+      .optimize = true,
       .include_binning_pass = true,
    };
 }
@@ -225,13 +451,14 @@ tu_shader_compile_options_init(
 static uint32_t *
 tu_compile_shader_variant(struct ir3_shader *shader,
                           const struct ir3_shader_key *key,
-                          bool binning_pass,
+                          struct ir3_shader_variant *nonbinning,
                           struct ir3_shader_variant *variant)
 {
    variant->shader = shader;
    variant->type = shader->type;
    variant->key = *key;
-   variant->binning_pass = binning_pass;
+   variant->binning_pass = !!nonbinning;
+   variant->nonbinning = nonbinning;
 
    int ret = ir3_compile_shader_nir(shader->compiler, variant);
    if (ret)
@@ -262,7 +489,7 @@ tu_shader_compile(struct tu_device *dev,
    }
 
    shader->binary = tu_compile_shader_variant(
-      &shader->ir3_shader, &options->key, false, &shader->variants[0]);
+      &shader->ir3_shader, &options->key, NULL, &shader->variants[0]);
    if (!shader->binary)
       return VK_ERROR_OUT_OF_HOST_MEMORY;
 
@@ -270,7 +497,8 @@ tu_shader_compile(struct tu_device *dev,
    if (options->include_binning_pass &&
        shader->ir3_shader.type == MESA_SHADER_VERTEX) {
       shader->binning_binary = tu_compile_shader_variant(
-         &shader->ir3_shader, &options->key, true, &shader->variants[1]);
+         &shader->ir3_shader, &options->key, &shader->variants[0],
+         &shader->variants[1]);
       if (!shader->binning_binary)
          return VK_ERROR_OUT_OF_HOST_MEMORY;