tu: Align GMEM resolve blit scissor
diff --git a/src/freedreno/vulkan/tu_shader.c b/src/freedreno/vulkan/tu_shader.c
index d87aa1dbf7129f7d5e7d75ce5450624ee6690857..85bf6bbc50ffa269213f5342b8fea34f49c7bc8a 100644
--- a/src/freedreno/vulkan/tu_shader.c
+++ b/src/freedreno/vulkan/tu_shader.c
@@ -25,6 +25,7 @@
 
 #include "spirv/nir_spirv.h"
 #include "util/mesa-sha1.h"
+#include "nir/nir_xfb_info.h"
 
 #include "ir3/ir3_nir.h"
 
@@ -38,9 +39,11 @@ tu_spirv_to_nir(struct ir3_compiler *compiler,
 {
    /* TODO these are made-up */
    const struct spirv_to_nir_options spirv_options = {
-      .lower_workgroup_access_to_offsets = true,
+      .frag_coord_is_sysval = true,
       .lower_ubo_ssbo_access_to_offsets = true,
-      .caps = { false },
+      .caps = {
+         .transform_feedback = compiler->gpu_id >= 600,
+      },
    };
    const nir_shader_compiler_options *nir_options =
       ir3_get_compiler_options(compiler);
@@ -81,37 +84,378 @@ tu_spirv_to_nir(struct ir3_compiler *compiler,
 }
 
 static void
-tu_sort_variables_by_location(struct exec_list *variables)
+lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *instr,
+                         struct tu_shader *shader)
 {
-   struct exec_list sorted;
-   exec_list_make_empty(&sorted);
-
-   nir_foreach_variable_safe(var, variables)
-   {
-      exec_node_remove(&var->node);
-
-      /* insert the variable into the sorted list */
-      nir_variable *next = NULL;
-      nir_foreach_variable(tmp, &sorted)
-      {
-         if (var->data.location < tmp->data.location) {
-            next = tmp;
+   nir_intrinsic_instr *load =
+      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
+   load->num_components = instr->num_components;
+   uint32_t base = nir_intrinsic_base(instr);
+   assert(base % 4 == 0);
+   assert(base >= shader->push_consts.lo * 16);
+   base -= shader->push_consts.lo * 16;
+   nir_intrinsic_set_base(load, base / 4);
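+   /* The offset source is in bytes; shift right by 2 to convert it to the
+    * same dword units used for the base above.
+    */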
+   load->src[0] =
+      nir_src_for_ssa(nir_ushr(b, instr->src[0].ssa, nir_imm_int(b, 2)));
+   nir_ssa_dest_init(&load->instr, &load->dest,
+                     load->num_components, instr->dest.ssa.bit_size,
+                     instr->dest.ssa.name);
+   nir_builder_instr_insert(b, &load->instr);
+   nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));
+
+   nir_instr_remove(&instr->instr);
+}
+
+static void
+lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr,
+                            struct tu_shader *shader,
+                            const struct tu_pipeline_layout *layout)
+{
+   nir_ssa_def *vulkan_idx = instr->src[0].ssa;
+
+   unsigned set = nir_intrinsic_desc_set(instr);
+   unsigned binding = nir_intrinsic_binding(instr);
+   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
+   struct tu_descriptor_set_binding_layout *binding_layout =
+      &set_layout->binding[binding];
+   uint32_t base;
+
+   switch (binding_layout->type) {
+   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+      base = layout->set[set].dynamic_offset_start +
+         binding_layout->dynamic_offset_offset +
+         layout->input_attachment_count;
+      set = MAX_SETS;
+      break;
+   default:
+      base = binding_layout->offset / (4 * A6XX_TEX_CONST_DWORDS);
+      break;
+   }
+
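+   /* Emit a bindless_resource_ir3 handle: the descriptor set goes in the
+    * desc_set index, and the offset of the descriptor within that set is
+    * the source.
+    */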
+   nir_intrinsic_instr *bindless =
+      nir_intrinsic_instr_create(b->shader,
+                                 nir_intrinsic_bindless_resource_ir3);
+   bindless->num_components = 1;
+   nir_ssa_dest_init(&bindless->instr, &bindless->dest,
+                     1, 32, NULL);
+   nir_intrinsic_set_desc_set(bindless, set);
+   bindless->src[0] = nir_src_for_ssa(nir_iadd(b, nir_imm_int(b, base), vulkan_idx));
+   nir_builder_instr_insert(b, &bindless->instr);
+
+   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
+                            nir_src_for_ssa(&bindless->dest.ssa));
+   nir_instr_remove(&instr->instr);
+}
+
+static nir_ssa_def *
+build_bindless(nir_builder *b, nir_deref_instr *deref, bool is_sampler,
+               struct tu_shader *shader,
+               const struct tu_pipeline_layout *layout)
+{
+   nir_variable *var = nir_deref_instr_get_variable(deref);
+
+   unsigned set = var->data.descriptor_set;
+   unsigned binding = var->data.binding;
+   const struct tu_descriptor_set_binding_layout *bind_layout =
+      &layout->set[set].layout->binding[binding];
+
+   nir_ssa_def *desc_offset;
+   unsigned descriptor_stride;
+   if (bind_layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
+      unsigned offset =
+         layout->set[set].input_attachment_start +
+         bind_layout->input_attachment_offset;
+      desc_offset = nir_imm_int(b, offset);
+      set = MAX_SETS;
+      descriptor_stride = 1;
+   } else {
+      unsigned offset = 0;
+      /* Samplers come second in combined image/sampler descriptors, see
+       * write_combined_image_sampler_descriptor().
+       */
+      if (is_sampler && bind_layout->type ==
+          VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
+         offset = 1;
+      }
+      desc_offset =
+         nir_imm_int(b, (bind_layout->offset / (4 * A6XX_TEX_CONST_DWORDS)) +
+                     offset);
+      descriptor_stride = bind_layout->size / (4 * A6XX_TEX_CONST_DWORDS);
+   }
+
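+   /* For arrayed bindings, step the offset by one descriptor stride per
+    * array element.
+    */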
+   if (deref->deref_type != nir_deref_type_var) {
+      assert(deref->deref_type == nir_deref_type_array);
+
+      nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
+      desc_offset = nir_iadd(b, desc_offset,
+                             nir_imul_imm(b, arr_index, descriptor_stride));
+   }
+
+   nir_intrinsic_instr *bindless =
+      nir_intrinsic_instr_create(b->shader,
+                                 nir_intrinsic_bindless_resource_ir3);
+   bindless->num_components = 1;
+   nir_ssa_dest_init(&bindless->instr, &bindless->dest,
+                     1, 32, NULL);
+   nir_intrinsic_set_desc_set(bindless, set);
+   bindless->src[0] = nir_src_for_ssa(desc_offset);
+   nir_builder_instr_insert(b, &bindless->instr);
+
+   return &bindless->dest.ssa;
+}
+
+static void
+lower_image_deref(nir_builder *b,
+                  nir_intrinsic_instr *instr, struct tu_shader *shader,
+                  const struct tu_pipeline_layout *layout)
+{
+   nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
+   nir_ssa_def *bindless = build_bindless(b, deref, false, shader, layout);
+   nir_rewrite_image_intrinsic(instr, bindless, true);
+}
+
+static bool
+lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
+                struct tu_shader *shader,
+                const struct tu_pipeline_layout *layout)
+{
+   switch (instr->intrinsic) {
+   case nir_intrinsic_load_layer_id:
+      /* TODO: remove this when layered rendering is implemented */
+      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
+                               nir_src_for_ssa(nir_imm_int(b, 0)));
+      nir_instr_remove(&instr->instr);
+      return true;
+
+   case nir_intrinsic_load_push_constant:
+      lower_load_push_constant(b, instr, shader);
+      return true;
+
+   case nir_intrinsic_vulkan_resource_index:
+      lower_vulkan_resource_index(b, instr, shader, layout);
+      return true;
+
+   case nir_intrinsic_image_deref_load:
+   case nir_intrinsic_image_deref_store:
+   case nir_intrinsic_image_deref_atomic_add:
+   case nir_intrinsic_image_deref_atomic_imin:
+   case nir_intrinsic_image_deref_atomic_umin:
+   case nir_intrinsic_image_deref_atomic_imax:
+   case nir_intrinsic_image_deref_atomic_umax:
+   case nir_intrinsic_image_deref_atomic_and:
+   case nir_intrinsic_image_deref_atomic_or:
+   case nir_intrinsic_image_deref_atomic_xor:
+   case nir_intrinsic_image_deref_atomic_exchange:
+   case nir_intrinsic_image_deref_atomic_comp_swap:
+   case nir_intrinsic_image_deref_size:
+   case nir_intrinsic_image_deref_samples:
+      lower_image_deref(b, instr, shader, layout);
+      return true;
+
+   default:
+      return false;
+   }
+}
+
+static bool
+lower_tex(nir_builder *b, nir_tex_instr *tex,
+          struct tu_shader *shader, const struct tu_pipeline_layout *layout)
+{
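+   /* Rewrite sampler and texture derefs into bindless handles so the
+    * backend consumes descriptors through the bindless model.
+    */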
+   int sampler_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_deref);
+   if (sampler_src_idx >= 0) {
+      nir_deref_instr *deref = nir_src_as_deref(tex->src[sampler_src_idx].src);
+      nir_ssa_def *bindless = build_bindless(b, deref, true, shader, layout);
+      nir_instr_rewrite_src(&tex->instr, &tex->src[sampler_src_idx].src,
+                            nir_src_for_ssa(bindless));
+      tex->src[sampler_src_idx].src_type = nir_tex_src_sampler_handle;
+   }
+
+   int tex_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
+   if (tex_src_idx >= 0) {
+      nir_deref_instr *deref = nir_src_as_deref(tex->src[tex_src_idx].src);
+      nir_ssa_def *bindless = build_bindless(b, deref, false, shader, layout);
+      nir_instr_rewrite_src(&tex->instr, &tex->src[tex_src_idx].src,
+                            nir_src_for_ssa(bindless));
+      tex->src[tex_src_idx].src_type = nir_tex_src_texture_handle;
+   }
+
+   return true;
+}
+
+static bool
+lower_impl(nir_function_impl *impl, struct tu_shader *shader,
+            const struct tu_pipeline_layout *layout)
+{
+   nir_builder b;
+   nir_builder_init(&b, impl);
+   bool progress = false;
+
+   nir_foreach_block(block, impl) {
+      nir_foreach_instr_safe(instr, block) {
+         b.cursor = nir_before_instr(instr);
+         switch (instr->type) {
+         case nir_instr_type_tex:
+            progress |= lower_tex(&b, nir_instr_as_tex(instr), shader, layout);
+            break;
+         case nir_instr_type_intrinsic:
+            progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader, layout);
+            break;
+         default:
+            break;
+         }
+      }
+   }
+
+   return progress;
+}
+
+
+/* Figure out the range of push constants that we're actually going to push to
+ * the shader, and tell the backend to reserve this range when pushing UBO
+ * constants.
+ */
+
+static void
+gather_push_constants(nir_shader *shader, struct tu_shader *tu_shader)
+{
+   uint32_t min = UINT32_MAX, max = 0;
+   nir_foreach_function(function, shader) {
+      if (!function->impl)
+         continue;
+
+      nir_foreach_block(block, function->impl) {
+         nir_foreach_instr_safe(instr, block) {
+            if (instr->type != nir_instr_type_intrinsic)
+               continue;
+
+            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+            if (intrin->intrinsic != nir_intrinsic_load_push_constant)
+               continue;
+
+            uint32_t base = nir_intrinsic_base(intrin);
+            uint32_t range = nir_intrinsic_range(intrin);
+            min = MIN2(min, base);
+            max = MAX2(max, base + range);
             break;
          }
       }
-      if (next)
-         exec_node_insert_node_before(&next->node, &var->node);
-      else
-         exec_list_push_tail(&sorted, &var->node);
    }
 
-   exec_list_move_nodes_to(&sorted, variables);
+   if (min >= max) {
+      tu_shader->push_consts.lo = 0;
+      tu_shader->push_consts.count = 0;
+      tu_shader->ir3_shader.const_state.num_reserved_user_consts = 0;
+      return;
+   }
+
+   /* CP_LOAD_STATE OFFSET and NUM_UNIT are in units of vec4 (4 dwords), but
+    * OFFSET has an alignment requirement of 4. Expand the range and convert
+    * the units accordingly.
+    */
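+   /* For example (hypothetical values): a single load at byte offset 36
+    * with range 8 yields min = 36 and max = 44, so lo = (36 / 16) / 4 * 4
+    * = 0 and count = align(44, 16) / 16 - 0 = 3 vec4s.
+    */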
+   tu_shader->push_consts.lo = (min / 16) / 4 * 4;
+   tu_shader->push_consts.count =
+      align(max, 16) / 16 - tu_shader->push_consts.lo;
+   tu_shader->ir3_shader.const_state.num_reserved_user_consts =
+      align(tu_shader->push_consts.count, 4);
+}
+
+/* Gather the InputAttachmentIndex for each input attachment from the NIR
+ * shader, and organize the info so that draw-time patching is easy.
+ */
+static void
+gather_input_attachments(nir_shader *shader, struct tu_shader *tu_shader,
+                         const struct tu_pipeline_layout *layout)
+{
+   nir_foreach_variable(var, &shader->uniforms) {
+      const struct glsl_type *glsl_type = glsl_without_array(var->type);
+
+      if (!glsl_type_is_image(glsl_type))
+         continue;
+
+      enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_type);
+
+      const uint32_t set = var->data.descriptor_set;
+      const uint32_t binding = var->data.binding;
+      const struct tu_descriptor_set_binding_layout *bind_layout =
+            &layout->set[set].layout->binding[binding];
+      const uint32_t array_size = bind_layout->array_size;
+
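+      /* For subpass inputs, var->data.index holds the InputAttachmentIndex;
+       * record it per array element for draw-time patching.
+       */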
+      if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
+          dim == GLSL_SAMPLER_DIM_SUBPASS_MS) {
+         unsigned offset =
+            layout->set[set].input_attachment_start +
+            bind_layout->input_attachment_offset;
+         for (unsigned i = 0; i < array_size; i++)
+            tu_shader->attachment_idx[offset + i] = var->data.index + i;
+      }
+   }
+}
+
+static bool
+tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader,
+            const struct tu_pipeline_layout *layout)
+{
+   bool progress = false;
+
+   gather_push_constants(shader, tu_shader);
+   gather_input_attachments(shader, tu_shader, layout);
+
+   nir_foreach_function(function, shader) {
+      if (function->impl)
+         progress |= lower_impl(function->impl, tu_shader, layout);
+   }
+
+   return progress;
+}
+
+static void
+tu_gather_xfb_info(nir_shader *nir, struct tu_shader *shader)
+{
+   struct ir3_stream_output_info *info = &shader->ir3_shader.stream_output;
+   nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);
+
+   if (!xfb)
+      return;
+
+   /* Create a map from VARYING_SLOT_* enums to consecutive indices. */
+   uint8_t num_outputs = 0;
+   uint64_t outputs_written = 0;
+   for (int i = 0; i < xfb->output_count; i++)
+      outputs_written |= BITFIELD64_BIT(xfb->outputs[i].location);
+
+   uint8_t output_map[VARYING_SLOT_TESS_MAX];
+   memset(output_map, 0, sizeof(output_map));
+
+   for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
+      if (outputs_written & BITFIELD64_BIT(attr))
+         output_map[attr] = num_outputs++;
+   }
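+   /* e.g. if only VARYING_SLOT_POS and VARYING_SLOT_VAR0 are written, they
+    * map to indices 0 and 1 respectively.
+    */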
+
+   assert(xfb->output_count < IR3_MAX_SO_OUTPUTS);
+   info->num_outputs = xfb->output_count;
+
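+   /* nir_xfb_info strides and offsets are in bytes; ir3 expects dwords. */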
+   for (int i = 0; i < IR3_MAX_SO_BUFFERS; i++)
+      info->stride[i] = xfb->buffers[i].stride / 4;
+
+   for (int i = 0; i < xfb->output_count; i++) {
+      info->output[i].register_index = output_map[xfb->outputs[i].location];
+      info->output[i].start_component = xfb->outputs[i].component_offset;
+      info->output[i].num_components =
+                           util_bitcount(xfb->outputs[i].component_mask);
+      info->output[i].output_buffer  = xfb->outputs[i].buffer;
+      info->output[i].dst_offset = xfb->outputs[i].offset / 4;
+      info->output[i].stream = xfb->buffer_to_stream[xfb->outputs[i].buffer];
+   }
+
+   ralloc_free(xfb);
 }
 
 struct tu_shader *
 tu_shader_create(struct tu_device *dev,
                  gl_shader_stage stage,
                  const VkPipelineShaderStageCreateInfo *stage_info,
+                 struct tu_pipeline_layout *layout,
                  const VkAllocationCallbacks *alloc)
 {
    const struct tu_shader_module *module =
@@ -141,41 +485,81 @@ tu_shader_create(struct tu_device *dev,
       nir_print_shader(nir, stderr);
    }
 
-   /* TODO what needs to happen? */
-
-   switch (stage) {
-   case MESA_SHADER_VERTEX:
-      tu_sort_variables_by_location(&nir->outputs);
-      break;
-   case MESA_SHADER_TESS_CTRL:
-   case MESA_SHADER_TESS_EVAL:
-   case MESA_SHADER_GEOMETRY:
-      tu_sort_variables_by_location(&nir->inputs);
-      tu_sort_variables_by_location(&nir->outputs);
-      break;
-   case MESA_SHADER_FRAGMENT:
-      tu_sort_variables_by_location(&nir->inputs);
-      break;
-   case MESA_SHADER_COMPUTE:
-      break;
-   default:
-      unreachable("invalid gl_shader_stage");
-      break;
+   /* Multi-step inlining procedure: inline everything into the entrypoint,
+    * then remove the other functions.
+    */
+   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
+   NIR_PASS_V(nir, nir_lower_returns);
+   NIR_PASS_V(nir, nir_inline_functions);
+   NIR_PASS_V(nir, nir_opt_deref);
+   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
+      if (!func->is_entrypoint)
+         exec_node_remove(&func->node);
    }
+   assert(exec_list_length(&nir->functions) == 1);
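+   /* With only the entrypoint left, lower the initializers of the remaining
+    * variable modes.
+    */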
+   NIR_PASS_V(nir, nir_lower_variable_initializers, ~nir_var_function_temp);
+
+   /* Split member structs.  We do this before lower_io_to_temporaries so that
+    * it doesn't lower system values to temporaries by accident.
+    */
+   NIR_PASS_V(nir, nir_split_var_copies);
+   NIR_PASS_V(nir, nir_split_per_member_structs);
+
+   NIR_PASS_V(nir, nir_remove_dead_variables,
+              nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);
+
+   /* Gather information for transform feedback. This should be called after
+    * nir_split_per_member_structs, and after nir_remove_dead_variables has
+    * run on the varyings, so that we can align stream outputs correctly.
+    */
+   if (nir->info.stage == MESA_SHADER_VERTEX ||
+         nir->info.stage == MESA_SHADER_TESS_EVAL ||
+         nir->info.stage == MESA_SHADER_GEOMETRY)
+      tu_gather_xfb_info(nir, shader);
+
+   NIR_PASS_V(nir, nir_propagate_invariant);
 
-   nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
-                            ir3_glsl_type_size);
-   nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
-                            ir3_glsl_type_size);
-   nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
-                            ir3_glsl_type_size);
+   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
+
+   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
+   NIR_PASS_V(nir, nir_split_var_copies);
+   NIR_PASS_V(nir, nir_lower_var_copies);
+
+   NIR_PASS_V(nir, nir_opt_copy_prop_vars);
+   NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);
+
+   /* ir3 doesn't support indirect input/output */
+   NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out);
+
+   NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
+
+   nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs, stage);
+   nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs, stage);
 
    NIR_PASS_V(nir, nir_lower_system_values);
    NIR_PASS_V(nir, nir_lower_frexp);
+
+   if (stage == MESA_SHADER_FRAGMENT)
+      NIR_PASS_V(nir, nir_lower_input_attachments, true);
+
+   NIR_PASS_V(nir, tu_lower_io, shader, layout);
+
    NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);
 
+   if (stage == MESA_SHADER_FRAGMENT) {
+      /* NOTE: lower load_barycentric_at_sample first, since it
+       * produces load_barycentric_at_offset:
+       */
+      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
+      NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);
+
+      NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
+   }
+
    nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
 
+   /* num_uniforms is only used by ir3 for the size of ubo 0 (push constants) */
+   nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE / 16;
+
    shader->ir3_shader.compiler = dev->compiler;
    shader->ir3_shader.type = stage;
    shader->ir3_shader.nir = nir;
@@ -211,11 +595,25 @@ tu_shader_compile_options_init(
    struct tu_shader_compile_options *options,
    const VkGraphicsPipelineCreateInfo *pipeline_info)
 {
-   *options = (struct tu_shader_compile_options) {
-      /* TODO ir3_key */
+   bool has_gs = false;
+   if (pipeline_info) {
+      for (uint32_t i = 0; i < pipeline_info->stageCount; i++) {
+         if (pipeline_info->pStages[i].stage == VK_SHADER_STAGE_GEOMETRY_BIT) {
+            has_gs = true;
+            break;
+         }
+      }
+   }
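+   /* The presence of a geometry stage affects how the earlier stages are
+    * compiled, so it is recorded in the ir3 key.
+    */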
 
-      .optimize = !(pipeline_info->flags &
-                    VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT),
+   *options = (struct tu_shader_compile_options) {
+      /* TODO: Populate the remaining fields of ir3_shader_key. */
+      .key = {
+         .has_gs = has_gs,
+      },
+      /* TODO: honor VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; some
+       * optimizations still need to happen, otherwise the shader might not
+       * compile.
+       */
+      .optimize = true,
       .include_binning_pass = true,
    };
 }
@@ -223,13 +621,14 @@ tu_shader_compile_options_init(
 static uint32_t *
 tu_compile_shader_variant(struct ir3_shader *shader,
                           const struct ir3_shader_key *key,
-                          bool binning_pass,
+                          struct ir3_shader_variant *nonbinning,
                           struct ir3_shader_variant *variant)
 {
    variant->shader = shader;
    variant->type = shader->type;
    variant->key = *key;
-   variant->binning_pass = binning_pass;
+   variant->binning_pass = !!nonbinning;
+   variant->nonbinning = nonbinning;
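+   /* A binning-pass variant is linked to its corresponding full variant;
+    * passing NULL compiles a regular (non-binning) variant.
+    */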
 
    int ret = ir3_compile_shader_nir(shader->compiler, variant);
    if (ret)
@@ -260,19 +659,34 @@ tu_shader_compile(struct tu_device *dev,
    }
 
    shader->binary = tu_compile_shader_variant(
-      &shader->ir3_shader, &options->key, false, &shader->variants[0]);
+      &shader->ir3_shader, &options->key, NULL, &shader->variants[0]);
    if (!shader->binary)
       return VK_ERROR_OUT_OF_HOST_MEMORY;
 
+   if (shader_debug_enabled(shader->ir3_shader.type)) {
+      fprintf(stdout, "Native code for unnamed %s shader %s:\n",
+              ir3_shader_stage(&shader->variants[0]),
+              shader->ir3_shader.nir->info.name);
+      if (shader->ir3_shader.type == MESA_SHADER_FRAGMENT)
+         fprintf(stdout, "SIMD0\n");
+      ir3_shader_disasm(&shader->variants[0], shader->binary, stdout);
+   }
+
    /* compile another variant for the binning pass */
    if (options->include_binning_pass &&
        shader->ir3_shader.type == MESA_SHADER_VERTEX) {
       shader->binning_binary = tu_compile_shader_variant(
-         &shader->ir3_shader, &options->key, true, &shader->variants[1]);
+         &shader->ir3_shader, &options->key, &shader->variants[0],
+         &shader->variants[1]);
       if (!shader->binning_binary)
          return VK_ERROR_OUT_OF_HOST_MEMORY;
 
       shader->has_binning_pass = true;
+
+      if (shader_debug_enabled(MESA_SHADER_VERTEX)) {
+         fprintf(stdout, "Native code for unnamed binning shader %s:\n",
+                 shader->ir3_shader.nir->info.name);
+         ir3_shader_disasm(&shader->variants[1], shader->binning_binary,
+                           stdout);
+      }
    }
 
    if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_IR3)) {