#include "spirv/nir_spirv.h"
#include "util/mesa-sha1.h"
+#include "nir/nir_xfb_info.h"
#include "ir3/ir3_nir.h"
const struct spirv_to_nir_options spirv_options = {
.frag_coord_is_sysval = true,
.lower_ubo_ssbo_access_to_offsets = true,
- .caps = { false },
+ .caps = {
+ .transform_feedback = compiler->gpu_id >= 600,
+ },
};
const nir_shader_compiler_options *nir_options =
ir3_get_compiler_options(compiler);
}
static void
-tu_sort_variables_by_location(struct exec_list *variables)
+lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *instr,
+ struct tu_shader *shader)
{
- struct exec_list sorted;
- exec_list_make_empty(&sorted);
-
- nir_foreach_variable_safe(var, variables)
- {
- exec_node_remove(&var->node);
-
- /* insert the variable into the sorted list */
- nir_variable *next = NULL;
- nir_foreach_variable(tmp, &sorted)
- {
- if (var->data.location < tmp->data.location) {
- next = tmp;
- break;
- }
- }
- if (next)
- exec_node_insert_node_before(&next->node, &var->node);
- else
- exec_list_push_tail(&sorted, &var->node);
- }
+ nir_intrinsic_instr *load =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
+ load->num_components = instr->num_components;
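+   /* load_push_constant addresses in bytes, while ir3's load_uniform works
+    * in dwords: scale both the base and the offset source down by 4.
+    */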
+ uint32_t base = nir_intrinsic_base(instr);
+ assert(base % 4 == 0);
+ assert(base >= shader->push_consts.lo * 16);
+ base -= shader->push_consts.lo * 16;
+ nir_intrinsic_set_base(load, base / 4);
+ load->src[0] =
+ nir_src_for_ssa(nir_ushr(b, instr->src[0].ssa, nir_imm_int(b, 2)));
+ nir_ssa_dest_init(&load->instr, &load->dest,
+ load->num_components, instr->dest.ssa.bit_size,
+ instr->dest.ssa.name);
+ nir_builder_instr_insert(b, &load->instr);
+ nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));
- exec_list_move_nodes_to(&sorted, variables);
+ nir_instr_remove(&instr->instr);
}
-static unsigned
-map_add(struct tu_descriptor_map *map, int set, int binding)
+static void
+lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr,
+ struct tu_shader *shader,
+ const struct tu_pipeline_layout *layout)
{
- unsigned index;
- for (index = 0; index < map->num; index++) {
- if (set == map->set[index] && binding == map->binding[index])
- break;
+ nir_ssa_def *vulkan_idx = instr->src[0].ssa;
+
+ unsigned set = nir_intrinsic_desc_set(instr);
+ unsigned binding = nir_intrinsic_binding(instr);
+ struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
+ struct tu_descriptor_set_binding_layout *binding_layout =
+ &set_layout->binding[binding];
+ uint32_t base;
+
+ switch (binding_layout->type) {
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
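+      /* Dynamic UBO/SSBO descriptors live past the input attachments in a
+       * driver-internal set addressed via the reserved MAX_SETS index.
+       */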
+ base = layout->set[set].dynamic_offset_start +
+ binding_layout->dynamic_offset_offset +
+ layout->input_attachment_count;
+ set = MAX_SETS;
+ break;
+ default:
+ base = binding_layout->offset / (4 * A6XX_TEX_CONST_DWORDS);
+ break;
}
- assert(index < ARRAY_SIZE(map->set));
+ nir_intrinsic_instr *bindless =
+ nir_intrinsic_instr_create(b->shader,
+ nir_intrinsic_bindless_resource_ir3);
+ bindless->num_components = 1;
+ nir_ssa_dest_init(&bindless->instr, &bindless->dest,
+ 1, 32, NULL);
+ nir_intrinsic_set_desc_set(bindless, set);
+ bindless->src[0] = nir_src_for_ssa(nir_iadd(b, nir_imm_int(b, base), vulkan_idx));
+ nir_builder_instr_insert(b, &bindless->instr);
- map->set[index] = set;
- map->binding[index] = binding;
- map->num = MAX2(map->num, index + 1);
- return index;
+ nir_ssa_def_rewrite_uses(&instr->dest.ssa,
+ nir_src_for_ssa(&bindless->dest.ssa));
+ nir_instr_remove(&instr->instr);
}
-static void
-lower_tex_src_to_offset(nir_builder *b, nir_tex_instr *instr, unsigned src_idx,
- struct tu_shader *shader)
+static nir_ssa_def *
+build_bindless(nir_builder *b, nir_deref_instr *deref, bool is_sampler,
+ struct tu_shader *shader,
+ const struct tu_pipeline_layout *layout)
{
- nir_ssa_def *index = NULL;
- unsigned base_index = 0;
- unsigned array_elements = 1;
- nir_tex_src *src = &instr->src[src_idx];
- bool is_sampler = src->src_type == nir_tex_src_sampler_deref;
-
- /* We compute first the offsets */
- nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
- while (deref->deref_type != nir_deref_type_var) {
- assert(deref->parent.is_ssa);
- nir_deref_instr *parent =
- nir_instr_as_deref(deref->parent.ssa->parent_instr);
-
- assert(deref->deref_type == nir_deref_type_array);
-
- if (nir_src_is_const(deref->arr.index) && index == NULL) {
- /* We're still building a direct index */
- base_index += nir_src_as_uint(deref->arr.index) * array_elements;
- } else {
- if (index == NULL) {
- /* We used to be direct but not anymore */
- index = nir_imm_int(b, base_index);
- base_index = 0;
- }
-
- index = nir_iadd(b, index,
- nir_imul(b, nir_imm_int(b, array_elements),
- nir_ssa_for_src(b, deref->arr.index, 1)));
+ nir_variable *var = nir_deref_instr_get_variable(deref);
+
+ unsigned set = var->data.descriptor_set;
+ unsigned binding = var->data.binding;
+ const struct tu_descriptor_set_binding_layout *bind_layout =
+ &layout->set[set].layout->binding[binding];
+
+ nir_ssa_def *desc_offset;
+ unsigned descriptor_stride;
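+   /* Offsets and strides below are in units of whole descriptors, i.e.
+    * 4 * A6XX_TEX_CONST_DWORDS bytes each.
+    */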
+ if (bind_layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
+ unsigned offset =
+ layout->set[set].input_attachment_start +
+ bind_layout->input_attachment_offset;
+ desc_offset = nir_imm_int(b, offset);
+ set = MAX_SETS;
+ descriptor_stride = 1;
+ } else {
+ unsigned offset = 0;
+ /* Samplers come second in combined image/sampler descriptors, see
+ * write_combined_image_sampler_descriptor().
+ */
+ if (is_sampler && bind_layout->type ==
+ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
+ offset = 1;
}
-
- array_elements *= glsl_get_length(parent->type);
-
- deref = parent;
+ desc_offset =
+ nir_imm_int(b, (bind_layout->offset / (4 * A6XX_TEX_CONST_DWORDS)) +
+ offset);
+ descriptor_stride = bind_layout->size / (4 * A6XX_TEX_CONST_DWORDS);
}
- if (index)
- index = nir_umin(b, index, nir_imm_int(b, array_elements - 1));
-
- /* We have the offsets, we apply them, rewriting the source or removing
- * instr if needed
- */
- if (index) {
- nir_instr_rewrite_src(&instr->instr, &src->src,
- nir_src_for_ssa(index));
-
- src->src_type = is_sampler ?
- nir_tex_src_sampler_offset :
- nir_tex_src_texture_offset;
+ if (deref->deref_type != nir_deref_type_var) {
+ assert(deref->deref_type == nir_deref_type_array);
- instr->texture_array_size = array_elements;
- } else {
- nir_tex_instr_remove_src(instr, src_idx);
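+      /* Arrayed binding: advance desc_offset by index * stride to select
+       * the element's descriptor.
+       */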
+ nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
+ desc_offset = nir_iadd(b, desc_offset,
+ nir_imul_imm(b, arr_index, descriptor_stride));
}
- if (array_elements > 1)
- tu_finishme("texture/sampler array");
-
- if (is_sampler) {
- instr->sampler_index = map_add(&shader->sampler_map,
- deref->var->data.descriptor_set,
- deref->var->data.binding);
- instr->sampler_index += base_index;
- } else {
- instr->texture_index = map_add(&shader->texture_map,
- deref->var->data.descriptor_set,
- deref->var->data.binding);
- instr->texture_index += base_index;
- instr->texture_array_size = array_elements;
- }
+ nir_intrinsic_instr *bindless =
+ nir_intrinsic_instr_create(b->shader,
+ nir_intrinsic_bindless_resource_ir3);
+ bindless->num_components = 1;
+ nir_ssa_dest_init(&bindless->instr, &bindless->dest,
+ 1, 32, NULL);
+ nir_intrinsic_set_desc_set(bindless, set);
+ bindless->src[0] = nir_src_for_ssa(desc_offset);
+ nir_builder_instr_insert(b, &bindless->instr);
+
+ return &bindless->dest.ssa;
}
-static bool
-lower_sampler(nir_builder *b, nir_tex_instr *instr, struct tu_shader *shader)
+static void
+lower_image_deref(nir_builder *b,
+ nir_intrinsic_instr *instr, struct tu_shader *shader,
+ const struct tu_pipeline_layout *layout)
{
- int texture_idx =
- nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);
-
- if (texture_idx >= 0)
- lower_tex_src_to_offset(b, instr, texture_idx, shader);
-
- int sampler_idx =
- nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);
-
- if (sampler_idx >= 0)
- lower_tex_src_to_offset(b, instr, sampler_idx, shader);
-
- if (texture_idx < 0 && sampler_idx < 0)
- return false;
-
- return true;
+ nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
+ nir_ssa_def *bindless = build_bindless(b, deref, false, shader, layout);
+ nir_rewrite_image_intrinsic(instr, bindless, true);
}
static bool
lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
- struct tu_shader *shader)
+ struct tu_shader *shader,
+ const struct tu_pipeline_layout *layout)
{
- if (instr->intrinsic != nir_intrinsic_vulkan_resource_index)
- return false;
+ switch (instr->intrinsic) {
+ case nir_intrinsic_load_layer_id:
+ /* TODO: remove this when layered rendering is implemented */
+ nir_ssa_def_rewrite_uses(&instr->dest.ssa,
+ nir_src_for_ssa(nir_imm_int(b, 0)));
+ nir_instr_remove(&instr->instr);
+ return true;
+
+ case nir_intrinsic_load_push_constant:
+ lower_load_push_constant(b, instr, shader);
+ return true;
+
+ case nir_intrinsic_vulkan_resource_index:
+ lower_vulkan_resource_index(b, instr, shader, layout);
+ return true;
+
+ case nir_intrinsic_image_deref_load:
+ case nir_intrinsic_image_deref_store:
+ case nir_intrinsic_image_deref_atomic_add:
+ case nir_intrinsic_image_deref_atomic_imin:
+ case nir_intrinsic_image_deref_atomic_umin:
+ case nir_intrinsic_image_deref_atomic_imax:
+ case nir_intrinsic_image_deref_atomic_umax:
+ case nir_intrinsic_image_deref_atomic_and:
+ case nir_intrinsic_image_deref_atomic_or:
+ case nir_intrinsic_image_deref_atomic_xor:
+ case nir_intrinsic_image_deref_atomic_exchange:
+ case nir_intrinsic_image_deref_atomic_comp_swap:
+ case nir_intrinsic_image_deref_size:
+ case nir_intrinsic_image_deref_samples:
+ lower_image_deref(b, instr, shader, layout);
+ return true;
- nir_const_value *const_val = nir_src_as_const_value(instr->src[0]);
- if (!const_val || const_val->u32 != 0) {
- tu_finishme("non-zero vulkan_resource_index array index");
+ default:
return false;
}
+}
- if (nir_intrinsic_desc_type(instr) != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
- tu_finishme("non-ubo vulkan_resource_index");
- return false;
+static bool
+lower_tex(nir_builder *b, nir_tex_instr *tex,
+ struct tu_shader *shader, const struct tu_pipeline_layout *layout)
+{
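+   /* A combined image/sampler descriptor produces two bindless handles here;
+    * the sampler half lives one descriptor after the image half (see
+    * build_bindless()).
+    */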
+ int sampler_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_deref);
+ if (sampler_src_idx >= 0) {
+ nir_deref_instr *deref = nir_src_as_deref(tex->src[sampler_src_idx].src);
+ nir_ssa_def *bindless = build_bindless(b, deref, true, shader, layout);
+ nir_instr_rewrite_src(&tex->instr, &tex->src[sampler_src_idx].src,
+ nir_src_for_ssa(bindless));
+ tex->src[sampler_src_idx].src_type = nir_tex_src_sampler_handle;
}
- unsigned index = map_add(&shader->ubo_map,
- nir_intrinsic_desc_set(instr),
- nir_intrinsic_binding(instr));
-
- b->cursor = nir_before_instr(&instr->instr);
- /* skip index 0 because ir3 treats it differently */
- nir_ssa_def_rewrite_uses(&instr->dest.ssa,
- nir_src_for_ssa(nir_imm_int(b, index + 1)));
- nir_instr_remove(&instr->instr);
+ int tex_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
+ if (tex_src_idx >= 0) {
+ nir_deref_instr *deref = nir_src_as_deref(tex->src[tex_src_idx].src);
+ nir_ssa_def *bindless = build_bindless(b, deref, false, shader, layout);
+ nir_instr_rewrite_src(&tex->instr, &tex->src[tex_src_idx].src,
+ nir_src_for_ssa(bindless));
+ tex->src[tex_src_idx].src_type = nir_tex_src_texture_handle;
+ }
return true;
}
static bool
-lower_impl(nir_function_impl *impl, struct tu_shader *shader)
+lower_impl(nir_function_impl *impl, struct tu_shader *shader,
+ const struct tu_pipeline_layout *layout)
{
nir_builder b;
nir_builder_init(&b, impl);
nir_foreach_block(block, impl) {
nir_foreach_instr_safe(instr, block) {
+ b.cursor = nir_before_instr(instr);
switch (instr->type) {
case nir_instr_type_tex:
- progress |= lower_sampler(&b, nir_instr_as_tex(instr), shader);
+ progress |= lower_tex(&b, nir_instr_as_tex(instr), shader, layout);
break;
case nir_instr_type_intrinsic:
- progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader);
+ progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader, layout);
break;
default:
break;
return progress;
}
+
+/* Figure out the range of push constants that we're actually going to push to
+ * the shader, and tell the backend to reserve this range when pushing UBO
+ * constants.
+ */
+
+static void
+gather_push_constants(nir_shader *shader, struct tu_shader *tu_shader)
+{
+ uint32_t min = UINT32_MAX, max = 0;
+ nir_foreach_function(function, shader) {
+ if (!function->impl)
+ continue;
+
+ nir_foreach_block(block, function->impl) {
+ nir_foreach_instr_safe(instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+ if (intrin->intrinsic != nir_intrinsic_load_push_constant)
+ continue;
+
+ uint32_t base = nir_intrinsic_base(intrin);
+ uint32_t range = nir_intrinsic_range(intrin);
+ min = MIN2(min, base);
+ max = MAX2(max, base + range);
+ }
+ }
+ }
+
+ if (min >= max) {
+ tu_shader->push_consts.lo = 0;
+ tu_shader->push_consts.count = 0;
+ tu_shader->ir3_shader.const_state.num_reserved_user_consts = 0;
+ return;
+ }
+
+ /* CP_LOAD_STATE OFFSET and NUM_UNIT are in units of vec4 (4 dwords),
+ * however there's an alignment requirement of 4 on OFFSET. Expand the
+ * range and change units accordingly.
+ */
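+   /* Illustrative example: min = 8 and max = 72 (both in bytes) give
+    * lo = (8 / 16) / 4 * 4 = 0 and count = align(72, 16) / 16 - 0 = 5 vec4s,
+    * so align(5, 4) = 8 vec4s end up reserved.
+    */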
+ tu_shader->push_consts.lo = (min / 16) / 4 * 4;
+ tu_shader->push_consts.count =
+ align(max, 16) / 16 - tu_shader->push_consts.lo;
+ tu_shader->ir3_shader.const_state.num_reserved_user_consts =
+ align(tu_shader->push_consts.count, 4);
+}
+
+/* Gather the InputAttachmentIndex for each input attachment from the NIR
+ * shader, and organize the info so that draw-time patching is easy.
+ */
+static void
+gather_input_attachments(nir_shader *shader, struct tu_shader *tu_shader,
+ const struct tu_pipeline_layout *layout)
+{
+ nir_foreach_variable(var, &shader->uniforms) {
+ const struct glsl_type *glsl_type = glsl_without_array(var->type);
+
+ if (!glsl_type_is_image(glsl_type))
+ continue;
+
+ enum glsl_sampler_dim dim = glsl_get_sampler_dim(glsl_type);
+
+ const uint32_t set = var->data.descriptor_set;
+ const uint32_t binding = var->data.binding;
+ const struct tu_descriptor_set_binding_layout *bind_layout =
+ &layout->set[set].layout->binding[binding];
+ const uint32_t array_size = bind_layout->array_size;
+
+ if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
+ dim == GLSL_SAMPLER_DIM_SUBPASS_MS) {
+ unsigned offset =
+ layout->set[set].input_attachment_start +
+ bind_layout->input_attachment_offset;
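+         /* var->data.index holds the InputAttachmentIndex decoration, so
+          * consecutive array elements map to consecutive attachments.
+          */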
+ for (unsigned i = 0; i < array_size; i++)
+ tu_shader->attachment_idx[offset + i] = var->data.index + i;
+ }
+ }
+}
+
static bool
-tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader)
+tu_lower_io(nir_shader *shader, struct tu_shader *tu_shader,
+ const struct tu_pipeline_layout *layout)
{
bool progress = false;
+ gather_push_constants(shader, tu_shader);
+ gather_input_attachments(shader, tu_shader, layout);
+
nir_foreach_function(function, shader) {
if (function->impl)
- progress |= lower_impl(function->impl, tu_shader);
+ progress |= lower_impl(function->impl, tu_shader, layout);
}
return progress;
}
+static void
+tu_gather_xfb_info(nir_shader *nir, struct tu_shader *shader)
+{
+ struct ir3_stream_output_info *info = &shader->ir3_shader.stream_output;
+ nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);
+
+ if (!xfb)
+ return;
+
+   /* Create a map from VARYING_SLOT_* enums to consecutive indices. */
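+   /* For example, if only VARYING_SLOT_POS and VARYING_SLOT_VAR0 are
+    * written, output_map[VARYING_SLOT_POS] = 0 and
+    * output_map[VARYING_SLOT_VAR0] = 1.
+    */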
+ uint8_t num_outputs = 0;
+ uint64_t outputs_written = 0;
+ for (int i = 0; i < xfb->output_count; i++)
+ outputs_written |= BITFIELD64_BIT(xfb->outputs[i].location);
+
+ uint8_t output_map[VARYING_SLOT_TESS_MAX];
+ memset(output_map, 0, sizeof(output_map));
+
+ for (unsigned attr = 0; attr < VARYING_SLOT_MAX; attr++) {
+ if (outputs_written & BITFIELD64_BIT(attr))
+ output_map[attr] = num_outputs++;
+ }
+
+ assert(xfb->output_count < IR3_MAX_SO_OUTPUTS);
+ info->num_outputs = xfb->output_count;
+
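+   /* nir_xfb_info strides and offsets are in bytes; ir3 expects dwords. */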
+ for (int i = 0; i < IR3_MAX_SO_BUFFERS; i++)
+ info->stride[i] = xfb->buffers[i].stride / 4;
+
+ for (int i = 0; i < xfb->output_count; i++) {
+ info->output[i].register_index = output_map[xfb->outputs[i].location];
+ info->output[i].start_component = xfb->outputs[i].component_offset;
+ info->output[i].num_components =
+ util_bitcount(xfb->outputs[i].component_mask);
+ info->output[i].output_buffer = xfb->outputs[i].buffer;
+ info->output[i].dst_offset = xfb->outputs[i].offset / 4;
+ info->output[i].stream = xfb->buffer_to_stream[xfb->outputs[i].buffer];
+ }
+
+ ralloc_free(xfb);
+}
+
struct tu_shader *
tu_shader_create(struct tu_device *dev,
gl_shader_stage stage,
const VkPipelineShaderStageCreateInfo *stage_info,
+ struct tu_pipeline_layout *layout,
const VkAllocationCallbacks *alloc)
{
const struct tu_shader_module *module =
nir_print_shader(nir, stderr);
}
- /* TODO what needs to happen? */
-
- switch (stage) {
- case MESA_SHADER_VERTEX:
- tu_sort_variables_by_location(&nir->outputs);
- break;
- case MESA_SHADER_TESS_CTRL:
- case MESA_SHADER_TESS_EVAL:
- case MESA_SHADER_GEOMETRY:
- tu_sort_variables_by_location(&nir->inputs);
- tu_sort_variables_by_location(&nir->outputs);
- break;
- case MESA_SHADER_FRAGMENT:
- tu_sort_variables_by_location(&nir->inputs);
- break;
- case MESA_SHADER_COMPUTE:
- break;
- default:
- unreachable("invalid gl_shader_stage");
- break;
+   /* Multi-step inlining procedure: lower initializers and returns, inline
+    * everything into the entrypoint, then drop the other functions.
+    */
+ NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
+ NIR_PASS_V(nir, nir_lower_returns);
+ NIR_PASS_V(nir, nir_inline_functions);
+ NIR_PASS_V(nir, nir_opt_deref);
+ foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
+ if (!func->is_entrypoint)
+ exec_node_remove(&func->node);
}
+ assert(exec_list_length(&nir->functions) == 1);
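+   /* With only the entrypoint left, lower the remaining variable
+    * initializers (function-temp ones were handled before inlining).
+    */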
+ NIR_PASS_V(nir, nir_lower_variable_initializers, ~nir_var_function_temp);
- nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
- ir3_glsl_type_size);
- nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
- ir3_glsl_type_size);
- nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
- ir3_glsl_type_size);
+ /* Split member structs. We do this before lower_io_to_temporaries so that
+ * it doesn't lower system values to temporaries by accident.
+ */
+ NIR_PASS_V(nir, nir_split_var_copies);
+ NIR_PASS_V(nir, nir_split_per_member_structs);
+
+ NIR_PASS_V(nir, nir_remove_dead_variables,
+ nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);
+
+   /* Gather information for transform feedback. This must be done after
+    * nir_split_per_member_structs, and after nir_remove_dead_variables on
+    * varyings so that the stream outputs are aligned correctly.
+    */
+ if (nir->info.stage == MESA_SHADER_VERTEX ||
+ nir->info.stage == MESA_SHADER_TESS_EVAL ||
+ nir->info.stage == MESA_SHADER_GEOMETRY)
+ tu_gather_xfb_info(nir, shader);
+
+ NIR_PASS_V(nir, nir_propagate_invariant);
+
+ NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
+
+ NIR_PASS_V(nir, nir_lower_global_vars_to_local);
+ NIR_PASS_V(nir, nir_split_var_copies);
+ NIR_PASS_V(nir, nir_lower_var_copies);
NIR_PASS_V(nir, nir_opt_copy_prop_vars);
+ NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);
+
+ /* ir3 doesn't support indirect input/output */
+ NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out);
+
+ NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
+
+ nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs, stage);
+ nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs, stage);
NIR_PASS_V(nir, nir_lower_system_values);
NIR_PASS_V(nir, nir_lower_frexp);
- NIR_PASS_V(nir, tu_lower_io, shader);
+ if (stage == MESA_SHADER_FRAGMENT)
+ NIR_PASS_V(nir, nir_lower_input_attachments, true);
+
+ NIR_PASS_V(nir, tu_lower_io, shader, layout);
NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);
+ if (stage == MESA_SHADER_FRAGMENT) {
+ /* NOTE: lower load_barycentric_at_sample first, since it
+ * produces load_barycentric_at_offset:
+ */
+ NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
+ NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);
+
+ NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
+ }
+
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
+   /* num_uniforms is only used by ir3 to size UBO 0 (the push constants) */
+ nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE / 16;
+
shader->ir3_shader.compiler = dev->compiler;
shader->ir3_shader.type = stage;
shader->ir3_shader.nir = nir;
struct tu_shader_compile_options *options,
const VkGraphicsPipelineCreateInfo *pipeline_info)
{
- *options = (struct tu_shader_compile_options) {
- /* TODO ir3_key */
+ bool has_gs = false;
+ if (pipeline_info) {
+ for (uint32_t i = 0; i < pipeline_info->stageCount; i++) {
+ if (pipeline_info->pStages[i].stage == VK_SHADER_STAGE_GEOMETRY_BIT) {
+ has_gs = true;
+ break;
+ }
+ }
+ }
- .optimize = !(pipeline_info->flags &
- VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT),
+ *options = (struct tu_shader_compile_options) {
+ /* TODO: Populate the remaining fields of ir3_shader_key. */
+ .key = {
+ .has_gs = has_gs,
+ },
+      /* TODO: honor VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; some
+       * optimizations still need to run, otherwise the shader might not
+       * compile.
+       */
+ .optimize = true,
.include_binning_pass = true,
};
}
if (!shader->binary)
return VK_ERROR_OUT_OF_HOST_MEMORY;
+ if (shader_debug_enabled(shader->ir3_shader.type)) {
+ fprintf(stdout, "Native code for unnamed %s shader %s:\n",
+ ir3_shader_stage(&shader->variants[0]), shader->ir3_shader.nir->info.name);
+ if (shader->ir3_shader.type == MESA_SHADER_FRAGMENT)
+ fprintf(stdout, "SIMD0\n");
+ ir3_shader_disasm(&shader->variants[0], shader->binary, stdout);
+ }
+
/* compile another variant for the binning pass */
if (options->include_binning_pass &&
shader->ir3_shader.type == MESA_SHADER_VERTEX) {
return VK_ERROR_OUT_OF_HOST_MEMORY;
shader->has_binning_pass = true;
+
+ if (shader_debug_enabled(MESA_SHADER_VERTEX)) {
+ fprintf(stdout, "Native code for unnamed binning shader %s:\n",
+ shader->ir3_shader.nir->info.name);
+ ir3_shader_disasm(&shader->variants[1], shader->binary, stdout);
+ }
}
if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_IR3)) {