/*
* Copyright 2017 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "si_shader.h"
-#include "si_shader_internal.h"
-
#include "ac_nir_to_llvm.h"
-
-#include "tgsi/tgsi_from_mesa.h"
-
#include "compiler/nir/nir.h"
+#include "compiler/nir/nir_builder.h"
+#include "compiler/nir/nir_deref.h"
#include "compiler/nir_types.h"
+#include "si_pipe.h"
+#include "si_shader_internal.h"
+#include "tgsi/tgsi_from_mesa.h"
-
-static int
-type_size(const struct glsl_type *type)
+static const nir_deref_instr *tex_get_texture_deref(nir_tex_instr *instr)
{
- return glsl_count_attribute_slots(type, false);
+ for (unsigned i = 0; i < instr->num_srcs; i++) {
+ switch (instr->src[i].src_type) {
+ case nir_tex_src_texture_deref:
+ return nir_src_as_deref(instr->src[i].src);
+ default:
+ break;
+ }
+ }
+
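+ /* No texture deref source was found; the texture is presumably referenced
+ * another way, e.g. through a bindless handle or a lowered index. */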
+ return NULL;
}
-static void scan_instruction(struct tgsi_shader_info *info,
- nir_instr *instr)
+static void scan_io_usage(struct si_shader_info *info, nir_intrinsic_instr *intr,
+ bool is_input)
{
- if (instr->type == nir_instr_type_alu) {
- nir_alu_instr *alu = nir_instr_as_alu(instr);
-
- switch (alu->op) {
- case nir_op_fddx:
- case nir_op_fddy:
- case nir_op_fddx_fine:
- case nir_op_fddy_fine:
- case nir_op_fddx_coarse:
- case nir_op_fddy_coarse:
- info->uses_derivatives = true;
- break;
- default:
- break;
- }
- } else if (instr->type == nir_instr_type_tex) {
- nir_tex_instr *tex = nir_instr_as_tex(instr);
-
- if (!tex->texture) {
- info->samplers_declared |=
- u_bit_consecutive(tex->sampler_index, 1);
- }
-
- switch (tex->op) {
- case nir_texop_tex:
- case nir_texop_txb:
- case nir_texop_lod:
- info->uses_derivatives = true;
- break;
- default:
- break;
- }
- } else if (instr->type == nir_instr_type_intrinsic) {
- nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
-
- switch (intr->intrinsic) {
- case nir_intrinsic_load_front_face:
- info->uses_frontface = 1;
- break;
- case nir_intrinsic_load_instance_id:
- info->uses_instanceid = 1;
- break;
- case nir_intrinsic_load_invocation_id:
- info->uses_invocationid = true;
- break;
- case nir_intrinsic_load_vertex_id:
- info->uses_vertexid = 1;
- break;
- case nir_intrinsic_load_vertex_id_zero_base:
- info->uses_vertexid_nobase = 1;
- break;
- case nir_intrinsic_load_base_vertex:
- info->uses_basevertex = 1;
- break;
- case nir_intrinsic_load_primitive_id:
- info->uses_primid = 1;
- break;
- case nir_intrinsic_load_sample_mask_in:
- info->reads_samplemask = true;
- break;
- case nir_intrinsic_load_tess_level_inner:
- case nir_intrinsic_load_tess_level_outer:
- info->reads_tess_factors = true;
- break;
- case nir_intrinsic_image_store:
- case nir_intrinsic_image_atomic_add:
- case nir_intrinsic_image_atomic_min:
- case nir_intrinsic_image_atomic_max:
- case nir_intrinsic_image_atomic_and:
- case nir_intrinsic_image_atomic_or:
- case nir_intrinsic_image_atomic_xor:
- case nir_intrinsic_image_atomic_exchange:
- case nir_intrinsic_image_atomic_comp_swap:
- case nir_intrinsic_store_ssbo:
- case nir_intrinsic_ssbo_atomic_add:
- case nir_intrinsic_ssbo_atomic_imin:
- case nir_intrinsic_ssbo_atomic_umin:
- case nir_intrinsic_ssbo_atomic_imax:
- case nir_intrinsic_ssbo_atomic_umax:
- case nir_intrinsic_ssbo_atomic_and:
- case nir_intrinsic_ssbo_atomic_or:
- case nir_intrinsic_ssbo_atomic_xor:
- case nir_intrinsic_ssbo_atomic_exchange:
- case nir_intrinsic_ssbo_atomic_comp_swap:
- info->writes_memory = true;
- break;
- default:
- break;
- }
- }
+ unsigned interp = INTERP_MODE_FLAT; /* load_input uses flat shading */
+
+ if (intr->intrinsic == nir_intrinsic_load_interpolated_input) {
+ nir_intrinsic_instr *baryc = nir_instr_as_intrinsic(intr->src[0].ssa->parent_instr);
+
+ if (baryc) {
+ if (nir_intrinsic_infos[baryc->intrinsic].index_map[NIR_INTRINSIC_INTERP_MODE] > 0)
+ interp = nir_intrinsic_interp_mode(baryc);
+ else
+ unreachable("unknown barycentric intrinsic");
+ } else {
+ unreachable("unknown barycentric expression");
+ }
+ }
+
+ unsigned mask, bit_size;
+ bool dual_slot, is_output_load;
+
+ if (nir_intrinsic_infos[intr->intrinsic].index_map[NIR_INTRINSIC_WRMASK] > 0) {
+ mask = nir_intrinsic_write_mask(intr); /* store */
+ bit_size = nir_src_bit_size(intr->src[0]);
+ dual_slot = bit_size == 64 && nir_src_num_components(intr->src[0]) >= 3;
+ is_output_load = false;
+ } else {
+ mask = nir_ssa_def_components_read(&intr->dest.ssa); /* load */
+ bit_size = intr->dest.ssa.bit_size;
+ dual_slot = bit_size == 64 && intr->dest.ssa.num_components >= 3;
+ is_output_load = !is_input;
+ }
+
+ /* Convert the 64-bit component mask to a 32-bit component mask. */
+ if (bit_size == 64) {
+ unsigned new_mask = 0;
+ for (unsigned i = 0; i < 4; i++) {
+ if (mask & (1 << i))
+ new_mask |= 0x3 << (2 * i);
+ }
+ mask = new_mask;
+ }
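+ /* e.g. the conversion above turns a 64-bit xz mask 0b0101 into 0b00110011,
+ * since each 64-bit component occupies two 32-bit slots. */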
+
+ /* Convert the 16-bit component mask to a 32-bit component mask. */
+ if (bit_size == 16) {
+ unsigned new_mask = 0;
+ for (unsigned i = 0; i < 4; i++) {
+ if (mask & (1 << i))
+ new_mask |= 0x1 << (i / 2);
+ }
+ mask = new_mask;
+ }
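+ /* e.g. the conversion above turns a 16-bit zw mask 0b1100 into 0b0010,
+ * since two 16-bit components share one 32-bit slot. */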
+
+ mask <<= nir_intrinsic_component(intr);
+
+ nir_src offset = *nir_get_io_offset_src(intr);
+ bool indirect = !nir_src_is_const(offset);
+ if (!indirect)
+ assert(nir_src_as_uint(offset) == 0);
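+ /* Constant offsets were already folded into nir_intrinsic_base by
+ * nir_io_add_const_offset_to_base (see si_lower_io), hence the assert. */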
+
+ unsigned semantic = 0;
+ /* Vertex shader inputs don't have semantics; they were already mapped
+ * to attributes via the driver location. */
+ if (info->stage != MESA_SHADER_VERTEX || !is_input)
+ semantic = nir_intrinsic_io_semantics(intr).location;
+
+ if (info->stage == MESA_SHADER_FRAGMENT && !is_input) {
+ /* Never use FRAG_RESULT_COLOR directly. */
+ if (semantic == FRAG_RESULT_COLOR) {
+ semantic = FRAG_RESULT_DATA0;
+ info->color0_writes_all_cbufs = true;
+ }
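+ /* Dual-source blending: source index 1 is shifted to the next DATA semantic. */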
+ semantic += nir_intrinsic_io_semantics(intr).dual_source_blend_index;
+ }
+
+ unsigned driver_location = nir_intrinsic_base(intr);
+ unsigned num_slots = indirect ? nir_intrinsic_io_semantics(intr).num_slots : (1 + dual_slot);
+
+ if (is_input) {
+ assert(driver_location + num_slots <= ARRAY_SIZE(info->input_usage_mask));
+
+ for (unsigned i = 0; i < num_slots; i++) {
+ unsigned loc = driver_location + i;
+ unsigned slot_mask = (dual_slot && i % 2 ? mask >> 4 : mask) & 0xf;
+
+ info->input_semantic[loc] = semantic + i;
+ info->input_interpolate[loc] = interp;
+
+ if (slot_mask) {
+ info->input_usage_mask[loc] |= slot_mask;
+ info->num_inputs = MAX2(info->num_inputs, loc + 1);
+
+ if (semantic == VARYING_SLOT_PRIMITIVE_ID)
+ info->uses_primid = true;
+ }
+ }
+ } else {
+ /* Outputs. */
+ assert(driver_location + num_slots <= ARRAY_SIZE(info->output_usagemask));
+ assert(semantic + num_slots < ARRAY_SIZE(info->output_semantic_to_slot));
+
+ for (unsigned i = 0; i < num_slots; i++) {
+ unsigned loc = driver_location + i;
+ unsigned slot_mask = (dual_slot && i % 2 ? mask >> 4 : mask) & 0xf;
+
+ info->output_semantic[loc] = semantic + i;
+ info->output_semantic_to_slot[semantic + i] = loc;
+
+ if (is_output_load) {
+ /* Output loads have only a few things that we need to track. */
+ info->output_readmask[loc] |= slot_mask;
+
+ if (info->stage == MESA_SHADER_FRAGMENT &&
+ nir_intrinsic_io_semantics(intr).fb_fetch_output)
+ info->uses_fbfetch = true;
+ } else if (slot_mask) {
+ /* Output stores. */
+ if (info->stage == MESA_SHADER_GEOMETRY) {
+ unsigned gs_streams = (uint32_t)nir_intrinsic_io_semantics(intr).gs_streams <<
+ (nir_intrinsic_component(intr) * 2);
+ unsigned new_mask = slot_mask & ~info->output_usagemask[loc];
+
+ for (unsigned c = 0; c < 4; c++) {
+ unsigned stream = (gs_streams >> (c * 2)) & 0x3;
+
+ if (new_mask & (1 << c)) {
+ info->output_streams[loc] |= stream << (c * 2);
+ info->num_stream_output_components[stream]++;
+ }
+ }
+ }
+
+ info->output_usagemask[loc] |= slot_mask;
+ info->num_outputs = MAX2(info->num_outputs, loc + 1);
+
+ if (info->stage == MESA_SHADER_FRAGMENT) {
+ switch (semantic) {
+ case FRAG_RESULT_DEPTH:
+ info->writes_z = true;
+ break;
+ case FRAG_RESULT_STENCIL:
+ info->writes_stencil = true;
+ break;
+ case FRAG_RESULT_SAMPLE_MASK:
+ info->writes_samplemask = true;
+ break;
+ default:
+ if (semantic >= FRAG_RESULT_DATA0 && semantic <= FRAG_RESULT_DATA7) {
+ unsigned index = semantic - FRAG_RESULT_DATA0;
+ info->colors_written |= 1 << (index + i);
+ }
+ break;
+ }
+ } else {
+ switch (semantic) {
+ case VARYING_SLOT_PRIMITIVE_ID:
+ info->writes_primid = true;
+ break;
+ case VARYING_SLOT_VIEWPORT:
+ info->writes_viewport_index = true;
+ break;
+ case VARYING_SLOT_LAYER:
+ info->writes_layer = true;
+ break;
+ case VARYING_SLOT_PSIZ:
+ info->writes_psize = true;
+ break;
+ case VARYING_SLOT_CLIP_VERTEX:
+ info->writes_clipvertex = true;
+ break;
+ case VARYING_SLOT_EDGE:
+ info->writes_edgeflag = true;
+ break;
+ case VARYING_SLOT_POS:
+ info->writes_position = true;
+ break;
+ }
+ }
+ }
+ }
+ }
}
-void si_nir_scan_tess_ctrl(const struct nir_shader *nir,
- const struct tgsi_shader_info *info,
- struct tgsi_tessctrl_info *out)
+static void scan_instruction(const struct nir_shader *nir, struct si_shader_info *info,
+ nir_instr *instr)
{
- memset(out, 0, sizeof(*out));
-
- if (nir->info.stage != MESA_SHADER_TESS_CTRL)
- return;
-
- /* Initial value = true. Here the pass will accumulate results from
- * multiple segments surrounded by barriers. If tess factors aren't
- * written at all, it's a shader bug and we don't care if this will be
- * true.
- */
- out->tessfactors_are_def_in_all_invocs = true;
-
- /* TODO: Implement scanning of tess factors, see tgsi backend. */
+ if (instr->type == nir_instr_type_tex) {
+ nir_tex_instr *tex = nir_instr_as_tex(instr);
+ const nir_deref_instr *deref = tex_get_texture_deref(tex);
+ nir_variable *var = deref ? nir_deref_instr_get_variable(deref) : NULL;
+
+ if (var) {
+ if (deref->mode != nir_var_uniform || var->data.bindless)
+ info->uses_bindless_samplers = true;
+ }
+ } else if (instr->type == nir_instr_type_intrinsic) {
+ nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+
+ switch (intr->intrinsic) {
+ case nir_intrinsic_load_front_face:
+ info->uses_frontface = 1;
+ break;
+ case nir_intrinsic_load_instance_id:
+ info->uses_instanceid = 1;
+ break;
+ case nir_intrinsic_load_invocation_id:
+ info->uses_invocationid = true;
+ break;
+ case nir_intrinsic_load_num_work_groups:
+ info->uses_grid_size = true;
+ break;
+ case nir_intrinsic_load_local_invocation_index:
+ case nir_intrinsic_load_subgroup_id:
+ case nir_intrinsic_load_num_subgroups:
+ info->uses_subgroup_info = true;
+ break;
+ case nir_intrinsic_load_local_group_size:
+ /* A known block size is translated to an immediate, so the block-size
+ * system value is only needed when the block size is variable. */
+ if (info->base.cs.local_size[0] == 0)
+ info->uses_block_size = true;
+ break;
+ case nir_intrinsic_load_local_invocation_id:
+ case nir_intrinsic_load_work_group_id: {
+ unsigned mask = nir_ssa_def_components_read(&intr->dest.ssa);
+ while (mask) {
+ unsigned i = u_bit_scan(&mask);
+
+ if (intr->intrinsic == nir_intrinsic_load_work_group_id)
+ info->uses_block_id[i] = true;
+ else
+ info->uses_thread_id[i] = true;
+ }
+ break;
+ }
+ case nir_intrinsic_load_draw_id:
+ info->uses_drawid = 1;
+ break;
+ case nir_intrinsic_load_primitive_id:
+ info->uses_primid = 1;
+ break;
+ case nir_intrinsic_load_sample_mask_in:
+ info->reads_samplemask = true;
+ break;
+ case nir_intrinsic_load_tess_level_inner:
+ case nir_intrinsic_load_tess_level_outer:
+ info->reads_tess_factors = true;
+ break;
+ case nir_intrinsic_bindless_image_load:
+ case nir_intrinsic_bindless_image_size:
+ case nir_intrinsic_bindless_image_samples:
+ info->uses_bindless_images = true;
+ break;
+ case nir_intrinsic_bindless_image_store:
+ info->uses_bindless_images = true;
+ info->num_memory_stores++;
+ break;
+ case nir_intrinsic_image_deref_store:
+ info->num_memory_stores++;
+ break;
+ case nir_intrinsic_bindless_image_atomic_add:
+ case nir_intrinsic_bindless_image_atomic_imin:
+ case nir_intrinsic_bindless_image_atomic_umin:
+ case nir_intrinsic_bindless_image_atomic_imax:
+ case nir_intrinsic_bindless_image_atomic_umax:
+ case nir_intrinsic_bindless_image_atomic_and:
+ case nir_intrinsic_bindless_image_atomic_or:
+ case nir_intrinsic_bindless_image_atomic_xor:
+ case nir_intrinsic_bindless_image_atomic_exchange:
+ case nir_intrinsic_bindless_image_atomic_comp_swap:
+ info->uses_bindless_images = true;
+ info->num_memory_stores++;
+ break;
+ case nir_intrinsic_image_deref_atomic_add:
+ case nir_intrinsic_image_deref_atomic_imin:
+ case nir_intrinsic_image_deref_atomic_umin:
+ case nir_intrinsic_image_deref_atomic_imax:
+ case nir_intrinsic_image_deref_atomic_umax:
+ case nir_intrinsic_image_deref_atomic_and:
+ case nir_intrinsic_image_deref_atomic_or:
+ case nir_intrinsic_image_deref_atomic_xor:
+ case nir_intrinsic_image_deref_atomic_exchange:
+ case nir_intrinsic_image_deref_atomic_comp_swap:
+ case nir_intrinsic_image_deref_atomic_inc_wrap:
+ case nir_intrinsic_image_deref_atomic_dec_wrap:
+ info->num_memory_stores++;
+ break;
+ case nir_intrinsic_store_ssbo:
+ case nir_intrinsic_ssbo_atomic_add:
+ case nir_intrinsic_ssbo_atomic_imin:
+ case nir_intrinsic_ssbo_atomic_umin:
+ case nir_intrinsic_ssbo_atomic_imax:
+ case nir_intrinsic_ssbo_atomic_umax:
+ case nir_intrinsic_ssbo_atomic_and:
+ case nir_intrinsic_ssbo_atomic_or:
+ case nir_intrinsic_ssbo_atomic_xor:
+ case nir_intrinsic_ssbo_atomic_exchange:
+ case nir_intrinsic_ssbo_atomic_comp_swap:
+ info->num_memory_stores++;
+ break;
+ case nir_intrinsic_load_color0:
+ case nir_intrinsic_load_color1: {
+ unsigned index = intr->intrinsic == nir_intrinsic_load_color1;
+ uint8_t mask = nir_ssa_def_components_read(&intr->dest.ssa);
+ info->colors_read |= mask << (index * 4);
+ break;
+ }
+ case nir_intrinsic_load_barycentric_pixel:
+ case nir_intrinsic_load_barycentric_centroid:
+ case nir_intrinsic_load_barycentric_sample:
+ case nir_intrinsic_load_barycentric_at_offset: /* uses center */
+ case nir_intrinsic_load_barycentric_at_sample: { /* uses center */
+ unsigned mode = nir_intrinsic_interp_mode(intr);
+
+ if (mode == INTERP_MODE_FLAT)
+ break;
+
+ if (mode == INTERP_MODE_NOPERSPECTIVE) {
+ if (intr->intrinsic == nir_intrinsic_load_barycentric_sample)
+ info->uses_linear_sample = true;
+ else if (intr->intrinsic == nir_intrinsic_load_barycentric_centroid)
+ info->uses_linear_centroid = true;
+ else
+ info->uses_linear_center = true;
+ } else {
+ if (intr->intrinsic == nir_intrinsic_load_barycentric_sample)
+ info->uses_persp_sample = true;
+ else if (intr->intrinsic == nir_intrinsic_load_barycentric_centroid)
+ info->uses_persp_centroid = true;
+ else
+ info->uses_persp_center = true;
+ }
+ if (intr->intrinsic == nir_intrinsic_load_barycentric_at_sample)
+ info->uses_interp_at_sample = true;
+ break;
+ }
+ case nir_intrinsic_load_input:
+ case nir_intrinsic_load_per_vertex_input:
+ case nir_intrinsic_load_input_vertex:
+ case nir_intrinsic_load_interpolated_input:
+ scan_io_usage(info, intr, true);
+ break;
+ case nir_intrinsic_load_output:
+ case nir_intrinsic_load_per_vertex_output:
+ case nir_intrinsic_store_output:
+ case nir_intrinsic_store_per_vertex_output:
+ scan_io_usage(info, intr, false);
+ break;
+ case nir_intrinsic_load_deref:
+ case nir_intrinsic_store_deref:
+ case nir_intrinsic_interp_deref_at_centroid:
+ case nir_intrinsic_interp_deref_at_sample:
+ case nir_intrinsic_interp_deref_at_offset:
+ unreachable("these opcodes should have been lowered");
+ break;
+ default:
+ break;
+ }
+ }
}
-void si_nir_scan_shader(const struct nir_shader *nir,
- struct tgsi_shader_info *info)
+void si_nir_scan_shader(const struct nir_shader *nir, struct si_shader_info *info)
{
- nir_function *func;
- unsigned i;
-
- assert(nir->info.stage == MESA_SHADER_VERTEX ||
- nir->info.stage == MESA_SHADER_GEOMETRY ||
- nir->info.stage == MESA_SHADER_TESS_CTRL ||
- nir->info.stage == MESA_SHADER_TESS_EVAL ||
- nir->info.stage == MESA_SHADER_FRAGMENT);
-
- info->processor = pipe_shader_type_from_mesa(nir->info.stage);
- info->num_tokens = 2; /* indicate that the shader is non-empty */
- info->num_instructions = 2;
-
- if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
- info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT] =
- nir->info.tess.tcs_vertices_out;
- }
-
- if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
- if (nir->info.tess.primitive_mode == GL_ISOLINES)
- info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = PIPE_PRIM_LINES;
- else
- info->properties[TGSI_PROPERTY_TES_PRIM_MODE] = nir->info.tess.primitive_mode;
-
- STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
- STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
- PIPE_TESS_SPACING_FRACTIONAL_ODD);
- STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
- PIPE_TESS_SPACING_FRACTIONAL_EVEN);
-
- info->properties[TGSI_PROPERTY_TES_SPACING] = (nir->info.tess.spacing + 1) % 3;
- info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW] = !nir->info.tess.ccw;
- info->properties[TGSI_PROPERTY_TES_POINT_MODE] = nir->info.tess.point_mode;
- }
-
- if (nir->info.stage == MESA_SHADER_GEOMETRY) {
- info->properties[TGSI_PROPERTY_GS_INPUT_PRIM] = nir->info.gs.input_primitive;
- info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM] = nir->info.gs.output_primitive;
- info->properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES] = nir->info.gs.vertices_out;
- info->properties[TGSI_PROPERTY_GS_INVOCATIONS] = nir->info.gs.invocations;
- }
-
- i = 0;
- uint64_t processed_inputs = 0;
- unsigned num_inputs = 0;
- nir_foreach_variable(variable, &nir->inputs) {
- unsigned semantic_name, semantic_index;
- unsigned attrib_count = glsl_count_attribute_slots(variable->type,
- nir->info.stage == MESA_SHADER_VERTEX);
-
- /* Vertex shader inputs don't have semantics. The state
- * tracker has already mapped them to attributes via
- * variable->data.driver_location.
- */
- if (nir->info.stage == MESA_SHADER_VERTEX) {
- if (glsl_type_is_dual_slot(variable->type))
- num_inputs += 2;
- else
- num_inputs++;
- continue;
- }
-
- assert(nir->info.stage != MESA_SHADER_FRAGMENT ||
- (attrib_count == 1 && "not implemented"));
-
- /* Fragment shader position is a system value. */
- if (nir->info.stage == MESA_SHADER_FRAGMENT &&
- variable->data.location == VARYING_SLOT_POS) {
- if (variable->data.pixel_center_integer)
- info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
- TGSI_FS_COORD_PIXEL_CENTER_INTEGER;
-
- num_inputs++;
- continue;
- }
-
- i = variable->data.driver_location;
- if (processed_inputs & ((uint64_t)1 << i))
- continue;
-
- processed_inputs |= ((uint64_t)1 << i);
- num_inputs++;
-
- tgsi_get_gl_varying_semantic(variable->data.location, true,
- &semantic_name, &semantic_index);
-
- info->input_semantic_name[i] = semantic_name;
- info->input_semantic_index[i] = semantic_index;
-
- if (semantic_name == TGSI_SEMANTIC_PRIMID)
- info->uses_primid = true;
-
- if (variable->data.sample)
- info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_SAMPLE;
- else if (variable->data.centroid)
- info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTROID;
- else
- info->input_interpolate_loc[i] = TGSI_INTERPOLATE_LOC_CENTER;
-
- enum glsl_base_type base_type =
- glsl_get_base_type(glsl_without_array(variable->type));
-
- switch (variable->data.interpolation) {
- case INTERP_MODE_NONE:
- if (glsl_base_type_is_integer(base_type)) {
- info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
- break;
- }
-
- if (semantic_name == TGSI_SEMANTIC_COLOR) {
- info->input_interpolate[i] = TGSI_INTERPOLATE_COLOR;
- goto persp_locations;
- }
- /* fall-through */
- case INTERP_MODE_SMOOTH:
- assert(!glsl_base_type_is_integer(base_type));
-
- info->input_interpolate[i] = TGSI_INTERPOLATE_PERSPECTIVE;
-
- persp_locations:
- if (variable->data.sample)
- info->uses_persp_sample = true;
- else if (variable->data.centroid)
- info->uses_persp_centroid = true;
- else
- info->uses_persp_center = true;
- break;
-
- case INTERP_MODE_NOPERSPECTIVE:
- assert(!glsl_base_type_is_integer(base_type));
-
- info->input_interpolate[i] = TGSI_INTERPOLATE_LINEAR;
-
- if (variable->data.sample)
- info->uses_linear_sample = true;
- else if (variable->data.centroid)
- info->uses_linear_centroid = true;
- else
- info->uses_linear_center = true;
- break;
-
- case INTERP_MODE_FLAT:
- info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
- break;
- }
-
- /* TODO make this more precise */
- if (variable->data.location == VARYING_SLOT_COL0)
- info->colors_read |= 0x0f;
- else if (variable->data.location == VARYING_SLOT_COL1)
- info->colors_read |= 0xf0;
- }
-
- info->num_inputs = num_inputs;
-
-
- i = 0;
- uint64_t processed_outputs = 0;
- unsigned num_outputs = 0;
- nir_foreach_variable(variable, &nir->outputs) {
- unsigned semantic_name, semantic_index;
-
- if (nir->info.stage == MESA_SHADER_FRAGMENT) {
- tgsi_get_gl_frag_result_semantic(variable->data.location,
- &semantic_name, &semantic_index);
-
- /* Adjust for dual source blending */
- if (variable->data.index > 0) {
- semantic_index++;
- }
- } else {
- tgsi_get_gl_varying_semantic(variable->data.location, true,
- &semantic_name, &semantic_index);
- }
-
- i = variable->data.driver_location;
- if (processed_outputs & ((uint64_t)1 << i))
- continue;
-
- processed_outputs |= ((uint64_t)1 << i);
- num_outputs++;
-
- info->output_semantic_name[i] = semantic_name;
- info->output_semantic_index[i] = semantic_index;
- info->output_usagemask[i] = TGSI_WRITEMASK_XYZW;
-
- unsigned num_components = 4;
- unsigned vector_elements = glsl_get_vector_elements(glsl_without_array(variable->type));
- if (vector_elements)
- num_components = vector_elements;
-
- unsigned gs_out_streams;
- if (variable->data.stream & (1u << 31)) {
- gs_out_streams = variable->data.stream & ~(1u << 31);
- } else {
- assert(variable->data.stream < 4);
- gs_out_streams = 0;
- for (unsigned j = 0; j < num_components; ++j)
- gs_out_streams |= variable->data.stream << (2 * (variable->data.location_frac + j));
- }
-
- unsigned streamx = gs_out_streams & 3;
- unsigned streamy = (gs_out_streams >> 2) & 3;
- unsigned streamz = (gs_out_streams >> 4) & 3;
- unsigned streamw = (gs_out_streams >> 6) & 3;
-
- if (info->output_usagemask[i] & TGSI_WRITEMASK_X) {
- info->output_streams[i] |= streamx;
- info->num_stream_output_components[streamx]++;
- }
- if (info->output_usagemask[i] & TGSI_WRITEMASK_Y) {
- info->output_streams[i] |= streamy << 2;
- info->num_stream_output_components[streamy]++;
- }
- if (info->output_usagemask[i] & TGSI_WRITEMASK_Z) {
- info->output_streams[i] |= streamz << 4;
- info->num_stream_output_components[streamz]++;
- }
- if (info->output_usagemask[i] & TGSI_WRITEMASK_W) {
- info->output_streams[i] |= streamw << 6;
- info->num_stream_output_components[streamw]++;
- }
-
- switch (semantic_name) {
- case TGSI_SEMANTIC_PRIMID:
- info->writes_primid = true;
- break;
- case TGSI_SEMANTIC_VIEWPORT_INDEX:
- info->writes_viewport_index = true;
- break;
- case TGSI_SEMANTIC_LAYER:
- info->writes_layer = true;
- break;
- case TGSI_SEMANTIC_PSIZE:
- info->writes_psize = true;
- break;
- case TGSI_SEMANTIC_CLIPVERTEX:
- info->writes_clipvertex = true;
- break;
- case TGSI_SEMANTIC_COLOR:
- info->colors_written |= 1 << semantic_index;
- break;
- case TGSI_SEMANTIC_STENCIL:
- info->writes_stencil = true;
- break;
- case TGSI_SEMANTIC_SAMPLEMASK:
- info->writes_samplemask = true;
- break;
- case TGSI_SEMANTIC_EDGEFLAG:
- info->writes_edgeflag = true;
- break;
- case TGSI_SEMANTIC_POSITION:
- if (info->processor == PIPE_SHADER_FRAGMENT)
- info->writes_z = true;
- else
- info->writes_position = true;
- break;
- }
-
- if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
- switch (semantic_name) {
- case TGSI_SEMANTIC_PATCH:
- info->reads_perpatch_outputs = true;
- break;
- case TGSI_SEMANTIC_TESSINNER:
- case TGSI_SEMANTIC_TESSOUTER:
- info->reads_tessfactor_outputs = true;
- break;
- default:
- info->reads_pervertex_outputs = true;
- }
- }
- }
-
- info->num_outputs = num_outputs;
-
- nir_foreach_variable(variable, &nir->uniforms) {
- const struct glsl_type *type = variable->type;
- enum glsl_base_type base_type =
- glsl_get_base_type(glsl_without_array(type));
- unsigned aoa_size = MAX2(1, glsl_get_aoa_size(type));
-
- /* We rely on the fact that nir_lower_samplers_as_deref has
- * eliminated struct dereferences.
- */
- if (base_type == GLSL_TYPE_SAMPLER)
- info->samplers_declared |=
- u_bit_consecutive(variable->data.binding, aoa_size);
- else if (base_type == GLSL_TYPE_IMAGE)
- info->images_declared |=
- u_bit_consecutive(variable->data.binding, aoa_size);
- }
-
- info->num_written_clipdistance = nir->info.clip_distance_array_size;
- info->num_written_culldistance = nir->info.cull_distance_array_size;
- info->clipdist_writemask = u_bit_consecutive(0, info->num_written_clipdistance);
- info->culldist_writemask = u_bit_consecutive(0, info->num_written_culldistance);
-
- if (info->processor == PIPE_SHADER_FRAGMENT)
- info->uses_kill = nir->info.fs.uses_discard;
-
- /* TODO make this more accurate */
- info->const_buffers_declared = u_bit_consecutive(0, SI_NUM_CONST_BUFFERS);
- info->shader_buffers_declared = u_bit_consecutive(0, SI_NUM_SHADER_BUFFERS);
-
- func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
- nir_foreach_block(block, func->impl) {
- nir_foreach_instr(instr, block)
- scan_instruction(info, instr);
- }
+ nir_function *func;
+
+ info->base = nir->info;
+ info->stage = nir->info.stage;
+
+ if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
+ if (info->base.tess.primitive_mode == GL_ISOLINES)
+ info->base.tess.primitive_mode = GL_LINES;
+ }
+
+ if (nir->info.stage == MESA_SHADER_FRAGMENT) {
+ /* post_depth_coverage implies early_fragment_tests */
+ info->base.fs.early_fragment_tests |= info->base.fs.post_depth_coverage;
+
+ info->color_interpolate[0] = nir->info.fs.color0_interp;
+ info->color_interpolate[1] = nir->info.fs.color1_interp;
+ for (unsigned i = 0; i < 2; i++) {
+ if (info->color_interpolate[i] == INTERP_MODE_NONE)
+ info->color_interpolate[i] = INTERP_MODE_COLOR;
+ }
+
+ info->color_interpolate_loc[0] = nir->info.fs.color0_sample ? TGSI_INTERPOLATE_LOC_SAMPLE :
+ nir->info.fs.color0_centroid ? TGSI_INTERPOLATE_LOC_CENTROID :
+ TGSI_INTERPOLATE_LOC_CENTER;
+ info->color_interpolate_loc[1] = nir->info.fs.color1_sample ? TGSI_INTERPOLATE_LOC_SAMPLE :
+ nir->info.fs.color1_centroid ? TGSI_INTERPOLATE_LOC_CENTROID :
+ TGSI_INTERPOLATE_LOC_CENTER;
+ }
+
+ info->constbuf0_num_slots = nir->num_uniforms;
+
+ if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
+ info->tessfactors_are_def_in_all_invocs = ac_are_tessfactors_def_in_all_invocs(nir);
+ }
+
+ memset(info->output_semantic_to_slot, -1, sizeof(info->output_semantic_to_slot));
+
+ func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
+ nir_foreach_block (block, func->impl) {
+ nir_foreach_instr (instr, block)
+ scan_instruction(nir, info, instr);
+ }
+
+ /* Add color inputs to the list of inputs. */
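+ /* (si_nir_lower_color rewrote their loads as load_color0/1 intrinsics,
+ * so scan_io_usage never saw them as regular inputs.) */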
+ if (nir->info.stage == MESA_SHADER_FRAGMENT) {
+ for (unsigned i = 0; i < 2; i++) {
+ if ((info->colors_read >> (i * 4)) & 0xf) {
+ info->input_semantic[info->num_inputs] = VARYING_SLOT_COL0 + i;
+ info->input_interpolate[info->num_inputs] = info->color_interpolate[i];
+ info->input_usage_mask[info->num_inputs] = info->colors_read >> (i * 4);
+ info->num_inputs++;
+ }
+ }
+ }
+
+ /* Trim output read masks based on write masks. */
+ for (unsigned i = 0; i < info->num_outputs; i++)
+ info->output_readmask[i] &= info->output_usagemask[i];
}
-/**
- * Perform "lowering" operations on the NIR that are run once when the shader
- * selector is created.
- */
-void
-si_lower_nir(struct si_shader_selector* sel)
+static void si_nir_opts(struct nir_shader *nir, bool first)
{
- /* Adjust the driver location of inputs and outputs. The state tracker
- * interprets them as slots, while the ac/nir backend interprets them
- * as individual components.
- */
- nir_foreach_variable(variable, &sel->nir->inputs)
- variable->data.driver_location *= 4;
-
- nir_foreach_variable(variable, &sel->nir->outputs) {
- variable->data.driver_location *= 4;
-
- if (sel->nir->info.stage == MESA_SHADER_FRAGMENT) {
- if (variable->data.location == FRAG_RESULT_DEPTH)
- variable->data.driver_location += 2;
- else if (variable->data.location == FRAG_RESULT_STENCIL)
- variable->data.driver_location += 1;
- }
- }
-
- /* Perform lowerings (and optimizations) of code.
- *
- * Performance considerations aside, we must:
- * - lower certain ALU operations
- * - ensure constant offsets for texture instructions are folded
- * and copy-propagated
- */
- NIR_PASS_V(sel->nir, nir_lower_io, nir_var_uniform, type_size,
- (nir_lower_io_options)0);
- NIR_PASS_V(sel->nir, nir_lower_uniforms_to_ubo);
-
- NIR_PASS_V(sel->nir, nir_lower_returns);
- NIR_PASS_V(sel->nir, nir_lower_vars_to_ssa);
- NIR_PASS_V(sel->nir, nir_lower_alu_to_scalar);
- NIR_PASS_V(sel->nir, nir_lower_phis_to_scalar);
-
- static const struct nir_lower_tex_options lower_tex_options = {
- .lower_txp = ~0u,
- };
- NIR_PASS_V(sel->nir, nir_lower_tex, &lower_tex_options);
-
- const nir_lower_subgroups_options subgroups_options = {
- .subgroup_size = 64,
- .ballot_bit_size = 32,
- .lower_to_scalar = true,
- .lower_subgroup_masks = true,
- .lower_vote_trivial = false,
- };
- NIR_PASS_V(sel->nir, nir_lower_subgroups, &subgroups_options);
-
- bool progress;
- do {
- progress = false;
-
- /* (Constant) copy propagation is needed for txf with offsets. */
- NIR_PASS(progress, sel->nir, nir_copy_prop);
- NIR_PASS(progress, sel->nir, nir_opt_remove_phis);
- NIR_PASS(progress, sel->nir, nir_opt_dce);
- if (nir_opt_trivial_continues(sel->nir)) {
- progress = true;
- NIR_PASS(progress, sel->nir, nir_copy_prop);
- NIR_PASS(progress, sel->nir, nir_opt_dce);
- }
- NIR_PASS(progress, sel->nir, nir_opt_if);
- NIR_PASS(progress, sel->nir, nir_opt_dead_cf);
- NIR_PASS(progress, sel->nir, nir_opt_cse);
- NIR_PASS(progress, sel->nir, nir_opt_peephole_select, 8);
-
- /* Needed for algebraic lowering */
- NIR_PASS(progress, sel->nir, nir_opt_algebraic);
- NIR_PASS(progress, sel->nir, nir_opt_constant_folding);
-
- NIR_PASS(progress, sel->nir, nir_opt_undef);
- NIR_PASS(progress, sel->nir, nir_opt_conditional_discard);
- if (sel->nir->options->max_unroll_iterations) {
- NIR_PASS(progress, sel->nir, nir_opt_loop_unroll, 0);
- }
- } while (progress);
+ bool progress;
+
+ NIR_PASS_V(nir, nir_lower_vars_to_ssa);
+ NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
+ NIR_PASS_V(nir, nir_lower_phis_to_scalar);
+
+ do {
+ progress = false;
+ bool lower_alu_to_scalar = false;
+ bool lower_phis_to_scalar = false;
+
+ if (first) {
+ bool opt_find_array_copies = false;
+
+ NIR_PASS(progress, nir, nir_split_array_vars, nir_var_function_temp);
+ NIR_PASS(lower_alu_to_scalar, nir, nir_shrink_vec_array_vars, nir_var_function_temp);
+ NIR_PASS(opt_find_array_copies, nir, nir_opt_find_array_copies);
+ NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
+
+ /* Call nir_lower_var_copies() to remove any copies introduced
+ * by nir_opt_find_array_copies().
+ */
+ if (opt_find_array_copies)
+ NIR_PASS(progress, nir, nir_lower_var_copies);
+ progress |= opt_find_array_copies;
+ } else {
+ NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
+ }
+
+ NIR_PASS(progress, nir, nir_opt_dead_write_vars);
+
+ NIR_PASS(lower_alu_to_scalar, nir, nir_opt_trivial_continues);
+ /* (Constant) copy propagation is needed for txf with offsets. */
+ NIR_PASS(progress, nir, nir_copy_prop);
+ NIR_PASS(progress, nir, nir_opt_remove_phis);
+ NIR_PASS(progress, nir, nir_opt_dce);
+ NIR_PASS(lower_phis_to_scalar, nir, nir_opt_if, true);
+ NIR_PASS(progress, nir, nir_opt_dead_cf);
+
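+ /* nir_shrink_vec_array_vars, nir_opt_trivial_continues and nir_opt_if
+ * can introduce vector ops and phis, so re-scalarize when they made
+ * progress. */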
+ if (lower_alu_to_scalar)
+ NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
+ if (lower_phis_to_scalar)
+ NIR_PASS_V(nir, nir_lower_phis_to_scalar);
+ progress |= lower_alu_to_scalar | lower_phis_to_scalar;
+
+ NIR_PASS(progress, nir, nir_opt_cse);
+ NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);
+
+ /* Needed for algebraic lowering */
+ NIR_PASS(progress, nir, nir_opt_algebraic);
+ NIR_PASS(progress, nir, nir_opt_constant_folding);
+
+ if (!nir->info.flrp_lowered) {
+ unsigned lower_flrp = (nir->options->lower_flrp16 ? 16 : 0) |
+ (nir->options->lower_flrp32 ? 32 : 0) |
+ (nir->options->lower_flrp64 ? 64 : 0);
+ assert(lower_flrp);
+ bool lower_flrp_progress = false;
+
+ NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp, lower_flrp, false /* always_precise */);
+ if (lower_flrp_progress) {
+ NIR_PASS(progress, nir, nir_opt_constant_folding);
+ progress = true;
+ }
+
+ /* Nothing should rematerialize any flrps, so we only
+ * need to do this lowering once.
+ */
+ nir->info.flrp_lowered = true;
+ }
+
+ NIR_PASS(progress, nir, nir_opt_undef);
+ NIR_PASS(progress, nir, nir_opt_conditional_discard);
+ if (nir->options->max_unroll_iterations) {
+ NIR_PASS(progress, nir, nir_opt_loop_unroll, 0);
+ }
+ } while (progress);
}
-static void declare_nir_input_vs(struct si_shader_context *ctx,
- struct nir_variable *variable,
- LLVMValueRef out[4])
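+/* Type-size callback for nir_lower_io: I/O is counted in vec4 slots.
+ * The bindless parameter is unused here. */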
+static int type_size_vec4(const struct glsl_type *type, bool bindless)
{
- si_llvm_load_input_vs(ctx, variable->data.driver_location / 4, out);
+ return glsl_count_attribute_slots(type, false);
}
-static void declare_nir_input_fs(struct si_shader_context *ctx,
- struct nir_variable *variable,
- unsigned input_index,
- LLVMValueRef out[4])
+static void si_nir_lower_color(nir_shader *nir)
{
- unsigned slot = variable->data.location;
- if (slot == VARYING_SLOT_POS) {
- out[0] = LLVMGetParam(ctx->main_fn, SI_PARAM_POS_X_FLOAT);
- out[1] = LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Y_FLOAT);
- out[2] = LLVMGetParam(ctx->main_fn, SI_PARAM_POS_Z_FLOAT);
- out[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
- LLVMGetParam(ctx->main_fn, SI_PARAM_POS_W_FLOAT));
- return;
- }
-
- si_llvm_load_input_fs(ctx, input_index, out);
+ nir_function_impl *entrypoint = nir_shader_get_entrypoint(nir);
+
+ nir_builder b;
+ nir_builder_init(&b, entrypoint);
+
+ nir_foreach_block (block, entrypoint) {
+ nir_foreach_instr_safe (instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+ if (intrin->intrinsic != nir_intrinsic_load_deref)
+ continue;
+
+ nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
+ if (deref->mode != nir_var_shader_in)
+ continue;
+
+ b.cursor = nir_before_instr(instr);
+ nir_variable *var = nir_deref_instr_get_variable(deref);
+ nir_ssa_def *def;
+
+ if (var->data.location == VARYING_SLOT_COL0) {
+ def = nir_load_color0(&b);
+ nir->info.fs.color0_interp = var->data.interpolation;
+ nir->info.fs.color0_sample = var->data.sample;
+ nir->info.fs.color0_centroid = var->data.centroid;
+ } else if (var->data.location == VARYING_SLOT_COL1) {
+ def = nir_load_color1(&b);
+ nir->info.fs.color1_interp = var->data.interpolation;
+ nir->info.fs.color1_sample = var->data.sample;
+ nir->info.fs.color1_centroid = var->data.centroid;
+ } else {
+ continue;
+ }
+
+ nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(def));
+ nir_instr_remove(instr);
+ }
+ }
}
-LLVMValueRef si_nir_load_input_gs(struct ac_shader_abi *abi,
- unsigned location,
- unsigned driver_location,
- unsigned component,
- unsigned num_components,
- unsigned vertex_index,
- unsigned const_index,
- LLVMTypeRef type)
+static void si_lower_io(struct nir_shader *nir)
{
- struct si_shader_context *ctx = si_shader_context_from_abi(abi);
-
- LLVMValueRef value[4];
- for (unsigned i = component; i < num_components + component; i++) {
- value[i] = si_llvm_load_input_gs(&ctx->abi, driver_location / 4,
- vertex_index, type, i);
- }
-
- return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
+ /* HW supports indirect indexing for: | Enabled in driver
+ * -------------------------------------------------------
+ * VS inputs                           | No
+ * TCS inputs                          | Yes
+ * TES inputs                          | Yes
+ * GS inputs                           | No
+ * -------------------------------------------------------
+ * VS outputs before TCS               | No
+ * VS outputs before GS                | No
+ * TCS outputs                         | Yes
+ * TES outputs before GS               | No
+ */
+ bool has_indirect_inputs = nir->info.stage == MESA_SHADER_TESS_CTRL ||
+ nir->info.stage == MESA_SHADER_TESS_EVAL;
+ bool has_indirect_outputs = nir->info.stage == MESA_SHADER_TESS_CTRL;
+
+ if (!has_indirect_inputs || !has_indirect_outputs) {
+ NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir),
+ !has_indirect_outputs, !has_indirect_inputs);
+
+ /* Since we're doing nir_lower_io_to_temporaries late, we need
+ * to lower all the copy_deref's introduced by
+ * lower_io_to_temporaries before calling nir_lower_io.
+ */
+ NIR_PASS_V(nir, nir_split_var_copies);
+ NIR_PASS_V(nir, nir_lower_var_copies);
+ NIR_PASS_V(nir, nir_lower_global_vars_to_local);
+ }
+
+ if (nir->info.stage == MESA_SHADER_FRAGMENT)
+ si_nir_lower_color(nir);
+
+ NIR_PASS_V(nir, nir_lower_io, nir_var_shader_out | nir_var_shader_in,
+ type_size_vec4, 0);
+ nir->info.io_lowered = true;
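+ /* From here on, all shader I/O is expressed as load_input/store_output
+ * style intrinsics with vec4 slot bases, the form scan_io_usage expects. */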
+
+ /* nir_io_add_const_offset_to_base needs actual constants, so fold first. */
+ NIR_PASS_V(nir, nir_opt_constant_folding);
+ NIR_PASS_V(nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
+ NIR_PASS_V(nir, nir_io_add_const_offset_to_base, nir_var_shader_out);
+
+ /* Remove dead derefs, so that nir_validate doesn't fail. */
+ NIR_PASS_V(nir, nir_opt_dce);
+
+ /* Remove input and output nir_variables, because we don't need them
+ * anymore. Also remove uniforms, because those should have been lowered
+ * to UBOs already.
+ */
+ unsigned modes = nir_var_shader_in | nir_var_shader_out | nir_var_uniform;
+ nir_foreach_variable_with_modes_safe(var, nir, modes) {
+ if (var->data.mode == nir_var_uniform &&
+ (glsl_type_get_image_count(var->type) ||
+ glsl_type_get_sampler_count(var->type)))
+ continue;
+
+ exec_node_remove(&var->node);
+ }
}
-static LLVMValueRef
-si_nir_load_sampler_desc(struct ac_shader_abi *abi,
- unsigned descriptor_set, unsigned base_index,
- unsigned constant_index, LLVMValueRef dynamic_index,
- enum ac_descriptor_type desc_type, bool image,
- bool write)
+/**
+ * Perform "lowering" operations on the NIR that are run once when the shader
+ * selector is created.
+ */
+static void si_lower_nir(struct si_screen *sscreen, struct nir_shader *nir)
{
- struct si_shader_context *ctx = si_shader_context_from_abi(abi);
- LLVMBuilderRef builder = ctx->ac.builder;
- LLVMValueRef list = LLVMGetParam(ctx->main_fn, ctx->param_samplers_and_images);
- LLVMValueRef index = dynamic_index;
-
- assert(!descriptor_set);
-
- if (!index)
- index = ctx->ac.i32_0;
-
- index = LLVMBuildAdd(builder, index,
- LLVMConstInt(ctx->ac.i32, base_index + constant_index, false),
- "");
-
- if (image) {
- assert(desc_type == AC_DESC_IMAGE || desc_type == AC_DESC_BUFFER);
- assert(base_index + constant_index < ctx->num_images);
-
- if (dynamic_index)
- index = si_llvm_bound_index(ctx, index, ctx->num_images);
-
- index = LLVMBuildSub(ctx->gallivm.builder,
- LLVMConstInt(ctx->i32, SI_NUM_IMAGES - 1, 0),
- index, "");
-
- /* TODO: be smarter about when we use dcc_off */
- return si_load_image_desc(ctx, list, index, desc_type, write);
- }
-
- assert(base_index + constant_index < ctx->num_samplers);
-
- if (dynamic_index)
- index = si_llvm_bound_index(ctx, index, ctx->num_samplers);
-
- index = LLVMBuildAdd(ctx->gallivm.builder, index,
- LLVMConstInt(ctx->i32, SI_NUM_IMAGES / 2, 0), "");
-
- return si_load_sampler_desc(ctx, list, index, desc_type);
+ /* Perform lowerings (and optimizations) of code.
+ *
+ * Performance considerations aside, we must:
+ * - lower certain ALU operations
+ * - ensure constant offsets for texture instructions are folded
+ * and copy-propagated
+ */
+
+ static const struct nir_lower_tex_options lower_tex_options = {
+ .lower_txp = ~0u,
+ };
+ NIR_PASS_V(nir, nir_lower_tex, &lower_tex_options);
+
+ const nir_lower_subgroups_options subgroups_options = {
+ .subgroup_size = 64,
+ .ballot_bit_size = 64,
+ .lower_to_scalar = true,
+ .lower_subgroup_masks = true,
+ .lower_vote_trivial = false,
+ .lower_vote_eq_to_ballot = true,
+ };
+ NIR_PASS_V(nir, nir_lower_subgroups, &subgroups_options);
+
+ /* Lower load constants to scalar and then clean up the mess */
+ NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
+ NIR_PASS_V(nir, nir_lower_var_copies);
+ NIR_PASS_V(nir, nir_lower_pack);
+ NIR_PASS_V(nir, nir_opt_access);
+ si_nir_opts(nir, true);
+
+ /* Lower large variables that are always constant with load_constant
+ * intrinsics, which get turned into PC-relative loads from a data
+ * section next to the shader.
+ *
+ * st/mesa calls finalize_nir twice, but we can't call this pass twice.
+ */
+ bool changed = false;
+ if (!nir->constant_data) {
+ /* The pass crashes if there are dead temps of lowered IO interface types. */
+ NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
+ NIR_PASS(changed, nir, nir_opt_large_constants, glsl_get_natural_size_align_bytes, 16);
+ }
+
+ changed |= ac_lower_indirect_derefs(nir, sscreen->info.chip_class);
+ if (changed)
+ si_nir_opts(nir, false);
+
+ NIR_PASS_V(nir, nir_lower_bool_to_int32);
+ NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
+
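+ /* Demote preserves helper invocations, so derivatives after a discard
+ * stay well-defined. */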
+ if (sscreen->debug_flags & DBG(FS_CORRECT_DERIVS_AFTER_KILL))
+ NIR_PASS_V(nir, nir_lower_discard_to_demote);
}
-bool si_nir_build_llvm(struct si_shader_context *ctx, struct nir_shader *nir)
+void si_finalize_nir(struct pipe_screen *screen, void *nirptr, bool optimize)
{
- struct tgsi_shader_info *info = &ctx->shader->selector->info;
-
- if (nir->info.stage == MESA_SHADER_VERTEX ||
- nir->info.stage == MESA_SHADER_FRAGMENT) {
- uint64_t processed_inputs = 0;
- nir_foreach_variable(variable, &nir->inputs) {
- unsigned attrib_count = glsl_count_attribute_slots(variable->type,
- nir->info.stage == MESA_SHADER_VERTEX);
- unsigned input_idx = variable->data.driver_location;
-
- assert(attrib_count == 1);
-
- LLVMValueRef data[4];
- unsigned loc = variable->data.location;
-
- /* Packed components share the same location so skip
- * them if we have already processed the location.
- */
- if (processed_inputs & ((uint64_t)1 << loc))
- continue;
-
- if (nir->info.stage == MESA_SHADER_VERTEX)
- declare_nir_input_vs(ctx, variable, data);
- else if (nir->info.stage == MESA_SHADER_FRAGMENT)
- declare_nir_input_fs(ctx, variable, input_idx / 4, data);
-
- for (unsigned chan = 0; chan < 4; chan++) {
- ctx->inputs[input_idx + chan] =
- LLVMBuildBitCast(ctx->ac.builder, data[chan], ctx->ac.i32, "");
- }
- processed_inputs |= ((uint64_t)1 << loc);
- }
- }
-
- ctx->abi.inputs = &ctx->inputs[0];
- ctx->abi.load_sampler_desc = si_nir_load_sampler_desc;
- ctx->abi.clamp_shadow_reference = true;
-
- ctx->num_samplers = util_last_bit(info->samplers_declared);
- ctx->num_images = util_last_bit(info->images_declared);
-
- ac_nir_translate(&ctx->ac, &ctx->abi, nir, NULL);
-
- return true;
+ struct si_screen *sscreen = (struct si_screen *)screen;
+ struct nir_shader *nir = (struct nir_shader *)nirptr;
+
+ nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
+ si_lower_io(nir);
+ si_lower_nir(sscreen, nir);
}