nir: allow specifying a set of opcodes in lower_alu_to_scalar
diff --git a/src/gallium/drivers/radeonsi/si_shader_nir.c b/src/gallium/drivers/radeonsi/si_shader_nir.c
index 5d6280b80f7e1b4a530c6cd1e79fcf2ac9d705e7..10e72fd9f4d244f14928650aedc0bd1bc1fc50d0 100644
--- a/src/gallium/drivers/radeonsi/si_shader_nir.c
+++ b/src/gallium/drivers/radeonsi/si_shader_nir.c
@@ -51,7 +51,45 @@ static nir_variable* intrinsic_get_var(nir_intrinsic_instr *instr)
        return nir_deref_instr_get_variable(nir_src_as_deref(instr->src[0]));
 }
 
-static void scan_instruction(struct tgsi_shader_info *info,
+static void gather_intrinsic_load_deref_info(const nir_shader *nir,
+                                            const nir_intrinsic_instr *instr,
+                                            nir_variable *var,
+                                            struct tgsi_shader_info *info)
+{
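+       /* Gather which components of a shader input are read by this load_deref. */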
+       assert(var && var->data.mode == nir_var_shader_in);
+
+       switch (nir->info.stage) {
+       case MESA_SHADER_VERTEX: {
+               unsigned i = var->data.driver_location;
+               unsigned attrib_count = glsl_count_attribute_slots(var->type, false);
+
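+               /* The input may span several attribute slots; accumulate the read mask for each slot. */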
+               for (unsigned j = 0; j < attrib_count; j++, i++) {
+                       if (glsl_type_is_64bit(glsl_without_array(var->type))) {
+                               /* TODO: set usage mask more accurately for doubles */
+                               info->input_usage_mask[i] = TGSI_WRITEMASK_XYZW;
+                       } else {
+                               uint8_t mask = nir_ssa_def_components_read(&instr->dest.ssa);
+                               info->input_usage_mask[i] |= mask << var->data.location_frac;
+                       }
+               }
+               break;
+       }
+       default: {
+               unsigned semantic_name, semantic_index;
+               tgsi_get_gl_varying_semantic(var->data.location, true,
+                                            &semantic_name, &semantic_index);
+
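+               /* colors_read packs a 4-bit component mask per color semantic index. */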
+               if (semantic_name == TGSI_SEMANTIC_COLOR) {
+                       uint8_t mask = nir_ssa_def_components_read(&instr->dest.ssa);
+                       info->colors_read |= mask << (semantic_index * 4);
+               }
+               break;
+       }
+       }
+}
+
+static void scan_instruction(const struct nir_shader *nir,
+                            struct tgsi_shader_info *info,
                             nir_instr *instr)
 {
        if (instr->type == nir_instr_type_alu) {
@@ -133,6 +171,9 @@ static void scan_instruction(struct tgsi_shader_info *info,
                case nir_intrinsic_load_base_vertex:
                        info->uses_basevertex = 1;
                        break;
+               case nir_intrinsic_load_draw_id:
+                       info->uses_drawid = 1;
+                       break;
                case nir_intrinsic_load_primitive_id:
                        info->uses_primid = 1;
                        break;
@@ -143,16 +184,48 @@ static void scan_instruction(struct tgsi_shader_info *info,
                case nir_intrinsic_load_tess_level_outer:
                        info->reads_tess_factors = true;
                        break;
-               case nir_intrinsic_image_deref_load:
-               case nir_intrinsic_image_deref_size:
-               case nir_intrinsic_image_deref_samples: {
-                       nir_variable *var = intrinsic_get_var(intr);
-                       if (var->data.bindless)
-                               info->uses_bindless_images = true;
+               case nir_intrinsic_bindless_image_load:
+                       info->uses_bindless_images = true;
 
+                       if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
+                               info->uses_bindless_buffer_load = true;
+                       else
+                               info->uses_bindless_image_load = true;
+                       break;
+               case nir_intrinsic_bindless_image_size:
+               case nir_intrinsic_bindless_image_samples:
+                       info->uses_bindless_images = true;
+                       break;
+               case nir_intrinsic_bindless_image_store:
+                       info->uses_bindless_images = true;
+
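+                       /* Buffer images (GLSL_SAMPLER_DIM_BUF) are tracked separately from other image dims. */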
+                       if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
+                               info->uses_bindless_buffer_store = true;
+                       else
+                               info->uses_bindless_image_store = true;
+
+                       info->writes_memory = true;
                        break;
-               }
                case nir_intrinsic_image_deref_store:
+                       info->writes_memory = true;
+                       break;
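+               /* Bindless image atomics count both as bindless-image use and as memory writes. */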
+               case nir_intrinsic_bindless_image_atomic_add:
+               case nir_intrinsic_bindless_image_atomic_min:
+               case nir_intrinsic_bindless_image_atomic_max:
+               case nir_intrinsic_bindless_image_atomic_and:
+               case nir_intrinsic_bindless_image_atomic_or:
+               case nir_intrinsic_bindless_image_atomic_xor:
+               case nir_intrinsic_bindless_image_atomic_exchange:
+               case nir_intrinsic_bindless_image_atomic_comp_swap:
+                       info->uses_bindless_images = true;
+
+                       if (nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_BUF)
+                               info->uses_bindless_buffer_atomic = true;
+                       else
+                               info->uses_bindless_image_atomic = true;
+
+                       info->writes_memory = true;
+                       break;
                case nir_intrinsic_image_deref_atomic_add:
                case nir_intrinsic_image_deref_atomic_min:
                case nir_intrinsic_image_deref_atomic_max:
@@ -160,13 +233,9 @@ static void scan_instruction(struct tgsi_shader_info *info,
                case nir_intrinsic_image_deref_atomic_or:
                case nir_intrinsic_image_deref_atomic_xor:
                case nir_intrinsic_image_deref_atomic_exchange:
-               case nir_intrinsic_image_deref_atomic_comp_swap: {
-                       nir_variable *var = intrinsic_get_var(intr);
-                       if (var->data.bindless)
-                               info->uses_bindless_images = true;
-
-                       /* fall-through */
-               }
+               case nir_intrinsic_image_deref_atomic_comp_swap:
+                       info->writes_memory = true;
+                       break;
                case nir_intrinsic_store_ssbo:
                case nir_intrinsic_ssbo_atomic_add:
                case nir_intrinsic_ssbo_atomic_imin:
@@ -187,6 +256,8 @@ static void scan_instruction(struct tgsi_shader_info *info,
                                glsl_get_base_type(glsl_without_array(var->type));
 
                        if (mode == nir_var_shader_in) {
+                               gather_intrinsic_load_deref_info(nir, intr, var, info);
+
                                switch (var->data.interpolation) {
                                case INTERP_MODE_NONE:
                                        if (glsl_base_type_is_integer(base_type))
@@ -250,7 +321,6 @@ static void scan_instruction(struct tgsi_shader_info *info,
 }
 
 void si_nir_scan_tess_ctrl(const struct nir_shader *nir,
-                          const struct tgsi_shader_info *info,
                           struct tgsi_tessctrl_info *out)
 {
        memset(out, 0, sizeof(*out));
@@ -258,14 +328,8 @@ void si_nir_scan_tess_ctrl(const struct nir_shader *nir,
        if (nir->info.stage != MESA_SHADER_TESS_CTRL)
                return;
 
-       /* Initial value = true. Here the pass will accumulate results from
-        * multiple segments surrounded by barriers. If tess factors aren't
-        * written at all, it's a shader bug and we don't care if this will be
-        * true.
-        */
-       out->tessfactors_are_def_in_all_invocs = true;
-
-       /* TODO: Implement scanning of tess factors, see tgsi backend. */
+       out->tessfactors_are_def_in_all_invocs =
+               ac_are_tessfactors_def_in_all_invocs(nir);
 }
 
 void si_nir_scan_shader(const struct nir_shader *nir,
@@ -281,6 +345,11 @@ void si_nir_scan_shader(const struct nir_shader *nir,
        info->properties[TGSI_PROPERTY_NEXT_SHADER] =
                pipe_shader_type_from_mesa(nir->info.next_stage);
 
+       if (nir->info.stage == MESA_SHADER_VERTEX) {
+               info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] =
+                       nir->info.vs.window_space_position;
+       }
+
        if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
                info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT] =
                        nir->info.tess.tcs_vertices_out;
@@ -340,7 +409,7 @@ void si_nir_scan_shader(const struct nir_shader *nir,
                }
        }
 
-       if (nir->info.stage == MESA_SHADER_COMPUTE) {
+       if (gl_shader_stage_is_compute(nir->info.stage)) {
                info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.cs.local_size[0];
                info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.cs.local_size[1];
                info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH] = nir->info.cs.local_size[2];
@@ -368,23 +437,17 @@ void si_nir_scan_shader(const struct nir_shader *nir,
                 * variable->data.driver_location.
                 */
                if (nir->info.stage == MESA_SHADER_VERTEX) {
-                       /* TODO: gather the actual input usage and remove this. */
-                       info->input_usage_mask[i] = TGSI_WRITEMASK_XYZW;
-
-                       if (glsl_type_is_dual_slot(variable->type)) {
-                               num_inputs += 2;
-
-                               /* TODO: gather the actual input usage and remove this. */
-                               info->input_usage_mask[i+1] = TGSI_WRITEMASK_XYZW;
-                       } else
+                       if (glsl_type_is_dual_slot(glsl_without_array(variable->type)))
                                num_inputs++;
+
+                       num_inputs++;
                        continue;
                }
 
                /* Fragment shader position is a system value. */
                if (nir->info.stage == MESA_SHADER_FRAGMENT &&
                    variable->data.location == VARYING_SLOT_POS) {
-                       if (variable->data.pixel_center_integer)
+                       if (nir->info.fs.pixel_center_integer)
                                info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
                                        TGSI_FS_COORD_PIXEL_CENTER_INTEGER;
 
@@ -448,12 +511,6 @@ void si_nir_scan_shader(const struct nir_shader *nir,
                                info->input_interpolate[i] = TGSI_INTERPOLATE_CONSTANT;
                                break;
                        }
-
-                       /* TODO make this more precise */
-                       if (variable->data.location == VARYING_SLOT_COL0)
-                               info->colors_read |= 0x0f;
-                       else if (variable->data.location == VARYING_SLOT_COL1)
-                               info->colors_read |= 0xf0;
                }
        }
 
@@ -637,16 +694,24 @@ void si_nir_scan_shader(const struct nir_shader *nir,
 
        struct set *ubo_set = _mesa_set_create(NULL, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);
+       struct set *ssbo_set = _mesa_set_create(NULL, _mesa_hash_pointer,
+                                               _mesa_key_pointer_equal);
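+       /* Deduplicate interface types so that each UBO/SSBO block is counted only once across its members. */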
 
        /* Initialise const_file_max[0] */
        info->const_file_max[0] = -1;
 
+       /* The first 8 slots are reserved for atomic counters using SSBOs. */
+       unsigned ssbo_idx = 8;
+
        unsigned ubo_idx = 1;
        nir_foreach_variable(variable, &nir->uniforms) {
                const struct glsl_type *type = variable->type;
                enum glsl_base_type base_type =
                        glsl_get_base_type(glsl_without_array(type));
                unsigned aoa_size = MAX2(1, glsl_get_aoa_size(type));
+               unsigned loc = variable->data.location;
+               int slot_count = glsl_count_attribute_slots(type, false);
+               int max_slot = MAX2(info->const_file_max[0], (int) loc) + slot_count;
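+               /* max_slot becomes the new const_file_max[0] when this variable lands in constant buffer 0 below. */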
 
                /* Gather buffers declared bitmasks. Note: radeonsi doesn't
                 * really use the mask (other than ubo_idx == 1 for regular
@@ -654,12 +719,17 @@ void si_nir_scan_shader(const struct nir_shader *nir,
                 * so we don't need to worry about the ordering.
                 */
                if (variable->interface_type != NULL) {
-                       if (variable->data.mode == nir_var_uniform) {
+                       if (variable->data.mode == nir_var_uniform ||
+                           variable->data.mode == nir_var_mem_ubo ||
+                           variable->data.mode == nir_var_mem_ssbo) {
+
+                               struct set *buf_set = variable->data.mode == nir_var_mem_ssbo ?
+                                       ssbo_set : ubo_set;
 
                                unsigned block_count;
                                if (base_type != GLSL_TYPE_INTERFACE) {
                                        struct set_entry *entry =
-                                               _mesa_set_search(ubo_set, variable->interface_type);
+                                               _mesa_set_search(buf_set, variable->interface_type);
 
                                        /* Check if we have already processed
                                         * a member from this ubo.
@@ -672,16 +742,18 @@ void si_nir_scan_shader(const struct nir_shader *nir,
                                        block_count = aoa_size;
                                }
 
-                               info->const_buffers_declared |= u_bit_consecutive(ubo_idx, block_count);
-                               ubo_idx += block_count;
+                               if (variable->data.mode == nir_var_uniform ||
+                                   variable->data.mode == nir_var_mem_ubo) {
+                                       info->const_buffers_declared |= u_bit_consecutive(ubo_idx, block_count);
+                                       ubo_idx += block_count;
+                               } else {
+                                       assert(variable->data.mode == nir_var_mem_ssbo);
 
-                               _mesa_set_add(ubo_set, variable->interface_type);
-                       }
+                                       info->shader_buffers_declared |= u_bit_consecutive(ssbo_idx, block_count);
+                                       ssbo_idx += block_count;
+                               }
 
-                       if (variable->data.mode == nir_var_shader_storage) {
-                               /* TODO: make this more accurate */
-                               info->shader_buffers_declared =
-                                       u_bit_consecutive(0, SI_NUM_SHADER_BUFFERS);
+                               _mesa_set_add(buf_set, variable->interface_type);
                        }
 
                        continue;
@@ -693,8 +765,7 @@ void si_nir_scan_shader(const struct nir_shader *nir,
                if (base_type == GLSL_TYPE_SAMPLER) {
                        if (variable->data.bindless) {
                                info->const_buffers_declared |= 1;
-                               info->const_file_max[0] +=
-                                       glsl_count_attribute_slots(type, false);
+                               info->const_file_max[0] = max_slot;
                        } else {
                                info->samplers_declared |=
                                        u_bit_consecutive(variable->data.binding, aoa_size);
@@ -702,8 +773,7 @@ void si_nir_scan_shader(const struct nir_shader *nir,
                } else if (base_type == GLSL_TYPE_IMAGE) {
                        if (variable->data.bindless) {
                                info->const_buffers_declared |= 1;
-                               info->const_file_max[0] +=
-                                       glsl_count_attribute_slots(type, false);
+                               info->const_file_max[0] = max_slot;
                        } else {
                                info->images_declared |=
                                        u_bit_consecutive(variable->data.binding, aoa_size);
@@ -718,13 +788,13 @@ void si_nir_scan_shader(const struct nir_shader *nir,
                                        u_bit_consecutive(0, SI_NUM_CONST_BUFFERS);
                        } else {
                                info->const_buffers_declared |= 1;
-                               info->const_file_max[0] +=
-                                       glsl_count_attribute_slots(type, false);
+                               info->const_file_max[0] = max_slot;
                        }
                }
        }
 
        _mesa_set_destroy(ubo_set, NULL);
+       _mesa_set_destroy(ssbo_set, NULL);
 
        info->num_written_clipdistance = nir->info.clip_distance_array_size;
        info->num_written_culldistance = nir->info.cull_distance_array_size;
@@ -737,10 +807,75 @@ void si_nir_scan_shader(const struct nir_shader *nir,
        func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
        nir_foreach_block(block, func->impl) {
                nir_foreach_instr(instr, block)
-                       scan_instruction(info, instr);
+                       scan_instruction(nir, info, instr);
        }
 }
 
+void
+si_nir_opts(struct nir_shader *nir)
+{
+       bool progress;
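+       /* Gather the flrp bit sizes that the backend wants lowered. */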
+       unsigned lower_flrp =
+               (nir->options->lower_flrp16 ? 16 : 0) |
+               (nir->options->lower_flrp32 ? 32 : 0) |
+               (nir->options->lower_flrp64 ? 64 : 0);
+
+       do {
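+               /* Keep iterating until no pass makes further progress. */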
+               progress = false;
+
+               NIR_PASS_V(nir, nir_lower_vars_to_ssa);
+
+               NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
+               NIR_PASS(progress, nir, nir_opt_dead_write_vars);
+
+               NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL);
+               NIR_PASS_V(nir, nir_lower_phis_to_scalar);
+
+               /* (Constant) copy propagation is needed for txf with offsets. */
+               NIR_PASS(progress, nir, nir_copy_prop);
+               NIR_PASS(progress, nir, nir_opt_remove_phis);
+               NIR_PASS(progress, nir, nir_opt_dce);
+               if (nir_opt_trivial_continues(nir)) {
+                       progress = true;
+                       NIR_PASS(progress, nir, nir_copy_prop);
+                       NIR_PASS(progress, nir, nir_opt_dce);
+               }
+               NIR_PASS(progress, nir, nir_opt_if, true);
+               NIR_PASS(progress, nir, nir_opt_dead_cf);
+               NIR_PASS(progress, nir, nir_opt_cse);
+               NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);
+
+               /* Needed for algebraic lowering */
+               NIR_PASS(progress, nir, nir_opt_algebraic);
+               NIR_PASS(progress, nir, nir_opt_constant_folding);
+
+               if (lower_flrp != 0) {
+                       bool lower_flrp_progress = false;
+
+                       NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
+                                lower_flrp,
+                                false /* always_precise */,
+                                nir->options->lower_ffma);
+                       if (lower_flrp_progress) {
+                               NIR_PASS(progress, nir,
+                                        nir_opt_constant_folding);
+                               progress = true;
+                       }
+
+                       /* Nothing should rematerialize any flrps, so we only
+                        * need to do this lowering once.
+                        */
+                       lower_flrp = 0;
+               }
+
+               NIR_PASS(progress, nir, nir_opt_undef);
+               NIR_PASS(progress, nir, nir_opt_conditional_discard);
+               if (nir->options->max_unroll_iterations) {
+                       NIR_PASS(progress, nir, nir_opt_loop_unroll, 0);
+               }
+       } while (progress);
+}
+
 /**
  * Perform "lowering" operations on the NIR that are run once when the shader
  * selector is created.
@@ -773,10 +908,6 @@ si_lower_nir(struct si_shader_selector* sel)
         * - ensure constant offsets for texture instructions are folded
         *   and copy-propagated
         */
-       NIR_PASS_V(sel->nir, nir_lower_returns);
-       NIR_PASS_V(sel->nir, nir_lower_vars_to_ssa);
-       NIR_PASS_V(sel->nir, nir_lower_alu_to_scalar);
-       NIR_PASS_V(sel->nir, nir_lower_phis_to_scalar);
 
        static const struct nir_lower_tex_options lower_tex_options = {
                .lower_txp = ~0u,
@@ -795,36 +926,14 @@ si_lower_nir(struct si_shader_selector* sel)
 
        ac_lower_indirect_derefs(sel->nir, sel->screen->info.chip_class);
 
-       NIR_PASS_V(sel->nir, nir_lower_load_const_to_scalar);
+       si_nir_opts(sel->nir);
 
-       bool progress;
-       do {
-               progress = false;
+       NIR_PASS_V(sel->nir, nir_lower_bool_to_int32);
 
-               /* (Constant) copy propagation is needed for txf with offsets. */
-               NIR_PASS(progress, sel->nir, nir_copy_prop);
-               NIR_PASS(progress, sel->nir, nir_opt_remove_phis);
-               NIR_PASS(progress, sel->nir, nir_opt_dce);
-               if (nir_opt_trivial_continues(sel->nir)) {
-                       progress = true;
-                       NIR_PASS(progress, sel->nir, nir_copy_prop);
-                       NIR_PASS(progress, sel->nir, nir_opt_dce);
-               }
-               NIR_PASS(progress, sel->nir, nir_opt_if);
-               NIR_PASS(progress, sel->nir, nir_opt_dead_cf);
-               NIR_PASS(progress, sel->nir, nir_opt_cse);
-               NIR_PASS(progress, sel->nir, nir_opt_peephole_select, 8);
-
-               /* Needed for algebraic lowering */
-               NIR_PASS(progress, sel->nir, nir_opt_algebraic);
-               NIR_PASS(progress, sel->nir, nir_opt_constant_folding);
-
-               NIR_PASS(progress, sel->nir, nir_opt_undef);
-               NIR_PASS(progress, sel->nir, nir_opt_conditional_discard);
-               if (sel->nir->options->max_unroll_iterations) {
-                       NIR_PASS(progress, sel->nir, nir_opt_loop_unroll, 0);
-               }
-       } while (progress);
+       /* Strip the resulting shader so that similar shaders are more likely
+        * to hit the shader cache.
+        */
+       nir_strip(sel->nir);
 }
 
 static void declare_nir_input_vs(struct si_shader_context *ctx,
@@ -897,50 +1006,76 @@ si_nir_load_sampler_desc(struct ac_shader_abi *abi,
                         bool write, bool bindless)
 {
        struct si_shader_context *ctx = si_shader_context_from_abi(abi);
+       const struct tgsi_shader_info *info = &ctx->shader->selector->info;
        LLVMBuilderRef builder = ctx->ac.builder;
-       LLVMValueRef list = LLVMGetParam(ctx->main_fn, ctx->param_samplers_and_images);
-       LLVMValueRef index;
+       unsigned const_index = base_index + constant_index;
+       bool dcc_off = write;
+
+       /* TODO: images_store and images_atomic are not set */
+       if (!dynamic_index && image &&
+           (info->images_store | info->images_atomic) & (1 << const_index))
+               dcc_off = true;
 
        assert(!descriptor_set);
+       assert(!image || desc_type == AC_DESC_IMAGE || desc_type == AC_DESC_BUFFER);
 
-       dynamic_index = dynamic_index ? dynamic_index : ctx->ac.i32_0;
-       index = LLVMBuildAdd(builder, dynamic_index,
-                            LLVMConstInt(ctx->ac.i32, base_index + constant_index, false),
-                            "");
+       if (bindless) {
+               LLVMValueRef list =
+                       LLVMGetParam(ctx->main_fn, ctx->param_bindless_samplers_and_images);
 
-       if (image) {
-               assert(desc_type == AC_DESC_IMAGE || desc_type == AC_DESC_BUFFER);
-               assert(base_index + constant_index < ctx->num_images);
+               /* dynamic_index is the bindless handle */
+               if (image) {
+                       /* For simplicity, bindless image descriptors use fixed
+                        * 16-dword slots for now.
+                        */
+                       dynamic_index = LLVMBuildMul(ctx->ac.builder, dynamic_index,
+                                            LLVMConstInt(ctx->i32, 2, 0), "");
 
-               if (dynamic_index)
-                       index = si_llvm_bound_index(ctx, index, ctx->num_images);
+                       return si_load_image_desc(ctx, list, dynamic_index, desc_type,
+                                                 dcc_off, true);
+               }
+
+               /* Since bindless handle arithmetic can contain an unsigned integer
+                * wraparound and si_load_sampler_desc assumes there isn't any,
+                * use GEP without "inbounds" (inside ac_build_pointer_add)
+                * to prevent incorrect code generation and hangs.
+                */
+               dynamic_index = LLVMBuildMul(ctx->ac.builder, dynamic_index,
+                                            LLVMConstInt(ctx->i32, 2, 0), "");
+               list = ac_build_pointer_add(&ctx->ac, list, dynamic_index);
+               return si_load_sampler_desc(ctx, list, ctx->i32_0, desc_type);
+       }
+
+       unsigned num_slots = image ? ctx->num_images : ctx->num_samplers;
+       assert(const_index < num_slots);
+
+       LLVMValueRef list = LLVMGetParam(ctx->main_fn, ctx->param_samplers_and_images);
+       LLVMValueRef index = LLVMConstInt(ctx->ac.i32, const_index, false);
+
+       if (dynamic_index) {
+               index = LLVMBuildAdd(builder, index, dynamic_index, "");
+
+               /* From the GL_ARB_shader_image_load_store extension spec:
+                *
+                *    If a shader performs an image load, store, or atomic
+                *    operation using an image variable declared as an array,
+                *    and if the index used to select an individual element is
+                *    negative or greater than or equal to the size of the
+                *    array, the results of the operation are undefined but may
+                *    not lead to termination.
+                */
+               index = si_llvm_bound_index(ctx, index, num_slots);
+       }
 
+       if (image) {
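+               /* Image descriptor slots are stored in reverse order in the combined list. */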
                index = LLVMBuildSub(ctx->ac.builder,
                                     LLVMConstInt(ctx->i32, SI_NUM_IMAGES - 1, 0),
                                     index, "");
-
-               /* TODO: be smarter about when we use dcc_off */
-               return si_load_image_desc(ctx, list, index, desc_type, write, bindless);
+               return si_load_image_desc(ctx, list, index, desc_type, dcc_off, false);
        }
 
-       assert(base_index + constant_index < ctx->num_samplers);
-
-       if (dynamic_index)
-               index = si_llvm_bound_index(ctx, index, ctx->num_samplers);
-
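+       /* Sampler slots follow the image slots in the combined descriptor list. */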
        index = LLVMBuildAdd(ctx->ac.builder, index,
                             LLVMConstInt(ctx->i32, SI_NUM_IMAGES / 2, 0), "");
-
-       if (bindless) {
-               /* Since bindless handle arithmetic can contain an unsigned integer
-                * wraparound and si_load_sampler_desc assumes there isn't any,
-                * use GEP without "inbounds" (inside ac_build_pointer_add)
-                * to prevent incorrect code generation and hangs.
-                */
-               index = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
-               list = ac_build_pointer_add(&ctx->ac, list, index);
-               index = ctx->i32_0;
-       }
        return si_load_sampler_desc(ctx, list, index, desc_type);
 }
 
@@ -969,6 +1104,9 @@ bool si_nir_build_llvm(struct si_shader_context *ctx, struct nir_shader *nir)
                        LLVMValueRef data[4];
                        unsigned loc = variable->data.location;
 
+                       if (loc >= VARYING_SLOT_VAR0 && nir->info.stage == MESA_SHADER_FRAGMENT)
+                               ctx->abi.fs_input_attr_indices[loc - VARYING_SLOT_VAR0] = input_idx / 4;
+
                        for (unsigned i = 0; i < attrib_count; i++) {
                                /* Packed components share the same location so skip
                                 * them if we have already processed the location.
@@ -1005,7 +1143,7 @@ bool si_nir_build_llvm(struct si_shader_context *ctx, struct nir_shader *nir)
        ctx->num_images = util_last_bit(info->images_declared);
 
        if (ctx->shader->selector->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE]) {
-               assert(nir->info.stage == MESA_SHADER_COMPUTE);
+               assert(gl_shader_stage_is_compute(nir->info.stage));
                si_declare_compute_memory(ctx);
        }
        ac_nir_translate(&ctx->ac, &ctx->abi, nir);