radv: Implement buffer stores with less than 4 components.
diff --git a/src/amd/common/ac_nir_to_llvm.c b/src/amd/common/ac_nir_to_llvm.c
index 467d1dd19ab567d30552493389628d03ea068f32..8dea35178b36827195ba46c2e246384aae4d3982 100644
--- a/src/amd/common/ac_nir_to_llvm.c
+++ b/src/amd/common/ac_nir_to_llvm.c
@@ -54,7 +54,6 @@ struct ac_nir_context {
 };
 
 static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
-                                    const nir_deref_var *deref,
                                     nir_deref_instr *deref_instr,
                                     enum ac_descriptor_type desc_type,
                                     const nir_tex_instr *instr,
@@ -271,8 +270,9 @@ static LLVMValueRef emit_bcsel(struct ac_llvm_context *ctx,
 {
        LLVMValueRef v = LLVMBuildICmp(ctx->builder, LLVMIntNE, src0,
                                       ctx->i32_0, "");
-       return LLVMBuildSelect(ctx->builder, v, ac_to_integer(ctx, src1),
-                              ac_to_integer(ctx, src2), "");
+       return LLVMBuildSelect(ctx->builder, v,
+                              ac_to_integer_or_pointer(ctx, src1),
+                              ac_to_integer_or_pointer(ctx, src2), "");
 }
 
 static LLVMValueRef emit_minmax_int(struct ac_llvm_context *ctx,
@@ -312,9 +312,18 @@ static LLVMValueRef emit_uint_carry(struct ac_llvm_context *ctx,
 }
 
 static LLVMValueRef emit_b2f(struct ac_llvm_context *ctx,
-                            LLVMValueRef src0)
+                            LLVMValueRef src0,
+                            unsigned bitsize)
 {
-       return LLVMBuildAnd(ctx->builder, src0, LLVMBuildBitCast(ctx->builder, LLVMConstReal(ctx->f32, 1.0), ctx->i32, ""), "");
+       LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0,
+                                          LLVMBuildBitCast(ctx->builder, LLVMConstReal(ctx->f32, 1.0), ctx->i32, ""),
+                                          "");
+       result = LLVMBuildBitCast(ctx->builder, result, ctx->f32, "");
+
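+       /* The masked result is an f32 0.0 or 1.0; convert it to the
+        * destination float size. */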
+       switch (bitsize) {
+       case 16:
+               return LLVMBuildFPTrunc(ctx->builder, result, ctx->f16, "");
+       case 32:
+               return result;
+       case 64:
+               return LLVMBuildFPExt(ctx->builder, result, ctx->f64, "");
+       default:
+               unreachable("Unsupported bit size.");
+       }
 }
 
 static LLVMValueRef emit_f2b(struct ac_llvm_context *ctx,
@@ -419,10 +428,24 @@ static LLVMValueRef emit_bitfield_extract(struct ac_llvm_context *ctx,
                                          const LLVMValueRef srcs[3])
 {
        LLVMValueRef result;
-       LLVMValueRef icond = LLVMBuildICmp(ctx->builder, LLVMIntEQ, srcs[2], LLVMConstInt(ctx->i32, 32, false), "");
 
-       result = ac_build_bfe(ctx, srcs[0], srcs[1], srcs[2], is_signed);
-       result = LLVMBuildSelect(ctx->builder, icond, srcs[0], result, "");
+       if (HAVE_LLVM >= 0x0800) {
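+               /* LLVM 8 handles count == 0 correctly, so only the
+                * count == 32 case needs a select. */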
+               LLVMValueRef icond = LLVMBuildICmp(ctx->builder, LLVMIntEQ, srcs[2], LLVMConstInt(ctx->i32, 32, false), "");
+               result = ac_build_bfe(ctx, srcs[0], srcs[1], srcs[2], is_signed);
+               result = LLVMBuildSelect(ctx->builder, icond, srcs[0], result, "");
+       } else {
+               /* FIXME: LLVM 7 returns an incorrect result when count is 0.
+                * https://bugs.freedesktop.org/show_bug.cgi?id=107276
+                */
+               LLVMValueRef zero = ctx->i32_0;
+               LLVMValueRef icond1 = LLVMBuildICmp(ctx->builder, LLVMIntEQ, srcs[2], LLVMConstInt(ctx->i32, 32, false), "");
+               LLVMValueRef icond2 = LLVMBuildICmp(ctx->builder, LLVMIntEQ, srcs[2], zero, "");
+
+               result = ac_build_bfe(ctx, srcs[0], srcs[1], srcs[2], is_signed);
+               result = LLVMBuildSelect(ctx->builder, icond1, srcs[0], result, "");
+               result = LLVMBuildSelect(ctx->builder, icond2, zero, result, "");
+       }
+
        return result;
 }
 
@@ -465,14 +488,15 @@ static LLVMValueRef emit_pack_half_2x16(struct ac_llvm_context *ctx,
        comp[0] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_0, "");
        comp[1] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_1, "");
 
-       return ac_build_cvt_pkrtz_f16(ctx, comp);
+       return LLVMBuildBitCast(ctx->builder, ac_build_cvt_pkrtz_f16(ctx, comp),
+                               ctx->i32, "");
 }
 
 static LLVMValueRef emit_unpack_half_2x16(struct ac_llvm_context *ctx,
                                          LLVMValueRef src0)
 {
        LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
-       LLVMValueRef temps[2], result, val;
+       LLVMValueRef temps[2], val;
        int i;
 
        for (i = 0; i < 2; i++) {
@@ -481,12 +505,7 @@ static LLVMValueRef emit_unpack_half_2x16(struct ac_llvm_context *ctx,
                val = LLVMBuildBitCast(ctx->builder, val, ctx->f16, "");
                temps[i] = LLVMBuildFPExt(ctx->builder, val, ctx->f32, "");
        }
-
-       result = LLVMBuildInsertElement(ctx->builder, LLVMGetUndef(ctx->v2f32), temps[0],
-                                       ctx->i32_0, "");
-       result = LLVMBuildInsertElement(ctx->builder, result, temps[1],
-                                       ctx->i32_1, "");
-       return result;
+       return ac_build_gather_values(ctx, temps, 2);
 }
 
 static LLVMValueRef emit_ddxy(struct ac_nir_context *ctx,
@@ -668,34 +687,34 @@ static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
                                                     LLVMTypeOf(src[0]), ""),
                                       "");
                break;
-       case nir_op_ilt:
+       case nir_op_ilt32:
                result = emit_int_cmp(&ctx->ac, LLVMIntSLT, src[0], src[1]);
                break;
-       case nir_op_ine:
+       case nir_op_ine32:
                result = emit_int_cmp(&ctx->ac, LLVMIntNE, src[0], src[1]);
                break;
-       case nir_op_ieq:
+       case nir_op_ieq32:
                result = emit_int_cmp(&ctx->ac, LLVMIntEQ, src[0], src[1]);
                break;
-       case nir_op_ige:
+       case nir_op_ige32:
                result = emit_int_cmp(&ctx->ac, LLVMIntSGE, src[0], src[1]);
                break;
-       case nir_op_ult:
+       case nir_op_ult32:
                result = emit_int_cmp(&ctx->ac, LLVMIntULT, src[0], src[1]);
                break;
-       case nir_op_uge:
+       case nir_op_uge32:
                result = emit_int_cmp(&ctx->ac, LLVMIntUGE, src[0], src[1]);
                break;
-       case nir_op_feq:
+       case nir_op_feq32:
                result = emit_float_cmp(&ctx->ac, LLVMRealOEQ, src[0], src[1]);
                break;
-       case nir_op_fne:
+       case nir_op_fne32:
                result = emit_float_cmp(&ctx->ac, LLVMRealUNE, src[0], src[1]);
                break;
-       case nir_op_flt:
+       case nir_op_flt32:
                result = emit_float_cmp(&ctx->ac, LLVMRealOLT, src[0], src[1]);
                break;
-       case nir_op_fge:
+       case nir_op_fge32:
                result = emit_float_cmp(&ctx->ac, LLVMRealOGE, src[0], src[1]);
                break;
        case nir_op_fabs:
@@ -827,15 +846,10 @@ static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
                result = emit_bitfield_insert(&ctx->ac, src[0], src[1], src[2], src[3]);
                break;
        case nir_op_bitfield_reverse:
-               result = ac_build_intrinsic(&ctx->ac, "llvm.bitreverse.i32", ctx->ac.i32, src, 1, AC_FUNC_ATTR_READNONE);
+               result = ac_build_bitfield_reverse(&ctx->ac, src[0]);
                break;
        case nir_op_bit_count:
-               if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) == 32)
-                       result = ac_build_intrinsic(&ctx->ac, "llvm.ctpop.i32", ctx->ac.i32, src, 1, AC_FUNC_ATTR_READNONE);
-               else {
-                       result = ac_build_intrinsic(&ctx->ac, "llvm.ctpop.i64", ctx->ac.i64, src, 1, AC_FUNC_ATTR_READNONE);
-                       result = LLVMBuildTrunc(ctx->ac.builder, result, ctx->ac.i32, "");
-               }
+               result = ac_build_bit_count(&ctx->ac, src[0]);
                break;
        case nir_op_vec2:
        case nir_op_vec3:
@@ -844,34 +858,47 @@ static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
                        src[i] = ac_to_integer(&ctx->ac, src[i]);
                result = ac_build_gather_values(&ctx->ac, src, num_components);
                break;
+       case nir_op_f2i16:
        case nir_op_f2i32:
        case nir_op_f2i64:
                src[0] = ac_to_float(&ctx->ac, src[0]);
                result = LLVMBuildFPToSI(ctx->ac.builder, src[0], def_type, "");
                break;
+       case nir_op_f2u16:
        case nir_op_f2u32:
        case nir_op_f2u64:
                src[0] = ac_to_float(&ctx->ac, src[0]);
                result = LLVMBuildFPToUI(ctx->ac.builder, src[0], def_type, "");
                break;
+       case nir_op_i2f16:
        case nir_op_i2f32:
        case nir_op_i2f64:
                src[0] = ac_to_integer(&ctx->ac, src[0]);
                result = LLVMBuildSIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
                break;
+       case nir_op_u2f16:
        case nir_op_u2f32:
        case nir_op_u2f64:
                src[0] = ac_to_integer(&ctx->ac, src[0]);
                result = LLVMBuildUIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
                break;
-       case nir_op_f2f64:
+       case nir_op_f2f16_rtz:
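+               /* There is no round-towards-zero fptrunc; use cvt_pkrtz (which
+                * rounds towards zero) with a zero second component and take
+                * the low 16 bits. */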
                src[0] = ac_to_float(&ctx->ac, src[0]);
-               result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
+               LLVMValueRef param[2] = { src[0], ctx->ac.f32_0 };
+               result = ac_build_cvt_pkrtz_f16(&ctx->ac, param);
+               result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
                break;
+       case nir_op_f2f16_rtne:
+       case nir_op_f2f16:
        case nir_op_f2f32:
+       case nir_op_f2f64:
                src[0] = ac_to_float(&ctx->ac, src[0]);
-               result = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
+               if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
+                       result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
+               else
+                       result = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
                break;
+       case nir_op_u2u16:
        case nir_op_u2u32:
        case nir_op_u2u64:
                src[0] = ac_to_integer(&ctx->ac, src[0]);
@@ -880,6 +907,7 @@ static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
                else
                        result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
                break;
+       case nir_op_i2i16:
        case nir_op_i2i32:
        case nir_op_i2i64:
                src[0] = ac_to_integer(&ctx->ac, src[0]);
@@ -888,7 +916,7 @@ static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
                else
                        result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
                break;
-       case nir_op_bcsel:
+       case nir_op_b32csel:
                result = emit_bcsel(&ctx->ac, src[0], src[1], src[2]);
                break;
        case nir_op_find_lsb:
@@ -913,16 +941,20 @@ static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
                src[1] = ac_to_integer(&ctx->ac, src[1]);
                result = emit_uint_carry(&ctx->ac, "llvm.usub.with.overflow.i32", src[0], src[1]);
                break;
-       case nir_op_b2f:
-               result = emit_b2f(&ctx->ac, src[0]);
+       case nir_op_b2f16:
+       case nir_op_b2f32:
+       case nir_op_b2f64:
+               result = emit_b2f(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
                break;
-       case nir_op_f2b:
+       case nir_op_f2b32:
                result = emit_f2b(&ctx->ac, src[0]);
                break;
-       case nir_op_b2i:
+       case nir_op_b2i16:
+       case nir_op_b2i32:
+       case nir_op_b2i64:
                result = emit_b2i(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
                break;
-       case nir_op_i2b:
+       case nir_op_i2b32:
                src[0] = ac_to_integer(&ctx->ac, src[0]);
                result = emit_i2b(&ctx->ac, src[0]);
                break;
@@ -976,10 +1008,7 @@ static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
 
        case nir_op_pack_64_2x32_split: {
                LLVMValueRef tmp = LLVMGetUndef(ctx->ac.v2i32);
-               tmp = LLVMBuildInsertElement(ctx->ac.builder, tmp,
-                                            src[0], ctx->ac.i32_0, "");
-               tmp = LLVMBuildInsertElement(ctx->ac.builder, tmp,
-                                            src[1], ctx->ac.i32_1, "");
+               tmp = ac_build_gather_values(&ctx->ac, src, 2);
                result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i64, "");
                break;
        }
@@ -1071,7 +1100,7 @@ static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
 
        if (result) {
                assert(instr->dest.dest.is_ssa);
-               result = ac_to_integer(&ctx->ac, result);
+               result = ac_to_integer_or_pointer(&ctx->ac, result);
                ctx->ssa_defs[instr->dest.dest.ssa.index] = result;
        }
 }
@@ -1085,6 +1114,10 @@ static void visit_load_const(struct ac_nir_context *ctx,
 
        for (unsigned i = 0; i < instr->def.num_components; ++i) {
                switch (instr->def.bit_size) {
+               case 16:
+                       values[i] = LLVMConstInt(element_type,
+                                                instr->value.u16[i], false);
+                       break;
                case 32:
                        values[i] = LLVMConstInt(element_type,
                                                 instr->value.u32[i], false);
@@ -1139,7 +1172,8 @@ static LLVMValueRef lower_gather4_integer(struct ac_llvm_context *ctx,
                                          struct ac_image_args *args,
                                          const nir_tex_instr *instr)
 {
-       enum glsl_base_type stype = glsl_get_sampler_result_type(var->type);
+       const struct glsl_type *type = glsl_without_array(var->type);
+       enum glsl_base_type stype = glsl_get_sampler_result_type(type);
        LLVMValueRef half_texel[2];
        LLVMValueRef compare_cube_wa = NULL;
        LLVMValueRef result;
@@ -1323,8 +1357,9 @@ static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx,
 
        if (instr->op == nir_texop_tg4 && ctx->ac.chip_class <= VI) {
                nir_deref_instr *texture_deref_instr = get_tex_texture_deref(instr);
-               nir_variable *var = texture_deref_instr ? nir_deref_instr_get_variable(texture_deref_instr) : instr->texture->var;
-               enum glsl_base_type stype = glsl_get_sampler_result_type(var->type);
+               nir_variable *var = nir_deref_instr_get_variable(texture_deref_instr);
+               const struct glsl_type *type = glsl_without_array(var->type);
+               enum glsl_base_type stype = glsl_get_sampler_result_type(type);
                if (stype == GLSL_TYPE_UINT || stype == GLSL_TYPE_INT) {
                        return lower_gather4_integer(&ctx->ac, var, args, instr);
                }
@@ -1363,6 +1398,26 @@ static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx,
                            get_src(ctx, instr->src[0]), "");
 
        ptr = ac_build_gep0(&ctx->ac, ctx->abi->push_constants, addr);
+
+       if (instr->dest.ssa.bit_size == 16) {
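+               /* Load whole dwords as 16-bit pairs, then select between the
+                * dword-aligned and the 2-byte-shifted shuffle based on bit 1
+                * of the byte address. */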
+               unsigned load_dwords = instr->dest.ssa.num_components / 2 + 1;
+               LLVMTypeRef vec_type = LLVMVectorType(LLVMInt16TypeInContext(ctx->ac.context), 2 * load_dwords);
+               ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
+               LLVMValueRef res = LLVMBuildLoad(ctx->ac.builder, ptr, "");
+               res = LLVMBuildBitCast(ctx->ac.builder, res, vec_type, "");
+               LLVMValueRef cond = LLVMBuildLShr(ctx->ac.builder, addr, ctx->ac.i32_1, "");
+               cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->ac.i1, "");
+               LLVMValueRef mask[] = { LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
+                                       LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
+                                       LLVMConstInt(ctx->ac.i32, 4, false)};
+               LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->dest.ssa.num_components);
+               LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->dest.ssa.num_components);
+               LLVMValueRef shuffle_aligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_aligned, "");
+               LLVMValueRef shuffle_unaligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_unaligned, "");
+               res = LLVMBuildSelect(ctx->ac.builder, cond, shuffle_unaligned, shuffle_aligned, "");
+               return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), "");
+       }
+
        ptr = ac_cast_ptr(&ctx->ac, ptr, get_def_type(ctx, &instr->dest.ssa));
 
        return LLVMBuildLoad(ctx->ac.builder, ptr, "");
@@ -1388,31 +1443,24 @@ static uint32_t widen_mask(uint32_t mask, unsigned multiplier)
 static LLVMValueRef extract_vector_range(struct ac_llvm_context *ctx, LLVMValueRef src,
                                          unsigned start, unsigned count)
 {
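+       /* Extract components [start, start + count) from a vector of at most
+        * 4 elements: return the source as-is for the full range, a scalar
+        * for a single component, and a shuffle otherwise. */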
-       LLVMTypeRef type = LLVMTypeOf(src);
+       LLVMValueRef mask[] = {
+       ctx->i32_0, ctx->i32_1,
+       LLVMConstInt(ctx->i32, 2, false), LLVMConstInt(ctx->i32, 3, false) };
 
-       if (LLVMGetTypeKind(type) != LLVMVectorTypeKind) {
+       unsigned src_elements = ac_get_llvm_num_components(src);
+
+       if (count == src_elements) {
                assert(start == 0);
-               assert(count == 1);
                return src;
+       } else if (count == 1) {
+               assert(start < src_elements);
+               return LLVMBuildExtractElement(ctx->builder, src, mask[start],  "");
+       } else {
+               assert(start + count <= src_elements);
+               assert(count <= 4);
+               LLVMValueRef swizzle = LLVMConstVector(&mask[start], count);
+               return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
        }
-
-       unsigned src_elements = LLVMGetVectorSize(type);
-       assert(start < src_elements);
-       assert(start + count <= src_elements);
-
-       if (start == 0 && count == src_elements)
-               return src;
-
-       if (count == 1)
-               return LLVMBuildExtractElement(ctx->builder, src, LLVMConstInt(ctx->i32, start, false), "");
-
-       assert(count <= 8);
-       LLVMValueRef indices[8];
-       for (unsigned i = 0; i < count; ++i)
-               indices[i] = LLVMConstInt(ctx->i32, start + i, false);
-
-       LLVMValueRef swizzle = LLVMConstVector(indices, count);
-       return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
 }
 
 static void visit_store_ssbo(struct ac_nir_context *ctx,
@@ -1420,33 +1468,24 @@ static void visit_store_ssbo(struct ac_nir_context *ctx,
 {
        const char *store_name;
        LLVMValueRef src_data = get_src(ctx, instr->src[0]);
-       LLVMTypeRef data_type = ctx->ac.f32;
-       int elem_size_mult = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 32;
-       int components_32bit = elem_size_mult * instr->num_components;
+       int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 8;
        unsigned writemask = nir_intrinsic_write_mask(instr);
-       LLVMValueRef base_data, base_offset;
-       LLVMValueRef params[6];
-
-       params[1] = ctx->abi->load_ssbo(ctx->abi,
-                                       get_src(ctx, instr->src[1]), true);
-       params[2] = ctx->ac.i32_0; /* vindex */
-       params[4] = ctx->ac.i1false;  /* glc */
-       params[5] = ctx->ac.i1false;  /* slc */
-
-       if (components_32bit > 1)
-               data_type = LLVMVectorType(ctx->ac.f32, components_32bit);
+       enum gl_access_qualifier access = nir_intrinsic_access(instr);
+       LLVMValueRef glc = ctx->ac.i1false;
 
-       writemask = widen_mask(writemask, elem_size_mult);
+       if (access & (ACCESS_VOLATILE | ACCESS_COHERENT))
+               glc = ctx->ac.i1true;
 
-       base_data = ac_to_float(&ctx->ac, src_data);
+       LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi,
+                                       get_src(ctx, instr->src[1]), true);
+       LLVMValueRef base_data = ac_to_float(&ctx->ac, src_data);
        base_data = ac_trim_vector(&ctx->ac, base_data, instr->num_components);
-       base_data = LLVMBuildBitCast(ctx->ac.builder, base_data,
-                                    data_type, "");
-       base_offset = get_src(ctx, instr->src[2]);      /* voffset */
+       LLVMValueRef base_offset = get_src(ctx, instr->src[2]);
+
        while (writemask) {
                int start, count;
-               LLVMValueRef data;
-               LLVMValueRef offset;
+               LLVMValueRef data, offset;
+               LLVMTypeRef data_type;
 
                u_bit_scan_consecutive_range(&writemask, &start, &count);
 
@@ -1456,31 +1495,76 @@ static void visit_store_ssbo(struct ac_nir_context *ctx,
                        writemask |= 1 << (start + 2);
                        count = 2;
                }
+               int num_bytes = count * elem_size_bytes; /* count in bytes */
 
-               if (count > 4) {
-                       writemask |= ((1u << (count - 4)) - 1u) << (start + 4);
-                       count = 4;
+               /* We can only store 4 DWords at a time;
+                * more than that is only possible for 64-bit vectors. */
+               if (num_bytes > 16) {
+                       writemask |= ((1u << (count - 2)) - 1u) << (start + 2);
+                       count = 2;
+                       num_bytes = 16;
                }
 
-               if (count == 4) {
-                       store_name = "llvm.amdgcn.buffer.store.v4f32";
-               } else if (count == 2) {
-                       store_name = "llvm.amdgcn.buffer.store.v2f32";
-
-               } else {
-                       assert(count == 1);
-                       store_name = "llvm.amdgcn.buffer.store.f32";
+               /* Check the alignment of 16-bit stores. */
+               if (elem_size_bytes == 2 && num_bytes > 2 && (start % 2) == 1) {
+                       writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
+                       count = 1;
+                       num_bytes = 2;
                }
                data = extract_vector_range(&ctx->ac, base_data, start, count);
 
-               offset = base_offset;
-               if (start != 0) {
-                       offset = LLVMBuildAdd(ctx->ac.builder, offset, LLVMConstInt(ctx->ac.i32, start * 4, false), "");
+               if (start == 0) {
+                       offset = base_offset;
+               } else {
+                       offset = LLVMBuildAdd(ctx->ac.builder, base_offset,
+                                             LLVMConstInt(ctx->ac.i32, start * elem_size_bytes, false), "");
+               }
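+               /* Single 16-bit stores go through tbuffer.store.i32 with a
+                * 16-bit dfmt and uint nfmt, so only two bytes are written. */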
+               if (num_bytes == 2) {
+                       store_name = "llvm.amdgcn.tbuffer.store.i32";
+                       data_type = ctx->ac.i32;
+                       data = LLVMBuildBitCast(ctx->ac.builder, data,
+                                               LLVMInt16TypeInContext(ctx->ac.context), "");
+                       data = LLVMBuildZExt(ctx->ac.builder, data, data_type, "");
+                       LLVMValueRef tbuffer_params[] = {
+                               data,
+                               rsrc,
+                               ctx->ac.i32_0, /* vindex */
+                               offset,        /* voffset */
+                               ctx->ac.i32_0,
+                               ctx->ac.i32_0,
+                               LLVMConstInt(ctx->ac.i32, 2, false), // dfmt (= 16bit)
+                               LLVMConstInt(ctx->ac.i32, 4, false), // nfmt (= uint)
+                               glc,
+                               ctx->ac.i1false,
+                       };
+                       ac_build_intrinsic(&ctx->ac, store_name,
+                                          ctx->ac.voidt, tbuffer_params, 10, 0);
+               } else {
+                       switch (num_bytes) {
+                       case 16: /* v4f32 */
+                               store_name = "llvm.amdgcn.buffer.store.v4f32";
+                               data_type = ctx->ac.v4f32;
+                               break;
+                       case 8: /* v2f32 */
+                               store_name = "llvm.amdgcn.buffer.store.v2f32";
+                               data_type = ctx->ac.v2f32;
+                               break;
+                       case 4: /* f32 */
+                               store_name = "llvm.amdgcn.buffer.store.f32";
+                               data_type = ctx->ac.f32;
+                               break;
+                       default:
+                               unreachable("Malformed vector store.");
+                       }
+                       data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");
+                       LLVMValueRef params[] = {
+                               data,
+                               rsrc,
+                               ctx->ac.i32_0, /* vindex */
+                               offset,
+                               glc,
+                               ctx->ac.i1false,  /* slc */
+                       };
+                       ac_build_intrinsic(&ctx->ac, store_name,
+                                          ctx->ac.voidt, params, 6, 0);
                }
-               params[0] = data;
-               params[3] = offset;
-               ac_build_intrinsic(&ctx->ac, store_name,
-                                  ctx->ac.voidt, params, 6, 0);
        }
 }
 
@@ -1500,7 +1584,7 @@ static LLVMValueRef visit_atomic_ssbo(struct ac_nir_context *ctx,
                                                 true);
        params[arg_count++] = ctx->ac.i32_0; /* vindex */
        params[arg_count++] = get_src(ctx, instr->src[1]);      /* voffset */
-       params[arg_count++] = LLVMConstInt(ctx->ac.i1, 0, false);  /* slc */
+       params[arg_count++] = ctx->ac.i1false;  /* slc */
 
        switch (instr->intrinsic) {
        case nir_intrinsic_ssbo_atomic_add:
@@ -1543,68 +1627,87 @@ static LLVMValueRef visit_atomic_ssbo(struct ac_nir_context *ctx,
 static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx,
                                       const nir_intrinsic_instr *instr)
 {
-       LLVMValueRef results[2];
-       int load_components;
+       int elem_size_bytes = instr->dest.ssa.bit_size / 8;
        int num_components = instr->num_components;
-       if (instr->dest.ssa.bit_size == 64)
-               num_components *= 2;
+       enum gl_access_qualifier access = nir_intrinsic_access(instr);
+       LLVMValueRef glc = ctx->ac.i1false;
 
-       for (int i = 0; i < num_components; i += load_components) {
-               load_components = MIN2(num_components - i, 4);
-               const char *load_name;
-               LLVMTypeRef data_type = ctx->ac.f32;
-               LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, i * 4, false);
-               offset = LLVMBuildAdd(ctx->ac.builder, get_src(ctx, instr->src[1]), offset, "");
-
-               if (load_components == 3)
-                       data_type = LLVMVectorType(ctx->ac.f32, 4);
-               else if (load_components > 1)
-                       data_type = LLVMVectorType(ctx->ac.f32, load_components);
-
-               if (load_components >= 3)
-                       load_name = "llvm.amdgcn.buffer.load.v4f32";
-               else if (load_components == 2)
-                       load_name = "llvm.amdgcn.buffer.load.v2f32";
-               else if (load_components == 1)
-                       load_name = "llvm.amdgcn.buffer.load.f32";
-               else
-                       unreachable("unhandled number of components");
-
-               LLVMValueRef params[] = {
-                       ctx->abi->load_ssbo(ctx->abi,
-                                           get_src(ctx, instr->src[0]),
-                                           false),
-                       ctx->ac.i32_0,
-                       offset,
-                       ctx->ac.i1false,
-                       ctx->ac.i1false,
-               };
+       if (access & (ACCESS_VOLATILE | ACCESS_COHERENT))
+               glc = ctx->ac.i1true;
 
-               results[i > 0 ? 1 : 0] = ac_build_intrinsic(&ctx->ac, load_name, data_type, params, 5, 0);
-       }
+       LLVMValueRef offset = get_src(ctx, instr->src[1]);
+       LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi,
+                                               get_src(ctx, instr->src[0]), false);
+       LLVMValueRef vindex = ctx->ac.i32_0;
+
+       LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.ssa);
+       LLVMTypeRef def_elem_type = num_components > 1 ? LLVMGetElementType(def_type) : def_type;
+
+       LLVMValueRef results[4];
+       for (int i = 0; i < num_components;) {
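+               /* Split the load into chunks of at most 16 bytes; sub-dword
+                * loads that are not dword-aligned go one component at a time. */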
+               int num_elems = num_components - i;
+               if (elem_size_bytes < 4 && nir_intrinsic_align(instr) % 4 != 0)
+                       num_elems = 1;
+               if (num_elems * elem_size_bytes > 16)
+                       num_elems = 16 / elem_size_bytes;
+               int load_bytes = num_elems * elem_size_bytes;
+
+               LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32, i * elem_size_bytes, false);
+
+               LLVMValueRef ret;
+               if (load_bytes == 2) {
+                       ret = ac_build_tbuffer_load_short(&ctx->ac,
+                                                         rsrc,
+                                                         vindex,
+                                                         offset,
+                                                         ctx->ac.i32_0,
+                                                         immoffset,
+                                                         glc);
+               } else {
+                       const char *load_name;
+                       LLVMTypeRef data_type;
+                       switch (load_bytes) {
+                       case 16:
+                       case 12:
+                               load_name = "llvm.amdgcn.buffer.load.v4f32";
+                               data_type = ctx->ac.v4f32;
+                               break;
+                       case 8:
+                       case 6:
+                               load_name = "llvm.amdgcn.buffer.load.v2f32";
+                               data_type = ctx->ac.v2f32;
+                               break;
+                       case 4:
+                               load_name = "llvm.amdgcn.buffer.load.f32";
+                               data_type = ctx->ac.f32;
+                               break;
+                       default:
+                               unreachable("Malformed load buffer.");
+                       }
+                       LLVMValueRef params[] = {
+                               rsrc,
+                               vindex,
+                               LLVMBuildAdd(ctx->ac.builder, offset, immoffset, ""),
+                               glc,
+                               ctx->ac.i1false,
+                       };
+                       ret = ac_build_intrinsic(&ctx->ac, load_name, data_type, params, 5, 0);
+               }
 
-       assume(results[0]);
-       LLVMValueRef ret = results[0];
-       if (num_components > 4 || num_components == 3) {
-               LLVMValueRef masks[] = {
-                       LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
-                       LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
-                       LLVMConstInt(ctx->ac.i32, 4, false), LLVMConstInt(ctx->ac.i32, 5, false),
-                       LLVMConstInt(ctx->ac.i32, 6, false), LLVMConstInt(ctx->ac.i32, 7, false)
-               };
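+               /* Drop the extra loaded bytes and reinterpret the rest as
+                * destination-sized elements. */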
+               LLVMTypeRef byte_vec = LLVMVectorType(ctx->ac.i8, ac_get_type_size(LLVMTypeOf(ret)));
+               ret = LLVMBuildBitCast(ctx->ac.builder, ret, byte_vec, "");
+               ret = ac_trim_vector(&ctx->ac, ret, load_bytes);
 
-               if (num_components == 6) {
-                       /* we end up with a v4f32 and v2f32 but shuffle fails on that */
-                       results[1] = ac_build_expand_to_vec4(&ctx->ac, results[1], 4);
-               }
+               LLVMTypeRef ret_type = LLVMVectorType(def_elem_type, num_elems);
+               ret = LLVMBuildBitCast(ctx->ac.builder, ret, ret_type, "");
 
-               LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
-               ret = LLVMBuildShuffleVector(ctx->ac.builder, results[0],
-                                            results[num_components > 4 ? 1 : 0], swizzle, "");
+               for (unsigned j = 0; j < num_elems; j++) {
+                       results[i + j] = LLVMBuildExtractElement(ctx->ac.builder, ret, LLVMConstInt(ctx->ac.i32, j, false), "");
+               }
+               i += num_elems;
        }
 
-       return LLVMBuildBitCast(ctx->ac.builder, ret,
-                               get_def_type(ctx, &instr->dest.ssa), "");
+       return ac_build_gather_values(&ctx->ac, results, num_components);
 }
 
 static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx,
@@ -1621,18 +1724,34 @@ static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx,
        if (instr->dest.ssa.bit_size == 64)
                num_components *= 2;
 
-       ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset,
-                                  NULL, 0, false, false, true, true);
-       ret = ac_trim_vector(&ctx->ac, ret, num_components);
+       if (instr->dest.ssa.bit_size == 16) {
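+               /* 16-bit UBO loads are emitted as one 16-bit typed buffer
+                * load per component. */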
+               LLVMValueRef results[num_components];
+               for (unsigned i = 0; i < num_components; ++i) {
+                       results[i] = ac_build_tbuffer_load_short(&ctx->ac,
+                                                                rsrc,
+                                                                ctx->ac.i32_0,
+                                                                offset,
+                                                                ctx->ac.i32_0,
+                                                                LLVMConstInt(ctx->ac.i32, 2 * i, 0),
+                                                                ctx->ac.i1false);
+               }
+               ret = ac_build_gather_values(&ctx->ac, results, num_components);
+       } else {
+               ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset,
+                                          NULL, 0, false, false, true, true);
+
+               ret = ac_trim_vector(&ctx->ac, ret, num_components);
+       }
+
        return LLVMBuildBitCast(ctx->ac.builder, ret,
                                get_def_type(ctx, &instr->dest.ssa), "");
 }
 
 static void
-get_deref_instr_offset(struct ac_nir_context *ctx, nir_deref_instr *instr,
-                      bool vs_in, unsigned *vertex_index_out,
-                      LLVMValueRef *vertex_index_ref,
-                      unsigned *const_out, LLVMValueRef *indir_out)
+get_deref_offset(struct ac_nir_context *ctx, nir_deref_instr *instr,
+                 bool vs_in, unsigned *vertex_index_out,
+                 LLVMValueRef *vertex_index_ref,
+                 unsigned *const_out, LLVMValueRef *indir_out)
 {
        nir_variable *var = nir_deref_instr_get_variable(instr);
        nir_deref_path path;
@@ -1697,124 +1816,6 @@ out:
        *indir_out = offset;
 }
 
-static void
-get_deref_offset(struct ac_nir_context *ctx, nir_deref_var *deref,
-                bool vs_in, unsigned *vertex_index_out,
-                LLVMValueRef *vertex_index_ref,
-                unsigned *const_out, LLVMValueRef *indir_out)
-{
-       unsigned const_offset = 0;
-       nir_deref *tail = &deref->deref;
-       LLVMValueRef offset = NULL;
-
-       if (vertex_index_out != NULL || vertex_index_ref != NULL) {
-               tail = tail->child;
-               nir_deref_array *deref_array = nir_deref_as_array(tail);
-               if (vertex_index_out)
-                       *vertex_index_out = deref_array->base_offset;
-
-               if (vertex_index_ref) {
-                       LLVMValueRef vtx = LLVMConstInt(ctx->ac.i32, deref_array->base_offset, false);
-                       if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
-                               vtx = LLVMBuildAdd(ctx->ac.builder, vtx, get_src(ctx, deref_array->indirect), "");
-                       }
-                       *vertex_index_ref = vtx;
-               }
-       }
-
-       if (deref->var->data.compact) {
-               assert(tail->child->deref_type == nir_deref_type_array);
-               assert(glsl_type_is_scalar(glsl_without_array(deref->var->type)));
-               nir_deref_array *deref_array = nir_deref_as_array(tail->child);
-               /* We always lower indirect dereferences for "compact" array vars. */
-               assert(deref_array->deref_array_type == nir_deref_array_type_direct);
-
-               const_offset = deref_array->base_offset;
-               goto out;
-       }
-
-       while (tail->child != NULL) {
-               const struct glsl_type *parent_type = tail->type;
-               tail = tail->child;
-
-               if (tail->deref_type == nir_deref_type_array) {
-                       nir_deref_array *deref_array = nir_deref_as_array(tail);
-                       LLVMValueRef index, stride, local_offset;
-                       unsigned size = glsl_count_attribute_slots(tail->type, vs_in);
-
-                       const_offset += size * deref_array->base_offset;
-                       if (deref_array->deref_array_type == nir_deref_array_type_direct)
-                               continue;
-
-                       assert(deref_array->deref_array_type == nir_deref_array_type_indirect);
-                       index = get_src(ctx, deref_array->indirect);
-                       stride = LLVMConstInt(ctx->ac.i32, size, 0);
-                       local_offset = LLVMBuildMul(ctx->ac.builder, stride, index, "");
-
-                       if (offset)
-                               offset = LLVMBuildAdd(ctx->ac.builder, offset, local_offset, "");
-                       else
-                               offset = local_offset;
-               } else if (tail->deref_type == nir_deref_type_struct) {
-                       nir_deref_struct *deref_struct = nir_deref_as_struct(tail);
-
-                       for (unsigned i = 0; i < deref_struct->index; i++) {
-                               const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
-                               const_offset += glsl_count_attribute_slots(ft, vs_in);
-                       }
-               } else
-                       unreachable("unsupported deref type");
-
-       }
-out:
-       if (const_offset && offset)
-               offset = LLVMBuildAdd(ctx->ac.builder, offset,
-                                     LLVMConstInt(ctx->ac.i32, const_offset, 0),
-                                     "");
-
-       *const_out = const_offset;
-       *indir_out = offset;
-}
-
-static LLVMValueRef
-build_gep_for_deref(struct ac_nir_context *ctx,
-                   nir_deref_var *deref)
-{
-       struct hash_entry *entry = _mesa_hash_table_search(ctx->vars, deref->var);
-       assert(entry->data);
-       LLVMValueRef val = entry->data;
-       nir_deref *tail = deref->deref.child;
-       while (tail != NULL) {
-               LLVMValueRef offset;
-               switch (tail->deref_type) {
-               case nir_deref_type_array: {
-                       nir_deref_array *array = nir_deref_as_array(tail);
-                       offset = LLVMConstInt(ctx->ac.i32, array->base_offset, 0);
-                       if (array->deref_array_type ==
-                           nir_deref_array_type_indirect) {
-                               offset = LLVMBuildAdd(ctx->ac.builder, offset,
-                                                     get_src(ctx,
-                                                             array->indirect),
-                                                     "");
-                       }
-                       break;
-               }
-               case nir_deref_type_struct: {
-                       nir_deref_struct *deref_struct =
-                               nir_deref_as_struct(tail);
-                       offset = LLVMConstInt(ctx->ac.i32,
-                                             deref_struct->index, 0);
-                       break;
-               }
-               default:
-                       unreachable("bad deref type");
-               }
-               val = ac_build_gep0(&ctx->ac, val, offset);
-               tail = tail->child;
-       }
-       return val;
-}
-
 static LLVMValueRef load_tess_varyings(struct ac_nir_context *ctx,
                                       nir_intrinsic_instr *instr,
                                       bool load_inputs)
@@ -1824,24 +1825,16 @@ static LLVMValueRef load_tess_varyings(struct ac_nir_context *ctx,
        LLVMValueRef indir_index = NULL;
        unsigned const_index = 0;
 
-       bool uses_deref_chain = instr->intrinsic == nir_intrinsic_load_var;
-       nir_variable *var = uses_deref_chain ? instr->variables[0]->var :
-                            nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
+       nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
 
        unsigned location = var->data.location;
        unsigned driver_location = var->data.driver_location;
        const bool is_patch =  var->data.patch;
        const bool is_compact = var->data.compact;
 
-       if (uses_deref_chain) {
-               get_deref_offset(ctx, instr->variables[0],
-                                false, NULL, is_patch ? NULL : &vertex_index,
-                                &const_index, &indir_index);
-       } else {
-               get_deref_instr_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
-                                      false, NULL, is_patch ? NULL : &vertex_index,
-                                      &const_index, &indir_index);
-       }
+       get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
+                        false, NULL, is_patch ? NULL : &vertex_index,
+                        &const_index, &indir_index);
 
        LLVMTypeRef dest_type = get_def_type(ctx, &instr->dest.ssa);
 
@@ -1857,39 +1850,45 @@ static LLVMValueRef load_tess_varyings(struct ac_nir_context *ctx,
                                              var->data.location_frac,
                                              instr->num_components,
                                              is_patch, is_compact, load_inputs);
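+       /* The tess varying load returns 32-bit values; truncate them when
+        * the destination is 16-bit. */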
+       if (instr->dest.ssa.bit_size == 16) {
+               result = ac_to_integer(&ctx->ac, result);
+               result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, "");
+       }
        return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
 }
 
 static LLVMValueRef visit_load_var(struct ac_nir_context *ctx,
                                   nir_intrinsic_instr *instr)
 {
-       bool uses_deref_chain = instr->intrinsic == nir_intrinsic_load_var;
-       nir_variable *var = uses_deref_chain ? instr->variables[0]->var :
-                           nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
+       nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
 
        LLVMValueRef values[8];
-       int idx = var->data.driver_location;
+       int idx = 0;
        int ve = instr->dest.ssa.num_components;
-       unsigned comp = var->data.location_frac;
+       unsigned comp = 0;
        LLVMValueRef indir_index;
        LLVMValueRef ret;
        unsigned const_index;
-       unsigned stride = var->data.compact ? 1 : 4;
-       bool vs_in = ctx->stage == MESA_SHADER_VERTEX &&
-                    var->data.mode == nir_var_shader_in;
-
-       if (uses_deref_chain) {
-               get_deref_offset(ctx, instr->variables[0], vs_in, NULL, NULL,
-                                &const_index, &indir_index);
-       } else {
-               get_deref_instr_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), vs_in, NULL, NULL,
-                                      &const_index, &indir_index);
+       unsigned stride = 4;
+       int mode = nir_var_shared;
+
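+       /* The deref chain may have no underlying variable; in that case fall
+        * back to the nir_var_shared defaults above. */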
+       if (var) {
+               bool vs_in = ctx->stage == MESA_SHADER_VERTEX &&
+                       var->data.mode == nir_var_shader_in;
+               if (var->data.compact)
+                       stride = 1;
+               idx = var->data.driver_location;
+               comp = var->data.location_frac;
+               mode = var->data.mode;
+
+               get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), vs_in, NULL, NULL,
+                                &const_index, &indir_index);
        }
 
        if (instr->dest.ssa.bit_size == 64)
                ve *= 2;
 
-       switch (var->data.mode) {
+       switch (mode) {
        case nir_var_shader_in:
                if (ctx->stage == MESA_SHADER_TESS_CTRL ||
                    ctx->stage == MESA_SHADER_TESS_EVAL) {
@@ -1900,14 +1899,8 @@ static LLVMValueRef visit_load_var(struct ac_nir_context *ctx,
                        LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
                        LLVMValueRef indir_index;
                        unsigned const_index, vertex_index;
-                       if (uses_deref_chain) {
-                               get_deref_offset(ctx, instr->variables[0],
-                                                false, &vertex_index, NULL,
-                                                &const_index, &indir_index);
-                       } else {
-                               get_deref_instr_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
-                                                      false, &vertex_index, NULL, &const_index, &indir_index);
-                       }
+                       get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
+                                        false, &vertex_index, NULL, &const_index, &indir_index);
 
                        return ctx->abi->load_inputs(ctx->abi, var->data.location,
                                                     var->data.driver_location,
@@ -1951,9 +1944,7 @@ static LLVMValueRef visit_load_var(struct ac_nir_context *ctx,
                }
                break;
        case nir_var_shared: {
-               LLVMValueRef address = uses_deref_chain ?
-                                          build_gep_for_deref(ctx, instr->variables[0])
-                                        : get_src(ctx, instr->src[0]);
+               LLVMValueRef address = get_src(ctx, instr->src[0]);
                LLVMValueRef val = LLVMBuildLoad(ctx->ac.builder, address, "");
                return LLVMBuildBitCast(ctx->ac.builder, val,
                                        get_def_type(ctx, &instr->dest.ssa),
@@ -1994,25 +1985,18 @@ static void
 visit_store_var(struct ac_nir_context *ctx,
                nir_intrinsic_instr *instr)
 {
-        bool uses_deref_chain = instr->intrinsic == nir_intrinsic_store_var;
-        nir_variable *var = uses_deref_chain ? instr->variables[0]->var :
-                            nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
+        nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
 
        LLVMValueRef temp_ptr, value;
        int idx = var->data.driver_location;
        unsigned comp = var->data.location_frac;
-       LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[uses_deref_chain ? 0 : 1]));
+       LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[1]));
        int writemask = instr->const_index[0];
        LLVMValueRef indir_index;
        unsigned const_index;
 
-       if (uses_deref_chain) {
-               get_deref_offset(ctx, instr->variables[0], false,
-                                NULL, NULL, &const_index, &indir_index);
-       } else {
-               get_deref_instr_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), false,
-                                      NULL, NULL, &const_index, &indir_index);
-       }
+       get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), false,
+                        NULL, NULL, &const_index, &indir_index);
 
        if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src)) == 64) {
 
@@ -2034,15 +2018,9 @@ visit_store_var(struct ac_nir_context *ctx,
                        unsigned const_index = 0;
                        const bool is_patch = var->data.patch;
 
-                       if (uses_deref_chain) {
-                               get_deref_offset(ctx, instr->variables[0],
-                                                false, NULL, is_patch ? NULL : &vertex_index,
-                                                &const_index, &indir_index);
-                       } else {
-                               get_deref_instr_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
-                                                false, NULL, is_patch ? NULL : &vertex_index,
-                                                &const_index, &indir_index);
-                       }
+                       get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
+                                        false, NULL, is_patch ? NULL : &vertex_index,
+                                        &const_index, &indir_index);
 
                        ctx->abi->store_tcs_outputs(ctx->abi, var,
                                                    vertex_index, indir_index,
@@ -2106,10 +2084,8 @@ visit_store_var(struct ac_nir_context *ctx,
                break;
        case nir_var_shared: {
                int writemask = instr->const_index[0];
-               LLVMValueRef address = uses_deref_chain ?
-                                            build_gep_for_deref(ctx, instr->variables[0])
-                                          : get_src(ctx, instr->src[0]);
-               LLVMValueRef val = get_src(ctx, instr->src[uses_deref_chain ? 0 : 1]);
+               LLVMValueRef address = get_src(ctx, instr->src[0]);
+               LLVMValueRef val = get_src(ctx, instr->src[1]);
                if (util_is_power_of_two_nonzero(writemask)) {
                        val = LLVMBuildBitCast(
                           ctx->ac.builder, val,
@@ -2236,32 +2212,8 @@ static LLVMValueRef adjust_sample_index_using_fmask(struct ac_llvm_context *ctx,
        return sample_index;
 }
 
-static bool is_var_image_intrinsic(const nir_intrinsic_instr *instr)
-{
-       switch(instr->intrinsic) {
-       case nir_intrinsic_image_var_samples:
-       case nir_intrinsic_image_var_load:
-       case nir_intrinsic_image_var_store:
-       case nir_intrinsic_image_var_atomic_add:
-       case nir_intrinsic_image_var_atomic_min:
-       case nir_intrinsic_image_var_atomic_max:
-       case nir_intrinsic_image_var_atomic_and:
-       case nir_intrinsic_image_var_atomic_or:
-       case nir_intrinsic_image_var_atomic_xor:
-       case nir_intrinsic_image_var_atomic_exchange:
-       case nir_intrinsic_image_var_atomic_comp_swap:
-       case nir_intrinsic_image_var_size:
-               return true;
-       default:
-               return false;
-       }
-}
-
 static nir_variable *get_image_variable(const nir_intrinsic_instr *instr)
 {
-       if (is_var_image_intrinsic(instr))
-               return instr->variables[0]->var;
-
        assert(instr->src[0].is_ssa);
        return nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
 }
@@ -2271,26 +2223,21 @@ static LLVMValueRef get_image_descriptor(struct ac_nir_context *ctx,
                                          enum ac_descriptor_type desc_type,
                                          bool write)
 {
-       if (is_var_image_intrinsic(instr))
-               return get_sampler_desc(ctx, instr->variables[0], NULL, desc_type, NULL, true, true);
-
-       return get_sampler_desc(ctx, NULL, nir_instr_as_deref(instr->src[0].ssa->parent_instr), desc_type, NULL, true, true);
+       return get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), desc_type, NULL, true, write);
 }
 
 static void get_image_coords(struct ac_nir_context *ctx,
                             const nir_intrinsic_instr *instr,
                             struct ac_image_args *args)
 {
-       /* As the deref instrinsics have the deref as src 0, everything is shifted. */
-       int src_shift = is_var_image_intrinsic(instr) ? 0 : 1;
        const struct glsl_type *type = glsl_without_array(get_image_variable(instr)->type);
 
-       LLVMValueRef src0 = get_src(ctx, instr->src[src_shift]);
+       LLVMValueRef src0 = get_src(ctx, instr->src[1]);
        LLVMValueRef masks[] = {
                LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
                LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
        };
-       LLVMValueRef sample_index = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[src_shift + 1]), 0);
+       LLVMValueRef sample_index = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
 
        int count;
        enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
@@ -2302,7 +2249,7 @@ static void get_image_coords(struct ac_nir_context *ctx,
        bool gfx9_1d = ctx->ac.chip_class >= GFX9 && dim == GLSL_SAMPLER_DIM_1D;
        count = image_type_to_components_count(dim, is_array);
 
-       if (is_ms) {
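+       /* The fmask lookup to adjust the sample index is only done for
+        * image loads. */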
+       if (is_ms && instr->intrinsic == nir_intrinsic_image_deref_load) {
                LLVMValueRef fmask_load_address[3];
                int chan;
 
@@ -2325,10 +2272,11 @@ static void get_image_coords(struct ac_nir_context *ctx,
                                                               fmask_load_address[1],
                                                               fmask_load_address[2],
                                                               sample_index,
-                                                              get_image_descriptor(ctx, instr, AC_DESC_FMASK, false));
+                                                              get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
+                                                                               AC_DESC_FMASK, NULL, false, false));
        }
        if (count == 1 && !gfx9_1d) {
-               if (instr->src[src_shift].ssa->num_components)
+               if (instr->src[1].ssa->num_components)
                        args->coords[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
                else
                        args->coords[0] = src0;
@@ -2398,15 +2346,12 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx,
 
        const enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
        if (dim == GLSL_SAMPLER_DIM_BUF) {
-               /* As the deref instrinsics have the deref as src 0, everything is shifted. */
-               int src_shift = is_var_image_intrinsic(instr) ? 0 : 1;
-
                unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
                unsigned num_channels = util_last_bit(mask);
                LLVMValueRef rsrc, vindex;
 
                rsrc = get_image_buffer_descriptor(ctx, instr, false);
-               vindex = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[src_shift]),
+               vindex = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
                                                 ctx->ac.i32_0, "");
 
                /* TODO: set "glc" and "can_speculate" when OpenGL needs it. */
@@ -2426,7 +2371,7 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx,
                                            glsl_sampler_type_is_array(type));
                args.dmask = 15;
                args.attributes = AC_FUNC_ATTR_READONLY;
-               if (var->data.image._volatile || var->data.image.coherent)
+               if (var->data.image.access & (ACCESS_VOLATILE | ACCESS_COHERENT))
                        args.cache_policy |= ac_glc;
 
                res = ac_build_image_opcode(&ctx->ac, &args);
@@ -2446,31 +2391,44 @@ static void visit_image_store(struct ac_nir_context *ctx,
        if (force_glc)
                glc = ctx->ac.i1true;
 
-       /* As the deref instrinsics have the deref as src 0, everything is shifted. */
-       int src_shift = is_var_image_intrinsic(instr) ? 0 : 1;
-
        if (dim == GLSL_SAMPLER_DIM_BUF) {
+               char name[48];
+               const char *types[] = { "f32", "v2f32", "v4f32" };
                LLVMValueRef rsrc = get_image_buffer_descriptor(ctx, instr, true);
+               LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
+               unsigned src_channels = ac_get_llvm_num_components(src);
 
-               params[0] = ac_to_float(&ctx->ac, get_src(ctx, instr->src[src_shift + 2])); /* data */
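+               /* The format store variants used below only come in 1, 2 and
+                * 4 channel flavours (see types[]), so pad 3-channel data out
+                * to vec4.
+                */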
+               if (src_channels == 3)
+                       src = ac_build_expand(&ctx->ac, src, 3, 4);
+
+               params[0] = src; /* data */
                params[1] = rsrc;
-               params[2] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[src_shift]),
+               params[2] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
                                                    ctx->ac.i32_0, ""); /* vindex */
                params[3] = ctx->ac.i32_0; /* voffset */
-               params[4] = glc;  /* glc */
-               params[5] = ctx->ac.i1false;  /* slc */
-               ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.buffer.store.format.v4f32", ctx->ac.voidt,
-                                  params, 6, 0);
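+               /* Pick the intrinsic suffix from the source channel count:
+                * 1 -> f32, 2 -> v2f32, 3 and 4 -> v4f32.
+                */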
+               snprintf(name, sizeof(name), "%s.%s",
+                        HAVE_LLVM >= 0x800 ? "llvm.amdgcn.struct.buffer.store.format"
+                                           : "llvm.amdgcn.buffer.store.format",
+                        types[CLAMP(src_channels, 1, 3) - 1]);
+
+               if (HAVE_LLVM >= 0x800) {
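+                       /* The struct.buffer.store form takes an extra soffset
+                        * operand and an i32 cache policy word instead of the
+                        * separate i1 glc/slc flags.
+                        */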
+                       params[4] = ctx->ac.i32_0; /* soffset */
+                       params[5] = force_glc ? ctx->ac.i32_1 : ctx->ac.i32_0; /* glc */
+               } else {
+                       params[4] = glc;  /* glc */
+                       params[5] = ctx->ac.i1false;  /* slc */
+               }
+               ac_build_intrinsic(&ctx->ac, name, ctx->ac.voidt, params, 6, 0);
        } else {
                struct ac_image_args args = {};
                args.opcode = ac_image_store;
-               args.data[0] = ac_to_float(&ctx->ac, get_src(ctx, instr->src[src_shift + 2]));
+               args.data[0] = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
                get_image_coords(ctx, instr, &args);
-               args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, true);;
+               args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, true);
                args.dim = get_ac_image_dim(&ctx->ac, glsl_get_sampler_dim(type),
                                            glsl_sampler_type_is_array(type));
                args.dmask = 15;
-               if (force_glc || var->data.image._volatile || var->data.image.coherent)
+               if (force_glc || (var->data.image.access & (ACCESS_VOLATILE | ACCESS_COHERENT)))
                        args.cache_policy |= ac_glc;
 
                ac_build_image_opcode(&ctx->ac, &args);
@@ -2485,10 +2443,9 @@ static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx,
        int param_count = 0;
        const nir_variable *var = get_image_variable(instr);
 
-       bool cmpswap = instr->intrinsic == nir_intrinsic_image_var_atomic_comp_swap ||
-                      instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap;
+       bool cmpswap = instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap;
        const char *atomic_name;
-       char intrinsic_name[41];
+       char intrinsic_name[64];
        enum ac_atomic_op atomic_subop;
        const struct glsl_type *type = glsl_without_array(var->type);
        MAYBE_UNUSED int length;
@@ -2496,42 +2453,34 @@ static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx,
        bool is_unsigned = glsl_get_sampler_result_type(type) == GLSL_TYPE_UINT;
 
        switch (instr->intrinsic) {
-       case nir_intrinsic_image_var_atomic_add:
        case nir_intrinsic_image_deref_atomic_add:
                atomic_name = "add";
                atomic_subop = ac_atomic_add;
                break;
-       case nir_intrinsic_image_var_atomic_min:
        case nir_intrinsic_image_deref_atomic_min:
                atomic_name = is_unsigned ? "umin" : "smin";
                atomic_subop = is_unsigned ? ac_atomic_umin : ac_atomic_smin;
                break;
-       case nir_intrinsic_image_var_atomic_max:
        case nir_intrinsic_image_deref_atomic_max:
                atomic_name = is_unsigned ? "umax" : "smax";
                atomic_subop = is_unsigned ? ac_atomic_umax : ac_atomic_smax;
                break;
-       case nir_intrinsic_image_var_atomic_and:
        case nir_intrinsic_image_deref_atomic_and:
                atomic_name = "and";
                atomic_subop = ac_atomic_and;
                break;
-       case nir_intrinsic_image_var_atomic_or:
        case nir_intrinsic_image_deref_atomic_or:
                atomic_name = "or";
                atomic_subop = ac_atomic_or;
                break;
-       case nir_intrinsic_image_var_atomic_xor:
        case nir_intrinsic_image_deref_atomic_xor:
                atomic_name = "xor";
                atomic_subop = ac_atomic_xor;
                break;
-       case nir_intrinsic_image_var_atomic_exchange:
        case nir_intrinsic_image_deref_atomic_exchange:
                atomic_name = "swap";
                atomic_subop = ac_atomic_swap;
                break;
-       case nir_intrinsic_image_var_atomic_comp_swap:
        case nir_intrinsic_image_deref_atomic_comp_swap:
                atomic_name = "cmpswap";
                atomic_subop = 0; /* not used */
@@ -2540,22 +2489,27 @@ static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx,
                abort();
        }
 
-       /* As the deref instrinsics have the deref as src 0, everything is shifted. */
-       int src_shift = is_var_image_intrinsic(instr) ? 0 : 1;
-
        if (cmpswap)
-               params[param_count++] = get_src(ctx, instr->src[src_shift + 3]);
-       params[param_count++] = get_src(ctx, instr->src[src_shift + 2]);
+               params[param_count++] = get_src(ctx, instr->src[4]);
+       params[param_count++] = get_src(ctx, instr->src[3]);
 
        if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_BUF) {
                params[param_count++] = get_image_buffer_descriptor(ctx, instr, true);
-               params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[src_shift]),
+               params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
                                                                ctx->ac.i32_0, ""); /* vindex */
                params[param_count++] = ctx->ac.i32_0; /* voffset */
-               params[param_count++] = ctx->ac.i1false;  /* slc */
+               if (HAVE_LLVM >= 0x800) {
+                       params[param_count++] = ctx->ac.i32_0; /* soffset */
+                       params[param_count++] = ctx->ac.i32_0;  /* slc */
+
+                       length = snprintf(intrinsic_name, sizeof(intrinsic_name),
+                                         "llvm.amdgcn.struct.buffer.atomic.%s.i32", atomic_name);
+               } else {
+                       params[param_count++] = ctx->ac.i1false;  /* slc */
 
-               length = snprintf(intrinsic_name, sizeof(intrinsic_name),
-                                 "llvm.amdgcn.buffer.atomic.%s", atomic_name);
+                       length = snprintf(intrinsic_name, sizeof(intrinsic_name),
+                                         "llvm.amdgcn.buffer.atomic.%s", atomic_name);
+               }
 
                assert(length < sizeof(intrinsic_name));
                return ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->ac.i32,
@@ -2636,10 +2590,6 @@ static LLVMValueRef visit_image_size(struct ac_nir_context *ctx,
        return res;
 }
 
-#define NOOP_WAITCNT 0xf7f
-#define LGKM_CNT 0x07f
-#define VM_CNT 0xf70
-
 static void emit_membar(struct ac_llvm_context *ac,
                        const nir_intrinsic_instr *instr)
 {
@@ -2675,8 +2625,7 @@ void ac_emit_barrier(struct ac_llvm_context *ac, gl_shader_stage stage)
                ac_build_waitcnt(ac, LGKM_CNT & VM_CNT);
                return;
        }
-       ac_build_intrinsic(ac, "llvm.amdgcn.s.barrier",
-                          ac->voidt, NULL, 0, AC_FUNC_ATTR_CONVERGENT);
+       ac_build_s_barrier(ac);
 }
 
 static void emit_discard(struct ac_nir_context *ctx,
@@ -2690,7 +2639,7 @@ static void emit_discard(struct ac_nir_context *ctx,
                                     ctx->ac.i32_0, "");
        } else {
                assert(instr->intrinsic == nir_intrinsic_discard);
-               cond = LLVMConstInt(ctx->ac.i1, false, 0);
+               cond = ctx->ac.i1false;
        }
 
        ctx->abi->emit_kill(ctx->abi, cond);
@@ -2748,7 +2697,7 @@ visit_first_invocation(struct ac_nir_context *ctx)
        LLVMValueRef active_set = ac_build_ballot(&ctx->ac, ctx->ac.i32_1);
 
        /* The second argument is whether cttz(0) should be defined, but we do not care. */
-       LLVMValueRef args[] = {active_set, LLVMConstInt(ctx->ac.i1, 0, false)};
+       LLVMValueRef args[] = {active_set, ctx->ac.i1false};
        LLVMValueRef result =  ac_build_intrinsic(&ctx->ac,
                                                  "llvm.cttz.i64",
                                                  ctx->ac.i64, args, 2,
@@ -2805,8 +2754,8 @@ static LLVMValueRef visit_var_atomic(struct ac_nir_context *ctx,
        LLVMValueRef result;
        LLVMValueRef src = get_src(ctx, instr->src[src_idx]);
 
-       if (instr->intrinsic == nir_intrinsic_var_atomic_comp_swap ||
-           instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap) {
+       if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap ||
+           instr->intrinsic == nir_intrinsic_deref_atomic_comp_swap) {
                LLVMValueRef src1 = get_src(ctx, instr->src[src_idx + 1]);
                result = LLVMBuildAtomicCmpXchg(ctx->ac.builder,
                                                ptr, src, src1,
@@ -2817,40 +2766,40 @@ static LLVMValueRef visit_var_atomic(struct ac_nir_context *ctx,
        } else {
                LLVMAtomicRMWBinOp op;
                switch (instr->intrinsic) {
-               case nir_intrinsic_var_atomic_add:
                case nir_intrinsic_shared_atomic_add:
+               case nir_intrinsic_deref_atomic_add:
                        op = LLVMAtomicRMWBinOpAdd;
                        break;
-               case nir_intrinsic_var_atomic_umin:
                case nir_intrinsic_shared_atomic_umin:
+               case nir_intrinsic_deref_atomic_umin:
                        op = LLVMAtomicRMWBinOpUMin;
                        break;
-               case nir_intrinsic_var_atomic_umax:
                case nir_intrinsic_shared_atomic_umax:
+               case nir_intrinsic_deref_atomic_umax:
                        op = LLVMAtomicRMWBinOpUMax;
                        break;
-               case nir_intrinsic_var_atomic_imin:
                case nir_intrinsic_shared_atomic_imin:
+               case nir_intrinsic_deref_atomic_imin:
                        op = LLVMAtomicRMWBinOpMin;
                        break;
-               case nir_intrinsic_var_atomic_imax:
                case nir_intrinsic_shared_atomic_imax:
+               case nir_intrinsic_deref_atomic_imax:
                        op = LLVMAtomicRMWBinOpMax;
                        break;
-               case nir_intrinsic_var_atomic_and:
                case nir_intrinsic_shared_atomic_and:
+               case nir_intrinsic_deref_atomic_and:
                        op = LLVMAtomicRMWBinOpAnd;
                        break;
-               case nir_intrinsic_var_atomic_or:
                case nir_intrinsic_shared_atomic_or:
+               case nir_intrinsic_deref_atomic_or:
                        op = LLVMAtomicRMWBinOpOr;
                        break;
-               case nir_intrinsic_var_atomic_xor:
                case nir_intrinsic_shared_atomic_xor:
+               case nir_intrinsic_deref_atomic_xor:
                        op = LLVMAtomicRMWBinOpXor;
                        break;
-               case nir_intrinsic_var_atomic_exchange:
                case nir_intrinsic_shared_atomic_exchange:
+               case nir_intrinsic_deref_atomic_exchange:
                        op = LLVMAtomicRMWBinOpXchg;
                        break;
                default:
@@ -2887,24 +2836,26 @@ static LLVMValueRef visit_interp(struct ac_nir_context *ctx,
        LLVMValueRef src_c0 = NULL;
        LLVMValueRef src_c1 = NULL;
        LLVMValueRef src0 = NULL;
-       int input_index = instr->variables[0]->var->data.location - VARYING_SLOT_VAR0;
+
+       nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
+       int input_index = var->data.location - VARYING_SLOT_VAR0;
        switch (instr->intrinsic) {
-       case nir_intrinsic_interp_var_at_centroid:
+       case nir_intrinsic_interp_deref_at_centroid:
                location = INTERP_CENTROID;
                break;
-       case nir_intrinsic_interp_var_at_sample:
-       case nir_intrinsic_interp_var_at_offset:
+       case nir_intrinsic_interp_deref_at_sample:
+       case nir_intrinsic_interp_deref_at_offset:
                location = INTERP_CENTER;
-               src0 = get_src(ctx, instr->src[0]);
+               src0 = get_src(ctx, instr->src[1]);
                break;
        default:
                break;
        }
 
-       if (instr->intrinsic == nir_intrinsic_interp_var_at_offset) {
+       if (instr->intrinsic == nir_intrinsic_interp_deref_at_offset) {
                src_c0 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, src0, ctx->ac.i32_0, ""));
                src_c1 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, src0, ctx->ac.i32_1, ""));
-       } else if (instr->intrinsic == nir_intrinsic_interp_var_at_sample) {
+       } else if (instr->intrinsic == nir_intrinsic_interp_deref_at_sample) {
                LLVMValueRef sample_position;
                LLVMValueRef halfval = LLVMConstReal(ctx->ac.f32, 0.5f);
 
@@ -2916,7 +2867,7 @@ static LLVMValueRef visit_interp(struct ac_nir_context *ctx,
                src_c1 = LLVMBuildExtractElement(ctx->ac.builder, sample_position, ctx->ac.i32_1, "");
                src_c1 = LLVMBuildFSub(ctx->ac.builder, src_c1, halfval, "");
        }
-       interp_param = ctx->abi->lookup_interp_param(ctx->abi, instr->variables[0]->var->data.interpolation, location);
+       interp_param = ctx->abi->lookup_interp_param(ctx->abi, var->data.interpolation, location);
        attr_number = LLVMConstInt(ctx->ac.i32, input_index, false);
 
        if (location == INTERP_CENTER) {
@@ -2945,11 +2896,8 @@ static LLVMValueRef visit_interp(struct ac_nir_context *ctx,
                        interp_el = LLVMBuildBitCast(ctx->ac.builder, interp_el,
                                                     ctx->ac.f32, "");
 
-                       temp1 = LLVMBuildFMul(ctx->ac.builder, ddx_el, src_c0, "");
-                       temp1 = LLVMBuildFAdd(ctx->ac.builder, temp1, interp_el, "");
-
-                       temp2 = LLVMBuildFMul(ctx->ac.builder, ddy_el, src_c1, "");
-                       temp2 = LLVMBuildFAdd(ctx->ac.builder, temp2, temp1, "");
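+                       /* Adjust the barycentrics to the requested position:
+                        * ij += ddx(ij) * offset_x + ddy(ij) * offset_y
+                        */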
+                       temp1 = ac_build_fmad(&ctx->ac, ddx_el, src_c0, interp_el);
+                       temp2 = ac_build_fmad(&ctx->ac, ddy_el, src_c1, temp1);
 
                        ij_out[i] = LLVMBuildBitCast(ctx->ac.builder,
                                                     temp2, ctx->ac.i32, "");
@@ -2980,7 +2928,7 @@ static LLVMValueRef visit_interp(struct ac_nir_context *ctx,
                }
        }
        return ac_build_varying_gather_values(&ctx->ac, result, instr->num_components,
-                                             instr->variables[0]->var->data.location_frac);
+                                             var->data.location_frac);
 }
 
 static void visit_intrinsic(struct ac_nir_context *ctx,
@@ -3140,11 +3088,9 @@ static void visit_intrinsic(struct ac_nir_context *ctx,
                result = visit_get_buffer_size(ctx, instr);
                break;
        case nir_intrinsic_load_deref:
-       case nir_intrinsic_load_var:
                result = visit_load_var(ctx, instr);
                break;
        case nir_intrinsic_store_deref:
-       case nir_intrinsic_store_var:
                visit_store_var(ctx, instr);
                break;
        case nir_intrinsic_load_shared:
@@ -3153,26 +3099,15 @@ static void visit_intrinsic(struct ac_nir_context *ctx,
        case nir_intrinsic_store_shared:
                visit_store_shared(ctx, instr);
                break;
-       case nir_intrinsic_image_var_samples:
        case nir_intrinsic_image_deref_samples:
                result = visit_image_samples(ctx, instr);
                break;
-       case nir_intrinsic_image_var_load:
        case nir_intrinsic_image_deref_load:
                result = visit_image_load(ctx, instr);
                break;
-       case nir_intrinsic_image_var_store:
        case nir_intrinsic_image_deref_store:
                visit_image_store(ctx, instr);
                break;
-       case nir_intrinsic_image_var_atomic_add:
-       case nir_intrinsic_image_var_atomic_min:
-       case nir_intrinsic_image_var_atomic_max:
-       case nir_intrinsic_image_var_atomic_and:
-       case nir_intrinsic_image_var_atomic_or:
-       case nir_intrinsic_image_var_atomic_xor:
-       case nir_intrinsic_image_var_atomic_exchange:
-       case nir_intrinsic_image_var_atomic_comp_swap:
        case nir_intrinsic_image_deref_atomic_add:
        case nir_intrinsic_image_deref_atomic_min:
        case nir_intrinsic_image_deref_atomic_max:
@@ -3183,7 +3118,6 @@ static void visit_intrinsic(struct ac_nir_context *ctx,
        case nir_intrinsic_image_deref_atomic_comp_swap:
                result = visit_image_atomic(ctx, instr);
                break;
-       case nir_intrinsic_image_var_size:
        case nir_intrinsic_image_deref_size:
                result = visit_image_size(ctx, instr);
                break;
@@ -3219,23 +3153,23 @@ static void visit_intrinsic(struct ac_nir_context *ctx,
                result = visit_var_atomic(ctx, instr, ptr, 1);
                break;
        }
-       case nir_intrinsic_var_atomic_add:
-       case nir_intrinsic_var_atomic_imin:
-       case nir_intrinsic_var_atomic_umin:
-       case nir_intrinsic_var_atomic_imax:
-       case nir_intrinsic_var_atomic_umax:
-       case nir_intrinsic_var_atomic_and:
-       case nir_intrinsic_var_atomic_or:
-       case nir_intrinsic_var_atomic_xor:
-       case nir_intrinsic_var_atomic_exchange:
-       case nir_intrinsic_var_atomic_comp_swap: {
-               LLVMValueRef ptr = build_gep_for_deref(ctx, instr->variables[0]);
-               result = visit_var_atomic(ctx, instr, ptr, 0);
+       case nir_intrinsic_deref_atomic_add:
+       case nir_intrinsic_deref_atomic_imin:
+       case nir_intrinsic_deref_atomic_umin:
+       case nir_intrinsic_deref_atomic_imax:
+       case nir_intrinsic_deref_atomic_umax:
+       case nir_intrinsic_deref_atomic_and:
+       case nir_intrinsic_deref_atomic_or:
+       case nir_intrinsic_deref_atomic_xor:
+       case nir_intrinsic_deref_atomic_exchange:
+       case nir_intrinsic_deref_atomic_comp_swap: {
+               LLVMValueRef ptr = get_src(ctx, instr->src[0]);
+               result = visit_var_atomic(ctx, instr, ptr, 1);
                break;
        }
-       case nir_intrinsic_interp_var_at_centroid:
-       case nir_intrinsic_interp_var_at_sample:
-       case nir_intrinsic_interp_var_at_offset:
+       case nir_intrinsic_interp_deref_at_centroid:
+       case nir_intrinsic_interp_deref_at_sample:
+       case nir_intrinsic_interp_deref_at_offset:
                result = visit_interp(ctx, instr);
                break;
        case nir_intrinsic_emit_vertex:
@@ -3313,7 +3247,6 @@ static void visit_intrinsic(struct ac_nir_context *ctx,
 }
 
 static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
-                                    const nir_deref_var *deref,
                                     nir_deref_instr *deref_instr,
                                     enum ac_descriptor_type desc_type,
                                     const nir_tex_instr *tex_instr,
@@ -3325,11 +3258,11 @@ static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
        unsigned base_index;
        bool bindless = false;
 
-       if (!deref && !deref_instr) {
+       if (!deref_instr) {
                assert(tex_instr && !image);
                descriptor_set = 0;
                base_index = tex_instr->sampler_index;
-       } else if(deref_instr) {
+       } else {
                while(deref_instr->deref_type != nir_deref_type_var) {
                        unsigned array_size = glsl_get_aoa_size(deref_instr->type);
                        if (!array_size)
@@ -3355,41 +3288,6 @@ static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
                }
                descriptor_set = deref_instr->var->data.descriptor_set;
                base_index = deref_instr->var->data.binding;
-        } else {
-               const nir_deref *tail = &deref->deref;
-               while (tail->child) {
-                       const nir_deref_array *child = nir_deref_as_array(tail->child);
-                       unsigned array_size = glsl_get_aoa_size(tail->child->type);
-
-                       if (!array_size)
-                               array_size = 1;
-
-                       assert(child->deref_array_type != nir_deref_array_type_wildcard);
-
-                       if (child->deref_array_type == nir_deref_array_type_indirect) {
-                               LLVMValueRef indirect = get_src(ctx, child->indirect);
-
-                               indirect = LLVMBuildMul(ctx->ac.builder, indirect,
-                                       LLVMConstInt(ctx->ac.i32, array_size, false), "");
-
-                               if (!index)
-                                       index = indirect;
-                               else
-                                       index = LLVMBuildAdd(ctx->ac.builder, index, indirect, "");
-                       }
-
-                       constant_index += child->base_offset * array_size;
-
-                       tail = &child->deref;
-               }
-               descriptor_set = deref->var->data.descriptor_set;
-
-               if (deref->var->data.bindless) {
-                       bindless = deref->var->data.bindless;
-                       base_index = deref->var->data.driver_location;
-               } else {
-                       base_index = deref->var->data.binding;
-               }
        }
 
        return ctx->abi->load_sampler_desc(ctx->abi,
@@ -3435,8 +3333,6 @@ static void tex_fetch_ptrs(struct ac_nir_context *ctx,
 {
        nir_deref_instr *texture_deref_instr = NULL;
        nir_deref_instr *sampler_deref_instr = NULL;
-       nir_deref_var *texture_deref_var = NULL;
-       nir_deref_var *sampler_deref_var = NULL;
 
        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
@@ -3454,30 +3350,25 @@ static void tex_fetch_ptrs(struct ac_nir_context *ctx,
        if (!sampler_deref_instr)
                sampler_deref_instr = texture_deref_instr;
 
-       if (!texture_deref_instr) {
-               texture_deref_var = instr->texture;
-               sampler_deref_var = instr->sampler ? instr->sampler : instr->texture;
-       }
-
        if (instr->sampler_dim  == GLSL_SAMPLER_DIM_BUF)
-               *res_ptr = get_sampler_desc(ctx, texture_deref_var, texture_deref_instr, AC_DESC_BUFFER, instr, false, false);
+               *res_ptr = get_sampler_desc(ctx, texture_deref_instr, AC_DESC_BUFFER, instr, false, false);
        else
-               *res_ptr = get_sampler_desc(ctx, texture_deref_var, texture_deref_instr, AC_DESC_IMAGE, instr, false, false);
+               *res_ptr = get_sampler_desc(ctx, texture_deref_instr, AC_DESC_IMAGE, instr, false, false);
        if (samp_ptr) {
-               *samp_ptr = get_sampler_desc(ctx, sampler_deref_var, sampler_deref_instr, AC_DESC_SAMPLER, instr, false, false);
+               *samp_ptr = get_sampler_desc(ctx, sampler_deref_instr, AC_DESC_SAMPLER, instr, false, false);
                if (instr->sampler_dim < GLSL_SAMPLER_DIM_RECT)
                        *samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
        }
-       if (fmask_ptr && !instr->sampler && (instr->op == nir_texop_txf_ms ||
-                                            instr->op == nir_texop_samples_identical))
-               *fmask_ptr = get_sampler_desc(ctx, instr->texture, texture_deref_instr, AC_DESC_FMASK, instr, false, false);
+       if (fmask_ptr && (instr->op == nir_texop_txf_ms ||
+                         instr->op == nir_texop_samples_identical))
+               *fmask_ptr = get_sampler_desc(ctx, texture_deref_instr, AC_DESC_FMASK, instr, false, false);
 }
 
 static LLVMValueRef apply_round_slice(struct ac_llvm_context *ctx,
                                      LLVMValueRef coord)
 {
        coord = ac_to_float(ctx, coord);
-       coord = ac_build_intrinsic(ctx, "llvm.rint.f32", ctx->f32, &coord, 1, 0);
+       coord = ac_build_round(ctx, coord);
        coord = ac_to_integer(ctx, coord);
        return coord;
 }
@@ -3789,7 +3680,6 @@ static void visit_post_phi(struct ac_nir_context *ctx,
 
 static void phi_post_pass(struct ac_nir_context *ctx)
 {
-       struct hash_entry *entry;
        hash_table_foreach(ctx->phis, entry) {
                visit_post_phi(ctx, (nir_phi_instr*)entry->key,
                               (LLVMValueRef)entry->data);
@@ -3851,6 +3741,9 @@ static void visit_deref(struct ac_nir_context *ctx,
                result = ac_build_gep0(&ctx->ac, get_src(ctx, instr->parent),
                                       get_src(ctx, instr->arr.index));
                break;
+       case nir_deref_type_cast:
+               result = get_src(ctx, instr->parent);
+               break;
        default:
                unreachable("Unhandled deref_instr deref type");
        }
@@ -3989,10 +3882,12 @@ ac_handle_shader_output_decl(struct ac_llvm_context *ctx,
                }
        }
 
+       bool is_16bit = glsl_type_is_16bit(variable->type);
+       LLVMTypeRef type = is_16bit ? ctx->f16 : ctx->f32;
        for (unsigned i = 0; i < attrib_count; ++i) {
                for (unsigned chan = 0; chan < 4; chan++) {
                        abi->outputs[ac_llvm_reg_index_soa(output_loc + i, chan)] =
-                                      ac_build_alloca_undef(ctx, ctx->f32, "");
+                                      ac_build_alloca_undef(ctx, type, "");
                }
        }
 }
@@ -4092,7 +3987,7 @@ setup_shared(struct ac_nir_context *ctx,
                        LLVMAddGlobalInAddressSpace(
                           ctx->ac.module, glsl_to_llvm_type(&ctx->ac, variable->type),
                           variable->name ? variable->name : "",
-                          AC_LOCAL_ADDR_SPACE);
+                          AC_ADDR_SPACE_LDS);
                _mesa_hash_table_insert(ctx->vars, variable, shared);
        }
 }
@@ -4181,3 +4076,164 @@ ac_lower_indirect_derefs(struct nir_shader *nir, enum chip_class chip_class)
 
        nir_lower_indirect_derefs(nir, indirect_mask);
 }
+
+static unsigned
+get_inst_tessfactor_writemask(nir_intrinsic_instr *intrin)
+{
+       if (intrin->intrinsic != nir_intrinsic_store_deref)
+               return 0;
+
+       nir_variable *var =
+               nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[0]));
+
+       if (var->data.mode != nir_var_shader_out)
+               return 0;
+
+       unsigned writemask = 0;
+       const int location = var->data.location;
+       unsigned first_component = var->data.location_frac;
+       unsigned num_comps = intrin->num_components;
+
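+       /* Pack both tess levels into a single mask: inner levels take
+        * bits 0-3 and outer levels bits 4-7, offset by the first written
+        * component.
+        */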
+       if (location == VARYING_SLOT_TESS_LEVEL_INNER)
+               writemask = ((1 << (num_comps + 1)) - 1) << first_component;
+       else if (location == VARYING_SLOT_TESS_LEVEL_OUTER)
+               writemask = (((1 << (num_comps + 1)) - 1) << first_component) << 4;
+
+       return writemask;
+}
+
+static void
+scan_tess_ctrl(nir_cf_node *cf_node, unsigned *upper_block_tf_writemask,
+              unsigned *cond_block_tf_writemask,
+              bool *tessfactors_are_def_in_all_invocs, bool is_nested_cf)
+{
+       switch (cf_node->type) {
+       case nir_cf_node_block: {
+               nir_block *block = nir_cf_node_as_block(cf_node);
+               nir_foreach_instr(instr, block) {
+                       if (instr->type != nir_instr_type_intrinsic)
+                               continue;
+
+                       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+                       if (intrin->intrinsic == nir_intrinsic_barrier) {
+
+                               /* If we find a barrier in nested control flow, put this in the
+                                * too-hard basket. In GLSL this is not possible, but it is in
+                                * SPIR-V.
+                                */
+                               if (is_nested_cf) {
+                                       *tessfactors_are_def_in_all_invocs = false;
+                                       return;
+                               }
+
+                               /* The following case must be prevented:
+                                *    gl_TessLevelInner = ...;
+                                *    barrier();
+                                *    if (gl_InvocationID == 1)
+                                *       gl_TessLevelInner = ...;
+                                *
+                                * If you consider disjoint code segments separated by barriers, each
+                                * such segment that writes tess factor channels should write the same
+                                * channels in all codepaths within that segment.
+                                */
+                               if (*upper_block_tf_writemask || *cond_block_tf_writemask) {
+                                       /* Accumulate the result: */
+                                       *tessfactors_are_def_in_all_invocs &=
+                                               !(*cond_block_tf_writemask & ~(*upper_block_tf_writemask));
+
+                                       /* Analyze the next code segment from scratch. */
+                                       *upper_block_tf_writemask = 0;
+                                       *cond_block_tf_writemask = 0;
+                               }
+                       } else
+                               *upper_block_tf_writemask |= get_inst_tessfactor_writemask(intrin);
+               }
+
+               break;
+       }
+       case nir_cf_node_if: {
+               unsigned then_tessfactor_writemask = 0;
+               unsigned else_tessfactor_writemask = 0;
+
+               nir_if *if_stmt = nir_cf_node_as_if(cf_node);
+               foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->then_list) {
+                       scan_tess_ctrl(nested_node, &then_tessfactor_writemask,
+                                      cond_block_tf_writemask,
+                                      tessfactors_are_def_in_all_invocs, true);
+               }
+
+               foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->else_list) {
+                       scan_tess_ctrl(nested_node, &else_tessfactor_writemask,
+                                      cond_block_tf_writemask,
+                                      tessfactors_are_def_in_all_invocs, true);
+               }
+
+               if (then_tessfactor_writemask || else_tessfactor_writemask) {
+                       /* If both statements write the same tess factor channels,
+                        * we can say that the upper block writes them too.
+                        */
+                       *upper_block_tf_writemask |= then_tessfactor_writemask &
+                               else_tessfactor_writemask;
+                       *cond_block_tf_writemask |= then_tessfactor_writemask |
+                               else_tessfactor_writemask;
+               }
+
+               break;
+       }
+       case nir_cf_node_loop: {
+               nir_loop *loop = nir_cf_node_as_loop(cf_node);
+               foreach_list_typed(nir_cf_node, nested_node, node, &loop->body) {
+                       scan_tess_ctrl(nested_node, cond_block_tf_writemask,
+                                      cond_block_tf_writemask,
+                                      tessfactors_are_def_in_all_invocs, true);
+               }
+
+               break;
+       }
+       default:
+               unreachable("unknown cf node type");
+       }
+}
+
+bool
+ac_are_tessfactors_def_in_all_invocs(const struct nir_shader *nir)
+{
+       assert(nir->info.stage == MESA_SHADER_TESS_CTRL);
+
+       /* The pass works as follows:
+        * If all codepaths write tess factors, we can say that all
+        * invocations define tess factors.
+        *
+        * Each tess factor channel is tracked separately.
+        */
+       unsigned main_block_tf_writemask = 0; /* which tess factor channels the main block writes */
+       unsigned cond_block_tf_writemask = 0; /* which tess factor channels conditional blocks write */
+
+       /* Initial value = true. Here the pass will accumulate results from
+        * multiple segments surrounded by barriers. If tess factors aren't
+        * written at all, it's a shader bug and we don't care whether the
+        * result ends up true.
+        */
+       bool tessfactors_are_def_in_all_invocs = true;
+
+       nir_foreach_function(function, nir) {
+               if (function->impl) {
+                       foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
+                               scan_tess_ctrl(node, &main_block_tf_writemask,
+                                              &cond_block_tf_writemask,
+                                              &tessfactors_are_def_in_all_invocs,
+                                              false);
+                       }
+               }
+       }
+
+       /* Accumulate the result for the last code segment separated by a
+        * barrier.
+        */
+       if (main_block_tf_writemask || cond_block_tf_writemask) {
+               tessfactors_are_def_in_all_invocs &=
+                       !(cond_block_tf_writemask & ~main_block_tf_writemask);
+       }
+
+       return tessfactors_are_def_in_all_invocs;
+}
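+
For context, a rough usage sketch of how a driver might consume ac_are_tessfactors_def_in_all_invocs() when scanning a tessellation control shader. This is hypothetical and not part of the patch; the struct and field names below are illustrative only.

	/* Hypothetical usage sketch: "tcs_scan_info" and its field are
	 * illustrative names, not part of this patch. */
	struct tcs_scan_info {
		bool tess_factors_written_by_all_invocations;
	};

	static void
	scan_tcs(struct tcs_scan_info *info, const struct nir_shader *nir)
	{
		if (nir->info.stage == MESA_SHADER_TESS_CTRL)
			info->tess_factors_written_by_all_invocations =
				ac_are_tessfactors_def_in_all_invocs(nir);
	}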