#include "ac_binary.h"
#include "sid.h"
#include "nir/nir.h"
+#include "nir/nir_deref.h"
#include "util/bitscan.h"
+#include "util/u_math.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
gl_shader_stage stage;
+ LLVMValueRef *ssa_defs;
+
struct hash_table *defs;
struct hash_table *phis;
struct hash_table *vars;
};
static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
- const nir_deref_var *deref,
+ nir_deref_instr *deref_instr,
enum ac_descriptor_type desc_type,
const nir_tex_instr *instr,
bool image, bool write);
}
}
+/* Translate a GLSL sampler dimension (plus arrayness) into the
+ * ac_image_dim used when building image/sample intrinsics.
+ *
+ * GFX9 allocates 1D textures as 2D, so 1D and 1D-array are promoted
+ * to their 2D counterparts there; subpass inputs are always treated
+ * as 2D arrays.
+ */
+static enum ac_image_dim
+get_ac_sampler_dim(const struct ac_llvm_context *ctx, enum glsl_sampler_dim dim,
+		   bool is_array)
+{
+	switch (dim) {
+	case GLSL_SAMPLER_DIM_1D:
+		if (ctx->chip_class >= GFX9)
+			return is_array ? ac_image_2darray : ac_image_2d;
+		return is_array ? ac_image_1darray : ac_image_1d;
+	case GLSL_SAMPLER_DIM_2D:
+	case GLSL_SAMPLER_DIM_RECT:
+	case GLSL_SAMPLER_DIM_EXTERNAL:
+		return is_array ? ac_image_2darray : ac_image_2d;
+	case GLSL_SAMPLER_DIM_3D:
+		return ac_image_3d;
+	case GLSL_SAMPLER_DIM_CUBE:
+		return ac_image_cube;
+	case GLSL_SAMPLER_DIM_MS:
+		return is_array ? ac_image_2darraymsaa : ac_image_2dmsaa;
+	case GLSL_SAMPLER_DIM_SUBPASS:
+		return ac_image_2darray;
+	case GLSL_SAMPLER_DIM_SUBPASS_MS:
+		return ac_image_2darraymsaa;
+	default:
+		unreachable("bad sampler dim");
+	}
+}
+
+/* Like get_ac_sampler_dim(), but with the fixups needed for image
+ * (load/store) opcodes: cube maps, and 3D images on chips up to VI,
+ * are accessed as 2D arrays instead.
+ */
+static enum ac_image_dim
+get_ac_image_dim(const struct ac_llvm_context *ctx, enum glsl_sampler_dim sdim,
+		 bool is_array)
+{
+	enum ac_image_dim dim = get_ac_sampler_dim(ctx, sdim, is_array);
+
+	if (dim == ac_image_cube ||
+	    (ctx->chip_class <= VI && dim == ac_image_3d))
+		dim = ac_image_2darray;
+
+	return dim;
+}
+
static LLVMTypeRef get_def_type(struct ac_nir_context *ctx,
const nir_ssa_def *def)
{
static LLVMValueRef get_src(struct ac_nir_context *nir, nir_src src)
{
	assert(src.is_ssa);
-	struct hash_entry *entry = _mesa_hash_table_search(nir->defs, src.ssa);
-	return (LLVMValueRef)entry->data;
+	/* SSA defs are now kept in a flat array indexed by the NIR SSA
+	 * index, replacing the previous hash-table lookup.
+	 */
+	return nir->ssa_defs[src.ssa->index];
}
static LLVMValueRef
{
LLVMValueRef v = LLVMBuildICmp(ctx->builder, LLVMIntNE, src0,
ctx->i32_0, "");
- return LLVMBuildSelect(ctx->builder, v, ac_to_integer(ctx, src1),
- ac_to_integer(ctx, src2), "");
+ return LLVMBuildSelect(ctx->builder, v,
+ ac_to_integer_or_pointer(ctx, src1),
+ ac_to_integer_or_pointer(ctx, src2), "");
}
static LLVMValueRef emit_minmax_int(struct ac_llvm_context *ctx,
}
static LLVMValueRef emit_b2f(struct ac_llvm_context *ctx,
-			     LLVMValueRef src0)
+			     LLVMValueRef src0,
+			     unsigned bitsize)
{
-	return LLVMBuildAnd(ctx->builder, src0, LLVMBuildBitCast(ctx->builder, LLVMConstReal(ctx->f32, 1.0), ctx->i32, ""), "");
+	/* src0 is a 32-bit boolean (zero or an all-ones mask); ANDing it
+	 * with the bit pattern of 1.0f yields exactly 0.0f or 1.0f.
+	 */
+	LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0,
+					   LLVMBuildBitCast(ctx->builder, LLVMConstReal(ctx->f32, 1.0), ctx->i32, ""),
+					   "");
+	result = LLVMBuildBitCast(ctx->builder, result, ctx->f32, "");
+
+	/* Convert the f32 result to the destination bit size. A plain
+	 * "32 else FPExt-to-f64" split would mishandle nir_op_b2f16,
+	 * which also reaches this helper, so handle 16 bits explicitly.
+	 */
+	switch (bitsize) {
+	case 16:
+		return LLVMBuildFPTrunc(ctx->builder, result, ctx->f16, "");
+	case 32:
+		return result;
+	case 64:
+		return LLVMBuildFPExt(ctx->builder, result, ctx->f64, "");
+	default:
+		unreachable("Unsupported bit size.");
+	}
}
static LLVMValueRef emit_f2b(struct ac_llvm_context *ctx,
const LLVMValueRef srcs[3])
{
LLVMValueRef result;
- LLVMValueRef icond = LLVMBuildICmp(ctx->builder, LLVMIntEQ, srcs[2], LLVMConstInt(ctx->i32, 32, false), "");
- result = ac_build_bfe(ctx, srcs[0], srcs[1], srcs[2], is_signed);
- result = LLVMBuildSelect(ctx->builder, icond, srcs[0], result, "");
+ if (HAVE_LLVM >= 0x0800) {
+ LLVMValueRef icond = LLVMBuildICmp(ctx->builder, LLVMIntEQ, srcs[2], LLVMConstInt(ctx->i32, 32, false), "");
+ result = ac_build_bfe(ctx, srcs[0], srcs[1], srcs[2], is_signed);
+ result = LLVMBuildSelect(ctx->builder, icond, srcs[0], result, "");
+ } else {
+ /* FIXME: LLVM 7+ returns incorrect result when count is 0.
+ * https://bugs.freedesktop.org/show_bug.cgi?id=107276
+ */
+ LLVMValueRef zero = ctx->i32_0;
+ LLVMValueRef icond1 = LLVMBuildICmp(ctx->builder, LLVMIntEQ, srcs[2], LLVMConstInt(ctx->i32, 32, false), "");
+ LLVMValueRef icond2 = LLVMBuildICmp(ctx->builder, LLVMIntEQ, srcs[2], zero, "");
+
+ result = ac_build_bfe(ctx, srcs[0], srcs[1], srcs[2], is_signed);
+ result = LLVMBuildSelect(ctx->builder, icond1, srcs[0], result, "");
+ result = LLVMBuildSelect(ctx->builder, icond2, zero, result, "");
+ }
+
return result;
}
comp[0] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_0, "");
comp[1] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_1, "");
- return ac_build_cvt_pkrtz_f16(ctx, comp);
+ return LLVMBuildBitCast(ctx->builder, ac_build_cvt_pkrtz_f16(ctx, comp),
+ ctx->i32, "");
}
static LLVMValueRef emit_unpack_half_2x16(struct ac_llvm_context *ctx,
LLVMValueRef src0)
{
LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
- LLVMValueRef temps[2], result, val;
+ LLVMValueRef temps[2], val;
int i;
for (i = 0; i < 2; i++) {
val = LLVMBuildBitCast(ctx->builder, val, ctx->f16, "");
temps[i] = LLVMBuildFPExt(ctx->builder, val, ctx->f32, "");
}
-
- result = LLVMBuildInsertElement(ctx->builder, LLVMGetUndef(ctx->v2f32), temps[0],
- ctx->i32_0, "");
- result = LLVMBuildInsertElement(ctx->builder, result, temps[1],
- ctx->i32_1, "");
- return result;
+ return ac_build_gather_values(ctx, temps, 2);
}
static LLVMValueRef emit_ddxy(struct ac_nir_context *ctx,
LLVMTypeOf(src[0]), ""),
"");
break;
- case nir_op_ilt:
+ case nir_op_ilt32:
result = emit_int_cmp(&ctx->ac, LLVMIntSLT, src[0], src[1]);
break;
- case nir_op_ine:
+ case nir_op_ine32:
result = emit_int_cmp(&ctx->ac, LLVMIntNE, src[0], src[1]);
break;
- case nir_op_ieq:
+ case nir_op_ieq32:
result = emit_int_cmp(&ctx->ac, LLVMIntEQ, src[0], src[1]);
break;
- case nir_op_ige:
+ case nir_op_ige32:
result = emit_int_cmp(&ctx->ac, LLVMIntSGE, src[0], src[1]);
break;
- case nir_op_ult:
+ case nir_op_ult32:
result = emit_int_cmp(&ctx->ac, LLVMIntULT, src[0], src[1]);
break;
- case nir_op_uge:
+ case nir_op_uge32:
result = emit_int_cmp(&ctx->ac, LLVMIntUGE, src[0], src[1]);
break;
- case nir_op_feq:
+ case nir_op_feq32:
result = emit_float_cmp(&ctx->ac, LLVMRealOEQ, src[0], src[1]);
break;
- case nir_op_fne:
+ case nir_op_fne32:
result = emit_float_cmp(&ctx->ac, LLVMRealUNE, src[0], src[1]);
break;
- case nir_op_flt:
+ case nir_op_flt32:
result = emit_float_cmp(&ctx->ac, LLVMRealOLT, src[0], src[1]);
break;
- case nir_op_fge:
+ case nir_op_fge32:
result = emit_float_cmp(&ctx->ac, LLVMRealOGE, src[0], src[1]);
break;
case nir_op_fabs:
result = ac_build_fdiv(&ctx->ac, instr->dest.dest.ssa.bit_size == 32 ? ctx->ac.f32_1 : ctx->ac.f64_1,
result);
break;
+ case nir_op_frexp_exp:
+ src[0] = ac_to_float(&ctx->ac, src[0]);
+ result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.frexp.exp.i32.f64",
+ ctx->ac.i32, src, 1, AC_FUNC_ATTR_READNONE);
+
+ break;
+ case nir_op_frexp_sig:
+ src[0] = ac_to_float(&ctx->ac, src[0]);
+ result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.frexp.mant.f64",
+ ctx->ac.f64, src, 1, AC_FUNC_ATTR_READNONE);
+ break;
case nir_op_fmax:
result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
result = emit_bitfield_insert(&ctx->ac, src[0], src[1], src[2], src[3]);
break;
case nir_op_bitfield_reverse:
- result = ac_build_intrinsic(&ctx->ac, "llvm.bitreverse.i32", ctx->ac.i32, src, 1, AC_FUNC_ATTR_READNONE);
+ result = ac_build_bitfield_reverse(&ctx->ac, src[0]);
break;
case nir_op_bit_count:
- if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) == 32)
- result = ac_build_intrinsic(&ctx->ac, "llvm.ctpop.i32", ctx->ac.i32, src, 1, AC_FUNC_ATTR_READNONE);
- else {
- result = ac_build_intrinsic(&ctx->ac, "llvm.ctpop.i64", ctx->ac.i64, src, 1, AC_FUNC_ATTR_READNONE);
- result = LLVMBuildTrunc(ctx->ac.builder, result, ctx->ac.i32, "");
- }
+ result = ac_build_bit_count(&ctx->ac, src[0]);
break;
case nir_op_vec2:
case nir_op_vec3:
src[i] = ac_to_integer(&ctx->ac, src[i]);
result = ac_build_gather_values(&ctx->ac, src, num_components);
break;
+ case nir_op_f2i16:
case nir_op_f2i32:
case nir_op_f2i64:
src[0] = ac_to_float(&ctx->ac, src[0]);
result = LLVMBuildFPToSI(ctx->ac.builder, src[0], def_type, "");
break;
+ case nir_op_f2u16:
case nir_op_f2u32:
case nir_op_f2u64:
src[0] = ac_to_float(&ctx->ac, src[0]);
result = LLVMBuildFPToUI(ctx->ac.builder, src[0], def_type, "");
break;
+ case nir_op_i2f16:
case nir_op_i2f32:
case nir_op_i2f64:
src[0] = ac_to_integer(&ctx->ac, src[0]);
result = LLVMBuildSIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
break;
+ case nir_op_u2f16:
case nir_op_u2f32:
case nir_op_u2f64:
src[0] = ac_to_integer(&ctx->ac, src[0]);
result = LLVMBuildUIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
break;
- case nir_op_f2f64:
+ case nir_op_f2f16_rtz:
src[0] = ac_to_float(&ctx->ac, src[0]);
- result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
+ LLVMValueRef param[2] = { src[0], ctx->ac.f32_0 };
+ result = ac_build_cvt_pkrtz_f16(&ctx->ac, param);
+ result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
break;
+ case nir_op_f2f16_rtne:
+ case nir_op_f2f16:
case nir_op_f2f32:
+ case nir_op_f2f64:
src[0] = ac_to_float(&ctx->ac, src[0]);
- result = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
+ if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
+ result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
+ else
+ result = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
break;
+ case nir_op_u2u16:
case nir_op_u2u32:
case nir_op_u2u64:
src[0] = ac_to_integer(&ctx->ac, src[0]);
else
result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
break;
+ case nir_op_i2i16:
case nir_op_i2i32:
case nir_op_i2i64:
src[0] = ac_to_integer(&ctx->ac, src[0]);
else
result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
break;
- case nir_op_bcsel:
+ case nir_op_b32csel:
result = emit_bcsel(&ctx->ac, src[0], src[1], src[2]);
break;
case nir_op_find_lsb:
src[1] = ac_to_integer(&ctx->ac, src[1]);
result = emit_uint_carry(&ctx->ac, "llvm.usub.with.overflow.i32", src[0], src[1]);
break;
- case nir_op_b2f:
- result = emit_b2f(&ctx->ac, src[0]);
+ case nir_op_b2f16:
+ case nir_op_b2f32:
+ case nir_op_b2f64:
+ result = emit_b2f(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
break;
- case nir_op_f2b:
+ case nir_op_f2b32:
result = emit_f2b(&ctx->ac, src[0]);
break;
- case nir_op_b2i:
+ case nir_op_b2i16:
+ case nir_op_b2i32:
+ case nir_op_b2i64:
result = emit_b2i(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
break;
- case nir_op_i2b:
+ case nir_op_i2b32:
src[0] = ac_to_integer(&ctx->ac, src[0]);
result = emit_i2b(&ctx->ac, src[0]);
break;
case nir_op_pack_64_2x32_split: {
LLVMValueRef tmp = LLVMGetUndef(ctx->ac.v2i32);
- tmp = LLVMBuildInsertElement(ctx->ac.builder, tmp,
- src[0], ctx->ac.i32_0, "");
- tmp = LLVMBuildInsertElement(ctx->ac.builder, tmp,
- src[1], ctx->ac.i32_1, "");
+ tmp = ac_build_gather_values(&ctx->ac, src, 2);
result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i64, "");
break;
}
break;
}
+ case nir_op_fmin3:
+ result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
+ ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
+ result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
+ ac_to_float_type(&ctx->ac, def_type), result, src[2]);
+ break;
+ case nir_op_umin3:
+ result = emit_minmax_int(&ctx->ac, LLVMIntULT, src[0], src[1]);
+ result = emit_minmax_int(&ctx->ac, LLVMIntULT, result, src[2]);
+ break;
+ case nir_op_imin3:
+ result = emit_minmax_int(&ctx->ac, LLVMIntSLT, src[0], src[1]);
+ result = emit_minmax_int(&ctx->ac, LLVMIntSLT, result, src[2]);
+ break;
+ case nir_op_fmax3:
+ result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
+ ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
+ result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
+ ac_to_float_type(&ctx->ac, def_type), result, src[2]);
+ break;
+ case nir_op_umax3:
+ result = emit_minmax_int(&ctx->ac, LLVMIntUGT, src[0], src[1]);
+ result = emit_minmax_int(&ctx->ac, LLVMIntUGT, result, src[2]);
+ break;
+ case nir_op_imax3:
+ result = emit_minmax_int(&ctx->ac, LLVMIntSGT, src[0], src[1]);
+ result = emit_minmax_int(&ctx->ac, LLVMIntSGT, result, src[2]);
+ break;
+ case nir_op_fmed3: {
+ LLVMValueRef tmp1 = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
+ ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
+ LLVMValueRef tmp2 = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
+ ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
+ tmp2 = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
+ ac_to_float_type(&ctx->ac, def_type), tmp2, src[2]);
+ result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
+ ac_to_float_type(&ctx->ac, def_type), tmp1, tmp2);
+ break;
+ }
+ case nir_op_imed3: {
+ LLVMValueRef tmp1 = emit_minmax_int(&ctx->ac, LLVMIntSLT, src[0], src[1]);
+ LLVMValueRef tmp2 = emit_minmax_int(&ctx->ac, LLVMIntSGT, src[0], src[1]);
+ tmp2 = emit_minmax_int(&ctx->ac, LLVMIntSLT, tmp2, src[2]);
+ result = emit_minmax_int(&ctx->ac, LLVMIntSGT, tmp1, tmp2);
+ break;
+ }
+ case nir_op_umed3: {
+ LLVMValueRef tmp1 = emit_minmax_int(&ctx->ac, LLVMIntULT, src[0], src[1]);
+ LLVMValueRef tmp2 = emit_minmax_int(&ctx->ac, LLVMIntUGT, src[0], src[1]);
+ tmp2 = emit_minmax_int(&ctx->ac, LLVMIntULT, tmp2, src[2]);
+ result = emit_minmax_int(&ctx->ac, LLVMIntUGT, tmp1, tmp2);
+ break;
+ }
+
default:
fprintf(stderr, "Unknown NIR alu instr: ");
nir_print_instr(&instr->instr, stderr);
if (result) {
assert(instr->dest.dest.is_ssa);
- result = ac_to_integer(&ctx->ac, result);
- _mesa_hash_table_insert(ctx->defs, &instr->dest.dest.ssa,
- result);
+ result = ac_to_integer_or_pointer(&ctx->ac, result);
+ ctx->ssa_defs[instr->dest.dest.ssa.index] = result;
}
}
for (unsigned i = 0; i < instr->def.num_components; ++i) {
switch (instr->def.bit_size) {
+ case 16:
+ values[i] = LLVMConstInt(element_type,
+ instr->value.u16[i], false);
+ break;
case 32:
values[i] = LLVMConstInt(element_type,
instr->value.u32[i], false);
} else
value = values[0];
- _mesa_hash_table_insert(ctx->defs, &instr->def, value);
+ ctx->ssa_defs[instr->def.index] = value;
}
static LLVMValueRef
return size;
}
-/**
- * Given the i32 or vNi32 \p type, generate the textual name (e.g. for use with
- * intrinsic names).
- */
-static void build_int_type_name(
- LLVMTypeRef type,
- char *buf, unsigned bufsize)
-{
- assert(bufsize >= 6);
-
- if (LLVMGetTypeKind(type) == LLVMVectorTypeKind)
- snprintf(buf, bufsize, "v%ui32",
- LLVMGetVectorSize(type));
- else
- strcpy(buf, "i32");
-}
-
static LLVMValueRef lower_gather4_integer(struct ac_llvm_context *ctx,
+ nir_variable *var,
struct ac_image_args *args,
const nir_tex_instr *instr)
{
- enum glsl_base_type stype = glsl_get_sampler_result_type(instr->texture->var->type);
- LLVMValueRef coord = args->addr;
+ const struct glsl_type *type = glsl_without_array(var->type);
+ enum glsl_base_type stype = glsl_get_sampler_result_type(type);
LLVMValueRef half_texel[2];
LLVMValueRef compare_cube_wa = NULL;
LLVMValueRef result;
- int c;
- unsigned coord_vgpr_index = (unsigned)args->offset + (unsigned)args->compare;
//TODO Rect
{
struct ac_image_args txq_args = { 0 };
- txq_args.da = instr->is_array || instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE;
+ txq_args.dim = get_ac_sampler_dim(ctx, instr->sampler_dim, instr->is_array);
txq_args.opcode = ac_image_get_resinfo;
txq_args.dmask = 0xf;
- txq_args.addr = ctx->i32_0;
+ txq_args.lod = ctx->i32_0;
txq_args.resource = args->resource;
+ txq_args.attributes = AC_FUNC_ATTR_READNONE;
LLVMValueRef size = ac_build_image_opcode(ctx, &txq_args);
- for (c = 0; c < 2; c++) {
+ for (unsigned c = 0; c < 2; c++) {
half_texel[c] = LLVMBuildExtractElement(ctx->builder, size,
LLVMConstInt(ctx->i32, c, false), "");
half_texel[c] = LLVMBuildUIToFP(ctx->builder, half_texel[c], ctx->f32, "");
}
}
- LLVMValueRef orig_coords = args->addr;
+ LLVMValueRef orig_coords[2] = { args->coords[0], args->coords[1] };
- for (c = 0; c < 2; c++) {
+ for (unsigned c = 0; c < 2; c++) {
LLVMValueRef tmp;
- LLVMValueRef index = LLVMConstInt(ctx->i32, coord_vgpr_index + c, 0);
- tmp = LLVMBuildExtractElement(ctx->builder, coord, index, "");
- tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->f32, "");
- tmp = LLVMBuildFAdd(ctx->builder, tmp, half_texel[c], "");
- tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->i32, "");
- coord = LLVMBuildInsertElement(ctx->builder, coord, tmp, index, "");
+ tmp = LLVMBuildBitCast(ctx->builder, args->coords[c], ctx->f32, "");
+ args->coords[c] = LLVMBuildFAdd(ctx->builder, tmp, half_texel[c], "");
}
-
/*
* Apparantly cube has issue with integer types that the workaround doesn't solve,
* so this tests if the format is 8_8_8_8 and an integer type do an alternate
args->resource = LLVMBuildInsertElement(ctx->builder, args->resource, tmp2, ctx->i32_1, "");
/* don't modify the coordinates for this case */
- coord = LLVMBuildSelect(ctx->builder, compare_cube_wa, orig_coords, coord, "");
+ for (unsigned c = 0; c < 2; ++c)
+ args->coords[c] = LLVMBuildSelect(
+ ctx->builder, compare_cube_wa,
+ orig_coords[c], args->coords[c], "");
}
- args->addr = coord;
+
+ args->attributes = AC_FUNC_ATTR_READNONE;
result = ac_build_image_opcode(ctx, args);
if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
LLVMValueRef tmp, tmp2;
/* if the cube workaround is in place, f2i the result. */
- for (c = 0; c < 4; c++) {
+ for (unsigned c = 0; c < 4; c++) {
tmp = LLVMBuildExtractElement(ctx->builder, result, LLVMConstInt(ctx->i32, c, false), "");
if (stype == GLSL_TYPE_UINT)
tmp2 = LLVMBuildFPToUI(ctx->builder, tmp, ctx->i32, "");
return result;
}
+/* Return the nir_deref_instr naming the texture of a tex instruction,
+ * or NULL when the instruction carries no nir_tex_src_texture_deref
+ * source.
+ */
+static nir_deref_instr *get_tex_texture_deref(const nir_tex_instr *instr)
+{
+	nir_deref_instr *texture_deref_instr = NULL;
+
+	for (unsigned i = 0; i < instr->num_srcs; i++) {
+		switch (instr->src[i].src_type) {
+		case nir_tex_src_texture_deref:
+			texture_deref_instr = nir_src_as_deref(instr->src[i].src);
+			break;
+		default:
+			break;
+		}
+	}
+	return texture_deref_instr;
+}
+
static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx,
const nir_tex_instr *instr,
- bool lod_is_zero,
struct ac_image_args *args)
{
if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
- return ac_build_buffer_load_format(&ctx->ac,
- args->resource,
- args->addr,
- ctx->ac.i32_0,
- util_last_bit(mask),
- false, true);
+ if (ctx->abi->gfx9_stride_size_workaround) {
+ return ac_build_buffer_load_format_gfx9_safe(&ctx->ac,
+ args->resource,
+ args->coords[0],
+ ctx->ac.i32_0,
+ util_last_bit(mask),
+ false, true);
+ } else {
+ return ac_build_buffer_load_format(&ctx->ac,
+ args->resource,
+ args->coords[0],
+ ctx->ac.i32_0,
+ util_last_bit(mask),
+ false, true);
+ }
}
args->opcode = ac_image_sample;
- args->compare = instr->is_shadow;
switch (instr->op) {
case nir_texop_txf:
case nir_texop_txf_ms:
case nir_texop_samples_identical:
- args->opcode = lod_is_zero ||
+ args->opcode = args->level_zero ||
instr->sampler_dim == GLSL_SAMPLER_DIM_MS ?
ac_image_load : ac_image_load_mip;
- args->compare = false;
- args->offset = false;
- break;
- case nir_texop_txb:
- args->bias = true;
- break;
- case nir_texop_txl:
- if (lod_is_zero)
- args->level_zero = true;
- else
- args->lod = true;
+ args->level_zero = false;
break;
case nir_texop_txs:
case nir_texop_query_levels:
args->opcode = ac_image_get_resinfo;
+ if (!args->lod)
+ args->lod = ctx->ac.i32_0;
+ args->level_zero = false;
break;
case nir_texop_tex:
- if (ctx->stage != MESA_SHADER_FRAGMENT)
+ if (ctx->stage != MESA_SHADER_FRAGMENT) {
+ assert(!args->lod);
args->level_zero = true;
- break;
- case nir_texop_txd:
- args->deriv = true;
+ }
break;
case nir_texop_tg4:
args->opcode = ac_image_gather4;
break;
case nir_texop_lod:
args->opcode = ac_image_get_lod;
- args->compare = false;
- args->offset = false;
break;
default:
break;
}
if (instr->op == nir_texop_tg4 && ctx->ac.chip_class <= VI) {
- enum glsl_base_type stype = glsl_get_sampler_result_type(instr->texture->var->type);
+ nir_deref_instr *texture_deref_instr = get_tex_texture_deref(instr);
+ nir_variable *var = nir_deref_instr_get_variable(texture_deref_instr);
+ const struct glsl_type *type = glsl_without_array(var->type);
+ enum glsl_base_type stype = glsl_get_sampler_result_type(type);
if (stype == GLSL_TYPE_UINT || stype == GLSL_TYPE_INT) {
- return lower_gather4_integer(&ctx->ac, args, instr);
+ return lower_gather4_integer(&ctx->ac, var, args, instr);
+ }
+ }
+
+ /* Fixup for GFX9 which allocates 1D textures as 2D. */
+ if (instr->op == nir_texop_lod && ctx->ac.chip_class >= GFX9) {
+ if ((args->dim == ac_image_2darray ||
+ args->dim == ac_image_2d) && !args->coords[1]) {
+ args->coords[1] = ctx->ac.i32_0;
}
}
+
+ args->attributes = AC_FUNC_ATTR_READNONE;
return ac_build_image_opcode(&ctx->ac, args);
}
get_src(ctx, instr->src[0]), "");
ptr = ac_build_gep0(&ctx->ac, ctx->abi->push_constants, addr);
+
+ if (instr->dest.ssa.bit_size == 16) {
+ unsigned load_dwords = instr->dest.ssa.num_components / 2 + 1;
+ LLVMTypeRef vec_type = LLVMVectorType(LLVMInt16TypeInContext(ctx->ac.context), 2 * load_dwords);
+ ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
+ LLVMValueRef res = LLVMBuildLoad(ctx->ac.builder, ptr, "");
+ res = LLVMBuildBitCast(ctx->ac.builder, res, vec_type, "");
+ LLVMValueRef cond = LLVMBuildLShr(ctx->ac.builder, addr, ctx->ac.i32_1, "");
+ cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->ac.i1, "");
+ LLVMValueRef mask[] = { LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
+ LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
+ LLVMConstInt(ctx->ac.i32, 4, false)};
+ LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->dest.ssa.num_components);
+ LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->dest.ssa.num_components);
+ LLVMValueRef shuffle_aligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_aligned, "");
+ LLVMValueRef shuffle_unaligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_unaligned, "");
+ res = LLVMBuildSelect(ctx->ac.builder, cond, shuffle_unaligned, shuffle_aligned, "");
+ return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), "");
+ }
+
ptr = ac_cast_ptr(&ctx->ac, ptr, get_def_type(ctx, &instr->dest.ssa));
return LLVMBuildLoad(ctx->ac.builder, ptr, "");
+/* Return components [start, start + count) of src. Returns src itself
+ * when the whole value is requested, a scalar ExtractElement for
+ * count == 1, and a ShuffleVector otherwise; at most 4 components are
+ * supported (mask[] has 4 entries).
+ */
static LLVMValueRef extract_vector_range(struct ac_llvm_context *ctx, LLVMValueRef src,
					 unsigned start, unsigned count)
{
-	LLVMTypeRef type = LLVMTypeOf(src);
+	LLVMValueRef mask[] = {
+	ctx->i32_0, ctx->i32_1,
+	LLVMConstInt(ctx->i32, 2, false), LLVMConstInt(ctx->i32, 3, false) };
-	if (LLVMGetTypeKind(type) != LLVMVectorTypeKind) {
+	unsigned src_elements = ac_get_llvm_num_components(src);
+
+	if (count == src_elements) {
		assert(start == 0);
-		assert(count == 1);
		return src;
+	} else if (count == 1) {
+		assert(start < src_elements);
+		return LLVMBuildExtractElement(ctx->builder, src, mask[start], "");
+	} else {
+		assert(start + count <= src_elements);
+		assert(count <= 4);
+		LLVMValueRef swizzle = LLVMConstVector(&mask[start], count);
+		return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
	}
-
-	unsigned src_elements = LLVMGetVectorSize(type);
-	assert(start < src_elements);
-	assert(start + count <= src_elements);
-
-	if (start == 0 && count == src_elements)
-		return src;
-
-	if (count == 1)
-		return LLVMBuildExtractElement(ctx->builder, src, LLVMConstInt(ctx->i32, start, false), "");
-
-	assert(count <= 8);
-	LLVMValueRef indices[8];
-	for (unsigned i = 0; i < count; ++i)
-		indices[i] = LLVMConstInt(ctx->i32, start + i, false);
-
-	LLVMValueRef swizzle = LLVMConstVector(indices, count);
-	return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
}
static void visit_store_ssbo(struct ac_nir_context *ctx,
{
const char *store_name;
LLVMValueRef src_data = get_src(ctx, instr->src[0]);
- LLVMTypeRef data_type = ctx->ac.f32;
- int elem_size_mult = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 32;
- int components_32bit = elem_size_mult * instr->num_components;
+ int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 8;
unsigned writemask = nir_intrinsic_write_mask(instr);
- LLVMValueRef base_data, base_offset;
- LLVMValueRef params[6];
-
- params[1] = ctx->abi->load_ssbo(ctx->abi,
- get_src(ctx, instr->src[1]), true);
- params[2] = ctx->ac.i32_0; /* vindex */
- params[4] = ctx->ac.i1false; /* glc */
- params[5] = ctx->ac.i1false; /* slc */
-
- if (components_32bit > 1)
- data_type = LLVMVectorType(ctx->ac.f32, components_32bit);
+ enum gl_access_qualifier access = nir_intrinsic_access(instr);
+ LLVMValueRef glc = ctx->ac.i1false;
- writemask = widen_mask(writemask, elem_size_mult);
+ if (access & (ACCESS_VOLATILE | ACCESS_COHERENT))
+ glc = ctx->ac.i1true;
- base_data = ac_to_float(&ctx->ac, src_data);
+ LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi,
+ get_src(ctx, instr->src[1]), true);
+ LLVMValueRef base_data = ac_to_float(&ctx->ac, src_data);
base_data = ac_trim_vector(&ctx->ac, base_data, instr->num_components);
- base_data = LLVMBuildBitCast(ctx->ac.builder, base_data,
- data_type, "");
- base_offset = get_src(ctx, instr->src[2]); /* voffset */
+ LLVMValueRef base_offset = get_src(ctx, instr->src[2]);
+
while (writemask) {
int start, count;
- LLVMValueRef data;
- LLVMValueRef offset;
+ LLVMValueRef data, offset;
+ LLVMTypeRef data_type;
u_bit_scan_consecutive_range(&writemask, &start, &count);
writemask |= 1 << (start + 2);
count = 2;
}
+ int num_bytes = count * elem_size_bytes; /* count in bytes */
- if (count > 4) {
- writemask |= ((1u << (count - 4)) - 1u) << (start + 4);
- count = 4;
+ /* we can only store 4 DWords at the same time.
+ * can only happen for 64 Bit vectors. */
+ if (num_bytes > 16) {
+ writemask |= ((1u << (count - 2)) - 1u) << (start + 2);
+ count = 2;
+ num_bytes = 16;
}
- if (count == 4) {
- store_name = "llvm.amdgcn.buffer.store.v4f32";
- } else if (count == 2) {
- store_name = "llvm.amdgcn.buffer.store.v2f32";
-
- } else {
- assert(count == 1);
- store_name = "llvm.amdgcn.buffer.store.f32";
+ /* check alignment of 16 Bit stores */
+ if (elem_size_bytes == 2 && num_bytes > 2 && (start % 2) == 1) {
+ writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
+ count = 1;
+ num_bytes = 2;
}
data = extract_vector_range(&ctx->ac, base_data, start, count);
- offset = base_offset;
- if (start != 0) {
- offset = LLVMBuildAdd(ctx->ac.builder, offset, LLVMConstInt(ctx->ac.i32, start * 4, false), "");
+ if (start == 0) {
+ offset = base_offset;
+ } else {
+ offset = LLVMBuildAdd(ctx->ac.builder, base_offset,
+ LLVMConstInt(ctx->ac.i32, start * elem_size_bytes, false), "");
+ }
+ if (num_bytes == 2) {
+ store_name = "llvm.amdgcn.tbuffer.store.i32";
+ data_type = ctx->ac.i32;
+ LLVMValueRef tbuffer_params[] = {
+ data,
+ rsrc,
+ ctx->ac.i32_0, /* vindex */
+ offset, /* voffset */
+ ctx->ac.i32_0,
+ ctx->ac.i32_0,
+ LLVMConstInt(ctx->ac.i32, 2, false), // dfmt (= 16bit)
+ LLVMConstInt(ctx->ac.i32, 4, false), // nfmt (= uint)
+ glc,
+ ctx->ac.i1false,
+ };
+ ac_build_intrinsic(&ctx->ac, store_name,
+ ctx->ac.voidt, tbuffer_params, 10, 0);
+ } else {
+ switch (num_bytes) {
+ case 16: /* v4f32 */
+ store_name = "llvm.amdgcn.buffer.store.v4f32";
+ data_type = ctx->ac.v4f32;
+ break;
+ case 8: /* v2f32 */
+ store_name = "llvm.amdgcn.buffer.store.v2f32";
+ data_type = ctx->ac.v2f32;
+ break;
+ case 4: /* f32 */
+ store_name = "llvm.amdgcn.buffer.store.f32";
+ data_type = ctx->ac.f32;
+ break;
+ default:
+ unreachable("Malformed vector store.");
+ }
+ data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");
+ LLVMValueRef params[] = {
+ data,
+ rsrc,
+ ctx->ac.i32_0, /* vindex */
+ offset,
+ glc,
+ ctx->ac.i1false, /* slc */
+ };
+ ac_build_intrinsic(&ctx->ac, store_name,
+ ctx->ac.voidt, params, 6, 0);
}
- params[0] = data;
- params[3] = offset;
- ac_build_intrinsic(&ctx->ac, store_name,
- ctx->ac.voidt, params, 6, 0);
}
}
true);
params[arg_count++] = ctx->ac.i32_0; /* vindex */
params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
- params[arg_count++] = LLVMConstInt(ctx->ac.i1, 0, false); /* slc */
+ params[arg_count++] = ctx->ac.i1false; /* slc */
switch (instr->intrinsic) {
case nir_intrinsic_ssbo_atomic_add:
static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx,
const nir_intrinsic_instr *instr)
{
- LLVMValueRef results[2];
- int load_components;
+ int elem_size_bytes = instr->dest.ssa.bit_size / 8;
int num_components = instr->num_components;
- if (instr->dest.ssa.bit_size == 64)
- num_components *= 2;
+ enum gl_access_qualifier access = nir_intrinsic_access(instr);
+ LLVMValueRef glc = ctx->ac.i1false;
- for (int i = 0; i < num_components; i += load_components) {
- load_components = MIN2(num_components - i, 4);
- const char *load_name;
- LLVMTypeRef data_type = ctx->ac.f32;
- LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, i * 4, false);
- offset = LLVMBuildAdd(ctx->ac.builder, get_src(ctx, instr->src[1]), offset, "");
-
- if (load_components == 3)
- data_type = LLVMVectorType(ctx->ac.f32, 4);
- else if (load_components > 1)
- data_type = LLVMVectorType(ctx->ac.f32, load_components);
-
- if (load_components >= 3)
- load_name = "llvm.amdgcn.buffer.load.v4f32";
- else if (load_components == 2)
- load_name = "llvm.amdgcn.buffer.load.v2f32";
- else if (load_components == 1)
- load_name = "llvm.amdgcn.buffer.load.f32";
- else
- unreachable("unhandled number of components");
-
- LLVMValueRef params[] = {
- ctx->abi->load_ssbo(ctx->abi,
- get_src(ctx, instr->src[0]),
- false),
- ctx->ac.i32_0,
- offset,
- ctx->ac.i1false,
- ctx->ac.i1false,
- };
+ if (access & (ACCESS_VOLATILE | ACCESS_COHERENT))
+ glc = ctx->ac.i1true;
- results[i > 0 ? 1 : 0] = ac_build_intrinsic(&ctx->ac, load_name, data_type, params, 5, 0);
- }
+ LLVMValueRef offset = get_src(ctx, instr->src[1]);
+ LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi,
+ get_src(ctx, instr->src[0]), false);
+ LLVMValueRef vindex = ctx->ac.i32_0;
+
+ LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.ssa);
+ LLVMTypeRef def_elem_type = num_components > 1 ? LLVMGetElementType(def_type) : def_type;
+
+ LLVMValueRef results[4];
+ for (int i = 0; i < num_components;) {
+ int num_elems = num_components - i;
+ if (elem_size_bytes < 4 && nir_intrinsic_align(instr) % 4 != 0)
+ num_elems = 1;
+ if (num_elems * elem_size_bytes > 16)
+ num_elems = 16 / elem_size_bytes;
+ int load_bytes = num_elems * elem_size_bytes;
+
+ LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32, i * elem_size_bytes, false);
+
+ LLVMValueRef ret;
+ if (load_bytes == 2) {
+ ret = ac_build_tbuffer_load_short(&ctx->ac,
+ rsrc,
+ vindex,
+ offset,
+ ctx->ac.i32_0,
+ immoffset,
+ glc);
+ } else {
+ const char *load_name;
+ LLVMTypeRef data_type;
+ switch (load_bytes) {
+ case 16:
+ case 12:
+ load_name = "llvm.amdgcn.buffer.load.v4f32";
+ data_type = ctx->ac.v4f32;
+ break;
+ case 8:
+ case 6:
+ load_name = "llvm.amdgcn.buffer.load.v2f32";
+ data_type = ctx->ac.v2f32;
+ break;
+ case 4:
+ load_name = "llvm.amdgcn.buffer.load.f32";
+ data_type = ctx->ac.f32;
+ break;
+ default:
+ unreachable("Malformed load buffer.");
+ }
+ LLVMValueRef params[] = {
+ rsrc,
+ vindex,
+ LLVMBuildAdd(ctx->ac.builder, offset, immoffset, ""),
+ glc,
+ ctx->ac.i1false,
+ };
+ ret = ac_build_intrinsic(&ctx->ac, load_name, data_type, params, 5, 0);
+ }
- assume(results[0]);
- LLVMValueRef ret = results[0];
- if (num_components > 4 || num_components == 3) {
- LLVMValueRef masks[] = {
- LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
- LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
- LLVMConstInt(ctx->ac.i32, 4, false), LLVMConstInt(ctx->ac.i32, 5, false),
- LLVMConstInt(ctx->ac.i32, 6, false), LLVMConstInt(ctx->ac.i32, 7, false)
- };
+ LLVMTypeRef byte_vec = LLVMVectorType(ctx->ac.i8, ac_get_type_size(LLVMTypeOf(ret)));
+ ret = LLVMBuildBitCast(ctx->ac.builder, ret, byte_vec, "");
+ ret = ac_trim_vector(&ctx->ac, ret, load_bytes);
+
+ LLVMTypeRef ret_type = LLVMVectorType(def_elem_type, num_elems);
+ ret = LLVMBuildBitCast(ctx->ac.builder, ret, ret_type, "");
- LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
- ret = LLVMBuildShuffleVector(ctx->ac.builder, results[0],
- results[num_components > 4 ? 1 : 0], swizzle, "");
+ for (unsigned j = 0; j < num_elems; j++) {
+ results[i + j] = LLVMBuildExtractElement(ctx->ac.builder, ret, LLVMConstInt(ctx->ac.i32, j, false), "");
+ }
+ i += num_elems;
}
- return LLVMBuildBitCast(ctx->ac.builder, ret,
- get_def_type(ctx, &instr->dest.ssa), "");
+ return ac_build_gather_values(&ctx->ac, results, num_components);
}
static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx,
if (instr->dest.ssa.bit_size == 64)
num_components *= 2;
- ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset,
- NULL, 0, false, false, true, true);
- ret = ac_trim_vector(&ctx->ac, ret, num_components);
+ if (instr->dest.ssa.bit_size == 16) {
+ LLVMValueRef results[num_components];
+ for (unsigned i = 0; i < num_components; ++i) {
+ results[i] = ac_build_tbuffer_load_short(&ctx->ac,
+ rsrc,
+ ctx->ac.i32_0,
+ offset,
+ ctx->ac.i32_0,
+ LLVMConstInt(ctx->ac.i32, 2 * i, 0),
+ ctx->ac.i1false);
+ }
+ ret = ac_build_gather_values(&ctx->ac, results, num_components);
+ } else {
+ ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset,
+ NULL, 0, false, false, true, true);
+
+ ret = ac_trim_vector(&ctx->ac, ret, num_components);
+ }
+
return LLVMBuildBitCast(ctx->ac.builder, ret,
get_def_type(ctx, &instr->dest.ssa), "");
}
static void
-get_deref_offset(struct ac_nir_context *ctx, nir_deref_var *deref,
- bool vs_in, unsigned *vertex_index_out,
- LLVMValueRef *vertex_index_ref,
- unsigned *const_out, LLVMValueRef *indir_out)
+get_deref_offset(struct ac_nir_context *ctx, nir_deref_instr *instr,
+ bool vs_in, unsigned *vertex_index_out,
+ LLVMValueRef *vertex_index_ref,
+ unsigned *const_out, LLVMValueRef *indir_out)
{
- unsigned const_offset = 0;
- nir_deref *tail = &deref->deref;
- LLVMValueRef offset = NULL;
+ nir_variable *var = nir_deref_instr_get_variable(instr);
+ nir_deref_path path;
+ unsigned idx_lvl = 1;
- if (vertex_index_out != NULL || vertex_index_ref != NULL) {
- tail = tail->child;
- nir_deref_array *deref_array = nir_deref_as_array(tail);
- if (vertex_index_out)
- *vertex_index_out = deref_array->base_offset;
+ nir_deref_path_init(&path, instr, NULL);
+ if (vertex_index_out != NULL || vertex_index_ref != NULL) {
if (vertex_index_ref) {
- LLVMValueRef vtx = LLVMConstInt(ctx->ac.i32, deref_array->base_offset, false);
- if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
- vtx = LLVMBuildAdd(ctx->ac.builder, vtx, get_src(ctx, deref_array->indirect), "");
- }
- *vertex_index_ref = vtx;
+ *vertex_index_ref = get_src(ctx, path.path[idx_lvl]->arr.index);
+ if (vertex_index_out)
+ *vertex_index_out = 0;
+ } else {
+ nir_const_value *v = nir_src_as_const_value(path.path[idx_lvl]->arr.index);
+ assert(v);
+ *vertex_index_out = v->u32[0];
}
+ ++idx_lvl;
}
- if (deref->var->data.compact) {
- assert(tail->child->deref_type == nir_deref_type_array);
- assert(glsl_type_is_scalar(glsl_without_array(deref->var->type)));
- nir_deref_array *deref_array = nir_deref_as_array(tail->child);
- /* We always lower indirect dereferences for "compact" array vars. */
- assert(deref_array->deref_array_type == nir_deref_array_type_direct);
+ uint32_t const_offset = 0;
+ LLVMValueRef offset = NULL;
- const_offset = deref_array->base_offset;
+ if (var->data.compact) {
+ assert(instr->deref_type == nir_deref_type_array);
+ nir_const_value *v = nir_src_as_const_value(instr->arr.index);
+ assert(v);
+ const_offset = v->u32[0];
goto out;
}
- while (tail->child != NULL) {
- const struct glsl_type *parent_type = tail->type;
- tail = tail->child;
-
- if (tail->deref_type == nir_deref_type_array) {
- nir_deref_array *deref_array = nir_deref_as_array(tail);
- LLVMValueRef index, stride, local_offset;
- unsigned size = glsl_count_attribute_slots(tail->type, vs_in);
-
- const_offset += size * deref_array->base_offset;
- if (deref_array->deref_array_type == nir_deref_array_type_direct)
- continue;
-
- assert(deref_array->deref_array_type == nir_deref_array_type_indirect);
- index = get_src(ctx, deref_array->indirect);
- stride = LLVMConstInt(ctx->ac.i32, size, 0);
- local_offset = LLVMBuildMul(ctx->ac.builder, stride, index, "");
-
- if (offset)
- offset = LLVMBuildAdd(ctx->ac.builder, offset, local_offset, "");
- else
- offset = local_offset;
- } else if (tail->deref_type == nir_deref_type_struct) {
- nir_deref_struct *deref_struct = nir_deref_as_struct(tail);
+ for (; path.path[idx_lvl]; ++idx_lvl) {
+ const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
+ if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
+ unsigned index = path.path[idx_lvl]->strct.index;
- for (unsigned i = 0; i < deref_struct->index; i++) {
+ for (unsigned i = 0; i < index; i++) {
const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
const_offset += glsl_count_attribute_slots(ft, vs_in);
}
+ } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
+ unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
+ LLVMValueRef array_off = LLVMBuildMul(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, size, 0),
+ get_src(ctx, path.path[idx_lvl]->arr.index), "");
+ if (offset)
+ offset = LLVMBuildAdd(ctx->ac.builder, offset, array_off, "");
+ else
+ offset = array_off;
} else
- unreachable("unsupported deref type");
-
+ unreachable("Unhandled deref type in get_deref_offset");
}
+
out:
+ nir_deref_path_finish(&path);
+
if (const_offset && offset)
offset = LLVMBuildAdd(ctx->ac.builder, offset,
LLVMConstInt(ctx->ac.i32, const_offset, 0),
*indir_out = offset;
}
-static LLVMValueRef
-build_gep_for_deref(struct ac_nir_context *ctx,
- nir_deref_var *deref)
-{
- struct hash_entry *entry = _mesa_hash_table_search(ctx->vars, deref->var);
- assert(entry->data);
- LLVMValueRef val = entry->data;
- nir_deref *tail = deref->deref.child;
- while (tail != NULL) {
- LLVMValueRef offset;
- switch (tail->deref_type) {
- case nir_deref_type_array: {
- nir_deref_array *array = nir_deref_as_array(tail);
- offset = LLVMConstInt(ctx->ac.i32, array->base_offset, 0);
- if (array->deref_array_type ==
- nir_deref_array_type_indirect) {
- offset = LLVMBuildAdd(ctx->ac.builder, offset,
- get_src(ctx,
- array->indirect),
- "");
- }
- break;
- }
- case nir_deref_type_struct: {
- nir_deref_struct *deref_struct =
- nir_deref_as_struct(tail);
- offset = LLVMConstInt(ctx->ac.i32,
- deref_struct->index, 0);
- break;
- }
- default:
- unreachable("bad deref type");
- }
- val = ac_build_gep0(&ctx->ac, val, offset);
- tail = tail->child;
- }
- return val;
-}
-
static LLVMValueRef load_tess_varyings(struct ac_nir_context *ctx,
nir_intrinsic_instr *instr,
bool load_inputs)
LLVMValueRef vertex_index = NULL;
LLVMValueRef indir_index = NULL;
unsigned const_index = 0;
- unsigned location = instr->variables[0]->var->data.location;
- unsigned driver_location = instr->variables[0]->var->data.driver_location;
- const bool is_patch = instr->variables[0]->var->data.patch;
- const bool is_compact = instr->variables[0]->var->data.compact;
- get_deref_offset(ctx, instr->variables[0],
- false, NULL, is_patch ? NULL : &vertex_index,
- &const_index, &indir_index);
+ nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
+
+ unsigned location = var->data.location;
+ unsigned driver_location = var->data.driver_location;
+ const bool is_patch = var->data.patch;
+ const bool is_compact = var->data.compact;
+
+ get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
+ false, NULL, is_patch ? NULL : &vertex_index,
+ &const_index, &indir_index);
LLVMTypeRef dest_type = get_def_type(ctx, &instr->dest.ssa);
result = ctx->abi->load_tess_varyings(ctx->abi, src_component_type,
vertex_index, indir_index,
const_index, location, driver_location,
- instr->variables[0]->var->data.location_frac,
+ var->data.location_frac,
instr->num_components,
is_patch, is_compact, load_inputs);
+ if (instr->dest.ssa.bit_size == 16) {
+ result = ac_to_integer(&ctx->ac, result);
+ result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, "");
+ }
return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
}
static LLVMValueRef visit_load_var(struct ac_nir_context *ctx,
nir_intrinsic_instr *instr)
{
+ nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
+
LLVMValueRef values[8];
- int idx = instr->variables[0]->var->data.driver_location;
+ int idx = 0;
int ve = instr->dest.ssa.num_components;
- unsigned comp = instr->variables[0]->var->data.location_frac;
+ unsigned comp = 0;
LLVMValueRef indir_index;
LLVMValueRef ret;
unsigned const_index;
- unsigned stride = instr->variables[0]->var->data.compact ? 1 : 4;
- bool vs_in = ctx->stage == MESA_SHADER_VERTEX &&
- instr->variables[0]->var->data.mode == nir_var_shader_in;
- get_deref_offset(ctx, instr->variables[0], vs_in, NULL, NULL,
- &const_index, &indir_index);
+ unsigned stride = 4;
+ int mode = nir_var_shared;
+
+ if (var) {
+ bool vs_in = ctx->stage == MESA_SHADER_VERTEX &&
+ var->data.mode == nir_var_shader_in;
+ if (var->data.compact)
+ stride = 1;
+ idx = var->data.driver_location;
+ comp = var->data.location_frac;
+ mode = var->data.mode;
+
+ get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), vs_in, NULL, NULL,
+ &const_index, &indir_index);
+ }
if (instr->dest.ssa.bit_size == 64)
ve *= 2;
- switch (instr->variables[0]->var->data.mode) {
+ switch (mode) {
case nir_var_shader_in:
if (ctx->stage == MESA_SHADER_TESS_CTRL ||
ctx->stage == MESA_SHADER_TESS_EVAL) {
LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
LLVMValueRef indir_index;
unsigned const_index, vertex_index;
- get_deref_offset(ctx, instr->variables[0],
- false, &vertex_index, NULL,
- &const_index, &indir_index);
+ get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
+ false, &vertex_index, NULL, &const_index, &indir_index);
- return ctx->abi->load_inputs(ctx->abi, instr->variables[0]->var->data.location,
- instr->variables[0]->var->data.driver_location,
- instr->variables[0]->var->data.location_frac,
+ return ctx->abi->load_inputs(ctx->abi, var->data.location,
+ var->data.driver_location,
+ var->data.location_frac,
instr->num_components, vertex_index, const_index, type);
}
for (unsigned chan = comp; chan < ve + comp; chan++) {
if (indir_index) {
unsigned count = glsl_count_attribute_slots(
- instr->variables[0]->var->type,
+ var->type,
ctx->stage == MESA_SHADER_VERTEX);
count -= chan / 4;
LLVMValueRef tmp_vec = ac_build_gather_values_extended(
for (unsigned chan = 0; chan < ve; chan++) {
if (indir_index) {
unsigned count = glsl_count_attribute_slots(
- instr->variables[0]->var->type, false);
+ var->type, false);
count -= chan / 4;
LLVMValueRef tmp_vec = ac_build_gather_values_extended(
&ctx->ac, ctx->locals + idx + chan, count,
}
break;
case nir_var_shared: {
- LLVMValueRef address = build_gep_for_deref(ctx,
- instr->variables[0]);
+ LLVMValueRef address = get_src(ctx, instr->src[0]);
LLVMValueRef val = LLVMBuildLoad(ctx->ac.builder, address, "");
return LLVMBuildBitCast(ctx->ac.builder, val,
get_def_type(ctx, &instr->dest.ssa),
for (unsigned chan = comp; chan < ve + comp; chan++) {
if (indir_index) {
unsigned count = glsl_count_attribute_slots(
- instr->variables[0]->var->type, false);
+ var->type, false);
count -= chan / 4;
LLVMValueRef tmp_vec = ac_build_gather_values_extended(
&ctx->ac, ctx->abi->outputs + idx + chan, count,
visit_store_var(struct ac_nir_context *ctx,
nir_intrinsic_instr *instr)
{
+ nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
+
LLVMValueRef temp_ptr, value;
- int idx = instr->variables[0]->var->data.driver_location;
- unsigned comp = instr->variables[0]->var->data.location_frac;
- LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
- int writemask = instr->const_index[0] << comp;
+ int idx = var->data.driver_location;
+ unsigned comp = var->data.location_frac;
+ LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[1]));
+ int writemask = instr->const_index[0];
LLVMValueRef indir_index;
unsigned const_index;
- get_deref_offset(ctx, instr->variables[0], false,
- NULL, NULL, &const_index, &indir_index);
+
+ get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), false,
+ NULL, NULL, &const_index, &indir_index);
if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src)) == 64) {
writemask = widen_mask(writemask, 2);
}
- switch (instr->variables[0]->var->data.mode) {
+ writemask = writemask << comp;
+
+ switch (var->data.mode) {
case nir_var_shader_out:
if (ctx->stage == MESA_SHADER_TESS_CTRL) {
LLVMValueRef vertex_index = NULL;
LLVMValueRef indir_index = NULL;
unsigned const_index = 0;
- const unsigned location = instr->variables[0]->var->data.location;
- const unsigned driver_location = instr->variables[0]->var->data.driver_location;
- const unsigned comp = instr->variables[0]->var->data.location_frac;
- const bool is_patch = instr->variables[0]->var->data.patch;
- const bool is_compact = instr->variables[0]->var->data.compact;
-
- get_deref_offset(ctx, instr->variables[0],
- false, NULL, is_patch ? NULL : &vertex_index,
- &const_index, &indir_index);
-
- ctx->abi->store_tcs_outputs(ctx->abi, vertex_index, indir_index,
- const_index, location, driver_location,
- src, comp, is_patch, is_compact, writemask);
+ const bool is_patch = var->data.patch;
+
+ get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
+ false, NULL, is_patch ? NULL : &vertex_index,
+ &const_index, &indir_index);
+
+ ctx->abi->store_tcs_outputs(ctx->abi, var,
+ vertex_index, indir_index,
+ const_index, src, writemask);
return;
}
value = ac_llvm_extract_elem(&ctx->ac, src, chan - comp);
- if (instr->variables[0]->var->data.compact)
+ if (var->data.compact)
stride = 1;
if (indir_index) {
unsigned count = glsl_count_attribute_slots(
- instr->variables[0]->var->type, false);
+ var->type, false);
count -= chan / 4;
LLVMValueRef tmp_vec = ac_build_gather_values_extended(
&ctx->ac, ctx->abi->outputs + idx + chan, count,
value = ac_llvm_extract_elem(&ctx->ac, src, chan);
if (indir_index) {
unsigned count = glsl_count_attribute_slots(
- instr->variables[0]->var->type, false);
+ var->type, false);
count -= chan / 4;
LLVMValueRef tmp_vec = ac_build_gather_values_extended(
&ctx->ac, ctx->locals + idx + chan, count,
break;
case nir_var_shared: {
int writemask = instr->const_index[0];
- LLVMValueRef address = build_gep_for_deref(ctx,
- instr->variables[0]);
- LLVMValueRef val = get_src(ctx, instr->src[0]);
- unsigned components =
- glsl_get_vector_elements(
- nir_deref_tail(&instr->variables[0]->deref)->type);
- if (writemask == (1 << components) - 1) {
+ LLVMValueRef address = get_src(ctx, instr->src[0]);
+ LLVMValueRef val = get_src(ctx, instr->src[1]);
+ if (util_is_power_of_two_nonzero(writemask)) {
val = LLVMBuildBitCast(
ctx->ac.builder, val,
LLVMGetElementType(LLVMTypeOf(address)), "");
return 0;
}
-static bool
-glsl_is_array_image(const struct glsl_type *type)
-{
- const enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
-
- if (glsl_sampler_type_is_array(type))
- return true;
-
- return dim == GLSL_SAMPLER_DIM_CUBE ||
- dim == GLSL_SAMPLER_DIM_3D ||
- dim == GLSL_SAMPLER_DIM_SUBPASS ||
- dim == GLSL_SAMPLER_DIM_SUBPASS_MS;
-}
-
/* Adjust the sample index according to FMASK.
*
LLVMValueRef sample_index,
LLVMValueRef fmask_desc_ptr)
{
- LLVMValueRef fmask_load_address[4];
+ struct ac_image_args args = {0};
LLVMValueRef res;
- fmask_load_address[0] = coord_x;
- fmask_load_address[1] = coord_y;
- if (coord_z) {
- fmask_load_address[2] = coord_z;
- fmask_load_address[3] = LLVMGetUndef(ctx->i32);
- }
-
- struct ac_image_args args = {0};
+ args.coords[0] = coord_x;
+ args.coords[1] = coord_y;
+ if (coord_z)
+ args.coords[2] = coord_z;
args.opcode = ac_image_load;
- args.da = coord_z ? true : false;
+ args.dim = coord_z ? ac_image_2darray : ac_image_2d;
args.resource = fmask_desc_ptr;
args.dmask = 0xf;
- args.addr = ac_build_gather_values(ctx, fmask_load_address, coord_z ? 4 : 2);
+ args.attributes = AC_FUNC_ATTR_READNONE;
res = ac_build_image_opcode(ctx, &args);
return sample_index;
}
-static LLVMValueRef get_image_coords(struct ac_nir_context *ctx,
- const nir_intrinsic_instr *instr)
+static nir_variable *get_image_variable(const nir_intrinsic_instr *instr)
+{
+ assert(instr->src[0].is_ssa);
+ return nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
+}
+
+static LLVMValueRef get_image_descriptor(struct ac_nir_context *ctx,
+ const nir_intrinsic_instr *instr,
+ enum ac_descriptor_type desc_type,
+ bool write)
{
- const struct glsl_type *type = glsl_without_array(instr->variables[0]->var->type);
+ return get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), desc_type, NULL, true, write);
+}
+
+static void get_image_coords(struct ac_nir_context *ctx,
+ const nir_intrinsic_instr *instr,
+ struct ac_image_args *args)
+{
+ const struct glsl_type *type = glsl_without_array(get_image_variable(instr)->type);
- LLVMValueRef src0 = get_src(ctx, instr->src[0]);
- LLVMValueRef coords[4];
+ LLVMValueRef src0 = get_src(ctx, instr->src[1]);
LLVMValueRef masks[] = {
LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
};
- LLVMValueRef res;
- LLVMValueRef sample_index = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[1]), 0);
+ LLVMValueRef sample_index = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
int count;
enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
bool gfx9_1d = ctx->ac.chip_class >= GFX9 && dim == GLSL_SAMPLER_DIM_1D;
count = image_type_to_components_count(dim, is_array);
- if (is_ms) {
+ if (is_ms && instr->intrinsic == nir_intrinsic_image_deref_load) {
LLVMValueRef fmask_load_address[3];
int chan;
LLVMBuildAdd(ctx->ac.builder, fmask_load_address[chan],
LLVMBuildFPToUI(ctx->ac.builder, ctx->abi->frag_pos[chan],
ctx->ac.i32, ""), "");
- fmask_load_address[2] = ac_to_integer(&ctx->ac, ctx->abi->inputs[radeon_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)]);
+ fmask_load_address[2] = ac_to_integer(&ctx->ac, ctx->abi->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)]);
}
sample_index = adjust_sample_index_using_fmask(&ctx->ac,
fmask_load_address[0],
fmask_load_address[1],
fmask_load_address[2],
sample_index,
- get_sampler_desc(ctx, instr->variables[0], AC_DESC_FMASK, NULL, true, false));
+ get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
+ AC_DESC_FMASK, NULL, false, false));
}
if (count == 1 && !gfx9_1d) {
- if (instr->src[0].ssa->num_components)
- res = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
+ if (instr->src[1].ssa->num_components)
+ args->coords[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
else
- res = src0;
+ args->coords[0] = src0;
} else {
int chan;
if (is_ms)
count--;
for (chan = 0; chan < count; ++chan) {
- coords[chan] = ac_llvm_extract_elem(&ctx->ac, src0, chan);
+ args->coords[chan] = ac_llvm_extract_elem(&ctx->ac, src0, chan);
}
if (add_frag_pos) {
- for (chan = 0; chan < 2; ++chan)
- coords[chan] = LLVMBuildAdd(ctx->ac.builder, coords[chan], LLVMBuildFPToUI(ctx->ac.builder, ctx->abi->frag_pos[chan],
+ for (chan = 0; chan < 2; ++chan) {
+ args->coords[chan] = LLVMBuildAdd(
+ ctx->ac.builder, args->coords[chan],
+ LLVMBuildFPToUI(
+ ctx->ac.builder, ctx->abi->frag_pos[chan],
ctx->ac.i32, ""), "");
- coords[2] = ac_to_integer(&ctx->ac, ctx->abi->inputs[radeon_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)]);
+ }
+ args->coords[2] = ac_to_integer(&ctx->ac,
+ ctx->abi->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)]);
count++;
}
if (gfx9_1d) {
if (is_array) {
- coords[2] = coords[1];
- coords[1] = ctx->ac.i32_0;
+ args->coords[2] = args->coords[1];
+ args->coords[1] = ctx->ac.i32_0;
} else
- coords[1] = ctx->ac.i32_0;
+ args->coords[1] = ctx->ac.i32_0;
count++;
}
if (is_ms) {
- coords[count] = sample_index;
+ args->coords[count] = sample_index;
count++;
}
+ }
+}
- if (count == 3) {
- coords[3] = LLVMGetUndef(ctx->ac.i32);
- count = 4;
- }
- res = ac_build_gather_values(&ctx->ac, coords, count);
+static LLVMValueRef get_image_buffer_descriptor(struct ac_nir_context *ctx,
+ const nir_intrinsic_instr *instr, bool write)
+{
+ LLVMValueRef rsrc = get_image_descriptor(ctx, instr, AC_DESC_BUFFER, write);
+ if (ctx->abi->gfx9_stride_size_workaround) {
+ LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 2, 0), "");
+ LLVMValueRef stride = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 1, 0), "");
+ stride = LLVMBuildLShr(ctx->ac.builder, stride, LLVMConstInt(ctx->ac.i32, 16, 0), "");
+
+ LLVMValueRef new_elem_count = LLVMBuildSelect(ctx->ac.builder,
+ LLVMBuildICmp(ctx->ac.builder, LLVMIntUGT, elem_count, stride, ""),
+ elem_count, stride, "");
+
+ rsrc = LLVMBuildInsertElement(ctx->ac.builder, rsrc, new_elem_count,
+ LLVMConstInt(ctx->ac.i32, 2, 0), "");
}
- return res;
+ return rsrc;
}
static LLVMValueRef visit_image_load(struct ac_nir_context *ctx,
const nir_intrinsic_instr *instr)
{
- LLVMValueRef params[7];
LLVMValueRef res;
- char intrinsic_name[64];
- const nir_variable *var = instr->variables[0]->var;
+ const nir_variable *var = get_image_variable(instr);
const struct glsl_type *type = var->type;
- if(instr->variables[0]->deref.child)
- type = instr->variables[0]->deref.child->type;
-
type = glsl_without_array(type);
const enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
unsigned num_channels = util_last_bit(mask);
LLVMValueRef rsrc, vindex;
- rsrc = get_sampler_desc(ctx, instr->variables[0], AC_DESC_BUFFER, NULL, true, false);
- vindex = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[0]),
+ rsrc = get_image_buffer_descriptor(ctx, instr, false);
+ vindex = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
ctx->ac.i32_0, "");
/* TODO: set "glc" and "can_speculate" when OpenGL needs it. */
res = ac_trim_vector(&ctx->ac, res, instr->dest.ssa.num_components);
res = ac_to_integer(&ctx->ac, res);
} else {
- LLVMValueRef da = glsl_is_array_image(type) ? ctx->ac.i1true : ctx->ac.i1false;
- LLVMValueRef slc = ctx->ac.i1false;
-
- params[0] = get_image_coords(ctx, instr);
- params[1] = get_sampler_desc(ctx, instr->variables[0], AC_DESC_IMAGE, NULL, true, false);
- params[2] = LLVMConstInt(ctx->ac.i32, 15, false); /* dmask */
- params[3] = (var->data.image._volatile || var->data.image.coherent) ?
- ctx->ac.i1true : ctx->ac.i1false;
- params[4] = slc;
- params[5] = ctx->ac.i1false;
- params[6] = da;
-
- ac_get_image_intr_name("llvm.amdgcn.image.load",
- ctx->ac.v4f32, /* vdata */
- LLVMTypeOf(params[0]), /* coords */
- LLVMTypeOf(params[1]), /* rsrc */
- intrinsic_name, sizeof(intrinsic_name));
-
- res = ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->ac.v4f32,
- params, 7, AC_FUNC_ATTR_READONLY);
+ struct ac_image_args args = {};
+ args.opcode = ac_image_load;
+ get_image_coords(ctx, instr, &args);
+ args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, false);
+ args.dim = get_ac_image_dim(&ctx->ac, glsl_get_sampler_dim(type),
+ glsl_sampler_type_is_array(type));
+ args.dmask = 15;
+ args.attributes = AC_FUNC_ATTR_READONLY;
+ if (var->data.image.access & (ACCESS_VOLATILE | ACCESS_COHERENT))
+ args.cache_policy |= ac_glc;
+
+ res = ac_build_image_opcode(&ctx->ac, &args);
}
return ac_to_integer(&ctx->ac, res);
}
nir_intrinsic_instr *instr)
{
LLVMValueRef params[8];
- char intrinsic_name[64];
- const nir_variable *var = instr->variables[0]->var;
+ const nir_variable *var = get_image_variable(instr);
const struct glsl_type *type = glsl_without_array(var->type);
const enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
LLVMValueRef glc = ctx->ac.i1false;
glc = ctx->ac.i1true;
if (dim == GLSL_SAMPLER_DIM_BUF) {
- params[0] = ac_to_float(&ctx->ac, get_src(ctx, instr->src[2])); /* data */
- params[1] = get_sampler_desc(ctx, instr->variables[0], AC_DESC_BUFFER, NULL, true, true);
- params[2] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[0]),
+ char name[48];
+ const char *types[] = { "f32", "v2f32", "v4f32" };
+ LLVMValueRef rsrc = get_image_buffer_descriptor(ctx, instr, true);
+ LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
+ unsigned src_channels = ac_get_llvm_num_components(src);
+
+ if (src_channels == 3)
+ src = ac_build_expand(&ctx->ac, src, 3, 4);
+
+ params[0] = src; /* data */
+ params[1] = rsrc;
+ params[2] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
ctx->ac.i32_0, ""); /* vindex */
params[3] = ctx->ac.i32_0; /* voffset */
- params[4] = glc; /* glc */
- params[5] = ctx->ac.i1false; /* slc */
- ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.buffer.store.format.v4f32", ctx->ac.voidt,
- params, 6, 0);
+ snprintf(name, sizeof(name), "%s.%s",
+ HAVE_LLVM >= 0x800 ? "llvm.amdgcn.struct.buffer.store.format"
+ : "llvm.amdgcn.buffer.store.format",
+ types[CLAMP(src_channels, 1, 3) - 1]);
+
+ if (HAVE_LLVM >= 0x800) {
+ params[4] = ctx->ac.i32_0; /* soffset */
+ params[5] = glc ? ctx->ac.i32_1 : ctx->ac.i32_0;
+ } else {
+ params[4] = glc; /* glc */
+ params[5] = ctx->ac.i1false; /* slc */
+ }
+ ac_build_intrinsic(&ctx->ac, name, ctx->ac.voidt, params, 6, 0);
} else {
- LLVMValueRef da = glsl_is_array_image(type) ? ctx->ac.i1true : ctx->ac.i1false;
- LLVMValueRef slc = ctx->ac.i1false;
-
- params[0] = ac_to_float(&ctx->ac, get_src(ctx, instr->src[2]));
- params[1] = get_image_coords(ctx, instr); /* coords */
- params[2] = get_sampler_desc(ctx, instr->variables[0], AC_DESC_IMAGE, NULL, true, true);
- params[3] = LLVMConstInt(ctx->ac.i32, 15, false); /* dmask */
- params[4] = (force_glc || var->data.image._volatile || var->data.image.coherent) ?
- ctx->ac.i1true : ctx->ac.i1false;
- params[5] = slc;
- params[6] = ctx->ac.i1false;
- params[7] = da;
-
- ac_get_image_intr_name("llvm.amdgcn.image.store",
- LLVMTypeOf(params[0]), /* vdata */
- LLVMTypeOf(params[1]), /* coords */
- LLVMTypeOf(params[2]), /* rsrc */
- intrinsic_name, sizeof(intrinsic_name));
-
- ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->ac.voidt,
- params, 8, 0);
+ struct ac_image_args args = {};
+ args.opcode = ac_image_store;
+ args.data[0] = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
+ get_image_coords(ctx, instr, &args);
+ args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, true);
+ args.dim = get_ac_image_dim(&ctx->ac, glsl_get_sampler_dim(type),
+ glsl_sampler_type_is_array(type));
+ args.dmask = 15;
+ if (force_glc || (var->data.image.access & (ACCESS_VOLATILE | ACCESS_COHERENT)))
+ args.cache_policy |= ac_glc;
+
+ ac_build_image_opcode(&ctx->ac, &args);
}
}
{
LLVMValueRef params[7];
int param_count = 0;
- const nir_variable *var = instr->variables[0]->var;
+ const nir_variable *var = get_image_variable(instr);
+ bool cmpswap = instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap;
const char *atomic_name;
- char intrinsic_name[41];
+ char intrinsic_name[64];
+ enum ac_atomic_op atomic_subop;
const struct glsl_type *type = glsl_without_array(var->type);
MAYBE_UNUSED int length;
bool is_unsigned = glsl_get_sampler_result_type(type) == GLSL_TYPE_UINT;
switch (instr->intrinsic) {
- case nir_intrinsic_image_atomic_add:
+ case nir_intrinsic_image_deref_atomic_add:
atomic_name = "add";
+ atomic_subop = ac_atomic_add;
break;
- case nir_intrinsic_image_atomic_min:
+ case nir_intrinsic_image_deref_atomic_min:
atomic_name = is_unsigned ? "umin" : "smin";
+ atomic_subop = is_unsigned ? ac_atomic_umin : ac_atomic_smin;
break;
- case nir_intrinsic_image_atomic_max:
+ case nir_intrinsic_image_deref_atomic_max:
atomic_name = is_unsigned ? "umax" : "smax";
+ atomic_subop = is_unsigned ? ac_atomic_umax : ac_atomic_smax;
break;
- case nir_intrinsic_image_atomic_and:
+ case nir_intrinsic_image_deref_atomic_and:
atomic_name = "and";
+ atomic_subop = ac_atomic_and;
break;
- case nir_intrinsic_image_atomic_or:
+ case nir_intrinsic_image_deref_atomic_or:
atomic_name = "or";
+ atomic_subop = ac_atomic_or;
break;
- case nir_intrinsic_image_atomic_xor:
+ case nir_intrinsic_image_deref_atomic_xor:
atomic_name = "xor";
+ atomic_subop = ac_atomic_xor;
break;
- case nir_intrinsic_image_atomic_exchange:
+ case nir_intrinsic_image_deref_atomic_exchange:
atomic_name = "swap";
+ atomic_subop = ac_atomic_swap;
break;
- case nir_intrinsic_image_atomic_comp_swap:
+ case nir_intrinsic_image_deref_atomic_comp_swap:
atomic_name = "cmpswap";
+ atomic_subop = 0; /* not used */
break;
default:
abort();
}
- if (instr->intrinsic == nir_intrinsic_image_atomic_comp_swap)
- params[param_count++] = get_src(ctx, instr->src[3]);
- params[param_count++] = get_src(ctx, instr->src[2]);
+ if (cmpswap)
+ params[param_count++] = get_src(ctx, instr->src[4]);
+ params[param_count++] = get_src(ctx, instr->src[3]);
if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_BUF) {
- params[param_count++] = get_sampler_desc(ctx, instr->variables[0], AC_DESC_BUFFER,
- NULL, true, true);
- params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[0]),
+ params[param_count++] = get_image_buffer_descriptor(ctx, instr, true);
+ params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
ctx->ac.i32_0, ""); /* vindex */
params[param_count++] = ctx->ac.i32_0; /* voffset */
- params[param_count++] = ctx->ac.i1false; /* slc */
-
- length = snprintf(intrinsic_name, sizeof(intrinsic_name),
- "llvm.amdgcn.buffer.atomic.%s", atomic_name);
- } else {
- char coords_type[8];
+ if (HAVE_LLVM >= 0x800) {
+ params[param_count++] = ctx->ac.i32_0; /* soffset */
+ params[param_count++] = ctx->ac.i32_0; /* slc */
- LLVMValueRef coords = params[param_count++] = get_image_coords(ctx, instr);
- params[param_count++] = get_sampler_desc(ctx, instr->variables[0], AC_DESC_IMAGE,
- NULL, true, true);
- params[param_count++] = ctx->ac.i1false; /* r128 */
- params[param_count++] = glsl_is_array_image(type) ? ctx->ac.i1true : ctx->ac.i1false; /* da */
- params[param_count++] = ctx->ac.i1false; /* slc */
+ length = snprintf(intrinsic_name, sizeof(intrinsic_name),
+ "llvm.amdgcn.struct.buffer.atomic.%s.i32", atomic_name);
+ } else {
+ params[param_count++] = ctx->ac.i1false; /* slc */
- build_int_type_name(LLVMTypeOf(coords),
- coords_type, sizeof(coords_type));
+ length = snprintf(intrinsic_name, sizeof(intrinsic_name),
+ "llvm.amdgcn.buffer.atomic.%s", atomic_name);
+ }
- length = snprintf(intrinsic_name, sizeof(intrinsic_name),
- "llvm.amdgcn.image.atomic.%s.%s", atomic_name, coords_type);
+ assert(length < sizeof(intrinsic_name));
+ return ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->ac.i32,
+ params, param_count, 0);
+ } else {
+ struct ac_image_args args = {};
+ args.opcode = cmpswap ? ac_image_atomic_cmpswap : ac_image_atomic;
+ args.atomic = atomic_subop;
+ args.data[0] = params[0];
+ if (cmpswap)
+ args.data[1] = params[1];
+ get_image_coords(ctx, instr, &args);
+ args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, true);
+ args.dim = get_ac_image_dim(&ctx->ac, glsl_get_sampler_dim(type),
+ glsl_sampler_type_is_array(type));
+
+ return ac_build_image_opcode(&ctx->ac, &args);
}
-
- assert(length < sizeof(intrinsic_name));
- return ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->ac.i32, params, param_count, 0);
}
static LLVMValueRef visit_image_samples(struct ac_nir_context *ctx,
const nir_intrinsic_instr *instr)
{
- const nir_variable *var = instr->variables[0]->var;
+ const nir_variable *var = get_image_variable(instr);
const struct glsl_type *type = glsl_without_array(var->type);
struct ac_image_args args = { 0 };
- args.da = glsl_is_array_image(type);
+ args.dim = get_ac_sampler_dim(&ctx->ac, glsl_get_sampler_dim(type),
+ glsl_sampler_type_is_array(type));
args.dmask = 0xf;
- args.resource = get_sampler_desc(ctx, instr->variables[0],
- AC_DESC_IMAGE, NULL, true, false);
+ args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, false);
args.opcode = ac_image_get_resinfo;
- args.addr = ctx->ac.i32_0;
+ args.lod = ctx->ac.i32_0;
+ args.attributes = AC_FUNC_ATTR_READNONE;
return ac_build_image_opcode(&ctx->ac, &args);
}
const nir_intrinsic_instr *instr)
{
LLVMValueRef res;
- const nir_variable *var = instr->variables[0]->var;
+ const nir_variable *var = get_image_variable(instr);
const struct glsl_type *type = glsl_without_array(var->type);
if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_BUF)
- return get_buffer_size(ctx,
- get_sampler_desc(ctx, instr->variables[0],
- AC_DESC_BUFFER, NULL, true, false), true);
+ return get_buffer_size(ctx, get_image_descriptor(ctx, instr, AC_DESC_BUFFER, false), true);
struct ac_image_args args = { 0 };
- args.da = glsl_is_array_image(type);
+ args.dim = get_ac_image_dim(&ctx->ac, glsl_get_sampler_dim(type),
+ glsl_sampler_type_is_array(type));
args.dmask = 0xf;
- args.resource = get_sampler_desc(ctx, instr->variables[0], AC_DESC_IMAGE, NULL, true, false);
+ args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, false);
args.opcode = ac_image_get_resinfo;
- args.addr = ctx->ac.i32_0;
+ args.lod = ctx->ac.i32_0;
+ args.attributes = AC_FUNC_ATTR_READNONE;
res = ac_build_image_opcode(&ctx->ac, &args);
return res;
}
-#define NOOP_WAITCNT 0xf7f
-#define LGKM_CNT 0x07f
-#define VM_CNT 0xf70
-
static void emit_membar(struct ac_llvm_context *ac,
const nir_intrinsic_instr *instr)
{
ac_build_waitcnt(ac, LGKM_CNT & VM_CNT);
return;
}
- ac_build_intrinsic(ac, "llvm.amdgcn.s.barrier",
- ac->voidt, NULL, 0, AC_FUNC_ATTR_CONVERGENT);
+ ac_build_s_barrier(ac);
}
static void emit_discard(struct ac_nir_context *ctx,
ctx->ac.i32_0, "");
} else {
assert(instr->intrinsic == nir_intrinsic_discard);
- cond = LLVMConstInt(ctx->ac.i1, false, 0);
+ cond = ctx->ac.i1false;
}
ctx->abi->emit_kill(ctx->abi, cond);
LLVMValueRef active_set = ac_build_ballot(&ctx->ac, ctx->ac.i32_1);
/* The second argument is whether cttz(0) should be defined, but we do not care. */
- LLVMValueRef args[] = {active_set, LLVMConstInt(ctx->ac.i1, 0, false)};
+ LLVMValueRef args[] = {active_set, ctx->ac.i1false};
LLVMValueRef result = ac_build_intrinsic(&ctx->ac,
"llvm.cttz.i64",
ctx->ac.i64, args, 2,
LLVMValueRef result;
LLVMValueRef src = get_src(ctx, instr->src[src_idx]);
- if (instr->intrinsic == nir_intrinsic_var_atomic_comp_swap ||
- instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap) {
+ if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap ||
+ instr->intrinsic == nir_intrinsic_deref_atomic_comp_swap) {
LLVMValueRef src1 = get_src(ctx, instr->src[src_idx + 1]);
result = LLVMBuildAtomicCmpXchg(ctx->ac.builder,
ptr, src, src1,
LLVMAtomicOrderingSequentiallyConsistent,
LLVMAtomicOrderingSequentiallyConsistent,
false);
+ result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
} else {
LLVMAtomicRMWBinOp op;
switch (instr->intrinsic) {
- case nir_intrinsic_var_atomic_add:
case nir_intrinsic_shared_atomic_add:
+ case nir_intrinsic_deref_atomic_add:
op = LLVMAtomicRMWBinOpAdd;
break;
- case nir_intrinsic_var_atomic_umin:
case nir_intrinsic_shared_atomic_umin:
+ case nir_intrinsic_deref_atomic_umin:
op = LLVMAtomicRMWBinOpUMin;
break;
- case nir_intrinsic_var_atomic_umax:
case nir_intrinsic_shared_atomic_umax:
+ case nir_intrinsic_deref_atomic_umax:
op = LLVMAtomicRMWBinOpUMax;
break;
- case nir_intrinsic_var_atomic_imin:
case nir_intrinsic_shared_atomic_imin:
+ case nir_intrinsic_deref_atomic_imin:
op = LLVMAtomicRMWBinOpMin;
break;
- case nir_intrinsic_var_atomic_imax:
case nir_intrinsic_shared_atomic_imax:
+ case nir_intrinsic_deref_atomic_imax:
op = LLVMAtomicRMWBinOpMax;
break;
- case nir_intrinsic_var_atomic_and:
case nir_intrinsic_shared_atomic_and:
+ case nir_intrinsic_deref_atomic_and:
op = LLVMAtomicRMWBinOpAnd;
break;
- case nir_intrinsic_var_atomic_or:
case nir_intrinsic_shared_atomic_or:
+ case nir_intrinsic_deref_atomic_or:
op = LLVMAtomicRMWBinOpOr;
break;
- case nir_intrinsic_var_atomic_xor:
case nir_intrinsic_shared_atomic_xor:
+ case nir_intrinsic_deref_atomic_xor:
op = LLVMAtomicRMWBinOpXor;
break;
- case nir_intrinsic_var_atomic_exchange:
case nir_intrinsic_shared_atomic_exchange:
+ case nir_intrinsic_deref_atomic_exchange:
op = LLVMAtomicRMWBinOpXchg;
break;
default:
LLVMValueRef src_c0 = NULL;
LLVMValueRef src_c1 = NULL;
LLVMValueRef src0 = NULL;
- int input_index = instr->variables[0]->var->data.location - VARYING_SLOT_VAR0;
+
+ nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
+ int input_index = var->data.location - VARYING_SLOT_VAR0;
switch (instr->intrinsic) {
- case nir_intrinsic_interp_var_at_centroid:
+ case nir_intrinsic_interp_deref_at_centroid:
location = INTERP_CENTROID;
break;
- case nir_intrinsic_interp_var_at_sample:
- case nir_intrinsic_interp_var_at_offset:
+ case nir_intrinsic_interp_deref_at_sample:
+ case nir_intrinsic_interp_deref_at_offset:
location = INTERP_CENTER;
- src0 = get_src(ctx, instr->src[0]);
+ src0 = get_src(ctx, instr->src[1]);
break;
default:
break;
}
- if (instr->intrinsic == nir_intrinsic_interp_var_at_offset) {
+ if (instr->intrinsic == nir_intrinsic_interp_deref_at_offset) {
src_c0 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, src0, ctx->ac.i32_0, ""));
src_c1 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, src0, ctx->ac.i32_1, ""));
- } else if (instr->intrinsic == nir_intrinsic_interp_var_at_sample) {
+ } else if (instr->intrinsic == nir_intrinsic_interp_deref_at_sample) {
LLVMValueRef sample_position;
LLVMValueRef halfval = LLVMConstReal(ctx->ac.f32, 0.5f);
src_c1 = LLVMBuildExtractElement(ctx->ac.builder, sample_position, ctx->ac.i32_1, "");
src_c1 = LLVMBuildFSub(ctx->ac.builder, src_c1, halfval, "");
}
- interp_param = ctx->abi->lookup_interp_param(ctx->abi, instr->variables[0]->var->data.interpolation, location);
+ interp_param = ctx->abi->lookup_interp_param(ctx->abi, var->data.interpolation, location);
attr_number = LLVMConstInt(ctx->ac.i32, input_index, false);
if (location == INTERP_CENTER) {
interp_el = LLVMBuildBitCast(ctx->ac.builder, interp_el,
ctx->ac.f32, "");
- temp1 = LLVMBuildFMul(ctx->ac.builder, ddx_el, src_c0, "");
- temp1 = LLVMBuildFAdd(ctx->ac.builder, temp1, interp_el, "");
-
- temp2 = LLVMBuildFMul(ctx->ac.builder, ddy_el, src_c1, "");
- temp2 = LLVMBuildFAdd(ctx->ac.builder, temp2, temp1, "");
+ temp1 = ac_build_fmad(&ctx->ac, ddx_el, src_c0, interp_el);
+ temp2 = ac_build_fmad(&ctx->ac, ddy_el, src_c1, temp1);
ij_out[i] = LLVMBuildBitCast(ctx->ac.builder,
temp2, ctx->ac.i32, "");
}
}
return ac_build_varying_gather_values(&ctx->ac, result, instr->num_components,
- instr->variables[0]->var->data.location_frac);
+ var->data.location_frac);
}
static void visit_intrinsic(struct ac_nir_context *ctx,
result = ac_build_ballot(&ctx->ac, get_src(ctx, instr->src[0]));
break;
case nir_intrinsic_read_invocation:
- case nir_intrinsic_read_first_invocation: {
- LLVMValueRef args[2];
-
- /* Value */
- args[0] = get_src(ctx, instr->src[0]);
-
- unsigned num_args;
- const char *intr_name;
- if (instr->intrinsic == nir_intrinsic_read_invocation) {
- num_args = 2;
- intr_name = "llvm.amdgcn.readlane";
-
- /* Invocation */
- args[1] = get_src(ctx, instr->src[1]);
- } else {
- num_args = 1;
- intr_name = "llvm.amdgcn.readfirstlane";
- }
-
- /* We currently have no other way to prevent LLVM from lifting the icmp
- * calls to a dominating basic block.
- */
- ac_build_optimization_barrier(&ctx->ac, &args[0]);
-
- result = ac_build_intrinsic(&ctx->ac, intr_name,
- ctx->ac.i32, args, num_args,
- AC_FUNC_ATTR_READNONE |
- AC_FUNC_ATTR_CONVERGENT);
+ result = ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]),
+ get_src(ctx, instr->src[1]));
+ break;
+ case nir_intrinsic_read_first_invocation:
+ result = ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]), NULL);
break;
- }
case nir_intrinsic_load_subgroup_invocation:
result = ac_get_thread_id(&ctx->ac);
break;
result = ac_build_gather_values(&ctx->ac, values, 3);
break;
}
- case nir_intrinsic_load_base_vertex: {
+ case nir_intrinsic_load_base_vertex:
+ case nir_intrinsic_load_first_vertex:
result = ctx->abi->load_base_vertex(ctx->abi);
break;
- }
case nir_intrinsic_load_local_group_size:
result = ctx->abi->load_local_group_size(ctx->abi);
break;
case nir_intrinsic_get_buffer_size:
result = visit_get_buffer_size(ctx, instr);
break;
- case nir_intrinsic_load_var:
+ case nir_intrinsic_load_deref:
result = visit_load_var(ctx, instr);
break;
- case nir_intrinsic_store_var:
+ case nir_intrinsic_store_deref:
visit_store_var(ctx, instr);
break;
case nir_intrinsic_load_shared:
case nir_intrinsic_store_shared:
visit_store_shared(ctx, instr);
break;
- case nir_intrinsic_image_samples:
+ case nir_intrinsic_image_deref_samples:
result = visit_image_samples(ctx, instr);
break;
- case nir_intrinsic_image_load:
+ case nir_intrinsic_image_deref_load:
result = visit_image_load(ctx, instr);
break;
- case nir_intrinsic_image_store:
+ case nir_intrinsic_image_deref_store:
visit_image_store(ctx, instr);
break;
- case nir_intrinsic_image_atomic_add:
- case nir_intrinsic_image_atomic_min:
- case nir_intrinsic_image_atomic_max:
- case nir_intrinsic_image_atomic_and:
- case nir_intrinsic_image_atomic_or:
- case nir_intrinsic_image_atomic_xor:
- case nir_intrinsic_image_atomic_exchange:
- case nir_intrinsic_image_atomic_comp_swap:
+ case nir_intrinsic_image_deref_atomic_add:
+ case nir_intrinsic_image_deref_atomic_min:
+ case nir_intrinsic_image_deref_atomic_max:
+ case nir_intrinsic_image_deref_atomic_and:
+ case nir_intrinsic_image_deref_atomic_or:
+ case nir_intrinsic_image_deref_atomic_xor:
+ case nir_intrinsic_image_deref_atomic_exchange:
+ case nir_intrinsic_image_deref_atomic_comp_swap:
result = visit_image_atomic(ctx, instr);
break;
- case nir_intrinsic_image_size:
+ case nir_intrinsic_image_deref_size:
result = visit_image_size(ctx, instr);
break;
case nir_intrinsic_shader_clock:
result = visit_var_atomic(ctx, instr, ptr, 1);
break;
}
- case nir_intrinsic_var_atomic_add:
- case nir_intrinsic_var_atomic_imin:
- case nir_intrinsic_var_atomic_umin:
- case nir_intrinsic_var_atomic_imax:
- case nir_intrinsic_var_atomic_umax:
- case nir_intrinsic_var_atomic_and:
- case nir_intrinsic_var_atomic_or:
- case nir_intrinsic_var_atomic_xor:
- case nir_intrinsic_var_atomic_exchange:
- case nir_intrinsic_var_atomic_comp_swap: {
- LLVMValueRef ptr = build_gep_for_deref(ctx, instr->variables[0]);
- result = visit_var_atomic(ctx, instr, ptr, 0);
+ case nir_intrinsic_deref_atomic_add:
+ case nir_intrinsic_deref_atomic_imin:
+ case nir_intrinsic_deref_atomic_umin:
+ case nir_intrinsic_deref_atomic_imax:
+ case nir_intrinsic_deref_atomic_umax:
+ case nir_intrinsic_deref_atomic_and:
+ case nir_intrinsic_deref_atomic_or:
+ case nir_intrinsic_deref_atomic_xor:
+ case nir_intrinsic_deref_atomic_exchange:
+ case nir_intrinsic_deref_atomic_comp_swap: {
+ LLVMValueRef ptr = get_src(ctx, instr->src[0]);
+ result = visit_var_atomic(ctx, instr, ptr, 1);
break;
}
- case nir_intrinsic_interp_var_at_centroid:
- case nir_intrinsic_interp_var_at_sample:
- case nir_intrinsic_interp_var_at_offset:
+ case nir_intrinsic_interp_deref_at_centroid:
+ case nir_intrinsic_interp_deref_at_sample:
+ case nir_intrinsic_interp_deref_at_offset:
result = visit_interp(ctx, instr);
break;
case nir_intrinsic_emit_vertex:
result = LLVMBuildSExt(ctx->ac.builder, tmp, ctx->ac.i32, "");
break;
}
+ case nir_intrinsic_shuffle:
+ result = ac_build_shuffle(&ctx->ac, get_src(ctx, instr->src[0]),
+ get_src(ctx, instr->src[1]));
+ break;
+ case nir_intrinsic_reduce:
+ result = ac_build_reduce(&ctx->ac,
+ get_src(ctx, instr->src[0]),
+ instr->const_index[0],
+ instr->const_index[1]);
+ break;
+ case nir_intrinsic_inclusive_scan:
+ result = ac_build_inclusive_scan(&ctx->ac,
+ get_src(ctx, instr->src[0]),
+ instr->const_index[0]);
+ break;
+ case nir_intrinsic_exclusive_scan:
+ result = ac_build_exclusive_scan(&ctx->ac,
+ get_src(ctx, instr->src[0]),
+ instr->const_index[0]);
+ break;
+ case nir_intrinsic_quad_broadcast: {
+ unsigned lane = nir_src_as_const_value(instr->src[1])->u32[0];
+ result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]),
+ lane, lane, lane, lane);
+ break;
+ }
+ case nir_intrinsic_quad_swap_horizontal:
+ result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 1, 0, 3, 2);
+ break;
+ case nir_intrinsic_quad_swap_vertical:
+ result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 2, 3, 0, 1);
+ break;
+ case nir_intrinsic_quad_swap_diagonal:
+ result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 3, 2, 1, 0);
+ break;
default:
fprintf(stderr, "Unknown intrinsic: ");
nir_print_instr(&instr->instr, stderr);
break;
}
if (result) {
- _mesa_hash_table_insert(ctx->defs, &instr->dest.ssa, result);
+ ctx->ssa_defs[instr->dest.ssa.index] = result;
}
}
static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
- const nir_deref_var *deref,
+ nir_deref_instr *deref_instr,
enum ac_descriptor_type desc_type,
const nir_tex_instr *tex_instr,
bool image, bool write)
unsigned constant_index = 0;
unsigned descriptor_set;
unsigned base_index;
+ bool bindless = false;
- if (!deref) {
+ if (!deref_instr) {
assert(tex_instr && !image);
descriptor_set = 0;
base_index = tex_instr->sampler_index;
} else {
- const nir_deref *tail = &deref->deref;
- while (tail->child) {
- const nir_deref_array *child = nir_deref_as_array(tail->child);
- unsigned array_size = glsl_get_aoa_size(tail->child->type);
-
+ while (deref_instr->deref_type != nir_deref_type_var) {
+ unsigned array_size = glsl_get_aoa_size(deref_instr->type);
if (!array_size)
array_size = 1;
- assert(child->deref_array_type != nir_deref_array_type_wildcard);
-
- if (child->deref_array_type == nir_deref_array_type_indirect) {
- LLVMValueRef indirect = get_src(ctx, child->indirect);
+ assert(deref_instr->deref_type == nir_deref_type_array);
+ nir_const_value *const_value = nir_src_as_const_value(deref_instr->arr.index);
+ if (const_value) {
+ constant_index += array_size * const_value->u32[0];
+ } else {
+ LLVMValueRef indirect = get_src(ctx, deref_instr->arr.index);
indirect = LLVMBuildMul(ctx->ac.builder, indirect,
LLVMConstInt(ctx->ac.i32, array_size, false), "");
- if (!index)
+ if (!index)
index = indirect;
else
index = LLVMBuildAdd(ctx->ac.builder, index, indirect, "");
}
- constant_index += child->base_offset * array_size;
-
- tail = &child->deref;
+ deref_instr = nir_src_as_deref(deref_instr->parent);
}
- descriptor_set = deref->var->data.descriptor_set;
- base_index = deref->var->data.binding;
+ descriptor_set = deref_instr->var->data.descriptor_set;
+ base_index = deref_instr->var->data.binding;
}
return ctx->abi->load_sampler_desc(ctx->abi,
descriptor_set,
base_index,
constant_index, index,
- desc_type, image, write);
-}
-
-static void set_tex_fetch_args(struct ac_llvm_context *ctx,
- struct ac_image_args *args,
- const nir_tex_instr *instr,
- nir_texop op,
- LLVMValueRef res_ptr, LLVMValueRef samp_ptr,
- LLVMValueRef *param, unsigned count,
- unsigned dmask)
-{
- unsigned is_rect = 0;
- bool da = instr->is_array || instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE;
-
- if (op == nir_texop_lod)
- da = false;
- /* Pad to power of two vector */
- while (count < util_next_power_of_two(count))
- param[count++] = LLVMGetUndef(ctx->i32);
-
- if (count > 1)
- args->addr = ac_build_gather_values(ctx, param, count);
- else
- args->addr = param[0];
-
- args->resource = res_ptr;
- args->sampler = samp_ptr;
-
- if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF && op == nir_texop_txf) {
- args->addr = param[0];
- return;
- }
-
- args->dmask = dmask;
- args->unorm = is_rect;
- args->da = da;
+ desc_type, image, write, bindless);
}
/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr,
LLVMValueRef *fmask_ptr)
{
+ nir_deref_instr *texture_deref_instr = NULL;
+ nir_deref_instr *sampler_deref_instr = NULL;
+
+ for (unsigned i = 0; i < instr->num_srcs; i++) {
+ switch (instr->src[i].src_type) {
+ case nir_tex_src_texture_deref:
+ texture_deref_instr = nir_src_as_deref(instr->src[i].src);
+ break;
+ case nir_tex_src_sampler_deref:
+ sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!sampler_deref_instr)
+ sampler_deref_instr = texture_deref_instr;
+
if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
- *res_ptr = get_sampler_desc(ctx, instr->texture, AC_DESC_BUFFER, instr, false, false);
+ *res_ptr = get_sampler_desc(ctx, texture_deref_instr, AC_DESC_BUFFER, instr, false, false);
else
- *res_ptr = get_sampler_desc(ctx, instr->texture, AC_DESC_IMAGE, instr, false, false);
+ *res_ptr = get_sampler_desc(ctx, texture_deref_instr, AC_DESC_IMAGE, instr, false, false);
if (samp_ptr) {
- if (instr->sampler)
- *samp_ptr = get_sampler_desc(ctx, instr->sampler, AC_DESC_SAMPLER, instr, false, false);
- else
- *samp_ptr = get_sampler_desc(ctx, instr->texture, AC_DESC_SAMPLER, instr, false, false);
+ *samp_ptr = get_sampler_desc(ctx, sampler_deref_instr, AC_DESC_SAMPLER, instr, false, false);
if (instr->sampler_dim < GLSL_SAMPLER_DIM_RECT)
*samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
}
- if (fmask_ptr && !instr->sampler && (instr->op == nir_texop_txf_ms ||
- instr->op == nir_texop_samples_identical))
- *fmask_ptr = get_sampler_desc(ctx, instr->texture, AC_DESC_FMASK, instr, false, false);
+ if (fmask_ptr && (instr->op == nir_texop_txf_ms ||
+ instr->op == nir_texop_samples_identical))
+ *fmask_ptr = get_sampler_desc(ctx, texture_deref_instr, AC_DESC_FMASK, instr, false, false);
}
static LLVMValueRef apply_round_slice(struct ac_llvm_context *ctx,
LLVMValueRef coord)
{
coord = ac_to_float(ctx, coord);
- coord = ac_build_intrinsic(ctx, "llvm.rint.f32", ctx->f32, &coord, 1, 0);
+ coord = ac_build_round(ctx, coord);
coord = ac_to_integer(ctx, coord);
return coord;
}
{
LLVMValueRef result = NULL;
struct ac_image_args args = { 0 };
- unsigned dmask = 0xf;
- LLVMValueRef address[16];
- LLVMValueRef coords[5];
- LLVMValueRef coord = NULL, lod = NULL, comparator = NULL;
- LLVMValueRef bias = NULL, offsets = NULL;
- LLVMValueRef res_ptr, samp_ptr, fmask_ptr = NULL, sample_index = NULL;
+ LLVMValueRef fmask_ptr = NULL, sample_index = NULL;
LLVMValueRef ddx = NULL, ddy = NULL;
- LLVMValueRef derivs[6];
- unsigned chan, count = 0;
- unsigned const_src = 0, num_deriv_comp = 0;
- bool lod_is_zero = false;
+ unsigned offset_src = 0;
- tex_fetch_ptrs(ctx, instr, &res_ptr, &samp_ptr, &fmask_ptr);
+ tex_fetch_ptrs(ctx, instr, &args.resource, &args.sampler, &fmask_ptr);
for (unsigned i = 0; i < instr->num_srcs; i++) {
switch (instr->src[i].src_type) {
- case nir_tex_src_coord:
- coord = get_src(ctx, instr->src[i].src);
+ case nir_tex_src_coord: {
+ LLVMValueRef coord = get_src(ctx, instr->src[i].src);
+ for (unsigned chan = 0; chan < instr->coord_components; ++chan)
+ args.coords[chan] = ac_llvm_extract_elem(&ctx->ac, coord, chan);
break;
+ }
case nir_tex_src_projector:
break;
case nir_tex_src_comparator:
- comparator = get_src(ctx, instr->src[i].src);
+ if (instr->is_shadow)
+ args.compare = get_src(ctx, instr->src[i].src);
break;
case nir_tex_src_offset:
- offsets = get_src(ctx, instr->src[i].src);
- const_src = i;
+ args.offset = get_src(ctx, instr->src[i].src);
+ offset_src = i;
break;
case nir_tex_src_bias:
- bias = get_src(ctx, instr->src[i].src);
+ if (instr->op == nir_texop_txb)
+ args.bias = get_src(ctx, instr->src[i].src);
break;
case nir_tex_src_lod: {
nir_const_value *val = nir_src_as_const_value(instr->src[i].src);
if (val && val->i32[0] == 0)
- lod_is_zero = true;
- lod = get_src(ctx, instr->src[i].src);
+ args.level_zero = true;
+ else
+ args.lod = get_src(ctx, instr->src[i].src);
break;
}
case nir_tex_src_ms_index:
break;
case nir_tex_src_ddx:
ddx = get_src(ctx, instr->src[i].src);
- num_deriv_comp = instr->src[i].src.ssa->num_components;
break;
case nir_tex_src_ddy:
ddy = get_src(ctx, instr->src[i].src);
}
if (instr->op == nir_texop_txs && instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
- result = get_buffer_size(ctx, res_ptr, true);
+ result = get_buffer_size(ctx, args.resource, true);
goto write_result;
}
if (instr->op == nir_texop_texture_samples) {
LLVMValueRef res, samples, is_msaa;
- res = LLVMBuildBitCast(ctx->ac.builder, res_ptr, ctx->ac.v8i32, "");
+ res = LLVMBuildBitCast(ctx->ac.builder, args.resource, ctx->ac.v8i32, "");
samples = LLVMBuildExtractElement(ctx->ac.builder, res,
LLVMConstInt(ctx->ac.i32, 3, false), "");
is_msaa = LLVMBuildLShr(ctx->ac.builder, samples,
goto write_result;
}
- if (coord)
- for (chan = 0; chan < instr->coord_components; chan++)
- coords[chan] = ac_llvm_extract_elem(&ctx->ac, coord, chan);
-
- if (offsets && instr->op != nir_texop_txf) {
+ if (args.offset && instr->op != nir_texop_txf) {
LLVMValueRef offset[3], pack;
- for (chan = 0; chan < 3; ++chan)
+ for (unsigned chan = 0; chan < 3; ++chan)
offset[chan] = ctx->ac.i32_0;
- args.offset = true;
- for (chan = 0; chan < ac_get_llvm_num_components(offsets); chan++) {
- offset[chan] = ac_llvm_extract_elem(&ctx->ac, offsets, chan);
+ unsigned num_components = ac_get_llvm_num_components(args.offset);
+ for (unsigned chan = 0; chan < num_components; chan++) {
+ offset[chan] = ac_llvm_extract_elem(&ctx->ac, args.offset, chan);
offset[chan] = LLVMBuildAnd(ctx->ac.builder, offset[chan],
LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
if (chan)
}
pack = LLVMBuildOr(ctx->ac.builder, offset[0], offset[1], "");
pack = LLVMBuildOr(ctx->ac.builder, pack, offset[2], "");
- address[count++] = pack;
-
- }
- /* pack LOD bias value */
- if (instr->op == nir_texop_txb && bias) {
- address[count++] = bias;
+ args.offset = pack;
}
- /* Pack depth comparison value */
- if (instr->is_shadow && comparator) {
- LLVMValueRef z = ac_to_float(&ctx->ac,
- ac_llvm_extract_elem(&ctx->ac, comparator, 0));
-
- /* TC-compatible HTILE on radeonsi promotes Z16 and Z24 to Z32_FLOAT,
- * so the depth comparison value isn't clamped for Z16 and
- * Z24 anymore. Do it manually here.
- *
- * It's unnecessary if the original texture format was
- * Z32_FLOAT, but we don't know that here.
- */
- if (ctx->ac.chip_class == VI && ctx->abi->clamp_shadow_reference)
- z = ac_build_clamp(&ctx->ac, z);
-
- address[count++] = z;
- }
+ /* TC-compatible HTILE on radeonsi promotes Z16 and Z24 to Z32_FLOAT,
+ * so the depth comparison value isn't clamped for Z16 and
+ * Z24 anymore. Do it manually here.
+ *
+ * It's unnecessary if the original texture format was
+ * Z32_FLOAT, but we don't know that here.
+ */
+ if (args.compare && ctx->ac.chip_class == VI && ctx->abi->clamp_shadow_reference)
+ args.compare = ac_build_clamp(&ctx->ac, ac_to_float(&ctx->ac, args.compare));
/* pack derivatives */
if (ddx || ddy) {
switch (instr->sampler_dim) {
case GLSL_SAMPLER_DIM_3D:
case GLSL_SAMPLER_DIM_CUBE:
- num_deriv_comp = 3;
num_src_deriv_channels = 3;
num_dest_deriv_channels = 3;
break;
default:
num_src_deriv_channels = 2;
num_dest_deriv_channels = 2;
- num_deriv_comp = 2;
break;
case GLSL_SAMPLER_DIM_1D:
num_src_deriv_channels = 1;
if (ctx->ac.chip_class >= GFX9) {
num_dest_deriv_channels = 2;
- num_deriv_comp = 2;
} else {
num_dest_deriv_channels = 1;
- num_deriv_comp = 1;
}
break;
}
for (unsigned i = 0; i < num_src_deriv_channels; i++) {
- derivs[i] = ac_to_float(&ctx->ac, ac_llvm_extract_elem(&ctx->ac, ddx, i));
- derivs[num_dest_deriv_channels + i] = ac_to_float(&ctx->ac, ac_llvm_extract_elem(&ctx->ac, ddy, i));
+ args.derivs[i] = ac_to_float(&ctx->ac,
+ ac_llvm_extract_elem(&ctx->ac, ddx, i));
+ args.derivs[num_dest_deriv_channels + i] = ac_to_float(&ctx->ac,
+ ac_llvm_extract_elem(&ctx->ac, ddy, i));
}
for (unsigned i = num_src_deriv_channels; i < num_dest_deriv_channels; i++) {
- derivs[i] = ctx->ac.f32_0;
- derivs[num_dest_deriv_channels + i] = ctx->ac.f32_0;
+ args.derivs[i] = ctx->ac.f32_0;
+ args.derivs[num_dest_deriv_channels + i] = ctx->ac.f32_0;
}
}
- if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && coord) {
- for (chan = 0; chan < instr->coord_components; chan++)
- coords[chan] = ac_to_float(&ctx->ac, coords[chan]);
+ if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && args.coords[0]) {
+ for (unsigned chan = 0; chan < instr->coord_components; chan++)
+ args.coords[chan] = ac_to_float(&ctx->ac, args.coords[chan]);
if (instr->coord_components == 3)
- coords[3] = LLVMGetUndef(ctx->ac.f32);
+ args.coords[3] = LLVMGetUndef(ctx->ac.f32);
ac_prepare_cube_coords(&ctx->ac,
instr->op == nir_texop_txd, instr->is_array,
- instr->op == nir_texop_lod, coords, derivs);
- if (num_deriv_comp)
- num_deriv_comp--;
+ instr->op == nir_texop_lod, args.coords, args.derivs);
}
- if (ddx || ddy) {
- for (unsigned i = 0; i < num_deriv_comp * 2; i++)
- address[count++] = derivs[i];
+ /* Texture coordinates fixups */
+ if (instr->coord_components > 1 &&
+ instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
+ instr->is_array &&
+ instr->op != nir_texop_txf) {
+ args.coords[1] = apply_round_slice(&ctx->ac, args.coords[1]);
}
- /* Pack texture coordinates */
- if (coord) {
- address[count++] = coords[0];
- if (instr->coord_components > 1) {
- if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D && instr->is_array && instr->op != nir_texop_txf) {
- coords[1] = apply_round_slice(&ctx->ac, coords[1]);
- }
- address[count++] = coords[1];
- }
- if (instr->coord_components > 2) {
- if ((instr->sampler_dim == GLSL_SAMPLER_DIM_2D ||
- instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
- instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS ||
- instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS) &&
- instr->is_array &&
- instr->op != nir_texop_txf && instr->op != nir_texop_txf_ms) {
- coords[2] = apply_round_slice(&ctx->ac, coords[2]);
- }
- address[count++] = coords[2];
- }
-
- if (ctx->ac.chip_class >= GFX9) {
- LLVMValueRef filler;
- if (instr->op == nir_texop_txf)
- filler = ctx->ac.i32_0;
- else
- filler = LLVMConstReal(ctx->ac.f32, 0.5);
-
- if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
- /* No nir_texop_lod, because it does not take a slice
- * even with array textures. */
- if (instr->is_array && instr->op != nir_texop_lod ) {
- address[count] = address[count - 1];
- address[count - 1] = filler;
- count++;
- } else
- address[count++] = filler;
- }
- }
+ if (instr->coord_components > 2 &&
+ (instr->sampler_dim == GLSL_SAMPLER_DIM_2D ||
+ instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
+ instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS ||
+ instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS) &&
+ instr->is_array &&
+ instr->op != nir_texop_txf && instr->op != nir_texop_txf_ms) {
+ args.coords[2] = apply_round_slice(&ctx->ac, args.coords[2]);
}
- /* Pack LOD */
- if (lod && ((instr->op == nir_texop_txl || instr->op == nir_texop_txf) && !lod_is_zero)) {
- address[count++] = lod;
- } else if (instr->op == nir_texop_txf_ms && sample_index) {
- address[count++] = sample_index;
- } else if(instr->op == nir_texop_txs) {
- count = 0;
- if (lod)
- address[count++] = lod;
+ if (ctx->ac.chip_class >= GFX9 &&
+ instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
+ instr->op != nir_texop_lod) {
+ LLVMValueRef filler;
+ if (instr->op == nir_texop_txf)
+ filler = ctx->ac.i32_0;
else
- address[count++] = ctx->ac.i32_0;
- }
+ filler = LLVMConstReal(ctx->ac.f32, 0.5);
- for (chan = 0; chan < count; chan++) {
- address[chan] = LLVMBuildBitCast(ctx->ac.builder,
- address[chan], ctx->ac.i32, "");
+ if (instr->is_array)
+ args.coords[2] = args.coords[1];
+ args.coords[1] = filler;
}
+ /* Pack sample index */
+ if (instr->op == nir_texop_txf_ms && sample_index)
+ args.coords[instr->coord_components] = sample_index;
+
if (instr->op == nir_texop_samples_identical) {
- LLVMValueRef txf_address[4];
struct ac_image_args txf_args = { 0 };
- unsigned txf_count = count;
- memcpy(txf_address, address, sizeof(txf_address));
-
- if (!instr->is_array)
- txf_address[2] = ctx->ac.i32_0;
- txf_address[3] = ctx->ac.i32_0;
+ memcpy(txf_args.coords, args.coords, sizeof(txf_args.coords));
- set_tex_fetch_args(&ctx->ac, &txf_args, instr, nir_texop_txf,
- fmask_ptr, NULL,
- txf_address, txf_count, 0xf);
-
- result = build_tex_intrinsic(ctx, instr, false, &txf_args);
+ txf_args.dmask = 0xf;
+ txf_args.resource = fmask_ptr;
+ txf_args.dim = instr->is_array ? ac_image_2darray : ac_image_2d;
+ result = build_tex_intrinsic(ctx, instr, &txf_args);
result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
result = emit_int_cmp(&ctx->ac, LLVMIntEQ, result, ctx->ac.i32_0);
if (instr->sampler_dim == GLSL_SAMPLER_DIM_MS &&
instr->op != nir_texop_txs) {
unsigned sample_chan = instr->is_array ? 3 : 2;
- address[sample_chan] = adjust_sample_index_using_fmask(&ctx->ac,
- address[0],
- address[1],
- instr->is_array ? address[2] : NULL,
- address[sample_chan],
- fmask_ptr);
+ args.coords[sample_chan] = adjust_sample_index_using_fmask(
+ &ctx->ac, args.coords[0], args.coords[1],
+ instr->is_array ? args.coords[2] : NULL,
+ args.coords[sample_chan], fmask_ptr);
}
- if (offsets && instr->op == nir_texop_txf) {
+ if (args.offset && instr->op == nir_texop_txf) {
nir_const_value *const_offset =
- nir_src_as_const_value(instr->src[const_src].src);
- int num_offsets = instr->src[const_src].src.ssa->num_components;
+ nir_src_as_const_value(instr->src[offset_src].src);
+ int num_offsets = instr->src[offset_src].src.ssa->num_components;
assert(const_offset);
num_offsets = MIN2(num_offsets, instr->coord_components);
- if (num_offsets > 2)
- address[2] = LLVMBuildAdd(ctx->ac.builder,
- address[2], LLVMConstInt(ctx->ac.i32, const_offset->i32[2], false), "");
- if (num_offsets > 1)
- address[1] = LLVMBuildAdd(ctx->ac.builder,
- address[1], LLVMConstInt(ctx->ac.i32, const_offset->i32[1], false), "");
- address[0] = LLVMBuildAdd(ctx->ac.builder,
- address[0], LLVMConstInt(ctx->ac.i32, const_offset->i32[0], false), "");
-
+ for (unsigned i = 0; i < num_offsets; ++i) {
+ args.coords[i] = LLVMBuildAdd(
+ ctx->ac.builder, args.coords[i],
+ LLVMConstInt(ctx->ac.i32, const_offset->i32[i], false), "");
+ }
+ args.offset = NULL;
}
/* TODO TG4 support */
+ args.dmask = 0xf;
if (instr->op == nir_texop_tg4) {
if (instr->is_shadow)
- dmask = 1;
+ args.dmask = 1;
else
- dmask = 1 << instr->component;
+ args.dmask = 1 << instr->component;
}
- set_tex_fetch_args(&ctx->ac, &args, instr, instr->op,
- res_ptr, samp_ptr, address, count, dmask);
- result = build_tex_intrinsic(ctx, instr, lod_is_zero, &args);
+ if (instr->sampler_dim != GLSL_SAMPLER_DIM_BUF)
+ args.dim = get_ac_sampler_dim(&ctx->ac, instr->sampler_dim, instr->is_array);
+ result = build_tex_intrinsic(ctx, instr, &args);
if (instr->op == nir_texop_query_levels)
result = LLVMBuildExtractElement(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 3, false), "");
if (result) {
assert(instr->dest.is_ssa);
result = ac_to_integer(&ctx->ac, result);
- _mesa_hash_table_insert(ctx->defs, &instr->dest.ssa, result);
+ ctx->ssa_defs[instr->dest.ssa.index] = result;
}
}
LLVMTypeRef type = get_def_type(ctx, &instr->dest.ssa);
LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, "");
- _mesa_hash_table_insert(ctx->defs, &instr->dest.ssa, result);
+ ctx->ssa_defs[instr->dest.ssa.index] = result;
_mesa_hash_table_insert(ctx->phis, instr, result);
}
static void phi_post_pass(struct ac_nir_context *ctx)
{
- struct hash_entry *entry;
hash_table_foreach(ctx->phis, entry) {
visit_post_phi(ctx, (nir_phi_instr*)entry->key,
(LLVMValueRef)entry->data);
else {
undef = LLVMGetUndef(LLVMVectorType(type, num_components));
}
- _mesa_hash_table_insert(ctx->defs, &instr->def, undef);
+ ctx->ssa_defs[instr->def.index] = undef;
}
static void visit_jump(struct ac_llvm_context *ctx,
}
}
+static void visit_deref(struct ac_nir_context *ctx,
+ nir_deref_instr *instr)
+{
+ if (instr->mode != nir_var_shared)
+ return;
+
+ LLVMValueRef result = NULL;
+ switch (instr->deref_type) {
+ case nir_deref_type_var: {
+ struct hash_entry *entry = _mesa_hash_table_search(ctx->vars, instr->var);
+ result = entry->data;
+ break;
+ }
+ case nir_deref_type_struct:
+ result = ac_build_gep0(&ctx->ac, get_src(ctx, instr->parent),
+ LLVMConstInt(ctx->ac.i32, instr->strct.index, 0));
+ break;
+ case nir_deref_type_array:
+ result = ac_build_gep0(&ctx->ac, get_src(ctx, instr->parent),
+ get_src(ctx, instr->arr.index));
+ break;
+ case nir_deref_type_cast:
+ result = get_src(ctx, instr->parent);
+ break;
+ default:
+ unreachable("Unhandled deref_instr deref type");
+ }
+
+ ctx->ssa_defs[instr->dest.ssa.index] = result;
+}
+
static void visit_cf_list(struct ac_nir_context *ctx,
struct exec_list *list);
case nir_instr_type_jump:
visit_jump(&ctx->ac, nir_instr_as_jump(instr));
break;
+ case nir_instr_type_deref:
+ visit_deref(ctx, nir_instr_as_deref(instr));
+ break;
default:
fprintf(stderr, "Unknown NIR instr type: ");
nir_print_instr(instr, stderr);
}
}
+ bool is_16bit = glsl_type_is_16bit(variable->type);
+ LLVMTypeRef type = is_16bit ? ctx->f16 : ctx->f32;
for (unsigned i = 0; i < attrib_count; ++i) {
for (unsigned chan = 0; chan < 4; chan++) {
- abi->outputs[radeon_llvm_reg_index_soa(output_loc + i, chan)] =
- ac_build_alloca_undef(ctx, ctx->f32, "");
+ abi->outputs[ac_llvm_reg_index_soa(output_loc + i, chan)] =
+ ac_build_alloca_undef(ctx, type, "");
}
}
}
LLVMAddGlobalInAddressSpace(
ctx->ac.module, glsl_to_llvm_type(&ctx->ac, variable->type),
variable->name ? variable->name : "",
- AC_LOCAL_ADDR_SPACE);
+ AC_ADDR_SPACE_LDS);
_mesa_hash_table_insert(ctx->vars, variable, shared);
}
}
struct ac_nir_context ctx = {};
struct nir_function *func;
- /* Last minute passes for both radv & radeonsi */
- ac_lower_subgroups(nir);
-
ctx.ac = *ac;
ctx.abi = abi;
func = (struct nir_function *)exec_list_get_head(&nir->functions);
+ nir_index_ssa_defs(func->impl);
+ ctx.ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));
+
setup_locals(&ctx, func);
if (nir->info.stage == MESA_SHADER_COMPUTE)
ctx.abi->outputs);
free(ctx.locals);
+ free(ctx.ssa_defs);
ralloc_free(ctx.defs);
ralloc_free(ctx.phis);
ralloc_free(ctx.vars);
nir_lower_indirect_derefs(nir, indirect_mask);
}
+
+/* Return a bitmask of tess factor channels written by this instruction.
+ *
+ * Inner tess levels occupy the low 4 bits and outer levels the next 4 bits
+ * of the combined mask. The analysis below only compares these masks
+ * against each other, so the bit assignment merely has to be consistent.
+ *
+ * Returns 0 for anything that is not a store to
+ * gl_TessLevel{Inner,Outer}.
+ */
+static unsigned
+get_inst_tessfactor_writemask(nir_intrinsic_instr *intrin)
+{
+ if (intrin->intrinsic != nir_intrinsic_store_deref)
+ return 0;
+
+ nir_variable *var =
+ nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[0]));
+
+ if (var->data.mode != nir_var_shader_out)
+ return 0;
+
+ unsigned writemask = 0;
+ const int location = var->data.location;
+ unsigned first_component = var->data.location_frac;
+ /* For store_deref, src[1] is the value being written; a store has no
+ * SSA destination, so intrin->dest must not be used here.
+ */
+ unsigned num_comps = intrin->src[1].ssa->num_components;
+
+ /* (1u << num_comps) - 1 selects exactly num_comps bits. The previous
+ * form "1 << num_comps + 1" parsed as 1 << (num_comps + 1) due to
+ * operator precedence and produced a mask one bit too wide.
+ */
+ if (location == VARYING_SLOT_TESS_LEVEL_INNER)
+ writemask = ((1u << num_comps) - 1) << first_component;
+ else if (location == VARYING_SLOT_TESS_LEVEL_OUTER)
+ writemask = (((1u << num_comps) - 1) << first_component) << 4;
+
+ return writemask;
+}
+
+/* Recursively scan one control-flow node of a tess control shader.
+ *
+ * upper_block_tf_writemask accumulates tess factor channels written
+ * unconditionally in the current barrier-delimited code segment;
+ * cond_block_tf_writemask accumulates channels written anywhere under
+ * conditional control flow. tessfactors_are_def_in_all_invocs is ANDed
+ * with the verdict for each segment. is_nested_cf is true once we are
+ * inside an if/loop, where a barrier makes the analysis give up.
+ */
+static void
+scan_tess_ctrl(nir_cf_node *cf_node, unsigned *upper_block_tf_writemask,
+ unsigned *cond_block_tf_writemask,
+ bool *tessfactors_are_def_in_all_invocs, bool is_nested_cf)
+{
+ switch (cf_node->type) {
+ case nir_cf_node_block: {
+ nir_block *block = nir_cf_node_as_block(cf_node);
+ nir_foreach_instr(instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+ if (intrin->intrinsic == nir_intrinsic_barrier) {
+
+ /* If we find a barrier in nested control flow put this in the
+ * too hard basket. In GLSL this is not possible but it is in
+ * SPIR-V.
+ */
+ if (is_nested_cf) {
+ *tessfactors_are_def_in_all_invocs = false;
+ return;
+ }
+
+ /* The following case must be prevented:
+ * gl_TessLevelInner = ...;
+ * barrier();
+ * if (gl_InvocationID == 1)
+ * gl_TessLevelInner = ...;
+ *
+ * If you consider disjoint code segments separated by barriers, each
+ * such segment that writes tess factor channels should write the same
+ * channels in all codepaths within that segment.
+ *
+ * Note: the masks themselves must be tested, not the
+ * pointers (which are always non-NULL here).
+ */
+ if (*upper_block_tf_writemask || *cond_block_tf_writemask) {
+ /* Accumulate the result: */
+ *tessfactors_are_def_in_all_invocs &=
+ !(*cond_block_tf_writemask & ~(*upper_block_tf_writemask));
+
+ /* Analyze the next code segment from scratch. */
+ *upper_block_tf_writemask = 0;
+ *cond_block_tf_writemask = 0;
+ }
+ } else
+ *upper_block_tf_writemask |= get_inst_tessfactor_writemask(intrin);
+ }
+
+ break;
+ }
+ case nir_cf_node_if: {
+ unsigned then_tessfactor_writemask = 0;
+ unsigned else_tessfactor_writemask = 0;
+
+ nir_if *if_stmt = nir_cf_node_as_if(cf_node);
+ foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->then_list) {
+ scan_tess_ctrl(nested_node, &then_tessfactor_writemask,
+ cond_block_tf_writemask,
+ tessfactors_are_def_in_all_invocs, true);
+ }
+
+ foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->else_list) {
+ scan_tess_ctrl(nested_node, &else_tessfactor_writemask,
+ cond_block_tf_writemask,
+ tessfactors_are_def_in_all_invocs, true);
+ }
+
+ if (then_tessfactor_writemask || else_tessfactor_writemask) {
+ /* If both statements write the same tess factor channels,
+ * we can say that the upper block writes them too.
+ */
+ *upper_block_tf_writemask |= then_tessfactor_writemask &
+ else_tessfactor_writemask;
+ *cond_block_tf_writemask |= then_tessfactor_writemask |
+ else_tessfactor_writemask;
+ }
+
+ break;
+ }
+ case nir_cf_node_loop: {
+ /* Loop bodies may execute zero times, so everything written
+ * inside is conditional: route it into the cond mask only.
+ */
+ nir_loop *loop = nir_cf_node_as_loop(cf_node);
+ foreach_list_typed(nir_cf_node, nested_node, node, &loop->body) {
+ scan_tess_ctrl(nested_node, cond_block_tf_writemask,
+ cond_block_tf_writemask,
+ tessfactors_are_def_in_all_invocs, true);
+ }
+
+ break;
+ }
+ default:
+ unreachable("unknown cf node type");
+ }
+}
+
+/* Determine whether every TCS invocation is guaranteed to define the tess
+ * factors.
+ *
+ * If all codepaths write the tess factors, then all invocations define
+ * them and the backend can omit the cross-invocation defaults. Each tess
+ * factor channel is tracked separately in the writemasks.
+ */
+bool
+ac_are_tessfactors_def_in_all_invocs(const struct nir_shader *nir)
+{
+ assert(nir->info.stage == MESA_SHADER_TESS_CTRL);
+
+ /* Channels written unconditionally in the current barrier-delimited
+ * segment, and channels written somewhere under control flow.
+ */
+ unsigned main_block_tf_writemask = 0;
+ unsigned cond_block_tf_writemask = 0;
+
+ /* Start optimistic; scan_tess_ctrl() ANDs in a verdict for every
+ * segment separated by barriers. If tess factors are never written at
+ * all, that is a shader bug and the result does not matter.
+ */
+ bool result = true;
+
+ nir_foreach_function(function, nir) {
+ if (!function->impl)
+ continue;
+
+ foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
+ scan_tess_ctrl(node, &main_block_tf_writemask,
+ &cond_block_tf_writemask,
+ &result, false);
+ }
+ }
+
+ /* Fold in the final code segment (the one with no trailing barrier). */
+ if (main_block_tf_writemask || cond_block_tf_writemask)
+ result &= !(cond_block_tf_writemask & ~main_block_tf_writemask);
+
+ return result;
+}