struct ac_nir_context {
struct ac_llvm_context ac;
struct ac_shader_abi *abi;
+ const struct ac_shader_args *args;
gl_shader_stage stage;
shader_info *info;
LLVMValueRef *locals;
};
+static LLVMValueRef get_sampler_desc_index(struct ac_nir_context *ctx,
+ nir_deref_instr *deref_instr,
+ const nir_instr *instr,
+ bool image);
+
static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
nir_deref_instr *deref_instr,
enum ac_descriptor_type desc_type,
const nir_instr *instr,
+ LLVMValueRef index,
bool image, bool write);
static LLVMValueRef
-get_memory_ptr(struct ac_nir_context *ctx, nir_src src)
+get_memory_ptr(struct ac_nir_context *ctx, nir_src src, unsigned bit_size)
{
LLVMValueRef ptr = get_src(ctx, src);
ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ac.lds, &ptr, 1, "");
int addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
+ LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, bit_size);
+
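+ /* Cast the element pointer to the access bit width so that loads and
+ * stores of any size (8/16/32/64 bits) can share this helper. */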
return LLVMBuildBitCast(ctx->ac.builder, ptr,
- LLVMPointerType(ctx->ac.i32, addr_space), "");
+ LLVMPointerType(type, addr_space), "");
}
static LLVMBasicBlockRef get_block(struct ac_nir_context *nir,
static LLVMValueRef emit_bcsel(struct ac_llvm_context *ctx,
LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
+ LLVMTypeRef src1_type = LLVMTypeOf(src1);
+ LLVMTypeRef src2_type = LLVMTypeOf(src2);
+
assert(LLVMGetTypeKind(LLVMTypeOf(src0)) != LLVMVectorTypeKind);
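+ /* bcsel sources may mix pointer and integer values, but LLVMBuildSelect
+ * requires both operands to have the same type, so convert the integer
+ * side to the pointer type. */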
+ if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
+ LLVMGetTypeKind(src2_type) != LLVMPointerTypeKind) {
+ src2 = LLVMBuildIntToPtr(ctx->builder, src2, src1_type, "");
+ } else if (LLVMGetTypeKind(src2_type) == LLVMPointerTypeKind &&
+ LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
+ src1 = LLVMBuildIntToPtr(ctx->builder, src1, src2_type, "");
+ }
+
LLVMValueRef v = LLVMBuildICmp(ctx->builder, LLVMIntNE, src0,
ctx->i32_0, "");
return LLVMBuildSelect(ctx->builder, v,
*/
LLVMValueRef temp, cond2;
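+ /* 0x38800000 is 2^-14 in f32, the smallest normal half-float value;
+ * any smaller non-zero magnitude is an f16 denormal and is flushed to
+ * zero below. */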
temp = emit_intrin_1f_param(ctx, "llvm.fabs", ctx->f32, result);
- cond = LLVMBuildFCmp(ctx->builder, LLVMRealUGT,
+ cond = LLVMBuildFCmp(ctx->builder, LLVMRealOGT,
LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->i32, 0x38800000, false), ctx->f32, ""),
temp, "");
- cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealUNE,
+ cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealONE,
temp, ctx->f32_0, "");
cond = LLVMBuildAnd(ctx->builder, cond, cond2, "");
result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
return result;
}
+struct waterfall_context {
+ LLVMBasicBlockRef phi_bb[2];
+ bool use_waterfall;
+};
+
+/* To deal with divergent descriptors, we can create a loop that handles all
+ * lanes with the same descriptor on a given iteration (henceforth a
+ * waterfall loop).
+ *
+ * These helpers create the begin and end of the loop, leaving the caller
+ * to implement the body (see the usage sketch after exit_waterfall below).
+ *
+ * params:
+ * - ctx is the usual nir context
+ * - wctx is a temporary struct containing some loop info. Can be left uninitialized.
+ * - value is the possibly divergent value for which we build the loop
+ * - divergent is whether value is actually divergent. If false, we just
+ *   pass things through.
+ */
+static LLVMValueRef enter_waterfall(struct ac_nir_context *ctx,
+ struct waterfall_context *wctx,
+ LLVMValueRef value, bool divergent)
+{
+ /* If the app claims the value is divergent but it is constant, we can
+ * end up with a dynamic index of NULL. */
+ if (!value)
+ divergent = false;
+
+ wctx->use_waterfall = divergent;
+ if (!divergent)
+ return value;
+
+ ac_build_bgnloop(&ctx->ac, 6000);
+
+ LLVMValueRef scalar_value = ac_build_readlane(&ctx->ac, value, NULL);
+
+ LLVMValueRef active = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, value,
+ scalar_value, "uniform_active");
+
+ wctx->phi_bb[0] = LLVMGetInsertBlock(ctx->ac.builder);
+ ac_build_ifcc(&ctx->ac, active, 6001);
+
+ return scalar_value;
+}
+
+static LLVMValueRef exit_waterfall(struct ac_nir_context *ctx,
+ struct waterfall_context *wctx,
+ LLVMValueRef value)
+{
+ LLVMValueRef ret = NULL;
+ LLVMValueRef phi_src[2];
+ LLVMValueRef cc_phi_src[2] = {
+ LLVMConstInt(ctx->ac.i32, 0, false),
+ LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
+ };
+
+ if (!wctx->use_waterfall)
+ return value;
+
+ wctx->phi_bb[1] = LLVMGetInsertBlock(ctx->ac.builder);
+
+ ac_build_endif(&ctx->ac, 6001);
+
+ if (value) {
+ phi_src[0] = LLVMGetUndef(LLVMTypeOf(value));
+ phi_src[1] = value;
+
+ ret = ac_build_phi(&ctx->ac, LLVMTypeOf(value), 2, phi_src, wctx->phi_bb);
+ }
+
+ /*
+ * By using the optimization barrier on the exit decision, we decouple
+ * the operations from the break, and hence avoid LLVM hoisting the
+ * operation into the break block.
+ */
+ LLVMValueRef cc = ac_build_phi(&ctx->ac, ctx->ac.i32, 2, cc_phi_src, wctx->phi_bb);
+ ac_build_optimization_barrier(&ctx->ac, &cc);
+
+ LLVMValueRef active = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, cc, ctx->ac.i32_0, "uniform_active2");
+ ac_build_ifcc(&ctx->ac, active, 6002);
+ ac_build_break(&ctx->ac);
+ ac_build_endif(&ctx->ac, 6002);
+
+ ac_build_endloop(&ctx->ac, 6000);
+ return ret;
+}
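+/* A minimal usage sketch; build_op stands in for whatever body the caller
+ * emits between the two calls:
+ *
+ *    struct waterfall_context wctx;
+ *    LLVMValueRef idx = enter_waterfall(ctx, &wctx, index, divergent);
+ *    LLVMValueRef res = build_op(ctx, idx);  (emitted once, iterated at
+ *                                             runtime per distinct index)
+ *    res = exit_waterfall(ctx, &wctx, res);
+ */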
+
static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
{
LLVMValueRef src[4], result = NULL;
case nir_op_fneg:
src[0] = ac_to_float(&ctx->ac, src[0]);
result = LLVMBuildFNeg(ctx->ac.builder, src[0], "");
+ if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
+ /* fneg will be optimized by the backend compiler with the sign
+ * bit removed via XOR. This is probably an LLVM bug.
+ */
+ result = ac_build_canonicalize(&ctx->ac, result,
+ instr->dest.dest.ssa.bit_size);
+ }
break;
case nir_op_ineg:
result = LLVMBuildNeg(ctx->ac.builder, src[0], "");
result = LLVMBuildFMul(ctx->ac.builder, src[0], src[1], "");
break;
case nir_op_frcp:
- src[0] = ac_to_float(&ctx->ac, src[0]);
- result = ac_build_fdiv(&ctx->ac, LLVMConstReal(LLVMTypeOf(src[0]), 1.0), src[0]);
+ result = emit_intrin_1f_param(&ctx->ac, "llvm.amdgcn.rcp",
+ ac_to_float_type(&ctx->ac, def_type), src[0]);
break;
case nir_op_iand:
result = LLVMBuildAnd(ctx->ac.builder, src[0], src[1], "");
case nir_op_fabs:
result = emit_intrin_1f_param(&ctx->ac, "llvm.fabs",
ac_to_float_type(&ctx->ac, def_type), src[0]);
+ if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
+ /* fabs will be optimized by the backend compiler with the sign
+ * bit removed via AND.
+ */
+ result = ac_build_canonicalize(&ctx->ac, result,
+ instr->dest.dest.ssa.bit_size);
+ }
break;
case nir_op_iabs:
result = emit_iabs(&ctx->ac, src[0]);
ac_to_float_type(&ctx->ac, def_type), src[0]);
break;
case nir_op_frsq:
- result = emit_intrin_1f_param(&ctx->ac, "llvm.sqrt",
- ac_to_float_type(&ctx->ac, def_type), src[0]);
- result = ac_build_fdiv(&ctx->ac, LLVMConstReal(LLVMTypeOf(result), 1.0), result);
+ result = emit_intrin_1f_param(&ctx->ac, "llvm.amdgcn.rsq",
+ ac_to_float_type(&ctx->ac, def_type), src[0]);
break;
case nir_op_frexp_exp:
src[0] = ac_to_float(&ctx->ac, src[0]);
if (ctx->ac.chip_class < GFX9 &&
instr->dest.dest.ssa.bit_size == 32) {
/* Only pre-GFX9 chips do not flush denorms. */
- result = emit_intrin_1f_param(&ctx->ac, "llvm.canonicalize",
- ac_to_float_type(&ctx->ac, def_type),
- result);
+ result = ac_build_canonicalize(&ctx->ac, result,
+ instr->dest.dest.ssa.bit_size);
}
break;
case nir_op_fmin:
if (ctx->ac.chip_class < GFX9 &&
instr->dest.dest.ssa.bit_size == 32) {
/* Only pre-GFX9 chips do not flush denorms. */
- result = emit_intrin_1f_param(&ctx->ac, "llvm.canonicalize",
- ac_to_float_type(&ctx->ac, def_type),
- result);
+ result = ac_build_canonicalize(&ctx->ac, result,
+ instr->dest.dest.ssa.bit_size);
}
break;
case nir_op_ffma:
case nir_texop_lod:
args->opcode = ac_image_get_lod;
break;
+ case nir_texop_fragment_fetch:
+ case nir_texop_fragment_mask_fetch:
+ args->opcode = ac_image_load;
+ args->level_zero = false;
+ break;
default:
break;
}
offset += LLVMConstIntGetZExtValue(src0);
offset /= 4;
- offset -= ctx->abi->base_inline_push_consts;
+ offset -= ctx->args->base_inline_push_consts;
- if (offset + count <= ctx->abi->num_inline_push_consts) {
+ unsigned num_inline_push_consts = ctx->args->num_inline_push_consts;
+ if (offset + count <= num_inline_push_consts) {
+ LLVMValueRef push_constants[num_inline_push_consts];
+ for (unsigned i = 0; i < num_inline_push_consts; i++)
+ push_constants[i] = ac_get_arg(&ctx->ac,
+ ctx->args->inline_push_consts[i]);
return ac_build_gather_values(&ctx->ac,
- ctx->abi->inline_push_consts + offset,
+ push_constants + offset,
count);
}
}
- ptr = LLVMBuildGEP(ctx->ac.builder, ctx->abi->push_constants, &addr, 1, "");
+ ptr = LLVMBuildGEP(ctx->ac.builder,
+ ac_get_arg(&ctx->ac, ctx->args->push_constants), &addr, 1, "");
if (instr->dest.ssa.bit_size == 8) {
unsigned load_dwords = instr->dest.ssa.num_components > 1 ? 2 : 1;
return cache_policy;
}
+static LLVMValueRef enter_waterfall_ssbo(struct ac_nir_context *ctx,
+ struct waterfall_context *wctx,
+ const nir_intrinsic_instr *instr,
+ nir_src src)
+{
+ return enter_waterfall(ctx, wctx, get_src(ctx, src),
+ nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
+}
+
static void visit_store_ssbo(struct ac_nir_context *ctx,
nir_intrinsic_instr *instr)
{
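+ /* postponed_kill (set up when demote is used) is true for lanes that
+ * are still live; guard the store so demoted helper lanes produce no
+ * side effects. */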
+ if (ctx->ac.postponed_kill) {
+ LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
+ ctx->ac.postponed_kill, "");
+ ac_build_ifcc(&ctx->ac, cond, 7000);
+ }
+
LLVMValueRef src_data = get_src(ctx, instr->src[0]);
int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 8;
unsigned writemask = nir_intrinsic_write_mask(instr);
bool writeonly_memory = access & ACCESS_NON_READABLE;
unsigned cache_policy = get_cache_policy(ctx, access, false, writeonly_memory);
- LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi,
- get_src(ctx, instr->src[1]), true);
+ struct waterfall_context wctx;
+ LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[1]);
+
+ LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc_base, true);
LLVMValueRef base_data = src_data;
base_data = ac_trim_vector(&ctx->ac, base_data, instr->num_components);
LLVMValueRef base_offset = get_src(ctx, instr->src[2]);
ac_build_buffer_store_dword(&ctx->ac, rsrc, data,
num_channels, offset,
ctx->ac.i32_0, 0,
- cache_policy, false);
+ cache_policy);
}
}
+
+ exit_waterfall(ctx, &wctx, NULL);
+
+ if (ctx->ac.postponed_kill)
+ ac_build_endif(&ctx->ac, 7000);
}
static LLVMValueRef emit_ssbo_comp_swap_64(struct ac_nir_context *ctx,
}
static LLVMValueRef visit_atomic_ssbo(struct ac_nir_context *ctx,
- const nir_intrinsic_instr *instr)
+ nir_intrinsic_instr *instr)
{
+ if (ctx->ac.postponed_kill) {
+ LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
+ ctx->ac.postponed_kill, "");
+ ac_build_ifcc(&ctx->ac, cond, 7001);
+ }
+
LLVMTypeRef return_type = LLVMTypeOf(get_src(ctx, instr->src[2]));
const char *op;
char name[64], type[8];
LLVMValueRef params[6], descriptor;
+ LLVMValueRef result;
int arg_count = 0;
+ struct waterfall_context wctx;
+ LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);
+
switch (instr->intrinsic) {
case nir_intrinsic_ssbo_atomic_add:
op = "add";
}
descriptor = ctx->abi->load_ssbo(ctx->abi,
- get_src(ctx, instr->src[0]),
+ rsrc_base,
true);
if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap &&
return_type == ctx->ac.i64) {
- return emit_ssbo_comp_swap_64(ctx, descriptor,
- get_src(ctx, instr->src[1]),
- get_src(ctx, instr->src[2]),
- get_src(ctx, instr->src[3]));
- }
- if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) {
- params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[3]), 0);
- }
- params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
- params[arg_count++] = descriptor;
+ result = emit_ssbo_comp_swap_64(ctx, descriptor,
+ get_src(ctx, instr->src[1]),
+ get_src(ctx, instr->src[2]),
+ get_src(ctx, instr->src[3]));
+ } else {
+ if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) {
+ params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[3]), 0);
+ }
+ params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
+ params[arg_count++] = descriptor;
- if (LLVM_VERSION_MAJOR >= 9) {
- /* XXX: The new raw/struct atomic intrinsics are buggy with
- * LLVM 8, see r358579.
- */
- params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
- params[arg_count++] = ctx->ac.i32_0; /* soffset */
- params[arg_count++] = ctx->ac.i32_0; /* slc */
+ if (LLVM_VERSION_MAJOR >= 9) {
+ /* XXX: The new raw/struct atomic intrinsics are buggy with
+ * LLVM 8, see r358579.
+ */
+ params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
+ params[arg_count++] = ctx->ac.i32_0; /* soffset */
+ params[arg_count++] = ctx->ac.i32_0; /* slc */
+
+ ac_build_type_name_for_intr(return_type, type, sizeof(type));
+ snprintf(name, sizeof(name),
+ "llvm.amdgcn.raw.buffer.atomic.%s.%s", op, type);
+ } else {
+ params[arg_count++] = ctx->ac.i32_0; /* vindex */
+ params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
+ params[arg_count++] = ctx->ac.i1false; /* slc */
- ac_build_type_name_for_intr(return_type, type, sizeof(type));
- snprintf(name, sizeof(name),
- "llvm.amdgcn.raw.buffer.atomic.%s.%s", op, type);
- } else {
- params[arg_count++] = ctx->ac.i32_0; /* vindex */
- params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
- params[arg_count++] = ctx->ac.i1false; /* slc */
+ assert(return_type == ctx->ac.i32);
+ snprintf(name, sizeof(name),
+ "llvm.amdgcn.buffer.atomic.%s", op);
+ }
- assert(return_type == ctx->ac.i32);
- snprintf(name, sizeof(name),
- "llvm.amdgcn.buffer.atomic.%s", op);
+ result = ac_build_intrinsic(&ctx->ac, name, return_type, params,
+ arg_count, 0);
}
- return ac_build_intrinsic(&ctx->ac, name, return_type, params,
- arg_count, 0);
+ result = exit_waterfall(ctx, &wctx, result);
+ if (ctx->ac.postponed_kill)
+ ac_build_endif(&ctx->ac, 7001);
+ return result;
}
static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx,
- const nir_intrinsic_instr *instr)
+ nir_intrinsic_instr *instr)
{
+ struct waterfall_context wctx;
+ LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);
+
int elem_size_bytes = instr->dest.ssa.bit_size / 8;
int num_components = instr->num_components;
enum gl_access_qualifier access = nir_intrinsic_access(instr);
unsigned cache_policy = get_cache_policy(ctx, access, false, false);
LLVMValueRef offset = get_src(ctx, instr->src[1]);
- LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi,
- get_src(ctx, instr->src[0]), false);
+ LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc_base, false);
LLVMValueRef vindex = ctx->ac.i32_0;
LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.ssa);
i += num_elems;
}
- return ac_build_gather_values(&ctx->ac, results, num_components);
+ LLVMValueRef ret = ac_build_gather_values(&ctx->ac, results, num_components);
+ return exit_waterfall(ctx, &wctx, ret);
+}
+
+static LLVMValueRef enter_waterfall_ubo(struct ac_nir_context *ctx,
+ struct waterfall_context *wctx,
+ const nir_intrinsic_instr *instr)
+{
+ return enter_waterfall(ctx, wctx, get_src(ctx, instr->src[0]),
+ nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}
static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx,
- const nir_intrinsic_instr *instr)
+ nir_intrinsic_instr *instr)
{
+ struct waterfall_context wctx;
+ LLVMValueRef rsrc_base = enter_waterfall_ubo(ctx, &wctx, instr);
+
LLVMValueRef ret;
- LLVMValueRef rsrc = get_src(ctx, instr->src[0]);
+ LLVMValueRef rsrc = rsrc_base;
LLVMValueRef offset = get_src(ctx, instr->src[1]);
int num_components = instr->num_components;
ret = ac_trim_vector(&ctx->ac, ret, num_components);
}
- return LLVMBuildBitCast(ctx->ac.builder, ret,
+ ret = LLVMBuildBitCast(ctx->ac.builder, ret,
get_def_type(ctx, &instr->dest.ssa), "");
+
+ return exit_waterfall(ctx, &wctx, ret);
}
static void
unsigned location = var->data.location;
unsigned driver_location = var->data.driver_location;
- const bool is_patch = var->data.patch;
+ const bool is_patch = var->data.patch ||
+ var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
+ var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;
const bool is_compact = var->data.compact;
get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
}
}
break;
- case nir_var_mem_shared: {
- LLVMValueRef address = get_src(ctx, instr->src[0]);
- LLVMValueRef val = LLVMBuildLoad(ctx->ac.builder, address, "");
- return LLVMBuildBitCast(ctx->ac.builder, val,
- get_def_type(ctx, &instr->dest.ssa),
- "");
- }
case nir_var_shader_out:
if (ctx->stage == MESA_SHADER_TESS_CTRL) {
return load_tess_varyings(ctx, instr, false);
visit_store_var(struct ac_nir_context *ctx,
nir_intrinsic_instr *instr)
{
+ if (ctx->ac.postponed_kill) {
+ LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
+ ctx->ac.postponed_kill, "");
+ ac_build_ifcc(&ctx->ac, cond, 7002);
+ }
+
nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
nir_variable *var = nir_deref_instr_get_variable(deref);
LLVMValueRef vertex_index = NULL;
LLVMValueRef indir_index = NULL;
unsigned const_index = 0;
- const bool is_patch = var->data.patch;
+ const bool is_patch = var->data.patch ||
+ var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
+ var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;
get_deref_offset(ctx, deref, false, NULL,
is_patch ? NULL : &vertex_index,
ctx->abi->store_tcs_outputs(ctx->abi, var,
vertex_index, indir_index,
const_index, src, writemask);
- return;
+ break;
}
for (unsigned chan = 0; chan < 8; chan++) {
}
break;
- case nir_var_mem_global:
- case nir_var_mem_shared: {
+ case nir_var_mem_global: {
int writemask = instr->const_index[0];
LLVMValueRef address = get_src(ctx, instr->src[0]);
LLVMValueRef val = get_src(ctx, instr->src[1]);
abort();
break;
}
+
+ if (ctx->ac.postponed_kill)
+ ac_build_endif(&ctx->ac, 7002);
}
static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
static LLVMValueRef get_image_descriptor(struct ac_nir_context *ctx,
const nir_intrinsic_instr *instr,
+ LLVMValueRef dynamic_index,
enum ac_descriptor_type desc_type,
bool write)
{
instr->src[0].ssa->parent_instr->type == nir_instr_type_deref ?
nir_instr_as_deref(instr->src[0].ssa->parent_instr) : NULL;
- return get_sampler_desc(ctx, deref_instr, desc_type, &instr->instr, true, write);
+ return get_sampler_desc(ctx, deref_instr, desc_type, &instr->instr, dynamic_index, true, write);
}
static void get_image_coords(struct ac_nir_context *ctx,
const nir_intrinsic_instr *instr,
+ LLVMValueRef dynamic_desc_index,
struct ac_image_args *args,
enum glsl_sampler_dim dim,
bool is_array)
fmask_load_address[2],
sample_index,
get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
- AC_DESC_FMASK, &instr->instr, true, false));
+ AC_DESC_FMASK, &instr->instr, dynamic_desc_index, true, false));
}
if (count == 1 && !gfx9_1d) {
if (instr->src[1].ssa->num_components)
static LLVMValueRef get_image_buffer_descriptor(struct ac_nir_context *ctx,
const nir_intrinsic_instr *instr,
+ LLVMValueRef dynamic_index,
bool write, bool atomic)
{
- LLVMValueRef rsrc = get_image_descriptor(ctx, instr, AC_DESC_BUFFER, write);
+ LLVMValueRef rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, write);
if (ctx->ac.chip_class == GFX9 && LLVM_VERSION_MAJOR < 9 && atomic) {
LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 2, 0), "");
LLVMValueRef stride = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 1, 0), "");
return rsrc;
}
+static LLVMValueRef enter_waterfall_image(struct ac_nir_context *ctx,
+ struct waterfall_context *wctx,
+ const nir_intrinsic_instr *instr)
+{
+ nir_deref_instr *deref_instr = NULL;
+
+ if (instr->src[0].ssa->parent_instr->type == nir_instr_type_deref)
+ deref_instr = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
+
+ LLVMValueRef value = get_sampler_desc_index(ctx, deref_instr, &instr->instr, true);
+ return enter_waterfall(ctx, wctx, value, nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
+}
+
static LLVMValueRef visit_image_load(struct ac_nir_context *ctx,
const nir_intrinsic_instr *instr,
bool bindless)
const struct glsl_type *type = image_deref->type;
const nir_variable *var = nir_deref_instr_get_variable(image_deref);
dim = glsl_get_sampler_dim(type);
- access = var->data.image.access;
+ access = var->data.access;
is_array = glsl_sampler_type_is_array(type);
}
+ struct waterfall_context wctx;
+ LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
+
struct ac_image_args args = {};
args.cache_policy = get_cache_policy(ctx, access, false, false);
unsigned num_channels = util_last_bit(mask);
LLVMValueRef rsrc, vindex;
- rsrc = get_image_buffer_descriptor(ctx, instr, false, false);
+ rsrc = get_image_buffer_descriptor(ctx, instr, dynamic_index, false, false);
vindex = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
ctx->ac.i32_0, "");
res = ac_trim_vector(&ctx->ac, res, instr->dest.ssa.num_components);
res = ac_to_integer(&ctx->ac, res);
} else {
- args.opcode = ac_image_load;
- args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, false);
- get_image_coords(ctx, instr, &args, dim, is_array);
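+ /* Use the plain load opcode only when the LOD source is a constant
+ * zero; otherwise use the _mip variant with an explicit LOD. */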
+ bool level_zero = nir_src_is_const(instr->src[3]) && nir_src_as_uint(instr->src[3]) == 0;
+
+ args.opcode = level_zero ? ac_image_load : ac_image_load_mip;
+ args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);
+ get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
+ if (!level_zero)
+ args.lod = get_src(ctx, instr->src[3]);
args.dmask = 15;
args.attributes = AC_FUNC_ATTR_READONLY;
res = ac_build_image_opcode(&ctx->ac, &args);
}
- return res;
+ return exit_waterfall(ctx, &wctx, res);
}
static void visit_image_store(struct ac_nir_context *ctx,
- nir_intrinsic_instr *instr,
+ const nir_intrinsic_instr *instr,
bool bindless)
{
-
+ if (ctx->ac.postponed_kill) {
+ LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
+ ctx->ac.postponed_kill, "");
+ ac_build_ifcc(&ctx->ac, cond, 7003);
+ }
enum glsl_sampler_dim dim;
enum gl_access_qualifier access;
bool is_array;
+
if (bindless) {
dim = nir_intrinsic_image_dim(instr);
access = nir_intrinsic_access(instr);
const struct glsl_type *type = image_deref->type;
const nir_variable *var = nir_deref_instr_get_variable(image_deref);
dim = glsl_get_sampler_dim(type);
- access = var->data.image.access;
+ access = var->data.access;
is_array = glsl_sampler_type_is_array(type);
}
+ struct waterfall_context wctx;
+ LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
+
bool writeonly_memory = access & ACCESS_NON_READABLE;
struct ac_image_args args = {};
args.cache_policy = get_cache_policy(ctx, access, true, writeonly_memory);
if (dim == GLSL_SAMPLER_DIM_BUF) {
- LLVMValueRef rsrc = get_image_buffer_descriptor(ctx, instr, true, false);
+ LLVMValueRef rsrc = get_image_buffer_descriptor(ctx, instr, dynamic_index, true, false);
LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
unsigned src_channels = ac_get_llvm_num_components(src);
LLVMValueRef vindex;
ctx->ac.i32_0, src_channels,
args.cache_policy);
} else {
- args.opcode = ac_image_store;
+ bool level_zero = nir_src_is_const(instr->src[4]) && nir_src_as_uint(instr->src[4]) == 0;
+
+ args.opcode = level_zero ? ac_image_store : ac_image_store_mip;
args.data[0] = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
- args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, true);
- get_image_coords(ctx, instr, &args, dim, is_array);
+ args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, true);
+ get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
+ if (!level_zero)
+ args.lod = get_src(ctx, instr->src[4]);
args.dmask = 15;
ac_build_image_opcode(&ctx->ac, &args);
}
+ exit_waterfall(ctx, &wctx, NULL);
+ if (ctx->ac.postponed_kill)
+ ac_build_endif(&ctx->ac, 7003);
}
static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx,
- const nir_intrinsic_instr *instr,
- bool bindless)
+ const nir_intrinsic_instr *instr,
+ bool bindless)
{
+ if (ctx->ac.postponed_kill) {
+ LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
+ ctx->ac.postponed_kill, "");
+ ac_build_ifcc(&ctx->ac, cond, 7004);
+ }
+
LLVMValueRef params[7];
int param_count = 0;
is_array = glsl_sampler_type_is_array(type);
}
+ struct waterfall_context wctx;
+ LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
+
switch (instr->intrinsic) {
case nir_intrinsic_bindless_image_atomic_add:
case nir_intrinsic_image_deref_atomic_add:
params[param_count++] = get_src(ctx, instr->src[4]);
params[param_count++] = get_src(ctx, instr->src[3]);
+ LLVMValueRef result;
if (dim == GLSL_SAMPLER_DIM_BUF) {
- params[param_count++] = get_image_buffer_descriptor(ctx, instr, true, true);
+ params[param_count++] = get_image_buffer_descriptor(ctx, instr, dynamic_index, true, true);
params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
ctx->ac.i32_0, ""); /* vindex */
params[param_count++] = ctx->ac.i32_0; /* voffset */
}
assert(length < sizeof(intrinsic_name));
- return ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->ac.i32,
- params, param_count, 0);
+ result = ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->ac.i32,
+ params, param_count, 0);
} else {
struct ac_image_args args = {};
args.opcode = cmpswap ? ac_image_atomic_cmpswap : ac_image_atomic;
args.data[0] = params[0];
if (cmpswap)
args.data[1] = params[1];
- args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, true);
- get_image_coords(ctx, instr, &args, dim, is_array);
+ args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, true);
+ get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
- return ac_build_image_opcode(&ctx->ac, &args);
+ result = ac_build_image_opcode(&ctx->ac, &args);
}
+
+ result = exit_waterfall(ctx, &wctx, result);
+ if (ctx->ac.postponed_kill)
+ ac_build_endif(&ctx->ac, 7004);
+ return result;
}
static LLVMValueRef visit_image_samples(struct ac_nir_context *ctx,
- const nir_intrinsic_instr *instr)
+ nir_intrinsic_instr *instr)
{
- LLVMValueRef rsrc = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, false);
+ struct waterfall_context wctx;
+ LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
+ LLVMValueRef rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);
- return ac_build_image_get_sample_count(&ctx->ac, rsrc);
+ LLVMValueRef ret = ac_build_image_get_sample_count(&ctx->ac, rsrc);
+
+ return exit_waterfall(ctx, &wctx, ret);
}
static LLVMValueRef visit_image_size(struct ac_nir_context *ctx,
is_array = glsl_sampler_type_is_array(type);
}
- if (dim == GLSL_SAMPLER_DIM_BUF)
- return get_buffer_size(ctx, get_image_descriptor(ctx, instr, AC_DESC_BUFFER, false), true);
+ struct waterfall_context wctx;
+ LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
- struct ac_image_args args = { 0 };
+ if (dim == GLSL_SAMPLER_DIM_BUF) {
+ res = get_buffer_size(ctx, get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, false), true);
+ } else {
- args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
- args.dmask = 0xf;
- args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, false);
- args.opcode = ac_image_get_resinfo;
- args.lod = ctx->ac.i32_0;
- args.attributes = AC_FUNC_ATTR_READNONE;
+ struct ac_image_args args = { 0 };
- res = ac_build_image_opcode(&ctx->ac, &args);
+ args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
+ args.dmask = 0xf;
+ args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);
+ args.opcode = ac_image_get_resinfo;
+ args.lod = ctx->ac.i32_0;
+ args.attributes = AC_FUNC_ATTR_READNONE;
- LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
+ res = ac_build_image_opcode(&ctx->ac, &args);
- if (dim == GLSL_SAMPLER_DIM_CUBE && is_array) {
- LLVMValueRef six = LLVMConstInt(ctx->ac.i32, 6, false);
- LLVMValueRef z = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
- z = LLVMBuildSDiv(ctx->ac.builder, z, six, "");
- res = LLVMBuildInsertElement(ctx->ac.builder, res, z, two, "");
- }
- if (ctx->ac.chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D && is_array) {
- LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
- res = LLVMBuildInsertElement(ctx->ac.builder, res, layers,
- ctx->ac.i32_1, "");
+ LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
+
+ if (dim == GLSL_SAMPLER_DIM_CUBE && is_array) {
+ LLVMValueRef six = LLVMConstInt(ctx->ac.i32, 6, false);
+ LLVMValueRef z = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
+ z = LLVMBuildSDiv(ctx->ac.builder, z, six, "");
+ res = LLVMBuildInsertElement(ctx->ac.builder, res, z, two, "");
+ }
+ if (ctx->ac.chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D && is_array) {
+ LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
+ res = LLVMBuildInsertElement(ctx->ac.builder, res, layers,
+ ctx->ac.i32_1, "");
+ }
}
- return res;
+ return exit_waterfall(ctx, &wctx, res);
}
static void emit_membar(struct ac_llvm_context *ac,
case nir_intrinsic_group_memory_barrier:
wait_flags = AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE;
break;
- case nir_intrinsic_memory_barrier_atomic_counter:
case nir_intrinsic_memory_barrier_buffer:
case nir_intrinsic_memory_barrier_image:
wait_flags = AC_WAIT_VLOAD | AC_WAIT_VSTORE;
cond = ctx->ac.i1false;
}
- ctx->abi->emit_kill(ctx->abi, cond);
+ ac_build_kill_if_false(&ctx->ac, cond);
+}
+
+static void emit_demote(struct ac_nir_context *ctx,
+ const nir_intrinsic_instr *instr)
+{
+ LLVMValueRef cond;
+
+ if (instr->intrinsic == nir_intrinsic_demote_if) {
+ cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
+ get_src(ctx, instr->src[0]),
+ ctx->ac.i32_0, "");
+ } else {
+ assert(instr->intrinsic == nir_intrinsic_demote);
+ cond = ctx->ac.i1false;
+ }
+
+ /* Kill immediately while maintaining WQM. */
+ ac_build_kill_if_false(&ctx->ac, ac_build_wqm_vote(&ctx->ac, cond));
+
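+ /* Record the demote in postponed_kill (true = lane still live) so
+ * later side effects can be masked and the final kill applied at the
+ * end of the shader. */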
+ LLVMValueRef mask = LLVMBuildLoad(ctx->ac.builder, ctx->ac.postponed_kill, "");
+ mask = LLVMBuildAnd(ctx->ac.builder, mask, cond, "");
+ LLVMBuildStore(ctx->ac.builder, mask, ctx->ac.postponed_kill);
}
static LLVMValueRef
{
LLVMValueRef result;
LLVMValueRef thread_id = ac_get_thread_id(&ctx->ac);
- result = LLVMBuildAnd(ctx->ac.builder, ctx->abi->tg_size,
+ result = LLVMBuildAnd(ctx->ac.builder,
+ ac_get_arg(&ctx->ac, ctx->args->tg_size),
LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");
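+ /* Bits [11:6] of tg_size hold the wave index within the threadgroup;
+ * the unshifted mask therefore yields wave_index * 64, the first-lane
+ * offset in wave64 mode. Halve it for wave32. */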
+ if (ctx->ac.wave_size == 32)
+ result = LLVMBuildLShr(ctx->ac.builder, result,
+ LLVMConstInt(ctx->ac.i32, 1, false), "");
+
return LLVMBuildAdd(ctx->ac.builder, result, thread_id, "");
}
{
if (ctx->stage == MESA_SHADER_COMPUTE) {
LLVMValueRef result;
- result = LLVMBuildAnd(ctx->ac.builder, ctx->abi->tg_size,
+ result = LLVMBuildAnd(ctx->ac.builder,
+ ac_get_arg(&ctx->ac, ctx->args->tg_size),
LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");
return LLVMBuildLShr(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 6, false), "");
} else {
visit_load_num_subgroups(struct ac_nir_context *ctx)
{
if (ctx->stage == MESA_SHADER_COMPUTE) {
- return LLVMBuildAnd(ctx->ac.builder, ctx->abi->tg_size,
+ return LLVMBuildAnd(ctx->ac.builder,
+ ac_get_arg(&ctx->ac, ctx->args->tg_size),
LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
} else {
return LLVMConstInt(ctx->ac.i32, 1, false);
{
LLVMValueRef values[4], derived_ptr, index, ret;
- LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0]);
+ LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0],
+ instr->dest.ssa.bit_size);
for (int chan = 0; chan < instr->num_components; chan++) {
index = LLVMConstInt(ctx->ac.i32, chan, 0);
LLVMValueRef derived_ptr, data,index;
LLVMBuilderRef builder = ctx->ac.builder;
- LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1]);
+ LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1],
+ instr->src[0].ssa->bit_size);
LLVMValueRef src = get_src(ctx, instr->src[0]);
int writemask = nir_intrinsic_write_mask(instr);
const nir_intrinsic_instr *instr,
LLVMValueRef ptr, int src_idx)
{
+ if (ctx->ac.postponed_kill) {
+ LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
+ ctx->ac.postponed_kill, "");
+ ac_build_ifcc(&ctx->ac, cond, 7005);
+ }
+
LLVMValueRef result;
LLVMValueRef src = get_src(ctx, instr->src[src_idx]);
const char *sync_scope = LLVM_VERSION_MAJOR >= 9 ? "workgroup-one-as" : "workgroup";
+ if (instr->src[0].ssa->parent_instr->type == nir_instr_type_deref) {
+ nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
+ if (deref->mode == nir_var_mem_global) {
+ /* use "singlethread" sync scope to implement relaxed ordering */
+ sync_scope = LLVM_VERSION_MAJOR >= 9 ? "singlethread-one-as" : "singlethread";
+
+ LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(src), LLVMGetPointerAddressSpace(LLVMTypeOf(ptr)));
+ ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ptr_type, "");
+ }
+ }
+
if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap ||
instr->intrinsic == nir_intrinsic_deref_atomic_comp_swap) {
LLVMValueRef src1 = get_src(ctx, instr->src[src_idx + 1]);
result = ac_build_atomic_rmw(&ctx->ac, op, ptr, ac_to_integer(&ctx->ac, src), sync_scope);
}
+
+ if (ctx->ac.postponed_kill)
+ ac_build_endif(&ctx->ac, 7005);
return result;
}
LLVMValueRef values[2];
LLVMValueRef pos[2];
- pos[0] = ac_to_float(&ctx->ac, ctx->abi->frag_pos[0]);
- pos[1] = ac_to_float(&ctx->ac, ctx->abi->frag_pos[1]);
+ pos[0] = ac_to_float(&ctx->ac,
+ ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]));
+ pos[1] = ac_to_float(&ctx->ac,
+ ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]));
values[0] = ac_build_fract(&ctx->ac, pos[0], 32);
values[1] = ac_build_fract(&ctx->ac, pos[1], 32);
case INTERP_MODE_SMOOTH:
case INTERP_MODE_NONE:
if (location == INTERP_CENTER)
- return ctx->abi->persp_center;
+ return ac_get_arg(&ctx->ac, ctx->args->persp_center);
else if (location == INTERP_CENTROID)
return ctx->abi->persp_centroid;
else if (location == INTERP_SAMPLE)
- return ctx->abi->persp_sample;
+ return ac_get_arg(&ctx->ac, ctx->args->persp_sample);
break;
case INTERP_MODE_NOPERSPECTIVE:
if (location == INTERP_CENTER)
- return ctx->abi->linear_center;
+ return ac_get_arg(&ctx->ac, ctx->args->linear_center);
else if (location == INTERP_CENTROID)
return ctx->abi->linear_centroid;
else if (location == INTERP_SAMPLE)
- return ctx->abi->linear_sample;
+ return ac_get_arg(&ctx->ac, ctx->args->linear_sample);
break;
}
return NULL;
return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
+static LLVMValueRef barycentric_model(struct ac_nir_context *ctx)
+{
+ return LLVMBuildBitCast(ctx->ac.builder,
+ ac_get_arg(&ctx->ac, ctx->args->pull_model),
+ ctx->ac.v3i32, "");
+}
+
static LLVMValueRef load_interpolated_input(struct ac_nir_context *ctx,
LLVMValueRef interp_param,
unsigned index, unsigned comp_start,
LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, comp_start + comp, false);
if (bitsize == 16) {
values[comp] = ac_build_fs_interp_f16(&ctx->ac, llvm_chan, attr_number,
- ctx->abi->prim_mask, i, j);
+ ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j);
} else {
values[comp] = ac_build_fs_interp(&ctx->ac, llvm_chan, attr_number,
- ctx->abi->prim_mask, i, j);
+ ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j);
}
}
return ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, num_components));
}
-static LLVMValueRef load_flat_input(struct ac_nir_context *ctx,
- unsigned index, unsigned comp_start,
- unsigned num_components,
- unsigned bit_size)
+static LLVMValueRef load_input(struct ac_nir_context *ctx,
+ nir_intrinsic_instr *instr)
{
- LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);
+ unsigned offset_idx = instr->intrinsic == nir_intrinsic_load_input ? 0 : 1;
+ /* We only lower inputs for fragment shaders ATM */
+ ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[offset_idx]);
+ assert(offset);
+ assert(offset[0].i32 == 0);
+
+ unsigned component = nir_intrinsic_component(instr);
+ unsigned index = nir_intrinsic_base(instr);
+ unsigned vertex_id = 2; /* P0 */
+
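+ /* Remap NIR's per-vertex index onto the hardware interp_mov vertex
+ * selector, where 2 selects the first vertex (P0). */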
+ if (instr->intrinsic == nir_intrinsic_load_input_vertex) {
+ nir_const_value *src0 = nir_src_as_const_value(instr->src[0]);
+
+ switch (src0[0].i32) {
+ case 0:
+ vertex_id = 2;
+ break;
+ case 1:
+ vertex_id = 0;
+ break;
+ case 2:
+ vertex_id = 1;
+ break;
+ default:
+ unreachable("Invalid vertex index");
+ }
+ }
+
+ LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);
LLVMValueRef values[8];
/* Each component of a 64-bit value takes up two GL-level channels. */
+ unsigned num_components = instr->dest.ssa.num_components;
+ unsigned bit_size = instr->dest.ssa.bit_size;
unsigned channels =
bit_size == 64 ? num_components * 2 : num_components;
for (unsigned chan = 0; chan < channels; chan++) {
- if (comp_start + chan > 4)
+ if (component + chan > 4)
attr_number = LLVMConstInt(ctx->ac.i32, index + 1, false);
- LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, (comp_start + chan) % 4, false);
+ LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, (component + chan) % 4, false);
values[chan] = ac_build_fs_interp_mov(&ctx->ac,
- LLVMConstInt(ctx->ac.i32, 2, false),
+ LLVMConstInt(ctx->ac.i32, vertex_id, false),
llvm_chan,
attr_number,
- ctx->abi->prim_mask);
+ ac_get_arg(&ctx->ac, ctx->args->prim_mask));
values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i32, "");
values[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, values[chan],
bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
LLVMValueRef values[3];
for (int i = 0; i < 3; i++) {
- values[i] = ctx->abi->workgroup_ids[i] ?
- ctx->abi->workgroup_ids[i] : ctx->ac.i32_0;
+ values[i] = ctx->args->workgroup_ids[i].used ?
+ ac_get_arg(&ctx->ac, ctx->args->workgroup_ids[i]) : ctx->ac.i32_0;
}
result = ac_build_gather_values(&ctx->ac, values, 3);
result = ctx->abi->load_local_group_size(ctx->abi);
break;
case nir_intrinsic_load_vertex_id:
- result = LLVMBuildAdd(ctx->ac.builder, ctx->abi->vertex_id,
- ctx->abi->base_vertex, "");
+ result = LLVMBuildAdd(ctx->ac.builder,
+ ac_get_arg(&ctx->ac, ctx->args->vertex_id),
+ ac_get_arg(&ctx->ac, ctx->args->base_vertex), "");
break;
case nir_intrinsic_load_vertex_id_zero_base: {
result = ctx->abi->vertex_id;
break;
}
case nir_intrinsic_load_local_invocation_id: {
- result = ctx->abi->local_invocation_ids;
+ result = ac_get_arg(&ctx->ac, ctx->args->local_invocation_ids);
break;
}
case nir_intrinsic_load_base_instance:
- result = ctx->abi->start_instance;
+ result = ac_get_arg(&ctx->ac, ctx->args->start_instance);
break;
case nir_intrinsic_load_draw_id:
- result = ctx->abi->draw_id;
+ result = ac_get_arg(&ctx->ac, ctx->args->draw_id);
break;
case nir_intrinsic_load_view_index:
- result = ctx->abi->view_index;
+ result = ac_get_arg(&ctx->ac, ctx->args->view_index);
break;
case nir_intrinsic_load_invocation_id:
if (ctx->stage == MESA_SHADER_TESS_CTRL) {
- result = ac_unpack_param(&ctx->ac, ctx->abi->tcs_rel_ids, 8, 5);
+ result = ac_unpack_param(&ctx->ac,
+ ac_get_arg(&ctx->ac, ctx->args->tcs_rel_ids),
+ 8, 5);
} else {
if (ctx->ac.chip_class >= GFX10) {
result = LLVMBuildAnd(ctx->ac.builder,
- ctx->abi->gs_invocation_id,
+ ac_get_arg(&ctx->ac, ctx->args->gs_invocation_id),
LLVMConstInt(ctx->ac.i32, 127, 0), "");
} else {
- result = ctx->abi->gs_invocation_id;
+ result = ac_get_arg(&ctx->ac, ctx->args->gs_invocation_id);
}
}
break;
case nir_intrinsic_load_primitive_id:
if (ctx->stage == MESA_SHADER_GEOMETRY) {
- result = ctx->abi->gs_prim_id;
+ result = ac_get_arg(&ctx->ac, ctx->args->gs_prim_id);
} else if (ctx->stage == MESA_SHADER_TESS_CTRL) {
- result = ctx->abi->tcs_patch_id;
+ result = ac_get_arg(&ctx->ac, ctx->args->tcs_patch_id);
} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
- result = ctx->abi->tes_patch_id;
+ result = ac_get_arg(&ctx->ac, ctx->args->tes_patch_id);
} else
fprintf(stderr, "Unknown primitive id intrinsic: %d", ctx->stage);
break;
case nir_intrinsic_load_sample_id:
- result = ac_unpack_param(&ctx->ac, ctx->abi->ancillary, 8, 4);
+ result = ac_unpack_param(&ctx->ac,
+ ac_get_arg(&ctx->ac, ctx->args->ancillary),
+ 8, 4);
break;
case nir_intrinsic_load_sample_pos:
result = load_sample_pos(ctx);
break;
case nir_intrinsic_load_frag_coord: {
LLVMValueRef values[4] = {
- ctx->abi->frag_pos[0],
- ctx->abi->frag_pos[1],
- ctx->abi->frag_pos[2],
- ac_build_fdiv(&ctx->ac, ctx->ac.f32_1, ctx->abi->frag_pos[3])
+ ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]),
+ ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]),
+ ac_get_arg(&ctx->ac, ctx->args->frag_pos[2]),
+ ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
+ ac_get_arg(&ctx->ac, ctx->args->frag_pos[3]))
};
result = ac_to_integer(&ctx->ac,
ac_build_gather_values(&ctx->ac, values, 4));
result = ctx->abi->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
break;
case nir_intrinsic_load_front_face:
- result = ctx->abi->front_face;
+ result = ac_get_arg(&ctx->ac, ctx->args->front_face);
break;
case nir_intrinsic_load_helper_invocation:
result = ac_build_load_helper_invocation(&ctx->ac);
break;
+ case nir_intrinsic_is_helper_invocation:
+ result = ac_build_is_helper_invocation(&ctx->ac);
+ break;
case nir_intrinsic_load_color0:
result = ctx->abi->color0;
break;
result = ctx->abi->instance_id;
break;
case nir_intrinsic_load_num_work_groups:
- result = ctx->abi->num_work_groups;
+ result = ac_get_arg(&ctx->ac, ctx->args->num_work_groups);
break;
case nir_intrinsic_load_local_invocation_index:
result = visit_load_local_invocation_index(ctx);
case nir_intrinsic_discard_if:
emit_discard(ctx, instr);
break;
+ case nir_intrinsic_demote:
+ case nir_intrinsic_demote_if:
+ emit_demote(ctx, instr);
+ break;
case nir_intrinsic_memory_barrier:
case nir_intrinsic_group_memory_barrier:
- case nir_intrinsic_memory_barrier_atomic_counter:
case nir_intrinsic_memory_barrier_buffer:
case nir_intrinsic_memory_barrier_image:
case nir_intrinsic_memory_barrier_shared:
emit_membar(&ctx->ac, instr);
break;
- case nir_intrinsic_barrier:
+ case nir_intrinsic_memory_barrier_tcs_patch:
+ break;
+ case nir_intrinsic_control_barrier:
ac_emit_barrier(&ctx->ac, ctx->stage);
break;
case nir_intrinsic_shared_atomic_add:
case nir_intrinsic_shared_atomic_xor:
case nir_intrinsic_shared_atomic_exchange:
case nir_intrinsic_shared_atomic_comp_swap: {
- LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0]);
+ LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0],
+ instr->src[1].ssa->bit_size);
result = visit_var_atomic(ctx, instr, ptr, 1);
break;
}
case nir_intrinsic_load_barycentric_sample:
result = barycentric_sample(ctx, nir_intrinsic_interp_mode(instr));
break;
+ case nir_intrinsic_load_barycentric_model:
+ result = barycentric_model(ctx);
+ break;
case nir_intrinsic_load_barycentric_at_offset: {
LLVMValueRef offset = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
result = barycentric_offset(ctx, nir_intrinsic_interp_mode(instr), offset);
instr->dest.ssa.bit_size);
break;
}
- case nir_intrinsic_load_input: {
- /* We only lower inputs for fragment shaders ATM */
- ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[0]);
- assert(offset);
- assert(offset[0].i32 == 0);
-
- unsigned index = nir_intrinsic_base(instr);
- unsigned component = nir_intrinsic_component(instr);
- result = load_flat_input(ctx, index, component,
- instr->dest.ssa.num_components,
- instr->dest.ssa.bit_size);
+ case nir_intrinsic_load_input:
+ case nir_intrinsic_load_input_vertex:
+ result = load_input(ctx, instr);
break;
- }
case nir_intrinsic_emit_vertex:
ctx->abi->emit_vertex(ctx->abi, nir_intrinsic_stream_id(instr), ctx->abi->outputs);
break;
break;
}
case nir_intrinsic_shuffle:
- result = ac_build_shuffle(&ctx->ac, get_src(ctx, instr->src[0]),
- get_src(ctx, instr->src[1]));
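+ /* ac_build_shuffle's bpermute-based path only handles arbitrary
+ * indices on these chip/wave-size combinations; elsewhere, scalarize
+ * the index with a waterfall loop and use readlane. */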
+ if (ctx->ac.chip_class == GFX8 ||
+ ctx->ac.chip_class == GFX9 ||
+ (ctx->ac.chip_class == GFX10 && ctx->ac.wave_size == 32)) {
+ result = ac_build_shuffle(&ctx->ac, get_src(ctx, instr->src[0]),
+ get_src(ctx, instr->src[1]));
+ } else {
+ LLVMValueRef src = get_src(ctx, instr->src[0]);
+ LLVMValueRef index = get_src(ctx, instr->src[1]);
+ LLVMTypeRef type = LLVMTypeOf(src);
+ struct waterfall_context wctx;
+ LLVMValueRef index_val;
+
+ index_val = enter_waterfall(ctx, &wctx, index, true);
+
+ src = LLVMBuildZExt(ctx->ac.builder, src,
+ ctx->ac.i32, "");
+
+ result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.readlane",
+ ctx->ac.i32,
+ (LLVMValueRef []) { src, index_val }, 2,
+ AC_FUNC_ATTR_READNONE |
+ AC_FUNC_ATTR_CONVERGENT);
+
+ result = LLVMBuildTrunc(ctx->ac.builder, result, type, "");
+
+ result = exit_waterfall(ctx, &wctx, result);
+ }
break;
case nir_intrinsic_reduce:
result = ac_build_reduce(&ctx->ac,
break;
}
case nir_intrinsic_load_constant: {
+ unsigned base = nir_intrinsic_base(instr);
+ unsigned range = nir_intrinsic_range(instr);
+
LLVMValueRef offset = get_src(ctx, instr->src[0]);
- LLVMValueRef base = LLVMConstInt(ctx->ac.i32,
- nir_intrinsic_base(instr),
- false);
- offset = LLVMBuildAdd(ctx->ac.builder, offset, base, "");
+ offset = LLVMBuildAdd(ctx->ac.builder, offset,
+ LLVMConstInt(ctx->ac.i32, base, false), "");
+
+ /* Clamp the offset to avoid out-of-bounds accesses, which global
+ * instructions can't handle.
+ */
+ LLVMValueRef size = LLVMConstInt(ctx->ac.i32, base + range, false);
+ LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
+ offset, size, "");
+ offset = LLVMBuildSelect(ctx->ac.builder, cond, offset, size, "");
+
LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->constant_data,
offset);
LLVMTypeRef comp_type =
return LLVMBuildBitCast(ctx->ac.builder, ret, ctx->ac.i32, "");
}
-static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
- nir_deref_instr *deref_instr,
- enum ac_descriptor_type desc_type,
- const nir_instr *instr,
- bool image, bool write)
+struct sampler_desc_address {
+ unsigned descriptor_set;
+ unsigned base_index; /* binding in vulkan */
+ unsigned constant_index;
+ LLVMValueRef dynamic_index;
+ bool image;
+ bool bindless;
+};
+
+static struct sampler_desc_address
+get_sampler_desc_internal(struct ac_nir_context *ctx,
+ nir_deref_instr *deref_instr,
+ const nir_instr *instr,
+ bool image)
{
LLVMValueRef index = NULL;
unsigned constant_index = 0;
} else
base_index = deref_instr->var->data.binding;
}
+ return (struct sampler_desc_address) {
+ .descriptor_set = descriptor_set,
+ .base_index = base_index,
+ .constant_index = constant_index,
+ .dynamic_index = index,
+ .image = image,
+ .bindless = bindless,
+ };
+}
+
+/* Extract any possibly divergent index into a separate value that can be fed
+ * into get_sampler_desc with the same arguments. */
+static LLVMValueRef get_sampler_desc_index(struct ac_nir_context *ctx,
+ nir_deref_instr *deref_instr,
+ const nir_instr *instr,
+ bool image)
+{
+ struct sampler_desc_address addr = get_sampler_desc_internal(ctx, deref_instr, instr, image);
+ return addr.dynamic_index;
+}
+static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
+ nir_deref_instr *deref_instr,
+ enum ac_descriptor_type desc_type,
+ const nir_instr *instr,
+ LLVMValueRef index,
+ bool image, bool write)
+{
+ struct sampler_desc_address addr = get_sampler_desc_internal(ctx, deref_instr, instr, image);
return ctx->abi->load_sampler_desc(ctx->abi,
- descriptor_set,
- base_index,
- constant_index, index,
- desc_type, image, write, bindless);
+ addr.descriptor_set,
+ addr.base_index,
+ addr.constant_index, index,
+ desc_type, addr.image, write, addr.bindless);
}
/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
static void tex_fetch_ptrs(struct ac_nir_context *ctx,
nir_tex_instr *instr,
+ struct waterfall_context *wctx,
LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr,
LLVMValueRef *fmask_ptr)
{
}
}
+ LLVMValueRef texture_dynamic_index = get_sampler_desc_index(ctx, texture_deref_instr,
+ &instr->instr, false);
if (!sampler_deref_instr)
sampler_deref_instr = texture_deref_instr;
+ LLVMValueRef sampler_dynamic_index = get_sampler_desc_index(ctx, sampler_deref_instr,
+ &instr->instr, false);
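+ /* Non-uniform texture and sampler indices each get their own
+ * waterfall loop, so a single tex instruction can nest up to two. */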
+ if (instr->texture_non_uniform)
+ texture_dynamic_index = enter_waterfall(ctx, wctx + 0, texture_dynamic_index, true);
+
+ if (instr->sampler_non_uniform)
+ sampler_dynamic_index = enter_waterfall(ctx, wctx + 1, sampler_dynamic_index, true);
+
enum ac_descriptor_type main_descriptor = instr->sampler_dim == GLSL_SAMPLER_DIM_BUF ? AC_DESC_BUFFER : AC_DESC_IMAGE;
if (plane >= 0) {
main_descriptor = AC_DESC_PLANE_0 + plane;
}
- *res_ptr = get_sampler_desc(ctx, texture_deref_instr, main_descriptor, &instr->instr, false, false);
+ if (instr->op == nir_texop_fragment_mask_fetch) {
+ /* The fragment mask is fetched from the compressed
+ * multisampled surface.
+ */
+ main_descriptor = AC_DESC_FMASK;
+ }
+
+ *res_ptr = get_sampler_desc(ctx, texture_deref_instr, main_descriptor, &instr->instr,
+ texture_dynamic_index, false, false);
if (samp_ptr) {
- *samp_ptr = get_sampler_desc(ctx, sampler_deref_instr, AC_DESC_SAMPLER, &instr->instr, false, false);
+ *samp_ptr = get_sampler_desc(ctx, sampler_deref_instr, AC_DESC_SAMPLER, &instr->instr,
+ sampler_dynamic_index, false, false);
if (instr->sampler_dim < GLSL_SAMPLER_DIM_RECT)
*samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
}
if (fmask_ptr && (instr->op == nir_texop_txf_ms ||
instr->op == nir_texop_samples_identical))
- *fmask_ptr = get_sampler_desc(ctx, texture_deref_instr, AC_DESC_FMASK, &instr->instr, false, false);
+ *fmask_ptr = get_sampler_desc(ctx, texture_deref_instr, AC_DESC_FMASK,
+ &instr->instr, texture_dynamic_index, false, false);
}
static LLVMValueRef apply_round_slice(struct ac_llvm_context *ctx,
LLVMValueRef fmask_ptr = NULL, sample_index = NULL;
LLVMValueRef ddx = NULL, ddy = NULL;
unsigned offset_src = 0;
+ struct waterfall_context wctx[2] = {{{0}}};
- tex_fetch_ptrs(ctx, instr, &args.resource, &args.sampler, &fmask_ptr);
+ tex_fetch_ptrs(ctx, instr, wctx, &args.resource, &args.sampler, &fmask_ptr);
for (unsigned i = 0; i < instr->num_srcs; i++) {
switch (instr->src[i].src_type) {
instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS ||
instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS) &&
instr->is_array &&
- instr->op != nir_texop_txf && instr->op != nir_texop_txf_ms) {
+ instr->op != nir_texop_txf &&
+ instr->op != nir_texop_txf_ms &&
+ instr->op != nir_texop_fragment_fetch &&
+ instr->op != nir_texop_fragment_mask_fetch) {
args.coords[2] = apply_round_slice(&ctx->ac, args.coords[2]);
}
}
/* Pack sample index */
- if (instr->op == nir_texop_txf_ms && sample_index)
+ if (sample_index && (instr->op == nir_texop_txf_ms ||
+ instr->op == nir_texop_fragment_fetch))
args.coords[instr->coord_components] = sample_index;
if (instr->op == nir_texop_samples_identical) {
if ((instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS ||
instr->sampler_dim == GLSL_SAMPLER_DIM_MS) &&
- instr->op != nir_texop_txs) {
+ instr->op != nir_texop_txs &&
+ instr->op != nir_texop_fragment_fetch &&
+ instr->op != nir_texop_fragment_mask_fetch) {
unsigned sample_chan = instr->is_array ? 3 : 2;
args.coords[sample_chan] = adjust_sample_index_using_fmask(
&ctx->ac, args.coords[0], args.coords[1],
args.dim = ac_get_sampler_dim(ctx->ac.chip_class, instr->sampler_dim, instr->is_array);
args.unorm = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;
}
+
+ /* Adjust the number of coordinates because we only need (x,y) for 2D
+ * multisampled images and (x,y,layer) for 2D multisampled layered
+ * images or for multisampled input attachments.
+ */
+ if (instr->op == nir_texop_fragment_mask_fetch) {
+ if (args.dim == ac_image_2dmsaa) {
+ args.dim = ac_image_2d;
+ } else {
+ assert(args.dim == ac_image_2darraymsaa);
+ args.dim = ac_image_2darray;
+ }
+ }
+
result = build_tex_intrinsic(ctx, instr, &args);
if (instr->op == nir_texop_query_levels)
if (result) {
assert(instr->dest.is_ssa);
result = ac_to_integer(&ctx->ac, result);
+
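+ /* Unwind the waterfall loops innermost-first: sampler (wctx[1]) before
+ * texture (wctx[0]). */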
+ for (int i = ARRAY_SIZE(wctx); --i >= 0;) {
+ result = exit_waterfall(ctx, wctx + i, result);
+ }
+
ctx->ssa_defs[instr->dest.ssa.index] = result;
}
}
-
static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr)
{
LLVMTypeRef type = get_def_type(ctx, &instr->dest.ssa);
setup_shared(struct ac_nir_context *ctx,
struct nir_shader *nir)
{
- nir_foreach_variable(variable, &nir->shared) {
- LLVMValueRef shared =
- LLVMAddGlobalInAddressSpace(
- ctx->ac.module, glsl_to_llvm_type(&ctx->ac, variable->type),
- variable->name ? variable->name : "",
- AC_ADDR_SPACE_LDS);
- _mesa_hash_table_insert(ctx->vars, variable, shared);
- }
+ if (ctx->ac.lds)
+ return;
+
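+ /* Declare all of shared memory as one byte array sized from the NIR
+ * info; get_memory_ptr() indexes it with byte offsets and casts the
+ * result to the access type. */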
+ LLVMTypeRef type = LLVMArrayType(ctx->ac.i8,
+ nir->info.cs.shared_size);
+
+ LLVMValueRef lds =
+ LLVMAddGlobalInAddressSpace(ctx->ac.module, type,
+ "compute_lds",
+ AC_ADDR_SPACE_LDS);
+ LLVMSetAlignment(lds, 64 * 1024);
+
+ ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, lds,
+ LLVMPointerType(ctx->ac.i8,
+ AC_ADDR_SPACE_LDS), "");
}
void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
- struct nir_shader *nir)
+ const struct ac_shader_args *args, struct nir_shader *nir)
{
struct ac_nir_context ctx = {};
struct nir_function *func;
ctx.ac = *ac;
ctx.abi = abi;
+ ctx.args = args;
ctx.stage = nir->info.stage;
ctx.info = &nir->info;
if (gl_shader_stage_is_compute(nir->info.stage))
setup_shared(&ctx, nir);
+ if (nir->info.stage == MESA_SHADER_FRAGMENT && nir->info.fs.uses_demote) {
+ ctx.ac.postponed_kill = ac_build_alloca_undef(&ctx.ac, ac->i1, "");
+ /* true = don't kill. */
+ LLVMBuildStore(ctx.ac.builder, ctx.ac.i1true, ctx.ac.postponed_kill);
+ }
+
visit_cf_list(&ctx, &func->impl->body);
phi_post_pass(&ctx);
+ if (ctx.ac.postponed_kill)
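+ /* Apply any demotes recorded in postponed_kill now that the shader
+ * body, and any derivative computations, have finished. */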
+ ac_build_kill_if_false(&ctx.ac, LLVMBuildLoad(ctx.ac.builder,
+ ctx.ac.postponed_kill, ""));
+
if (!gl_shader_stage_is_compute(nir->info.stage))
ctx.abi->emit_outputs(ctx.abi, AC_LLVM_MAX_OUTPUTS,
ctx.abi->outputs);
ralloc_free(ctx.vars);
}
-void
+bool
ac_lower_indirect_derefs(struct nir_shader *nir, enum chip_class chip_class)
{
+ bool progress = false;
+
/* Lower large variables to scratch first so that we won't bloat the
* shader by generating large if ladders for them. We later lower
* scratch to allocas, assuming LLVM won't generate VGPR indexing.
*/
- NIR_PASS_V(nir, nir_lower_vars_to_scratch,
- nir_var_function_temp,
- 256,
- glsl_get_natural_size_align_bytes);
+ NIR_PASS(progress, nir, nir_lower_vars_to_scratch,
+ nir_var_function_temp,
+ 256,
+ glsl_get_natural_size_align_bytes);
/* While it would be nice not to have this flag, we are constrained
* by the reality that LLVM 9.0 has buggy VGPR indexing on GFX9.
*/
indirect_mask |= nir_var_function_temp;
- nir_lower_indirect_derefs(nir, indirect_mask);
+ progress |= nir_lower_indirect_derefs(nir, indirect_mask);
+ return progress;
}
static unsigned
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
- if (intrin->intrinsic == nir_intrinsic_barrier) {
+ if (intrin->intrinsic == nir_intrinsic_control_barrier) {
/* If we find a barrier in nested control flow, put this in the
* too hard basket. In GLSL this is not possible but it is in