struct si_shader_output_values
{
LLVMValueRef values[4];
- unsigned name;
- unsigned sid;
+ unsigned semantic_name;
+ unsigned semantic_index;
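+	/* Vertex stream that each component belongs to (0-3). */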
+ ubyte vertex_stream[4];
};
static void si_init_shader_ctx(struct si_shader_context *ctx,
LOCAL_ADDR_SPACE = 3,
};
-#define SENDMSG_GS 2
-#define SENDMSG_GS_DONE 3
-
-#define SENDMSG_GS_OP_NOP (0 << 4)
-#define SENDMSG_GS_OP_CUT (1 << 4)
-#define SENDMSG_GS_OP_EMIT (2 << 4)
-#define SENDMSG_GS_OP_EMIT_CUT (3 << 4)
-
/**
* Returns a unique index for a semantic name and index. The index must be
* less than 64, so that a 64-bit bitmask of used inputs or outputs can be
case TGSI_SEMANTIC_GENERIC:
if (index <= 63-4)
return 4 + index;
- else
- /* same explanation as in the default statement,
- * the only user hitting this is st/nine.
- */
- return 0;
+
+ assert(!"invalid generic index");
+ return 0;
/* patch indices are completely separate and thus start from 0 */
case TGSI_SEMANTIC_TESSOUTER:
return 2 + index;
default:
- /* Don't fail here. The result of this function is only used
- * for LS, TCS, TES, and GS, where legacy GL semantics can't
- * occur, but this function is called for all vertex shaders
- * before it's known whether LS will be compiled or not.
- */
+ assert(!"invalid semantic name");
+ return 0;
+ }
+}
+
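+/**
+ * Returns a unique index for the "other" semantics (fog, layer, viewport index,
+ * primitive ID, colors, texcoords); used to build the kill_outputs2 bitmask.
+ */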
+unsigned si_shader_io_get_unique_index2(unsigned name, unsigned index)
+{
+ switch (name) {
+ case TGSI_SEMANTIC_FOG:
+ return 0;
+ case TGSI_SEMANTIC_LAYER:
+ return 1;
+ case TGSI_SEMANTIC_VIEWPORT_INDEX:
+ return 2;
+ case TGSI_SEMANTIC_PRIMID:
+ return 3;
+ case TGSI_SEMANTIC_COLOR: /* these alias */
+ case TGSI_SEMANTIC_BCOLOR:
+ return 4 + index;
+ case TGSI_SEMANTIC_TEXCOORD:
+ return 6 + index;
+ default:
+ assert(!"invalid semantic name");
return 0;
}
}
param);
if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
- value = bitcast(&ctx->soa.bld_base,
+ value = bitcast(&ctx->bld_base,
TGSI_TYPE_UNSIGNED, value);
if (rshift)
static LLVMValueRef
get_tcs_out_patch0_offset(struct si_shader_context *ctx)
{
- return lp_build_mul_imm(&ctx->soa.bld_base.uint_bld,
+ return lp_build_mul_imm(&ctx->bld_base.uint_bld,
unpack_param(ctx,
SI_PARAM_TCS_OUT_OFFSETS,
0, 16),
static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
{
- return lp_build_mul_imm(&ctx->soa.bld_base.uint_bld,
+ return lp_build_mul_imm(&ctx->bld_base.uint_bld,
unpack_param(ctx,
SI_PARAM_TCS_OUT_OFFSETS,
16, 16),
"");
}
-static LLVMValueRef build_gep0(struct si_shader_context *ctx,
- LLVMValueRef base_ptr, LLVMValueRef index)
-{
- LLVMValueRef indices[2] = {
- LLVMConstInt(ctx->i32, 0, 0),
- index,
- };
- return LLVMBuildGEP(ctx->gallivm.builder, base_ptr,
- indices, 2, "");
-}
-
-static void build_indexed_store(struct si_shader_context *ctx,
- LLVMValueRef base_ptr, LLVMValueRef index,
- LLVMValueRef value)
-{
- struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
- struct gallivm_state *gallivm = bld_base->base.gallivm;
-
- LLVMBuildStore(gallivm->builder, value,
- build_gep0(ctx, base_ptr, index));
-}
-
-/**
- * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad.
- * It's equivalent to doing a load from &base_ptr[index].
- *
- * \param base_ptr Where the array starts.
- * \param index The element index into the array.
- * \param uniform Whether the base_ptr and index can be assumed to be
- * dynamically uniform
- */
-static LLVMValueRef build_indexed_load(struct si_shader_context *ctx,
- LLVMValueRef base_ptr, LLVMValueRef index,
- bool uniform)
-{
- struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
- struct gallivm_state *gallivm = bld_base->base.gallivm;
- LLVMValueRef pointer;
-
- pointer = build_gep0(ctx, base_ptr, index);
- if (uniform)
- LLVMSetMetadata(pointer, ctx->uniform_md_kind, ctx->empty_md);
- return LLVMBuildLoad(gallivm->builder, pointer, "");
-}
-
-/**
- * Do a load from &base_ptr[index], but also add a flag that it's loading
- * a constant from a dynamically uniform index.
- */
-static LLVMValueRef build_indexed_load_const(
- struct si_shader_context *ctx,
- LLVMValueRef base_ptr, LLVMValueRef index)
-{
- LLVMValueRef result = build_indexed_load(ctx, base_ptr, index, true);
- LLVMSetMetadata(result, ctx->invariant_load_md_kind, ctx->empty_md);
- return result;
-}
-
static LLVMValueRef get_instance_index_for_fetch(
struct si_shader_context *radeon_bld,
unsigned param_start_instance, unsigned divisor)
{
struct si_shader_context *ctx =
- si_shader_context(&radeon_bld->soa.bld_base);
- struct gallivm_state *gallivm = radeon_bld->soa.bld_base.base.gallivm;
+ si_shader_context(&radeon_bld->bld_base);
+ struct gallivm_state *gallivm = radeon_bld->bld_base.base.gallivm;
LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn,
ctx->param_instance_id);
LLVMGetParam(radeon_bld->main_fn, param_start_instance), "");
}
+/* Bitcast <4 x float> to <2 x double>, extract the component, and convert
+ * to float. */
+static LLVMValueRef extract_double_to_float(struct si_shader_context *ctx,
+ LLVMValueRef vec4,
+ unsigned double_index)
+{
+ LLVMBuilderRef builder = ctx->gallivm.builder;
+ LLVMTypeRef f64 = LLVMDoubleTypeInContext(ctx->gallivm.context);
+ LLVMValueRef dvec2 = LLVMBuildBitCast(builder, vec4,
+ LLVMVectorType(f64, 2), "");
+ LLVMValueRef index = LLVMConstInt(ctx->i32, double_index, 0);
+ LLVMValueRef value = LLVMBuildExtractElement(builder, dvec2, index, "");
+ return LLVMBuildFPTrunc(builder, value, ctx->f32, "");
+}
+
static void declare_input_vs(
struct si_shader_context *ctx,
unsigned input_index,
const struct tgsi_full_declaration *decl,
LLVMValueRef out[4])
{
- struct lp_build_context *base = &ctx->soa.bld_base.base;
+ struct lp_build_context *base = &ctx->bld_base.base;
struct gallivm_state *gallivm = base->gallivm;
unsigned chan;
unsigned fix_fetch;
+ unsigned num_fetches;
+ unsigned fetch_stride;
LLVMValueRef t_list_ptr;
LLVMValueRef t_offset;
LLVMValueRef t_list;
- LLVMValueRef attribute_offset;
- LLVMValueRef buffer_index;
+ LLVMValueRef vertex_index;
LLVMValueRef args[3];
- LLVMValueRef input;
+ LLVMValueRef input[3];
/* Load the T list */
t_list_ptr = LLVMGetParam(ctx->main_fn, SI_PARAM_VERTEX_BUFFERS);
t_offset = lp_build_const_int32(gallivm, input_index);
- t_list = build_indexed_load_const(ctx, t_list_ptr, t_offset);
+ t_list = ac_build_indexed_load_const(&ctx->ac, t_list_ptr, t_offset);
- /* Build the attribute offset */
- attribute_offset = lp_build_const_int32(gallivm, 0);
-
- buffer_index = LLVMGetParam(ctx->main_fn,
+ vertex_index = LLVMGetParam(ctx->main_fn,
ctx->param_vertex_index0 +
input_index);
+ fix_fetch = ctx->shader->key.mono.vs.fix_fetch[input_index];
+
+ /* Do multiple loads for special formats. */
+ switch (fix_fetch) {
+ case SI_FIX_FETCH_RGB_64_FLOAT:
+ num_fetches = 3; /* 3 2-dword loads */
+ fetch_stride = 8;
+ break;
+ case SI_FIX_FETCH_RGBA_64_FLOAT:
+ num_fetches = 2; /* 2 4-dword loads */
+ fetch_stride = 16;
+ break;
+ case SI_FIX_FETCH_RGB_8:
+ case SI_FIX_FETCH_RGB_8_INT:
+ num_fetches = 3;
+ fetch_stride = 1;
+ break;
+ case SI_FIX_FETCH_RGB_16:
+ case SI_FIX_FETCH_RGB_16_INT:
+ num_fetches = 3;
+ fetch_stride = 2;
+ break;
+ default:
+ num_fetches = 1;
+ fetch_stride = 0;
+ }
+
args[0] = t_list;
- args[1] = attribute_offset;
- args[2] = buffer_index;
- input = lp_build_intrinsic(gallivm->builder,
- "llvm.SI.vs.load.input", ctx->v4f32, args, 3,
- LP_FUNC_ATTR_READNONE);
+ args[2] = vertex_index;
+
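+	/* args[1] is the attribute byte offset; each extra fetch advances by fetch_stride. */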
+ for (unsigned i = 0; i < num_fetches; i++) {
+ args[1] = LLVMConstInt(ctx->i32, fetch_stride * i, 0);
+
+ input[i] = lp_build_intrinsic(gallivm->builder,
+ "llvm.SI.vs.load.input", ctx->v4f32, args, 3,
+ LP_FUNC_ATTR_READNONE);
+ }
/* Break up the vec4 into individual components */
for (chan = 0; chan < 4; chan++) {
LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
out[chan] = LLVMBuildExtractElement(gallivm->builder,
- input, llvm_chan, "");
+ input[0], llvm_chan, "");
}
- fix_fetch = (ctx->shader->key.mono.vs.fix_fetch >> (2 * input_index)) & 3;
- if (fix_fetch) {
+ switch (fix_fetch) {
+ case SI_FIX_FETCH_A2_SNORM:
+ case SI_FIX_FETCH_A2_SSCALED:
+ case SI_FIX_FETCH_A2_SINT: {
/* The hardware returns an unsigned value; convert it to a
* signed one.
*/
}
out[3] = tmp;
+ break;
+ }
+ case SI_FIX_FETCH_RGBA_32_UNORM:
+ case SI_FIX_FETCH_RGBX_32_UNORM:
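+		/* The hardware returned raw unsigned integers; normalize to [0, 1] here. */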
+ for (chan = 0; chan < 4; chan++) {
+ out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
+ ctx->i32, "");
+ out[chan] = LLVMBuildUIToFP(gallivm->builder,
+ out[chan], ctx->f32, "");
+ out[chan] = LLVMBuildFMul(gallivm->builder, out[chan],
+ LLVMConstReal(ctx->f32, 1.0 / UINT_MAX), "");
+ }
+ /* RGBX UINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
+ if (fix_fetch == SI_FIX_FETCH_RGBX_32_UNORM)
+ out[3] = LLVMConstReal(ctx->f32, 1);
+ break;
+ case SI_FIX_FETCH_RGBA_32_SNORM:
+ case SI_FIX_FETCH_RGBX_32_SNORM:
+ case SI_FIX_FETCH_RGBA_32_FIXED:
+ case SI_FIX_FETCH_RGBX_32_FIXED: {
+ double scale;
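+		/* FIXED is 16.16 fixed point; SNORM maps [-INT_MAX, INT_MAX] onto [-1, 1]. */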
+ if (fix_fetch >= SI_FIX_FETCH_RGBA_32_FIXED)
+ scale = 1.0 / 0x10000;
+ else
+ scale = 1.0 / INT_MAX;
+
+ for (chan = 0; chan < 4; chan++) {
+ out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
+ ctx->i32, "");
+ out[chan] = LLVMBuildSIToFP(gallivm->builder,
+ out[chan], ctx->f32, "");
+ out[chan] = LLVMBuildFMul(gallivm->builder, out[chan],
+ LLVMConstReal(ctx->f32, scale), "");
+ }
+ /* RGBX SINT returns 1 in alpha, which would be rounded to 0 by normalizing. */
+ if (fix_fetch == SI_FIX_FETCH_RGBX_32_SNORM ||
+ fix_fetch == SI_FIX_FETCH_RGBX_32_FIXED)
+ out[3] = LLVMConstReal(ctx->f32, 1);
+ break;
+ }
+ case SI_FIX_FETCH_RGBA_32_USCALED:
+ for (chan = 0; chan < 4; chan++) {
+ out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
+ ctx->i32, "");
+ out[chan] = LLVMBuildUIToFP(gallivm->builder,
+ out[chan], ctx->f32, "");
+ }
+ break;
+ case SI_FIX_FETCH_RGBA_32_SSCALED:
+ for (chan = 0; chan < 4; chan++) {
+ out[chan] = LLVMBuildBitCast(gallivm->builder, out[chan],
+ ctx->i32, "");
+ out[chan] = LLVMBuildSIToFP(gallivm->builder,
+ out[chan], ctx->f32, "");
+ }
+ break;
+ case SI_FIX_FETCH_RG_64_FLOAT:
+ for (chan = 0; chan < 2; chan++)
+ out[chan] = extract_double_to_float(ctx, input[0], chan);
+
+ out[2] = LLVMConstReal(ctx->f32, 0);
+ out[3] = LLVMConstReal(ctx->f32, 1);
+ break;
+ case SI_FIX_FETCH_RGB_64_FLOAT:
+ for (chan = 0; chan < 3; chan++)
+ out[chan] = extract_double_to_float(ctx, input[chan], 0);
+
+ out[3] = LLVMConstReal(ctx->f32, 1);
+ break;
+ case SI_FIX_FETCH_RGBA_64_FLOAT:
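+		/* Two 4-dword fetches, each holding two packed doubles. */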
+ for (chan = 0; chan < 4; chan++) {
+ out[chan] = extract_double_to_float(ctx, input[chan / 2],
+ chan % 2);
+ }
+ break;
+ case SI_FIX_FETCH_RGB_8:
+ case SI_FIX_FETCH_RGB_8_INT:
+ case SI_FIX_FETCH_RGB_16:
+ case SI_FIX_FETCH_RGB_16_INT:
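+		/* Each component came from its own fetch; take the first channel of each. */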
+ for (chan = 0; chan < 3; chan++) {
+ out[chan] = LLVMBuildExtractElement(gallivm->builder,
+ input[chan],
+ ctx->i32_0, "");
+ }
+ if (fix_fetch == SI_FIX_FETCH_RGB_8 ||
+ fix_fetch == SI_FIX_FETCH_RGB_16) {
+ out[3] = LLVMConstReal(ctx->f32, 1);
+ } else {
+ out[3] = LLVMBuildBitCast(gallivm->builder, ctx->i32_1,
+ ctx->f32, "");
+ }
+ break;
}
}
const struct tgsi_ind_register *ind,
int rel_index)
{
- struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
+ struct gallivm_state *gallivm = ctx->bld_base.base.gallivm;
LLVMValueRef result;
- result = ctx->soa.addr[ind->Index][ind->Swizzle];
+ result = ctx->addrs[ind->Index][ind->Swizzle];
result = LLVMBuildLoad(gallivm->builder, result, "");
result = LLVMBuildAdd(gallivm->builder, result,
lp_build_const_int32(gallivm, rel_index), "");
LLVMValueRef vertex_dw_stride,
LLVMValueRef base_addr)
{
- struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
+ struct gallivm_state *gallivm = ctx->bld_base.base.gallivm;
struct tgsi_shader_info *info = &ctx->shader->selector->info;
ubyte *name, *index, *array_first;
int first, param;
LLVMValueRef vertex_index,
LLVMValueRef param_index)
{
- struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
+ struct gallivm_state *gallivm = ctx->bld_base.base.gallivm;
LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
LLVMValueRef param_stride, constant16;
const struct tgsi_full_dst_register *dst,
const struct tgsi_full_src_register *src)
{
- struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
+ struct gallivm_state *gallivm = ctx->bld_base.base.gallivm;
struct tgsi_shader_info *info = &ctx->shader->selector->info;
ubyte *name, *index, *array_first;
struct tgsi_full_src_register reg;
return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
-/* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
- * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
- * or v4i32 (num_channels=3,4). */
-static void build_tbuffer_store(struct si_shader_context *ctx,
- LLVMValueRef rsrc,
- LLVMValueRef vdata,
- unsigned num_channels,
- LLVMValueRef vaddr,
- LLVMValueRef soffset,
- unsigned inst_offset,
- unsigned dfmt,
- unsigned nfmt,
- unsigned offen,
- unsigned idxen,
- unsigned glc,
- unsigned slc,
- unsigned tfe)
-{
- struct gallivm_state *gallivm = &ctx->gallivm;
- LLVMValueRef args[] = {
- rsrc,
- vdata,
- LLVMConstInt(ctx->i32, num_channels, 0),
- vaddr,
- soffset,
- LLVMConstInt(ctx->i32, inst_offset, 0),
- LLVMConstInt(ctx->i32, dfmt, 0),
- LLVMConstInt(ctx->i32, nfmt, 0),
- LLVMConstInt(ctx->i32, offen, 0),
- LLVMConstInt(ctx->i32, idxen, 0),
- LLVMConstInt(ctx->i32, glc, 0),
- LLVMConstInt(ctx->i32, slc, 0),
- LLVMConstInt(ctx->i32, tfe, 0)
- };
-
- /* The instruction offset field has 12 bits */
- assert(offen || inst_offset < (1 << 12));
-
- /* The intrinsic is overloaded, we need to add a type suffix for overloading to work. */
- unsigned func = CLAMP(num_channels, 1, 3) - 1;
- const char *types[] = {"i32", "v2i32", "v4i32"};
- char name[256];
- snprintf(name, sizeof(name), "llvm.SI.tbuffer.store.%s", types[func]);
-
- lp_build_intrinsic(gallivm->builder, name, ctx->voidt,
- args, ARRAY_SIZE(args), 0);
-}
-
-static void build_tbuffer_store_dwords(struct si_shader_context *ctx,
- LLVMValueRef rsrc,
- LLVMValueRef vdata,
- unsigned num_channels,
- LLVMValueRef vaddr,
- LLVMValueRef soffset,
- unsigned inst_offset)
-{
- static unsigned dfmt[] = {
- V_008F0C_BUF_DATA_FORMAT_32,
- V_008F0C_BUF_DATA_FORMAT_32_32,
- V_008F0C_BUF_DATA_FORMAT_32_32_32,
- V_008F0C_BUF_DATA_FORMAT_32_32_32_32
- };
- assert(num_channels >= 1 && num_channels <= 4);
-
- build_tbuffer_store(ctx, rsrc, vdata, num_channels, vaddr, soffset,
- inst_offset, dfmt[num_channels-1],
- V_008F0C_BUF_NUM_FORMAT_UINT, 1, 0, 1, 1, 0);
-}
-
-static LLVMValueRef build_buffer_load(struct si_shader_context *ctx,
- LLVMValueRef rsrc,
- int num_channels,
- LLVMValueRef vindex,
- LLVMValueRef voffset,
- LLVMValueRef soffset,
- unsigned inst_offset,
- unsigned glc,
- unsigned slc)
-{
- struct gallivm_state *gallivm = &ctx->gallivm;
- unsigned func = CLAMP(num_channels, 1, 3) - 1;
-
- if (HAVE_LLVM >= 0x309) {
- LLVMValueRef args[] = {
- LLVMBuildBitCast(gallivm->builder, rsrc, ctx->v4i32, ""),
- vindex ? vindex : LLVMConstInt(ctx->i32, 0, 0),
- LLVMConstInt(ctx->i32, inst_offset, 0),
- LLVMConstInt(ctx->i1, glc, 0),
- LLVMConstInt(ctx->i1, slc, 0)
- };
-
- LLVMTypeRef types[] = {ctx->f32, LLVMVectorType(ctx->f32, 2),
- ctx->v4f32};
- const char *type_names[] = {"f32", "v2f32", "v4f32"};
- char name[256];
-
- if (voffset) {
- args[2] = LLVMBuildAdd(gallivm->builder, args[2], voffset,
- "");
- }
-
- if (soffset) {
- args[2] = LLVMBuildAdd(gallivm->builder, args[2], soffset,
- "");
- }
-
- snprintf(name, sizeof(name), "llvm.amdgcn.buffer.load.%s",
- type_names[func]);
-
- return lp_build_intrinsic(gallivm->builder, name, types[func], args,
- ARRAY_SIZE(args), LP_FUNC_ATTR_READONLY);
- } else {
- LLVMValueRef args[] = {
- LLVMBuildBitCast(gallivm->builder, rsrc, ctx->v16i8, ""),
- voffset ? voffset : vindex,
- soffset,
- LLVMConstInt(ctx->i32, inst_offset, 0),
- LLVMConstInt(ctx->i32, voffset ? 1 : 0, 0), // offen
- LLVMConstInt(ctx->i32, vindex ? 1 : 0, 0), //idxen
- LLVMConstInt(ctx->i32, glc, 0),
- LLVMConstInt(ctx->i32, slc, 0),
- LLVMConstInt(ctx->i32, 0, 0), // TFE
- };
-
- LLVMTypeRef types[] = {ctx->i32, LLVMVectorType(ctx->i32, 2),
- ctx->v4i32};
- const char *type_names[] = {"i32", "v2i32", "v4i32"};
- const char *arg_type = "i32";
- char name[256];
-
- if (voffset && vindex) {
- LLVMValueRef vaddr[] = {vindex, voffset};
-
- arg_type = "v2i32";
- args[1] = lp_build_gather_values(gallivm, vaddr, 2);
- }
-
- snprintf(name, sizeof(name), "llvm.SI.buffer.load.dword.%s.%s",
- type_names[func], arg_type);
-
- return lp_build_intrinsic(gallivm->builder, name, types[func], args,
- ARRAY_SIZE(args), LP_FUNC_ATTR_READONLY);
- }
-}
-
static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
enum tgsi_opcode_type type, unsigned swizzle,
LLVMValueRef buffer, LLVMValueRef offset,
LLVMTypeRef vec_type = LLVMVectorType(llvm_type, 4);
if (swizzle == ~0) {
- value = build_buffer_load(ctx, buffer, 4, NULL, base, offset,
- 0, 1, 0);
+ value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
+ 0, 1, 0);
return LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
}
if (!tgsi_type_is_64bit(type)) {
- value = build_buffer_load(ctx, buffer, 4, NULL, base, offset,
- 0, 1, 0);
+ value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
+ 0, 1, 0);
value = LLVMBuildBitCast(gallivm->builder, value, vec_type, "");
return LLVMBuildExtractElement(gallivm->builder, value,
lp_build_const_int32(gallivm, swizzle), "");
}
- value = build_buffer_load(ctx, buffer, 1, NULL, base, offset,
+ value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
swizzle * 4, 1, 0);
- value2 = build_buffer_load(ctx, buffer, 1, NULL, base, offset,
+ value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
swizzle * 4 + 4, 1, 0);
return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
lp_build_const_int32(gallivm, swizzle));
- value = build_indexed_load(ctx, ctx->lds, dw_addr, false);
+ value = ac_build_indexed_load(&ctx->ac, ctx->lds, dw_addr, false);
if (tgsi_type_is_64bit(type)) {
LLVMValueRef value2;
dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
lp_build_const_int32(gallivm, 1));
- value2 = build_indexed_load(ctx, ctx->lds, dw_addr, false);
+ value2 = ac_build_indexed_load(&ctx->ac, ctx->lds, dw_addr, false);
return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
}
lp_build_const_int32(gallivm, swizzle));
value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
- build_indexed_store(ctx, ctx->lds,
- dw_addr, value);
+ ac_build_indexed_store(&ctx->ac, ctx->lds,
+ dw_addr, value);
}
static LLVMValueRef fetch_input_tcs(
rw_buffers = LLVMGetParam(ctx->main_fn,
SI_PARAM_RW_BUFFERS);
- buffer = build_indexed_load_const(ctx, rw_buffers,
+ buffer = ac_build_indexed_load_const(&ctx->ac, rw_buffers,
lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));
base = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
struct si_shader_context *ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = bld_base->base.gallivm;
const struct tgsi_full_dst_register *reg = &inst->Dst[0];
+ const struct tgsi_shader_info *sh_info = &ctx->shader->selector->info;
unsigned chan_index;
LLVMValueRef dw_addr, stride;
LLVMValueRef rw_buffers, buffer, base, buf_addr;
LLVMValueRef values[4];
+ bool skip_lds_store;
/* Only handle per-patch and per-vertex outputs here.
* Vectors will be lowered to scalars and this function will be called again.
stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
dw_addr = get_tcs_out_current_patch_offset(ctx);
dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
+ skip_lds_store = !sh_info->reads_pervertex_outputs;
} else {
dw_addr = get_tcs_out_current_patch_data_offset(ctx);
dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
+ skip_lds_store = !sh_info->reads_perpatch_outputs;
+
+ if (!reg->Register.Indirect) {
+ int name = sh_info->output_semantic_name[reg->Register.Index];
+
+ /* Always write tess factors into LDS for the TCS epilog. */
+ if (name == TGSI_SEMANTIC_TESSINNER ||
+ name == TGSI_SEMANTIC_TESSOUTER)
+ skip_lds_store = false;
+ }
}
rw_buffers = LLVMGetParam(ctx->main_fn,
SI_PARAM_RW_BUFFERS);
- buffer = build_indexed_load_const(ctx, rw_buffers,
+ buffer = ac_build_indexed_load_const(&ctx->ac, rw_buffers,
lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));
base = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
LLVMValueRef value = dst[chan_index];
if (inst->Instruction.Saturate)
- value = si_llvm_saturate(bld_base, value);
+ value = ac_emit_clamp(&ctx->ac, value);
- lds_store(bld_base, chan_index, dw_addr, value);
+ /* Skip LDS stores if there is no LDS read of this output. */
+ if (!skip_lds_store)
+ lds_store(bld_base, chan_index, dw_addr, value);
value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
values[chan_index] = value;
if (inst->Dst[0].Register.WriteMask != 0xF) {
- build_tbuffer_store_dwords(ctx, buffer, value, 1,
- buf_addr, base,
- 4 * chan_index);
+ ac_build_tbuffer_store_dwords(&ctx->ac, buffer, value, 1,
+ buf_addr, base,
+ 4 * chan_index);
}
}
if (inst->Dst[0].Register.WriteMask == 0xF) {
LLVMValueRef value = lp_build_gather_values(bld_base->base.gallivm,
values, 4);
- build_tbuffer_store_dwords(ctx, buffer, value, 4, buf_addr,
- base, 0);
+ ac_build_tbuffer_store_dwords(&ctx->ac, buffer, value, 4, buf_addr,
+ base, 0);
}
}
struct lp_build_context *base = &bld_base->base;
struct si_shader_context *ctx = si_shader_context(bld_base);
struct si_shader *shader = ctx->shader;
- struct lp_build_context *uint = &ctx->soa.bld_base.uint_bld;
+ struct lp_build_context *uint = &ctx->bld_base.uint_bld;
struct gallivm_state *gallivm = base->gallivm;
LLVMValueRef vtx_offset;
LLVMValueRef args[9];
LLVMValueRef face,
LLVMValueRef result[4])
{
- struct lp_build_context *base = &ctx->soa.bld_base.base;
- struct lp_build_context *uint = &ctx->soa.bld_base.uint_bld;
+ struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
+ struct lp_build_context *base = &bld_base->base;
+ struct lp_build_context *uint = &bld_base->uint_bld;
struct gallivm_state *gallivm = base->gallivm;
- const char *intr_name;
LLVMValueRef attr_number;
+ LLVMValueRef i, j;
unsigned chan;
- attr_number = lp_build_const_int32(gallivm, input_index);
-
/* fs.constant returns the param from the middle vertex, so it's not
* really useful for flat shading. It's meant to be used for custom
* interpolation (but the intrinsic can't fetch from the other two
* to do the right thing. The only reason we use fs.constant is that
* fs.interp cannot be used on integers, because they can be equal
* to NaN.
+ *
+	 * When interp is false we will use fs.constant or, for newer llvm,
+ * amdgcn.interp.mov.
*/
- intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";
+ bool interp = interp_param != NULL;
+
+ attr_number = lp_build_const_int32(gallivm, input_index);
+
+ if (interp) {
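+		/* Split the interpolation parameter into the I, J barycentric coordinates. */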
+ interp_param = LLVMBuildBitCast(gallivm->builder, interp_param,
+ LLVMVectorType(ctx->f32, 2), "");
+
+ i = LLVMBuildExtractElement(gallivm->builder, interp_param,
+ uint->zero, "");
+ j = LLVMBuildExtractElement(gallivm->builder, interp_param,
+ uint->one, "");
+ }
if (semantic_name == TGSI_SEMANTIC_COLOR &&
ctx->shader->key.part.ps.prolog.color_two_side) {
- LLVMValueRef args[4];
LLVMValueRef is_face_positive;
LLVMValueRef back_attr_number;
is_face_positive = LLVMBuildICmp(gallivm->builder, LLVMIntNE,
face, uint->zero, "");
- args[2] = prim_mask;
- args[3] = interp_param;
for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
LLVMValueRef front, back;
- args[0] = llvm_chan;
- args[1] = attr_number;
- front = lp_build_intrinsic(gallivm->builder, intr_name,
- ctx->f32, args, args[3] ? 4 : 3,
- LP_FUNC_ATTR_READNONE);
-
- args[1] = back_attr_number;
- back = lp_build_intrinsic(gallivm->builder, intr_name,
- ctx->f32, args, args[3] ? 4 : 3,
- LP_FUNC_ATTR_READNONE);
+ if (interp) {
+ front = ac_build_fs_interp(&ctx->ac, llvm_chan,
+ attr_number, prim_mask,
+ i, j);
+ back = ac_build_fs_interp(&ctx->ac, llvm_chan,
+ back_attr_number, prim_mask,
+ i, j);
+ } else {
+ front = ac_build_fs_interp_mov(&ctx->ac,
+ lp_build_const_int32(gallivm, 2), /* P0 */
+ llvm_chan, attr_number, prim_mask);
+ back = ac_build_fs_interp_mov(&ctx->ac,
+ lp_build_const_int32(gallivm, 2), /* P0 */
+ llvm_chan, back_attr_number, prim_mask);
+ }
result[chan] = LLVMBuildSelect(gallivm->builder,
is_face_positive,
"");
}
} else if (semantic_name == TGSI_SEMANTIC_FOG) {
- LLVMValueRef args[4];
-
- args[0] = uint->zero;
- args[1] = attr_number;
- args[2] = prim_mask;
- args[3] = interp_param;
- result[0] = lp_build_intrinsic(gallivm->builder, intr_name,
- ctx->f32, args, args[3] ? 4 : 3,
- LP_FUNC_ATTR_READNONE);
+ if (interp) {
+ result[0] = ac_build_fs_interp(&ctx->ac, uint->zero,
+ attr_number, prim_mask, i, j);
+ } else {
+ result[0] = ac_build_fs_interp_mov(&ctx->ac, uint->zero,
+ lp_build_const_int32(gallivm, 2), /* P0 */
+ attr_number, prim_mask);
+ }
result[1] =
result[2] = lp_build_const_float(gallivm, 0.0f);
result[3] = lp_build_const_float(gallivm, 1.0f);
} else {
for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
- LLVMValueRef args[4];
LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
- args[0] = llvm_chan;
- args[1] = attr_number;
- args[2] = prim_mask;
- args[3] = interp_param;
- result[chan] = lp_build_intrinsic(gallivm->builder, intr_name,
- ctx->f32, args, args[3] ? 4 : 3,
- LP_FUNC_ATTR_READNONE);
+ if (interp) {
+ result[chan] = ac_build_fs_interp(&ctx->ac,
+ llvm_chan, attr_number, prim_mask, i, j);
+ } else {
+ result[chan] = ac_build_fs_interp_mov(&ctx->ac,
+ lp_build_const_int32(gallivm, 2), /* P0 */
+ llvm_chan, attr_number, prim_mask);
+ }
}
}
}
const struct tgsi_full_declaration *decl,
LLVMValueRef out[4])
{
- struct lp_build_context *base = &radeon_bld->soa.bld_base.base;
+ struct lp_build_context *base = &radeon_bld->bld_base.base;
struct si_shader_context *ctx =
- si_shader_context(&radeon_bld->soa.bld_base);
+ si_shader_context(&radeon_bld->bld_base);
struct si_shader *shader = ctx->shader;
LLVMValueRef main_fn = radeon_bld->main_fn;
LLVMValueRef interp_param = NULL;
static LLVMValueRef get_sample_id(struct si_shader_context *radeon_bld)
{
- return unpack_param(si_shader_context(&radeon_bld->soa.bld_base),
+ return unpack_param(si_shader_context(&radeon_bld->bld_base),
SI_PARAM_ANCILLARY, 8, 4);
}
-/**
- * Set range metadata on an instruction. This can only be used on load and
- * call instructions. If you know an instruction can only produce the values
- * 0, 1, 2, you would do set_range_metadata(value, 0, 3);
- * \p lo is the minimum value inclusive.
- * \p hi is the maximum value exclusive.
- */
-static void set_range_metadata(struct si_shader_context *ctx,
- LLVMValueRef value, unsigned lo, unsigned hi)
-{
- LLVMValueRef range_md, md_args[2];
- LLVMTypeRef type = LLVMTypeOf(value);
- LLVMContextRef context = LLVMGetTypeContext(type);
-
- md_args[0] = LLVMConstInt(type, lo, false);
- md_args[1] = LLVMConstInt(type, hi, false);
- range_md = LLVMMDNodeInContext(context, md_args, 2);
- LLVMSetMetadata(value, ctx->range_md_kind, range_md);
-}
-
-static LLVMValueRef get_thread_id(struct si_shader_context *ctx)
-{
- struct gallivm_state *gallivm = &ctx->gallivm;
- LLVMValueRef tid;
-
- if (HAVE_LLVM < 0x0308) {
- tid = lp_build_intrinsic(gallivm->builder, "llvm.SI.tid",
- ctx->i32, NULL, 0, LP_FUNC_ATTR_READNONE);
- } else {
- LLVMValueRef tid_args[2];
- tid_args[0] = lp_build_const_int32(gallivm, 0xffffffff);
- tid_args[1] = lp_build_const_int32(gallivm, 0);
- tid_args[1] = lp_build_intrinsic(gallivm->builder,
- "llvm.amdgcn.mbcnt.lo", ctx->i32,
- tid_args, 2, LP_FUNC_ATTR_READNONE);
-
- tid = lp_build_intrinsic(gallivm->builder,
- "llvm.amdgcn.mbcnt.hi", ctx->i32,
- tid_args, 2, LP_FUNC_ATTR_READNONE);
- }
- set_range_metadata(ctx, tid, 0, 64);
- return tid;
-}
/**
* Load a dword from a constant buffer.
static LLVMValueRef load_sample_position(struct si_shader_context *radeon_bld, LLVMValueRef sample_id)
{
struct si_shader_context *ctx =
- si_shader_context(&radeon_bld->soa.bld_base);
- struct lp_build_context *uint_bld = &radeon_bld->soa.bld_base.uint_bld;
+ si_shader_context(&radeon_bld->bld_base);
+ struct lp_build_context *uint_bld = &radeon_bld->bld_base.uint_bld;
struct gallivm_state *gallivm = &radeon_bld->gallivm;
LLVMBuilderRef builder = gallivm->builder;
LLVMValueRef desc = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
LLVMValueRef buf_index = lp_build_const_int32(gallivm, SI_PS_CONST_SAMPLE_POSITIONS);
- LLVMValueRef resource = build_indexed_load_const(ctx, desc, buf_index);
+ LLVMValueRef resource = ac_build_indexed_load_const(&ctx->ac, desc, buf_index);
/* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
LLVMValueRef offset0 = lp_build_mul_imm(uint_bld, sample_id, 8);
const struct tgsi_full_declaration *decl)
{
struct si_shader_context *ctx =
- si_shader_context(&radeon_bld->soa.bld_base);
- struct lp_build_context *bld = &radeon_bld->soa.bld_base.base;
+ si_shader_context(&radeon_bld->bld_base);
+ struct lp_build_context *bld = &radeon_bld->bld_base.base;
struct gallivm_state *gallivm = &radeon_bld->gallivm;
LLVMValueRef value = 0;
LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_X_FLOAT),
LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Y_FLOAT),
LLVMGetParam(radeon_bld->main_fn, SI_PARAM_POS_Z_FLOAT),
- lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base, TGSI_OPCODE_RCP,
+ lp_build_emit_llvm_unary(&radeon_bld->bld_base, TGSI_OPCODE_RCP,
LLVMGetParam(radeon_bld->main_fn,
SI_PARAM_POS_W_FLOAT)),
};
lp_build_const_float(gallivm, 0),
lp_build_const_float(gallivm, 0)
};
- pos[0] = lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base,
+ pos[0] = lp_build_emit_llvm_unary(&radeon_bld->bld_base,
TGSI_OPCODE_FRC, pos[0]);
- pos[1] = lp_build_emit_llvm_unary(&radeon_bld->soa.bld_base,
+ pos[1] = lp_build_emit_llvm_unary(&radeon_bld->bld_base,
TGSI_OPCODE_FRC, pos[1]);
value = lp_build_gather_values(gallivm, pos, 4);
break;
rw_buffers = LLVMGetParam(ctx->main_fn,
SI_PARAM_RW_BUFFERS);
- buffer = build_indexed_load_const(ctx, rw_buffers,
+ buffer = ac_build_indexed_load_const(&ctx->ac, rw_buffers,
lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));
base = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
addr = get_tcs_tes_buffer_address(ctx, NULL,
lp_build_const_int32(gallivm, param));
- value = buffer_load(&radeon_bld->soa.bld_base, TGSI_TYPE_FLOAT,
+ value = buffer_load(&radeon_bld->bld_base, TGSI_TYPE_FLOAT,
~0, buffer, base, addr);
break;
slot = lp_build_const_int32(gallivm, SI_HS_CONST_DEFAULT_TESS_LEVELS);
buf = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
- buf = build_indexed_load_const(ctx, buf, slot);
+ buf = ac_build_indexed_load_const(&ctx->ac, buf, slot);
offset = decl->Semantic.Name == TGSI_SEMANTIC_DEFAULT_TESSINNER_SI ? 4 : 0;
for (i = 0; i < 4; i++)
}
case TGSI_SEMANTIC_PRIMID:
- value = get_primitive_id(&radeon_bld->soa.bld_base, 0);
+ value = get_primitive_id(&radeon_bld->bld_base, 0);
break;
case TGSI_SEMANTIC_GRID_SIZE:
value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_THREAD_ID);
break;
-#if HAVE_LLVM >= 0x0309
case TGSI_SEMANTIC_HELPER_INVOCATION:
- value = lp_build_intrinsic(gallivm->builder,
- "llvm.amdgcn.ps.live",
- ctx->i1, NULL, 0,
- LP_FUNC_ATTR_READNONE);
- value = LLVMBuildNot(gallivm->builder, value, "");
- value = LLVMBuildSExt(gallivm->builder, value, ctx->i32, "");
+ if (HAVE_LLVM >= 0x0309) {
+ value = lp_build_intrinsic(gallivm->builder,
+ "llvm.amdgcn.ps.live",
+ ctx->i1, NULL, 0,
+ LP_FUNC_ATTR_READNONE);
+ value = LLVMBuildNot(gallivm->builder, value, "");
+ value = LLVMBuildSExt(gallivm->builder, value, ctx->i32, "");
+ } else {
+ assert(!"TGSI_SEMANTIC_HELPER_INVOCATION unsupported");
+ return;
+ }
break;
-#endif
default:
assert(!"unknown system value");
const struct tgsi_full_declaration *decl)
{
struct si_shader_context *ctx =
- si_shader_context(&radeon_bld->soa.bld_base);
+ si_shader_context(&radeon_bld->bld_base);
struct si_shader_selector *sel = ctx->shader->selector;
struct gallivm_state *gallivm = &radeon_bld->gallivm;
LLVMValueRef list_ptr = LLVMGetParam(ctx->main_fn,
SI_PARAM_CONST_BUFFERS);
- return build_indexed_load_const(ctx, list_ptr,
+ return ac_build_indexed_load_const(&ctx->ac, list_ptr,
LLVMConstInt(ctx->i32, i, 0));
}
		index = get_bounded_indirect_index(ctx, &reg->DimIndirect,
reg->Dimension.Index,
SI_NUM_CONST_BUFFERS);
- bufp = build_indexed_load_const(ctx, ptr, index);
+ bufp = ac_build_indexed_load_const(&ctx->ac, ptr, index);
} else
bufp = load_const_buffer_desc(ctx, buf);
if (reg->Register.Indirect) {
- addr = ctx->soa.addr[ireg->Index][ireg->Swizzle];
+ addr = ctx->addrs[ireg->Index][ireg->Swizzle];
addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
addr = lp_build_mul_imm(&bld_base->uint_bld, addr, 16);
addr = lp_build_add(&bld_base->uint_bld, addr,
LLVMValueRef *args)
{
struct si_shader_context *ctx = si_shader_context(bld_base);
- struct lp_build_context *uint =
- &ctx->soa.bld_base.uint_bld;
+ struct lp_build_context *uint = &ctx->bld_base.uint_bld;
struct lp_build_context *base = &bld_base->base;
struct gallivm_state *gallivm = base->gallivm;
LLVMBuilderRef builder = base->gallivm->builder;
LLVMValueRef val[4];
unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
unsigned chan;
- bool is_int8;
+ bool is_int8, is_int10;
/* Default is 0xf. Adjusted below depending on the format. */
args[0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
assert(cbuf >= 0 && cbuf < 8);
spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
is_int8 = (key->part.ps.epilog.color_is_int8 >> cbuf) & 0x1;
+ is_int10 = (key->part.ps.epilog.color_is_int10 >> cbuf) & 0x1;
}
args[4] = uint->zero; /* COMPR flag */
case V_028714_SPI_SHADER_UNORM16_ABGR:
for (chan = 0; chan < 4; chan++) {
- val[chan] = si_llvm_saturate(bld_base, values[chan]);
+ val[chan] = ac_emit_clamp(&ctx->ac, values[chan]);
val[chan] = LLVMBuildFMul(builder, val[chan],
lp_build_const_float(gallivm, 65535), "");
val[chan] = LLVMBuildFAdd(builder, val[chan],
break;
case V_028714_SPI_SHADER_UINT16_ABGR: {
- LLVMValueRef max = lp_build_const_int32(gallivm, is_int8 ?
- 255 : 65535);
+ LLVMValueRef max_rgb = lp_build_const_int32(gallivm,
+ is_int8 ? 255 : is_int10 ? 1023 : 65535);
+ LLVMValueRef max_alpha =
+ !is_int10 ? max_rgb : lp_build_const_int32(gallivm, 3);
+
/* Clamp. */
for (chan = 0; chan < 4; chan++) {
val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
val[chan] = lp_build_emit_llvm_binary(bld_base, TGSI_OPCODE_UMIN,
- val[chan], max);
+ val[chan],
+ chan == 3 ? max_alpha : max_rgb);
}
args[4] = uint->one; /* COMPR flag */
}
case V_028714_SPI_SHADER_SINT16_ABGR: {
- LLVMValueRef max = lp_build_const_int32(gallivm, is_int8 ?
- 127 : 32767);
- LLVMValueRef min = lp_build_const_int32(gallivm, is_int8 ?
- -128 : -32768);
+ LLVMValueRef max_rgb = lp_build_const_int32(gallivm,
+ is_int8 ? 127 : is_int10 ? 511 : 32767);
+ LLVMValueRef min_rgb = lp_build_const_int32(gallivm,
+ is_int8 ? -128 : is_int10 ? -512 : -32768);
+ LLVMValueRef max_alpha =
+ !is_int10 ? max_rgb : lp_build_const_int32(gallivm, 1);
+ LLVMValueRef min_alpha =
+ !is_int10 ? min_rgb : lp_build_const_int32(gallivm, -2);
+
/* Clamp. */
for (chan = 0; chan < 4; chan++) {
val[chan] = bitcast(bld_base, TGSI_TYPE_UNSIGNED, values[chan]);
val[chan] = lp_build_emit_llvm_binary(bld_base,
- TGSI_OPCODE_IMIN,
- val[chan], max);
+ TGSI_OPCODE_IMIN,
+ val[chan], chan == 3 ? max_alpha : max_rgb);
val[chan] = lp_build_emit_llvm_binary(bld_base,
- TGSI_OPCODE_IMAX,
- val[chan], min);
+ TGSI_OPCODE_IMAX,
+ val[chan], chan == 3 ? min_alpha : min_rgb);
}
args[4] = uint->one; /* COMPR flag */
{
struct si_shader_context *ctx = si_shader_context(bld_base);
struct lp_build_context *base = &bld_base->base;
- struct lp_build_context *uint = &ctx->soa.bld_base.uint_bld;
+ struct lp_build_context *uint = &ctx->bld_base.uint_bld;
unsigned reg_index;
unsigned chan;
unsigned const_chan;
LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
LLVMValueRef constbuf_index = lp_build_const_int32(base->gallivm,
SI_VS_CONST_CLIP_PLANES);
- LLVMValueRef const_resource = build_indexed_load_const(ctx, ptr, constbuf_index);
+ LLVMValueRef const_resource = ac_build_indexed_load_const(&ctx->ac, ptr, constbuf_index);
for (reg_index = 0; reg_index < 2; reg_index ++) {
LLVMValueRef *args = pos[2 + reg_index];
}
}
-/* On SI, the vertex shader is responsible for writing streamout data
- * to buffers. */
-static void si_llvm_emit_streamout(struct si_shader_context *ctx,
- struct si_shader_output_values *outputs,
- unsigned noutput)
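+/* Store one streamout output: pack its components and write them to its
+ * streamout buffer at the per-thread write offset. */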
+static void emit_streamout_output(struct si_shader_context *ctx,
+ LLVMValueRef const *so_buffers,
+ LLVMValueRef const *so_write_offsets,
+ struct pipe_stream_output *stream_out,
+ struct si_shader_output_values *shader_out)
{
- struct pipe_stream_output_info *so = &ctx->shader->selector->so;
struct gallivm_state *gallivm = &ctx->gallivm;
LLVMBuilderRef builder = gallivm->builder;
- int i, j;
- struct lp_build_if_state if_ctx;
- LLVMValueRef so_buffers[4];
- LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
- SI_PARAM_RW_BUFFERS);
+ unsigned buf_idx = stream_out->output_buffer;
+ unsigned start = stream_out->start_component;
+ unsigned num_comps = stream_out->num_components;
+ LLVMValueRef out[4];
- /* Load the descriptors. */
- for (i = 0; i < 4; ++i) {
- if (ctx->shader->selector->so.stride[i]) {
- LLVMValueRef offset = lp_build_const_int32(gallivm,
- SI_VS_STREAMOUT_BUF0 + i);
+ assert(num_comps && num_comps <= 4);
+ if (!num_comps || num_comps > 4)
+ return;
+
+ /* Load the output as int. */
+ for (int j = 0; j < num_comps; j++) {
+ assert(stream_out->stream == shader_out->vertex_stream[start + j]);
+
+ out[j] = LLVMBuildBitCast(builder,
+ shader_out->values[start + j],
+ ctx->i32, "");
+ }
+
+ /* Pack the output. */
+ LLVMValueRef vdata = NULL;
- so_buffers[i] = build_indexed_load_const(ctx, buf_ptr, offset);
+ switch (num_comps) {
+ case 1: /* as i32 */
+ vdata = out[0];
+ break;
+ case 2: /* as v2i32 */
+ case 3: /* as v4i32 (aligned to 4) */
+ case 4: /* as v4i32 */
+ vdata = LLVMGetUndef(LLVMVectorType(ctx->i32, util_next_power_of_two(num_comps)));
+ for (int j = 0; j < num_comps; j++) {
+ vdata = LLVMBuildInsertElement(builder, vdata, out[j],
+ LLVMConstInt(ctx->i32, j, 0), "");
}
+ break;
}
+ ac_build_tbuffer_store_dwords(&ctx->ac, so_buffers[buf_idx],
+ vdata, num_comps,
+ so_write_offsets[buf_idx],
+ LLVMConstInt(ctx->i32, 0, 0),
+ stream_out->dst_offset * 4);
+}
+
+/**
+ * Write streamout data to buffers for vertex stream @p stream (different
+ * vertex streams can occur for GS copy shaders).
+ */
+static void si_llvm_emit_streamout(struct si_shader_context *ctx,
+ struct si_shader_output_values *outputs,
+ unsigned noutput, unsigned stream)
+{
+ struct si_shader_selector *sel = ctx->shader->selector;
+ struct pipe_stream_output_info *so = &sel->so;
+ struct gallivm_state *gallivm = &ctx->gallivm;
+ LLVMBuilderRef builder = gallivm->builder;
+ int i;
+ struct lp_build_if_state if_ctx;
+
/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
LLVMValueRef so_vtx_count =
unpack_param(ctx, ctx->param_streamout_config, 16, 7);
- LLVMValueRef tid = get_thread_id(ctx);
+ LLVMValueRef tid = ac_get_thread_id(&ctx->ac);
/* can_emit = tid < so_vtx_count; */
LLVMValueRef can_emit =
LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");
- LLVMValueRef stream_id =
- unpack_param(ctx, ctx->param_streamout_config, 24, 2);
-
/* Emit the streamout code conditionally. This actually avoids
* out-of-bounds buffer access. The hw tells us via the SGPR
* (so_vtx_count) which threads are allowed to emit streamout data. */
/* Compute (streamout_write_index + thread_id). */
so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");
- /* Compute the write offset for each enabled buffer. */
+ /* Load the descriptor and compute the write offset for each
+ * enabled buffer. */
LLVMValueRef so_write_offset[4] = {};
+ LLVMValueRef so_buffers[4];
+ LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
+ SI_PARAM_RW_BUFFERS);
+
for (i = 0; i < 4; i++) {
if (!so->stride[i])
continue;
+ LLVMValueRef offset = lp_build_const_int32(gallivm,
+ SI_VS_STREAMOUT_BUF0 + i);
+
+ so_buffers[i] = ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);
+
LLVMValueRef so_offset = LLVMGetParam(ctx->main_fn,
ctx->param_streamout_offset[i]);
so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");
/* Write streamout data. */
for (i = 0; i < so->num_outputs; i++) {
- unsigned buf_idx = so->output[i].output_buffer;
unsigned reg = so->output[i].register_index;
- unsigned start = so->output[i].start_component;
- unsigned num_comps = so->output[i].num_components;
- unsigned stream = so->output[i].stream;
- LLVMValueRef out[4];
- struct lp_build_if_state if_ctx_stream;
-
- assert(num_comps && num_comps <= 4);
- if (!num_comps || num_comps > 4)
- continue;
if (reg >= noutput)
continue;
- /* Load the output as int. */
- for (j = 0; j < num_comps; j++) {
- out[j] = LLVMBuildBitCast(builder,
- outputs[reg].values[start+j],
- ctx->i32, "");
- }
-
- /* Pack the output. */
- LLVMValueRef vdata = NULL;
-
- switch (num_comps) {
- case 1: /* as i32 */
- vdata = out[0];
- break;
- case 2: /* as v2i32 */
- case 3: /* as v4i32 (aligned to 4) */
- case 4: /* as v4i32 */
- vdata = LLVMGetUndef(LLVMVectorType(ctx->i32, util_next_power_of_two(num_comps)));
- for (j = 0; j < num_comps; j++) {
- vdata = LLVMBuildInsertElement(builder, vdata, out[j],
- LLVMConstInt(ctx->i32, j, 0), "");
- }
- break;
- }
+ if (stream != so->output[i].stream)
+ continue;
- LLVMValueRef can_emit_stream =
- LLVMBuildICmp(builder, LLVMIntEQ,
- stream_id,
- lp_build_const_int32(gallivm, stream), "");
-
- lp_build_if(&if_ctx_stream, gallivm, can_emit_stream);
- build_tbuffer_store_dwords(ctx, so_buffers[buf_idx],
- vdata, num_comps,
- so_write_offset[buf_idx],
- LLVMConstInt(ctx->i32, 0, 0),
- so->output[i].dst_offset*4);
- lp_build_endif(&if_ctx_stream);
+ emit_streamout_output(ctx, so_buffers, so_write_offset,
+ &so->output[i], &outputs[reg]);
}
}
lp_build_endif(&if_ctx);
struct si_shader_context *ctx = si_shader_context(bld_base);
struct si_shader *shader = ctx->shader;
struct lp_build_context *base = &bld_base->base;
- struct lp_build_context *uint =
- &ctx->soa.bld_base.uint_bld;
+ struct lp_build_context *uint = &ctx->bld_base.uint_bld;
LLVMValueRef args[9];
LLVMValueRef pos_args[4][9] = { { 0 } };
LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
unsigned pos_idx;
int i;
- if (outputs && ctx->shader->selector->so.num_outputs) {
- si_llvm_emit_streamout(ctx, outputs, noutput);
- }
-
for (i = 0; i < noutput; i++) {
- semantic_name = outputs[i].name;
- semantic_index = outputs[i].sid;
+ semantic_name = outputs[i].semantic_name;
+ semantic_index = outputs[i].semantic_index;
+ bool export_param = true;
+
+ switch (semantic_name) {
+ case TGSI_SEMANTIC_POSITION: /* ignore these */
+ case TGSI_SEMANTIC_PSIZE:
+ case TGSI_SEMANTIC_CLIPVERTEX:
+ case TGSI_SEMANTIC_EDGEFLAG:
+ break;
+ case TGSI_SEMANTIC_GENERIC:
+ case TGSI_SEMANTIC_CLIPDIST:
+ if (shader->key.opt.hw_vs.kill_outputs &
+ (1ull << si_shader_io_get_unique_index(semantic_name, semantic_index)))
+ export_param = false;
+ break;
+ default:
+ if (shader->key.opt.hw_vs.kill_outputs2 &
+ (1u << si_shader_io_get_unique_index2(semantic_name, semantic_index)))
+ export_param = false;
+ break;
+ }
+
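+		/* Only vertex stream 0 is ever rasterized, so an output whose
+		 * components all belong to other streams needs no param export. */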
+ if (outputs[i].vertex_stream[0] != 0 &&
+ outputs[i].vertex_stream[1] != 0 &&
+ outputs[i].vertex_stream[2] != 0 &&
+ outputs[i].vertex_stream[3] != 0)
+ export_param = false;
handle_semantic:
/* Select the correct target */
case TGSI_SEMANTIC_POSITION:
target = V_008DFC_SQ_EXP_POS;
break;
- case TGSI_SEMANTIC_COLOR:
- case TGSI_SEMANTIC_BCOLOR:
- target = V_008DFC_SQ_EXP_PARAM + param_count;
- assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
- shader->info.vs_output_param_offset[i] = param_count;
- param_count++;
- break;
case TGSI_SEMANTIC_CLIPDIST:
if (shader->key.opt.hw_vs.clip_disable) {
semantic_name = TGSI_SEMANTIC_GENERIC;
continue;
si_llvm_emit_clipvertex(bld_base, pos_args, outputs[i].values);
continue;
+ case TGSI_SEMANTIC_COLOR:
+ case TGSI_SEMANTIC_BCOLOR:
case TGSI_SEMANTIC_PRIMID:
case TGSI_SEMANTIC_FOG:
case TGSI_SEMANTIC_TEXCOORD:
case TGSI_SEMANTIC_GENERIC:
+ if (!export_param)
+ continue;
target = V_008DFC_SQ_EXP_PARAM + param_count;
assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
shader->info.vs_output_param_offset[i] = param_count;
invocation_id = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
rw_buffers = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
- buffer = build_indexed_load_const(ctx, rw_buffers,
+ buffer = ac_build_indexed_load_const(&ctx->ac, rw_buffers,
lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));
buffer_offset = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
LLVMValueRef value = lds_load(bld_base, TGSI_TYPE_SIGNED, ~0,
lds_ptr);
- build_tbuffer_store_dwords(ctx, buffer, value, 4, buffer_addr,
- buffer_offset, 0);
+ ac_build_tbuffer_store_dwords(&ctx->ac, buffer, value, 4, buffer_addr,
+ buffer_offset, 0);
}
}
lp_build_const_int32(gallivm,
tess_outer_index * 4), "");
- for (i = 0; i < outer_comps; i++)
- out[i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_outer);
- for (i = 0; i < inner_comps; i++)
- out[outer_comps+i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_inner);
+ if (shader->key.part.tcs.epilog.prim_mode == PIPE_PRIM_LINES) {
+ /* For isolines, the hardware expects tess factors in the
+ * reverse order from what GLSL / TGSI specify.
+ */
+ out[0] = lds_load(bld_base, TGSI_TYPE_SIGNED, 1, lds_outer);
+ out[1] = lds_load(bld_base, TGSI_TYPE_SIGNED, 0, lds_outer);
+ } else {
+ for (i = 0; i < outer_comps; i++)
+ out[i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_outer);
+ for (i = 0; i < inner_comps; i++)
+ out[outer_comps+i] = lds_load(bld_base, TGSI_TYPE_SIGNED, i, lds_inner);
+ }
/* Convert the outputs to vectors for stores. */
vec0 = lp_build_gather_values(gallivm, out, MIN2(stride, 4));
/* Get the buffer. */
rw_buffers = LLVMGetParam(ctx->main_fn,
SI_PARAM_RW_BUFFERS);
- buffer = build_indexed_load_const(ctx, rw_buffers,
+ buffer = ac_build_indexed_load_const(&ctx->ac, rw_buffers,
lp_build_const_int32(gallivm, SI_HS_RING_TESS_FACTOR));
/* Get the offset. */
rel_patch_id, bld_base->uint_bld.zero, ""));
/* Store the dynamic HS control word. */
- build_tbuffer_store_dwords(ctx, buffer,
- lp_build_const_int32(gallivm, 0x80000000),
- 1, lp_build_const_int32(gallivm, 0), tf_base, 0);
+ ac_build_tbuffer_store_dwords(&ctx->ac, buffer,
+ lp_build_const_int32(gallivm, 0x80000000),
+ 1, lp_build_const_int32(gallivm, 0), tf_base, 0);
lp_build_endif(&inner_if_ctx);
/* Store the tessellation factors. */
- build_tbuffer_store_dwords(ctx, buffer, vec0,
- MIN2(stride, 4), byteoffset, tf_base, 4);
+ ac_build_tbuffer_store_dwords(&ctx->ac, buffer, vec0,
+ MIN2(stride, 4), byteoffset, tf_base, 4);
if (vec1)
- build_tbuffer_store_dwords(ctx, buffer, vec1,
- stride - 4, byteoffset, tf_base, 20);
+ ac_build_tbuffer_store_dwords(&ctx->ac, buffer, vec1,
+ stride - 4, byteoffset, tf_base, 20);
lp_build_endif(&if_ctx);
}
/* Write outputs to LDS. The next shader (TCS aka HS) will read
* its inputs from it. */
for (i = 0; i < info->num_outputs; i++) {
- LLVMValueRef *out_ptr = ctx->soa.outputs[i];
+ LLVMValueRef *out_ptr = ctx->outputs[i];
unsigned name = info->output_semantic_name[i];
unsigned index = info->output_semantic_index[i];
int param = si_shader_io_get_unique_index(name, index);
int i;
for (i = 0; i < info->num_outputs; i++) {
- LLVMValueRef *out_ptr =
- ctx->soa.outputs[i];
+ LLVMValueRef *out_ptr = ctx->outputs[i];
int param_index;
if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");
- build_tbuffer_store(ctx,
- ctx->esgs_ring,
- out_val, 1,
- LLVMGetUndef(ctx->i32), soffset,
- (4 * param_index + chan) * 4,
- V_008F0C_BUF_DATA_FORMAT_32,
- V_008F0C_BUF_NUM_FORMAT_UINT,
- 0, 0, 1, 1, 0);
+ ac_build_tbuffer_store(&ctx->ac,
+ ctx->esgs_ring,
+ out_val, 1,
+ LLVMGetUndef(ctx->i32), soffset,
+ (4 * param_index + chan) * 4,
+ V_008F0C_BUF_DATA_FORMAT_32,
+ V_008F0C_BUF_NUM_FORMAT_UINT,
+ 0, 0, 1, 1, 0);
}
}
}
static void si_llvm_emit_gs_epilogue(struct lp_build_tgsi_context *bld_base)
{
struct si_shader_context *ctx = si_shader_context(bld_base);
- struct gallivm_state *gallivm = bld_base->base.gallivm;
- LLVMValueRef args[2];
- args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_NOP | SENDMSG_GS_DONE);
- args[1] = LLVMGetParam(ctx->main_fn, SI_PARAM_GS_WAVE_ID);
- lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
- ctx->voidt, args, 2, 0);
+ ac_emit_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
+ LLVMGetParam(ctx->main_fn, SI_PARAM_GS_WAVE_ID));
}
static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context *bld_base)
}
for (j = 0; j < 4; j++) {
- addr = ctx->soa.outputs[i][j];
+ addr = ctx->outputs[i][j];
val = LLVMBuildLoad(gallivm->builder, addr, "");
- val = si_llvm_saturate(bld_base, val);
+ val = ac_emit_clamp(&ctx->ac, val);
LLVMBuildStore(gallivm->builder, val, addr);
}
}
}
for (i = 0; i < info->num_outputs; i++) {
- outputs[i].name = info->output_semantic_name[i];
- outputs[i].sid = info->output_semantic_index[i];
+ outputs[i].semantic_name = info->output_semantic_name[i];
+ outputs[i].semantic_index = info->output_semantic_index[i];
- for (j = 0; j < 4; j++)
+ for (j = 0; j < 4; j++) {
outputs[i].values[j] =
LLVMBuildLoad(gallivm->builder,
- ctx->soa.outputs[i][j],
+ ctx->outputs[i][j],
"");
+ outputs[i].vertex_stream[j] =
+ (info->output_streams[i] >> (2 * j)) & 3;
+ }
}
/* Return the primitive ID from the LLVM function. */
get_primitive_id(bld_base, 0)),
VS_EPILOG_PRIMID_LOC, "");
+ if (ctx->shader->selector->so.num_outputs)
+ si_llvm_emit_streamout(ctx, outputs, i, 0);
si_llvm_export_vs(bld_base, outputs, i);
FREE(outputs);
}
}
}
- /* SI (except OLAND) has a bug that it only looks
+ /* SI (except OLAND and HAINAN) has a bug that it only looks
* at the X writemask component. */
if (ctx->screen->b.chip_class == SI &&
- ctx->screen->b.family != CHIP_OLAND)
+ ctx->screen->b.family != CHIP_OLAND &&
+ ctx->screen->b.family != CHIP_HAINAN)
mask |= 0x1;
/* Specify which components to enable */
/* Clamp color */
if (ctx->shader->key.part.ps.epilog.clamp_color)
for (i = 0; i < 4; i++)
- color[i] = si_llvm_saturate(bld_base, color[i]);
+ color[i] = ac_emit_clamp(&ctx->ac, color[i]);
/* Alpha to one */
if (ctx->shader->key.part.ps.epilog.alpha_to_one)
case TGSI_SEMANTIC_COLOR:
assert(semantic_index < 8);
for (j = 0; j < 4; j++) {
- LLVMValueRef ptr = ctx->soa.outputs[i][j];
+ LLVMValueRef ptr = ctx->outputs[i][j];
LLVMValueRef result = LLVMBuildLoad(builder, ptr, "");
color[semantic_index][j] = result;
}
break;
case TGSI_SEMANTIC_POSITION:
depth = LLVMBuildLoad(builder,
- ctx->soa.outputs[i][2], "");
+ ctx->outputs[i][2], "");
break;
case TGSI_SEMANTIC_STENCIL:
stencil = LLVMBuildLoad(builder,
- ctx->soa.outputs[i][1], "");
+ ctx->outputs[i][1], "");
break;
case TGSI_SEMANTIC_SAMPLEMASK:
samplemask = LLVMBuildLoad(builder,
- ctx->soa.outputs[i][0], "");
+ ctx->outputs[i][0], "");
break;
default:
fprintf(stderr, "Warning: SI unhandled fs output type:%d\n",
LLVMBuilderRef builder = gallivm->builder;
LLVMValueRef size =
LLVMBuildExtractElement(builder, descriptor,
- lp_build_const_int32(gallivm, 6), "");
+ lp_build_const_int32(gallivm, 2), "");
if (ctx->screen->b.chip_class >= VI) {
/* On VI, the descriptor contains the size in bytes,
*/
LLVMValueRef stride =
LLVMBuildExtractElement(builder, descriptor,
- lp_build_const_int32(gallivm, 5), "");
+ lp_build_const_int32(gallivm, 1), "");
stride = LLVMBuildLShr(builder, stride,
lp_build_const_int32(gallivm, 16), "");
stride = LLVMBuildAnd(builder, stride,
* point in the program by emitting empty inline assembly that is marked as
* having side effects.
*/
+#if 0 /* unused currently */
static void emit_optimization_barrier(struct si_shader_context *ctx)
{
LLVMBuilderRef builder = ctx->gallivm.builder;
LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, "", "", true, false);
LLVMBuildCall(builder, inlineasm, NULL, 0, "");
}
+#endif
-static void emit_waitcnt(struct si_shader_context *ctx)
+/* Combine these with & instead of |. */
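+/* 0xf7f leaves every counter at its maximum ("don't wait"); ANDing in LGKM_CNT
+ * or VM_CNT clears that counter's field so s_waitcnt actually waits on it. */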
+#define NOOP_WAITCNT 0xf7f
+#define LGKM_CNT 0x07f
+#define VM_CNT 0xf70
+
+static void emit_waitcnt(struct si_shader_context *ctx, unsigned simm16)
{
struct gallivm_state *gallivm = &ctx->gallivm;
LLVMBuilderRef builder = gallivm->builder;
LLVMValueRef args[1] = {
- lp_build_const_int32(gallivm, 0xf70)
+ lp_build_const_int32(gallivm, simm16)
};
lp_build_intrinsic(builder, "llvm.amdgcn.s.waitcnt",
ctx->voidt, args, 1, 0);
struct lp_build_emit_data *emit_data)
{
struct si_shader_context *ctx = si_shader_context(bld_base);
+ LLVMValueRef src0 = lp_build_emit_fetch(bld_base, emit_data->inst, 0, 0);
+ unsigned flags = LLVMConstIntGetZExtValue(src0);
+ unsigned waitcnt = NOOP_WAITCNT;
+
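+	/* Translate the TGSI barrier flags into the hardware counters that must drain. */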
+ if (flags & TGSI_MEMBAR_THREAD_GROUP)
+ waitcnt &= VM_CNT & LGKM_CNT;
+
+ if (flags & (TGSI_MEMBAR_ATOMIC_BUFFER |
+ TGSI_MEMBAR_SHADER_BUFFER |
+ TGSI_MEMBAR_SHADER_IMAGE))
+ waitcnt &= VM_CNT;
- emit_waitcnt(ctx);
+ if (flags & TGSI_MEMBAR_SHARED)
+ waitcnt &= LGKM_CNT;
+
+ if (waitcnt != NOOP_WAITCNT)
+ emit_waitcnt(ctx, waitcnt);
}
static LLVMValueRef
reg->Register.Index,
SI_NUM_SHADER_BUFFERS);
- return build_indexed_load_const(ctx, rsrc_ptr, index);
+ return ac_build_indexed_load_const(&ctx->ac, rsrc_ptr, index);
}
static bool tgsi_is_array_sampler(unsigned target)
}
}
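+/* Pointer to an array of elem_type in the constant address space. */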
+static LLVMTypeRef const_array(LLVMTypeRef elem_type, int num_elements)
+{
+ return LLVMPointerType(LLVMArrayType(elem_type, num_elements),
+ CONST_ADDR_SPACE);
+}
+
/**
* Load the resource descriptor for \p image.
*/
SI_NUM_IMAGES);
}
- tmp = build_indexed_load_const(ctx, rsrc_ptr, index);
+ if (target == TGSI_TEXTURE_BUFFER) {
+ LLVMBuilderRef builder = ctx->gallivm.builder;
+
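+		/* The buffer resource occupies the upper half of the 8-dword image
+		 * descriptor; reindex the list as v4i32 to load dwords 4-7 (2 * index + 1). */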
+ rsrc_ptr = LLVMBuildPointerCast(builder, rsrc_ptr,
+ const_array(ctx->v4i32, 0), "");
+ index = LLVMBuildMul(builder, index,
+ LLVMConstInt(ctx->i32, 2, 0), "");
+ index = LLVMBuildAdd(builder, index,
+ LLVMConstInt(ctx->i32, 1, 0), "");
+ *rsrc = ac_build_indexed_load_const(&ctx->ac, rsrc_ptr, index);
+ return;
+ }
+
+ tmp = ac_build_indexed_load_const(&ctx->ac, rsrc_ptr, index);
if (dcc_off)
tmp = force_dcc_off(ctx, tmp);
*rsrc = tmp;
struct si_shader_context *ctx,
struct lp_build_emit_data * emit_data,
unsigned target,
- bool atomic)
+ bool atomic,
+ bool force_glc)
{
const struct tgsi_full_instruction *inst = emit_data->inst;
LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
LLVMValueRef r128 = i1false;
LLVMValueRef da = tgsi_is_array_image(target) ? i1true : i1false;
LLVMValueRef glc =
+ force_glc ||
inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
i1true : i1false;
LLVMValueRef slc = i1false;
emit_data->args[emit_data->arg_count++] = da;
}
-/**
- * Given a 256 bit resource, extract the top half (which stores the buffer
- * resource in the case of textures and images).
- */
-static LLVMValueRef extract_rsrc_top_half(
- struct si_shader_context *ctx,
- LLVMValueRef rsrc)
-{
- struct gallivm_state *gallivm = &ctx->gallivm;
- struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
- LLVMTypeRef v2i128 = LLVMVectorType(ctx->i128, 2);
-
- rsrc = LLVMBuildBitCast(gallivm->builder, rsrc, v2i128, "");
- rsrc = LLVMBuildExtractElement(gallivm->builder, rsrc, bld_base->uint_bld.one, "");
- rsrc = LLVMBuildBitCast(gallivm->builder, rsrc, ctx->v4i32, "");
-
- return rsrc;
-}
-
/**
* Append the resource and indexing arguments for buffer intrinsics.
*
LLVMValueRef rsrc,
LLVMValueRef index,
LLVMValueRef offset,
- bool atomic)
+ bool atomic,
+ bool force_glc)
{
const struct tgsi_full_instruction *inst = emit_data->inst;
LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
emit_data->args[emit_data->arg_count++] = offset; /* voffset */
if (!atomic) {
emit_data->args[emit_data->arg_count++] =
+ force_glc ||
inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
i1true : i1false; /* glc */
}
offset = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");
buffer_append_args(ctx, emit_data, rsrc, bld_base->uint_bld.zero,
- offset, false);
+ offset, false, false);
} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
LLVMValueRef coords;
coords = image_fetch_coords(bld_base, inst, 1);
if (target == TGSI_TEXTURE_BUFFER) {
- rsrc = extract_rsrc_top_half(ctx, rsrc);
buffer_append_args(ctx, emit_data, rsrc, coords,
- bld_base->uint_bld.zero, false);
+ bld_base->uint_bld.zero, false, false);
} else {
emit_data->args[0] = coords;
emit_data->args[1] = rsrc;
emit_data->args[2] = lp_build_const_int32(gallivm, 15); /* dmask */
emit_data->arg_count = 3;
- image_append_args(ctx, emit_data, target, false);
+ image_append_args(ctx, emit_data, target, false, false);
}
}
}
LLVMValueRef offset, ptr;
int addr_space;
- offset = lp_build_emit_fetch(&ctx->soa.bld_base, inst, arg, 0);
+ offset = lp_build_emit_fetch(&ctx->bld_base, inst, arg, 0);
offset = LLVMBuildBitCast(builder, offset, ctx->i32, "");
ptr = ctx->shared_memory;
struct lp_build_emit_data *emit_data)
{
const struct tgsi_full_instruction *inst = emit_data->inst;
- struct lp_build_context *base = &ctx->soa.bld_base.base;
+ struct lp_build_context *base = &ctx->bld_base.base;
struct gallivm_state *gallivm = &ctx->gallivm;
LLVMBuilderRef builder = gallivm->builder;
unsigned writemask = inst->Dst[0].Register.WriteMask;
}
if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
- emit_waitcnt(ctx);
+ emit_waitcnt(ctx, VM_CNT);
if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
load_emit_buffer(ctx, emit_data);
offset = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");
buffer_append_args(ctx, emit_data, rsrc, bld_base->uint_bld.zero,
- offset, false);
+ offset, false, false);
} else if (inst->Dst[0].Register.File == TGSI_FILE_IMAGE) {
unsigned target = inst->Memory.Texture;
LLVMValueRef coords;
+ /* 8bit/16bit TC L1 write corruption bug on SI.
+ * All store opcodes not aligned to a dword are affected.
+ *
+ * The only way to get unaligned stores in radeonsi is through
+ * shader images.
+ */
+ bool force_glc = ctx->screen->b.chip_class == SI;
+
coords = image_fetch_coords(bld_base, inst, 0);
if (target == TGSI_TEXTURE_BUFFER) {
image_fetch_rsrc(bld_base, &memory, true, target, &rsrc);
-
- rsrc = extract_rsrc_top_half(ctx, rsrc);
buffer_append_args(ctx, emit_data, rsrc, coords,
- bld_base->uint_bld.zero, false);
+ bld_base->uint_bld.zero, false, force_glc);
} else {
emit_data->args[1] = coords;
image_fetch_rsrc(bld_base, &memory, true, target,
emit_data->args[3] = lp_build_const_int32(gallivm, 15); /* dmask */
emit_data->arg_count = 4;
- image_append_args(ctx, emit_data, target, false);
+ image_append_args(ctx, emit_data, target, false, force_glc);
}
}
}
const struct tgsi_full_instruction *inst = emit_data->inst;
struct gallivm_state *gallivm = &ctx->gallivm;
LLVMBuilderRef builder = gallivm->builder;
- struct lp_build_context *uint_bld = &ctx->soa.bld_base.uint_bld;
+ struct lp_build_context *uint_bld = &ctx->bld_base.uint_bld;
LLVMValueRef base_data = emit_data->args[0];
LLVMValueRef base_offset = emit_data->args[3];
unsigned writemask = inst->Dst[0].Register.WriteMask;
{
const struct tgsi_full_instruction *inst = emit_data->inst;
struct gallivm_state *gallivm = &ctx->gallivm;
- struct lp_build_context *base = &ctx->soa.bld_base.base;
+ struct lp_build_context *base = &ctx->bld_base.base;
LLVMBuilderRef builder = gallivm->builder;
unsigned writemask = inst->Dst[0].Register.WriteMask;
LLVMValueRef ptr, derived_ptr, data, index;
if (!(writemask & (1 << chan))) {
continue;
}
- data = lp_build_emit_fetch(&ctx->soa.bld_base, inst, 1, chan);
+ data = lp_build_emit_fetch(&ctx->bld_base, inst, 1, chan);
index = lp_build_const_int32(gallivm, chan);
derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
LLVMBuildStore(builder, data, derived_ptr);
}
if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
- emit_waitcnt(ctx);
+ emit_waitcnt(ctx, VM_CNT);
if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
store_emit_buffer(ctx, emit_data);
offset = LLVMBuildBitCast(builder, tmp, bld_base->uint_bld.elem_type, "");
buffer_append_args(ctx, emit_data, rsrc, bld_base->uint_bld.zero,
- offset, true);
+ offset, true, false);
} else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
unsigned target = inst->Memory.Texture;
LLVMValueRef coords;
coords = image_fetch_coords(bld_base, inst, 1);
if (target == TGSI_TEXTURE_BUFFER) {
- rsrc = extract_rsrc_top_half(ctx, rsrc);
buffer_append_args(ctx, emit_data, rsrc, coords,
- bld_base->uint_bld.zero, true);
+ bld_base->uint_bld.zero, true, false);
} else {
emit_data->args[emit_data->arg_count++] = coords;
emit_data->args[emit_data->arg_count++] = rsrc;
- image_append_args(ctx, emit_data, target, true);
+ image_append_args(ctx, emit_data, target, true, false);
}
}
}
ptr = get_memory_ptr(ctx, inst, ctx->i32, 1);
- arg = lp_build_emit_fetch(&ctx->soa.bld_base, inst, 2, 0);
+ arg = lp_build_emit_fetch(&ctx->bld_base, inst, 2, 0);
arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");
if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
LLVMValueRef new_data;
- new_data = lp_build_emit_fetch(&ctx->soa.bld_base,
+ new_data = lp_build_emit_fetch(&ctx->bld_base,
inst, 3, 0);
new_data = LLVMBuildBitCast(builder, new_data, ctx->i32, "");
enum desc_type {
DESC_IMAGE,
+ DESC_BUFFER,
DESC_FMASK,
- DESC_SAMPLER
+ DESC_SAMPLER,
};
-static LLVMTypeRef const_array(LLVMTypeRef elem_type, int num_elements)
-{
- return LLVMPointerType(LLVMArrayType(elem_type, num_elements),
- CONST_ADDR_SPACE);
-}
-
/**
 * Load an image view, fmask view, or sampler state descriptor.
*/
/* The image is at [0:7]. */
index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
break;
+ case DESC_BUFFER:
+ /* The buffer is in [4:7]. */
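+ /* A view slot is 16 dwords, i.e. 4 v4i32 elements, so
+ * 4*index + 1 selects dwords [4:7] of the slot. */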
+ index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
+ index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 1, 0), "");
+ list = LLVMBuildPointerCast(builder, list,
+ const_array(ctx->v4i32, 0), "");
+ break;
case DESC_FMASK:
/* The FMASK is at [8:15]. */
index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
break;
}
- return build_indexed_load_const(ctx, list, index);
+ return ac_build_indexed_load_const(&ctx->ac, list, index);
}
static LLVMValueRef load_sampler_desc(struct si_shader_context *ctx,
index = LLVMConstInt(ctx->i32, sampler_index, 0);
}
- *res_ptr = load_sampler_desc(ctx, index, DESC_IMAGE);
+ if (target == TGSI_TEXTURE_BUFFER)
+ *res_ptr = load_sampler_desc(ctx, index, DESC_BUFFER);
+ else
+ *res_ptr = load_sampler_desc(ctx, index, DESC_IMAGE);
+
+ if (samp_ptr)
+ *samp_ptr = NULL;
+ if (fmask_ptr)
+ *fmask_ptr = NULL;
if (target == TGSI_TEXTURE_2D_MSAA ||
target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
- if (samp_ptr)
- *samp_ptr = NULL;
if (fmask_ptr)
*fmask_ptr = load_sampler_desc(ctx, index, DESC_FMASK);
- } else {
+ } else if (target != TGSI_TEXTURE_BUFFER) {
if (samp_ptr) {
*samp_ptr = load_sampler_desc(ctx, index, DESC_SAMPLER);
*samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
}
- if (fmask_ptr)
- *fmask_ptr = NULL;
}
}
struct lp_build_emit_data *emit_data)
{
struct si_shader_context *ctx = si_shader_context(bld_base);
- struct gallivm_state *gallivm = bld_base->base.gallivm;
- LLVMBuilderRef builder = gallivm->builder;
const struct tgsi_full_instruction *inst = emit_data->inst;
unsigned target = inst->Texture.Texture;
LLVMValueRef res_ptr;
if (target == TGSI_TEXTURE_BUFFER) {
/* Read the size from the buffer descriptor directly. */
- LLVMValueRef res = LLVMBuildBitCast(builder, res_ptr, ctx->v8i32, "");
- emit_data->args[0] = get_buffer_size(bld_base, res);
+ emit_data->args[0] = get_buffer_size(bld_base, res_ptr);
return;
}
tex_fetch_ptrs(bld_base, emit_data, &res_ptr, &samp_ptr, &fmask_ptr);
if (target == TGSI_TEXTURE_BUFFER) {
- LLVMTypeRef v2i128 = LLVMVectorType(ctx->i128, 2);
-
- /* Bitcast and truncate v8i32 to v16i8. */
- LLVMValueRef res = res_ptr;
- res = LLVMBuildBitCast(gallivm->builder, res, v2i128, "");
- res = LLVMBuildExtractElement(gallivm->builder, res, bld_base->uint_bld.one, "");
- res = LLVMBuildBitCast(gallivm->builder, res, ctx->v16i8, "");
-
emit_data->dst_type = ctx->v4f32;
- emit_data->args[0] = res;
+ emit_data->args[0] = LLVMBuildBitCast(gallivm->builder, res_ptr,
+ ctx->v16i8, "");
emit_data->args[1] = bld_base->uint_bld.zero;
emit_data->args[2] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
emit_data->arg_count = 3;
* Z32_FLOAT, but we don't know that here.
*/
if (ctx->screen->b.chip_class == VI)
- z = si_llvm_saturate(bld_base, z);
+ z = ac_emit_clamp(&ctx->ac, z);
address[count++] = z;
}
target == TGSI_TEXTURE_CUBE_ARRAY ||
target == TGSI_TEXTURE_SHADOWCUBE ||
target == TGSI_TEXTURE_SHADOWCUBE_ARRAY)
- si_prepare_cube_coords(bld_base, emit_data, coords, derivs);
+ ac_prepare_cube_coords(&ctx->ac,
+ opcode == TGSI_OPCODE_TXD,
+ target == TGSI_TEXTURE_CUBE_ARRAY ||
+ target == TGSI_TEXTURE_SHADOWCUBE_ARRAY,
+ coords, derivs);
if (opcode == TGSI_OPCODE_TXD)
for (int i = 0; i < num_deriv_channels * 2; i++)
struct lp_build_context *uint_bld = &bld_base->uint_bld;
struct lp_build_emit_data txf_emit_data = *emit_data;
LLVMValueRef txf_address[4];
- unsigned txf_count = count;
+ /* We only need .xy for non-arrays, and .xyz for arrays. */
+ unsigned txf_count = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;
struct tgsi_full_instruction inst = {};
memcpy(txf_address, address, sizeof(txf_address));
- if (target == TGSI_TEXTURE_2D_MSAA) {
- txf_address[2] = bld_base->uint_bld.zero;
- }
- txf_address[3] = bld_base->uint_bld.zero;
-
/* Read FMASK using TXF. */
inst.Instruction.Opcode = TGSI_OPCODE_TXF;
inst.Texture.Texture = target;
txf_emit_data.output[0],
uint_bld->zero, "");
- unsigned sample_chan = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;
+ unsigned sample_chan = txf_count; /* the sample index is last */
LLVMValueRef sample_index4 =
LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");
/* add tex offsets */
if (inst->Texture.NumOffsets) {
struct lp_build_context *uint_bld = &bld_base->uint_bld;
- struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
const struct tgsi_texture_offset *off = inst->TexOffsets;
assert(inst->Texture.NumOffsets == 1);
switch (target) {
case TGSI_TEXTURE_3D:
address[2] = lp_build_add(uint_bld, address[2],
- bld->immediates[off->Index][off->SwizzleZ]);
+ ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleZ]);
/* fall through */
case TGSI_TEXTURE_2D:
case TGSI_TEXTURE_SHADOW2D:
case TGSI_TEXTURE_SHADOW2D_ARRAY:
address[1] =
lp_build_add(uint_bld, address[1],
- bld->immediates[off->Index][off->SwizzleY]);
+ ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleY]);
/* fall through */
case TGSI_TEXTURE_1D:
case TGSI_TEXTURE_SHADOW1D:
case TGSI_TEXTURE_SHADOW1D_ARRAY:
address[0] =
lp_build_add(uint_bld, address[0],
- bld->immediates[off->Index][off->SwizzleX]);
+ ctx->imms[off->Index * TGSI_NUM_CHANNELS + off->SwizzleX]);
break;
/* texture offsets do not apply to other texture targets */
}
/* Get the component index from src1.x for Gather4. */
if (!tgsi_is_shadow_target(target)) {
- LLVMValueRef (*imms)[4] = lp_soa_context(bld_base)->immediates;
LLVMValueRef comp_imm;
struct tgsi_src_register src1 = inst->Src[1].Register;
assert(src1.File == TGSI_FILE_IMMEDIATE);
- comp_imm = imms[src1.Index][src1.SwizzleX];
+ comp_imm = ctx->imms[src1.Index * TGSI_NUM_CHANNELS + src1.SwizzleX];
gather_comp = LLVMConstIntGetZExtValue(comp_imm);
gather_comp = CLAMP(gather_comp, 0, 3);
}
set_tex_fetch_args(ctx, &txq_emit_data, TGSI_OPCODE_TXQ,
txq_inst.Texture.Texture,
emit_data->args[1], NULL,
- &ctx->soa.bld_base.uint_bld.zero,
+ &ctx->bld_base.uint_bld.zero,
1, 0xf);
- txq_emit(NULL, &ctx->soa.bld_base, &txq_emit_data);
+ txq_emit(NULL, &ctx->bld_base, &txq_emit_data);
/* Compute -0.5 / size. */
for (c = 0; c < 2; c++) {
LLVMConstInt(ctx->i32, c, 0), "");
half_texel[c] = LLVMBuildUIToFP(builder, half_texel[c], ctx->f32, "");
half_texel[c] =
- lp_build_emit_llvm_unary(&ctx->soa.bld_base,
+ lp_build_emit_llvm_unary(&ctx->bld_base,
TGSI_OPCODE_RCP, half_texel[c]);
half_texel[c] = LLVMBuildFMul(builder, half_texel[c],
LLVMConstReal(ctx->f32, -0.5), "");
emit_data->output[emit_data->chan] = samples;
}
-/*
- * SI implements derivatives using the local data store (LDS)
- * All writes to the LDS happen in all executing threads at
- * the same time. TID is the Thread ID for the current
- * thread and is a value between 0 and 63, representing
- * the thread's position in the wavefront.
- *
- * For the pixel shader threads are grouped into quads of four pixels.
- * The TIDs of the pixels of a quad are:
- *
- * +------+------+
- * |4n + 0|4n + 1|
- * +------+------+
- * |4n + 2|4n + 3|
- * +------+------+
- *
- * So, masking the TID with 0xfffffffc yields the TID of the top left pixel
- * of the quad, masking with 0xfffffffd yields the TID of the top pixel of
- * the current pixel's column, and masking with 0xfffffffe yields the TID
- * of the left pixel of the current pixel's row.
- *
- * Adding 1 yields the TID of the pixel to the right of the left pixel, and
- * adding 2 yields the TID of the pixel below the top pixel.
- */
-/* masks for thread ID. */
-#define TID_MASK_TOP_LEFT 0xfffffffc
-#define TID_MASK_TOP 0xfffffffd
-#define TID_MASK_LEFT 0xfffffffe
-
static void si_llvm_emit_ddxy(
const struct lp_build_tgsi_action *action,
struct lp_build_tgsi_context *bld_base,
struct si_shader_context *ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = bld_base->base.gallivm;
unsigned opcode = emit_data->info->opcode;
- LLVMValueRef thread_id, tl, trbl, tl_tid, trbl_tid, val, args[2];
+ LLVMValueRef val;
int idx;
unsigned mask;
- thread_id = get_thread_id(ctx);
-
if (opcode == TGSI_OPCODE_DDX_FINE)
- mask = TID_MASK_LEFT;
+ mask = AC_TID_MASK_LEFT;
else if (opcode == TGSI_OPCODE_DDY_FINE)
- mask = TID_MASK_TOP;
+ mask = AC_TID_MASK_TOP;
else
- mask = TID_MASK_TOP_LEFT;
-
- tl_tid = LLVMBuildAnd(gallivm->builder, thread_id,
- lp_build_const_int32(gallivm, mask), "");
+ mask = AC_TID_MASK_TOP_LEFT;
 /* For DDX we want the next X pixel, for DDY the next Y pixel. */
idx = (opcode == TGSI_OPCODE_DDX || opcode == TGSI_OPCODE_DDX_FINE) ? 1 : 2;
- trbl_tid = LLVMBuildAdd(gallivm->builder, tl_tid,
- lp_build_const_int32(gallivm, idx), "");
val = LLVMBuildBitCast(gallivm->builder, emit_data->args[0], ctx->i32, "");
-
- if (ctx->screen->has_ds_bpermute) {
- args[0] = LLVMBuildMul(gallivm->builder, tl_tid,
- lp_build_const_int32(gallivm, 4), "");
- args[1] = val;
- tl = lp_build_intrinsic(gallivm->builder,
- "llvm.amdgcn.ds.bpermute", ctx->i32,
- args, 2, LP_FUNC_ATTR_READNONE);
-
- args[0] = LLVMBuildMul(gallivm->builder, trbl_tid,
- lp_build_const_int32(gallivm, 4), "");
- trbl = lp_build_intrinsic(gallivm->builder,
- "llvm.amdgcn.ds.bpermute", ctx->i32,
- args, 2, LP_FUNC_ATTR_READNONE);
- } else {
- LLVMValueRef store_ptr, load_ptr0, load_ptr1;
-
- store_ptr = build_gep0(ctx, ctx->lds, thread_id);
- load_ptr0 = build_gep0(ctx, ctx->lds, tl_tid);
- load_ptr1 = build_gep0(ctx, ctx->lds, trbl_tid);
-
- LLVMBuildStore(gallivm->builder, val, store_ptr);
- tl = LLVMBuildLoad(gallivm->builder, load_ptr0, "");
- trbl = LLVMBuildLoad(gallivm->builder, load_ptr1, "");
- }
-
- tl = LLVMBuildBitCast(gallivm->builder, tl, ctx->f32, "");
- trbl = LLVMBuildBitCast(gallivm->builder, trbl, ctx->f32, "");
-
- emit_data->output[emit_data->chan] =
- LLVMBuildFSub(gallivm->builder, trbl, tl, "");
+ val = ac_emit_ddxy(&ctx->ac, ctx->screen->has_ds_bpermute,
+ mask, idx, ctx->lds, val);
+ emit_data->output[emit_data->chan] = val;
}
/*
struct si_shader_context *ctx = si_shader_context(bld_base);
struct si_shader *shader = ctx->shader;
struct gallivm_state *gallivm = bld_base->base.gallivm;
+ struct lp_build_context *uint = &bld_base->uint_bld;
LLVMValueRef interp_param;
const struct tgsi_full_instruction *inst = emit_data->inst;
- const char *intr_name;
int input_index = inst->Src[0].Register.Index;
int chan;
int i;
temp2 = LLVMBuildFMul(gallivm->builder, ddy_el, emit_data->args[1], "");
- temp2 = LLVMBuildFAdd(gallivm->builder, temp2, temp1, "");
-
- ij_out[i] = LLVMBuildBitCast(gallivm->builder,
- temp2, ctx->i32, "");
+ ij_out[i] = LLVMBuildFAdd(gallivm->builder, temp2, temp1, "");
}
interp_param = lp_build_gather_values(bld_base->base.gallivm, ij_out, 2);
}
- intr_name = interp_param ? "llvm.SI.fs.interp" : "llvm.SI.fs.constant";
for (chan = 0; chan < 4; chan++) {
- LLVMValueRef args[4];
LLVMValueRef llvm_chan;
unsigned schan;
schan = tgsi_util_get_full_src_register_swizzle(&inst->Src[0], chan);
llvm_chan = lp_build_const_int32(gallivm, schan);
- args[0] = llvm_chan;
- args[1] = attr_number;
- args[2] = params;
- args[3] = interp_param;
-
- emit_data->output[chan] =
- lp_build_intrinsic(gallivm->builder, intr_name,
- ctx->f32, args, args[3] ? 4 : 3,
- LP_FUNC_ATTR_READNONE);
+ if (interp_param) {
+ interp_param = LLVMBuildBitCast(gallivm->builder,
+ interp_param, LLVMVectorType(ctx->f32, 2), "");
+ LLVMValueRef i = LLVMBuildExtractElement(
+ gallivm->builder, interp_param, uint->zero, "");
+ LLVMValueRef j = LLVMBuildExtractElement(
+ gallivm->builder, interp_param, uint->one, "");
+ emit_data->output[chan] = ac_build_fs_interp(&ctx->ac,
+ llvm_chan, attr_number, params,
+ i, j);
+ } else {
+ emit_data->output[chan] = ac_build_fs_interp_mov(&ctx->ac,
+ lp_build_const_int32(gallivm, 2), /* P0 */
+ llvm_chan, attr_number, params);
+ }
}
}
static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
struct lp_build_emit_data *emit_data)
{
- LLVMValueRef (*imms)[4] = lp_soa_context(bld_base)->immediates;
+ struct si_shader_context *ctx = si_shader_context(bld_base);
struct tgsi_src_register src0 = emit_data->inst->Src[0].Register;
+ LLVMValueRef imm;
unsigned stream;
assert(src0.File == TGSI_FILE_IMMEDIATE);
- stream = LLVMConstIntGetZExtValue(imms[src0.Index][src0.SwizzleX]) & 0x3;
+ imm = ctx->imms[src0.Index * TGSI_NUM_CHANNELS + src0.SwizzleX];
+ stream = LLVMConstIntGetZExtValue(imm) & 0x3;
return stream;
}
struct si_shader *shader = ctx->shader;
struct tgsi_shader_info *info = &shader->selector->info;
struct gallivm_state *gallivm = bld_base->base.gallivm;
+ struct lp_build_if_state if_state;
LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
SI_PARAM_GS2VS_OFFSET);
LLVMValueRef gs_next_vertex;
LLVMValueRef can_emit, kill;
- LLVMValueRef args[2];
- unsigned chan;
+ unsigned chan, offset;
int i;
unsigned stream;
"");
/* If this thread has already emitted the declared maximum number of
- * vertices, kill it: excessive vertex emissions are not supposed to
- * have any effect, and GS threads have no externally observable
- * effects other than emitting vertices.
+ * vertices, skip the write: excessive vertex emissions are not
+ * supposed to have any effect.
+ *
+ * If the shader has no writes to memory, kill it instead. This skips
+ * further memory loads and may allow LLVM to skip to the end
+ * altogether.
*/
- can_emit = LLVMBuildICmp(gallivm->builder, LLVMIntULE, gs_next_vertex,
+ can_emit = LLVMBuildICmp(gallivm->builder, LLVMIntULT, gs_next_vertex,
lp_build_const_int32(gallivm,
shader->selector->gs_max_out_vertices), "");
- kill = lp_build_select(&bld_base->base, can_emit,
- lp_build_const_float(gallivm, 1.0f),
- lp_build_const_float(gallivm, -1.0f));
- lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kill",
- ctx->voidt, &kill, 1, 0);
+ bool use_kill = !info->writes_memory;
+ if (use_kill) {
+ kill = lp_build_select(&bld_base->base, can_emit,
+ lp_build_const_float(gallivm, 1.0f),
+ lp_build_const_float(gallivm, -1.0f));
+ lp_build_intrinsic(gallivm->builder, "llvm.AMDGPU.kill",
+ ctx->voidt, &kill, 1, 0);
+ } else {
+ lp_build_if(&if_state, gallivm, can_emit);
+ }
+
+ offset = 0;
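+ /* Vertex data is tightly packed in the GSVS ring: "offset" counts
+ * only the components actually written to this stream. */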
for (i = 0; i < info->num_outputs; i++) {
- LLVMValueRef *out_ptr =
- ctx->soa.outputs[i];
+ LLVMValueRef *out_ptr = ctx->outputs[i];
for (chan = 0; chan < 4; chan++) {
+ if (!(info->output_usagemask[i] & (1 << chan)) ||
+ ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
+ continue;
+
LLVMValueRef out_val = LLVMBuildLoad(gallivm->builder, out_ptr[chan], "");
LLVMValueRef voffset =
- lp_build_const_int32(gallivm, (i * 4 + chan) *
+ lp_build_const_int32(gallivm, offset *
shader->selector->gs_max_out_vertices);
+ offset++;
voffset = lp_build_add(uint, voffset, gs_next_vertex);
voffset = lp_build_mul_imm(uint, voffset, 4);
out_val = LLVMBuildBitCast(gallivm->builder, out_val, ctx->i32, "");
- build_tbuffer_store(ctx,
- ctx->gsvs_ring[stream],
- out_val, 1,
- voffset, soffset, 0,
- V_008F0C_BUF_DATA_FORMAT_32,
- V_008F0C_BUF_NUM_FORMAT_UINT,
- 1, 0, 1, 1, 0);
+ ac_build_tbuffer_store(&ctx->ac,
+ ctx->gsvs_ring[stream],
+ out_val, 1,
+ voffset, soffset, 0,
+ V_008F0C_BUF_DATA_FORMAT_32,
+ V_008F0C_BUF_NUM_FORMAT_UINT,
+ 1, 0, 1, 1, 0);
}
}
+
gs_next_vertex = lp_build_add(uint, gs_next_vertex,
lp_build_const_int32(gallivm, 1));
LLVMBuildStore(gallivm->builder, gs_next_vertex, ctx->gs_next_vertex[stream]);
/* Signal vertex emission */
- args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_EMIT | SENDMSG_GS | (stream << 8));
- args[1] = LLVMGetParam(ctx->main_fn, SI_PARAM_GS_WAVE_ID);
- lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
- ctx->voidt, args, 2, 0);
+ ac_emit_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
+ LLVMGetParam(ctx->main_fn, SI_PARAM_GS_WAVE_ID));
+ if (!use_kill)
+ lp_build_endif(&if_state);
}
/* Cut one primitive from the geometry shader */
struct lp_build_emit_data *emit_data)
{
struct si_shader_context *ctx = si_shader_context(bld_base);
- struct gallivm_state *gallivm = bld_base->base.gallivm;
- LLVMValueRef args[2];
unsigned stream;
/* Signal primitive cut */
stream = si_llvm_get_stream(bld_base, emit_data);
- args[0] = lp_build_const_int32(gallivm, SENDMSG_GS_OP_CUT | SENDMSG_GS | (stream << 8));
- args[1] = LLVMGetParam(ctx->main_fn, SI_PARAM_GS_WAVE_ID);
- lp_build_intrinsic(gallivm->builder, "llvm.SI.sendmsg",
- ctx->voidt, args, 2, 0);
+ ac_emit_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
+ LLVMGetParam(ctx->main_fn, SI_PARAM_GS_WAVE_ID));
}
static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
struct si_shader_context *ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = bld_base->base.gallivm;
- /* The real barrier instruction isn’t needed, because an entire patch
+ /* SI only (thanks to a hw bug workaround):
+ * The real barrier instruction isn’t needed, because an entire patch
* always fits into a single wave.
*/
- if (ctx->type == PIPE_SHADER_TESS_CTRL) {
- emit_optimization_barrier(ctx);
+ if (HAVE_LLVM >= 0x0309 &&
+ ctx->screen->b.chip_class == SI &&
+ ctx->type == PIPE_SHADER_TESS_CTRL) {
+ emit_waitcnt(ctx, LGKM_CNT & VM_CNT);
return;
}
}
}
-static void create_meta_data(struct si_shader_context *ctx)
-{
- struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
-
- ctx->invariant_load_md_kind = LLVMGetMDKindIDInContext(gallivm->context,
- "invariant.load", 14);
- ctx->range_md_kind = LLVMGetMDKindIDInContext(gallivm->context,
- "range", 5);
- ctx->uniform_md_kind = LLVMGetMDKindIDInContext(gallivm->context,
- "amdgpu.uniform", 14);
-
- ctx->empty_md = LLVMMDNodeInContext(gallivm->context, NULL, 0);
-}
-
static void declare_streamout_params(struct si_shader_context *ctx,
struct pipe_stream_output_info *so,
LLVMTypeRef *params, LLVMTypeRef i32,
if (ctx->type != PIPE_SHADER_TESS_EVAL)
params[ctx->param_streamout_config = (*num_params)++] = i32;
else
- ctx->param_streamout_config = ctx->param_tess_offchip;
+ ctx->param_streamout_config = *num_params - 1;
params[ctx->param_streamout_write_index = (*num_params)++] = i32;
}
case LLVMVectorTypeKind:
return LLVMGetVectorSize(type) *
llvm_get_type_size(LLVMGetElementType(type));
+ case LLVMArrayTypeKind:
+ return LLVMGetArrayLength(type) *
+ llvm_get_type_size(LLVMGetElementType(type));
default:
assert(0);
return 0;
static void declare_tess_lds(struct si_shader_context *ctx)
{
struct gallivm_state *gallivm = &ctx->gallivm;
- struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
+ struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
struct lp_build_context *uint = &bld_base->uint_bld;
unsigned lds_size = ctx->screen->b.chip_class >= CIK ? 65536 : 32768;
"tess_lds");
}
+static unsigned si_get_max_workgroup_size(struct si_shader *shader)
+{
+ const unsigned *properties = shader->selector->info.properties;
+ unsigned max_work_group_size =
+ properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
+ properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
+ properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
+
+ if (!max_work_group_size) {
+ /* This is a variable group size compute shader;
+ * compile it for the maximum possible group size.
+ */
+ max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
+ }
+ return max_work_group_size;
+}
+
static void create_function(struct si_shader_context *ctx)
{
- struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
+ struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
struct gallivm_state *gallivm = bld_base->base.gallivm;
struct si_shader *shader = ctx->shader;
- LLVMTypeRef params[SI_NUM_PARAMS + SI_NUM_VERTEX_BUFFERS], v3i32;
+ LLVMTypeRef params[SI_NUM_PARAMS + SI_MAX_ATTRIBS], v3i32;
LLVMTypeRef returns[16+32*4];
unsigned i, last_sgpr, num_params, num_return_sgprs;
unsigned num_returns = 0;
switch (ctx->type) {
case PIPE_SHADER_VERTEX:
- params[SI_PARAM_VERTEX_BUFFERS] = const_array(ctx->v16i8, SI_NUM_VERTEX_BUFFERS);
+ params[SI_PARAM_VERTEX_BUFFERS] = const_array(ctx->v16i8, SI_MAX_ATTRIBS);
params[SI_PARAM_BASE_VERTEX] = ctx->i32;
params[SI_PARAM_START_INSTANCE] = ctx->i32;
params[SI_PARAM_DRAWID] = ctx->i32;
if (shader->key.as_es) {
params[ctx->param_oc_lds = num_params++] = ctx->i32;
- params[ctx->param_tess_offchip = num_params++] = ctx->i32;
+ params[num_params++] = ctx->i32;
params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
} else {
- params[ctx->param_tess_offchip = num_params++] = ctx->i32;
+ params[num_params++] = ctx->i32;
declare_streamout_params(ctx, &shader->selector->so,
params, ctx->i32, &num_params);
params[ctx->param_oc_lds = num_params++] = ctx->i32;
S_0286D0_FRONT_FACE_ENA(1) |
S_0286D0_POS_FIXED_PT_ENA(1));
} else if (ctx->type == PIPE_SHADER_COMPUTE) {
- const unsigned *properties = shader->selector->info.properties;
- unsigned max_work_group_size =
- properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
- properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
- properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
-
- if (!max_work_group_size) {
- /* This is a variable group size compute shader,
- * compile it for the maximum possible group size.
- */
- max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
- }
-
si_llvm_add_attribute(ctx->main_fn,
"amdgpu-max-work-group-size",
- max_work_group_size);
+ si_get_max_workgroup_size(shader));
}
shader->info.num_input_sgprs = 0;
LOCAL_ADDR_SPACE);
if ((ctx->type == PIPE_SHADER_VERTEX && shader->key.as_ls) ||
- ctx->type == PIPE_SHADER_TESS_CTRL ||
- ctx->type == PIPE_SHADER_TESS_EVAL)
+ ctx->type == PIPE_SHADER_TESS_CTRL)
declare_tess_lds(ctx);
}
*/
static void preload_ring_buffers(struct si_shader_context *ctx)
{
- struct gallivm_state *gallivm =
- ctx->soa.bld_base.base.gallivm;
+ struct gallivm_state *gallivm = ctx->bld_base.base.gallivm;
+ LLVMBuilderRef builder = gallivm->builder;
LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
SI_PARAM_RW_BUFFERS);
LLVMValueRef offset = lp_build_const_int32(gallivm, ring);
ctx->esgs_ring =
- build_indexed_load_const(ctx, buf_ptr, offset);
+ ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);
}
if (ctx->shader->is_gs_copy_shader) {
- LLVMValueRef offset = lp_build_const_int32(gallivm, SI_VS_RING_GSVS);
+ LLVMValueRef offset = lp_build_const_int32(gallivm, SI_RING_GSVS);
ctx->gsvs_ring[0] =
- build_indexed_load_const(ctx, buf_ptr, offset);
- }
- if (ctx->type == PIPE_SHADER_GEOMETRY) {
- int i;
- for (i = 0; i < 4; i++) {
- LLVMValueRef offset = lp_build_const_int32(gallivm, SI_GS_RING_GSVS0 + i);
+ ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);
+ } else if (ctx->type == PIPE_SHADER_GEOMETRY) {
+ const struct si_shader_selector *sel = ctx->shader->selector;
+ struct lp_build_context *uint = &ctx->bld_base.uint_bld;
+ LLVMValueRef offset = lp_build_const_int32(gallivm, SI_RING_GSVS);
+ LLVMValueRef base_ring;
+
+ base_ring = ac_build_indexed_load_const(&ctx->ac, buf_ptr, offset);
+
+ /* The conceptual layout of the GSVS ring is
+ * v0c0 .. vLc0 v0c1 .. vLc1 ..
+ * but the real memory layout is swizzled across
+ * threads:
+ * t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
+ * t16v0c0 ..
+ * Override the buffer descriptor accordingly.
+ */
+ LLVMTypeRef v2i64 = LLVMVectorType(ctx->i64, 2);
+ uint64_t stream_offset = 0;
+
+ for (unsigned stream = 0; stream < 4; ++stream) {
+ unsigned num_components;
+ unsigned stride;
+ unsigned num_records;
+ LLVMValueRef ring, tmp;
+
+ num_components = sel->info.num_stream_output_components[stream];
+ if (!num_components)
+ continue;
- ctx->gsvs_ring[i] =
- build_indexed_load_const(ctx, buf_ptr, offset);
+ stride = 4 * num_components * sel->gs_max_out_vertices;
+
+ /* Limit on the stride field for <= CIK. */
+ assert(stride < (1 << 14));
+
+ num_records = 64;
+
+ ring = LLVMBuildBitCast(builder, base_ring, v2i64, "");
+ tmp = LLVMBuildExtractElement(builder, ring, uint->zero, "");
+ tmp = LLVMBuildAdd(builder, tmp,
+ LLVMConstInt(ctx->i64,
+ stream_offset, 0), "");
+ stream_offset += stride * 64;
+
+ ring = LLVMBuildInsertElement(builder, ring, tmp, uint->zero, "");
+ ring = LLVMBuildBitCast(builder, ring, ctx->v4i32, "");
+ tmp = LLVMBuildExtractElement(builder, ring, uint->one, "");
+ tmp = LLVMBuildOr(builder, tmp,
+ LLVMConstInt(ctx->i32,
+ S_008F04_STRIDE(stride) |
+ S_008F04_SWIZZLE_ENABLE(1), 0), "");
+ ring = LLVMBuildInsertElement(builder, ring, tmp, uint->one, "");
+ ring = LLVMBuildInsertElement(builder, ring,
+ LLVMConstInt(ctx->i32, num_records, 0),
+ LLVMConstInt(ctx->i32, 2, 0), "");
+ ring = LLVMBuildInsertElement(builder, ring,
+ LLVMConstInt(ctx->i32,
+ S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+ S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+ S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
+ S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
+ S_008F0C_ELEMENT_SIZE(1) | /* element_size = 4 (bytes) */
+ S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
+ S_008F0C_ADD_TID_ENABLE(1),
+ 0),
+ LLVMConstInt(ctx->i32, 3, 0), "");
+ ring = LLVMBuildBitCast(builder, ring, ctx->v16i8, "");
+
+ ctx->gsvs_ring[stream] = ring;
}
}
}
LLVMValueRef param_rw_buffers,
unsigned param_pos_fixed_pt)
{
- struct lp_build_tgsi_context *bld_base =
- &ctx->soa.bld_base;
+ struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
struct gallivm_state *gallivm = bld_base->base.gallivm;
LLVMBuilderRef builder = gallivm->builder;
LLVMValueRef slot, desc, offset, row, bit, address[2];
/* Load the buffer descriptor. */
slot = lp_build_const_int32(gallivm, SI_PS_CONST_POLY_STIPPLE);
- desc = build_indexed_load_const(ctx, param_rw_buffers, slot);
+ desc = ac_build_indexed_load_const(&ctx->ac, param_rw_buffers, slot);
/* The stipple pattern is 32x32, each row has 32 bits. */
offset = LLVMBuildMul(builder, address[1],
r600_resource_reference(&shader->bo, NULL);
shader->bo = (struct r600_resource*)
pipe_buffer_create(&sscreen->b.b, 0,
- PIPE_USAGE_IMMUTABLE, bo_size);
+ PIPE_USAGE_IMMUTABLE,
+ align(bo_size, SI_CPDMA_ALIGNMENT));
if (!shader->bo)
return -ENOMEM;
}
static void si_shader_dump_stats(struct si_screen *sscreen,
- struct si_shader_config *conf,
- unsigned num_inputs,
- unsigned code_size,
+ struct si_shader *shader,
struct pipe_debug_callback *debug,
unsigned processor,
- FILE *file)
+ FILE *file,
+ bool check_debug_option)
{
+ struct si_shader_config *conf = &shader->config;
+ unsigned num_inputs = shader->selector ? shader->selector->info.num_inputs : 0;
+ unsigned code_size = si_get_shader_binary_size(shader);
unsigned lds_increment = sscreen->b.chip_class >= CIK ? 512 : 256;
unsigned lds_per_wave = 0;
unsigned max_simd_waves = 10;
/* Compute LDS usage for PS. */
- if (processor == PIPE_SHADER_FRAGMENT) {
+ switch (processor) {
+ case PIPE_SHADER_FRAGMENT:
/* The minimum usage per wave is (num_inputs * 48). The maximum
* usage is (num_inputs * 48 * 16).
* We can get anything in between and it varies between waves.
*/
lds_per_wave = conf->lds_size * lds_increment +
align(num_inputs * 48, lds_increment);
+ break;
+ case PIPE_SHADER_COMPUTE:
+ if (shader->selector) {
+ unsigned max_workgroup_size =
+ si_get_max_workgroup_size(shader);
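+ /* LDS is allocated per workgroup; divide by the number of
+ * waves per group to get the per-wave share. */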
+ lds_per_wave = (conf->lds_size * lds_increment) /
+ DIV_ROUND_UP(max_workgroup_size, 64);
+ }
+ break;
}
/* Compute the per-SIMD wave counts. */
if (conf->num_vgprs)
max_simd_waves = MIN2(max_simd_waves, 256 / conf->num_vgprs);
- /* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
- * that PS can use.
- */
+ /* LDS is 64KB per CU (4 SIMDs), which is 16KB per SIMD (usage above
+ * 16KB makes some SIMDs unoccupied). */
if (lds_per_wave)
max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);
- if (file != stderr ||
+ if (!check_debug_option ||
r600_can_dump_shader(&sscreen->b, processor)) {
if (processor == PIPE_SHADER_FRAGMENT) {
fprintf(file, "*** SHADER CONFIG ***\n"
"VGPRS: %d\n"
"Spilled SGPRs: %d\n"
"Spilled VGPRs: %d\n"
+ "Private memory VGPRs: %d\n"
"Code Size: %d bytes\n"
"LDS: %d blocks\n"
"Scratch: %d bytes per wave\n"
"Max Waves: %d\n"
"********************\n\n\n",
conf->num_sgprs, conf->num_vgprs,
- conf->spilled_sgprs, conf->spilled_vgprs, code_size,
+ conf->spilled_sgprs, conf->spilled_vgprs,
+ conf->private_mem_vgprs, code_size,
conf->lds_size, conf->scratch_bytes_per_wave,
max_simd_waves);
}
pipe_debug_message(debug, SHADER_INFO,
"Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
"LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
- "Spilled VGPRs: %d",
+ "Spilled VGPRs: %d PrivMem VGPRs: %d",
conf->num_sgprs, conf->num_vgprs, code_size,
conf->lds_size, conf->scratch_bytes_per_wave,
max_simd_waves, conf->spilled_sgprs,
- conf->spilled_vgprs);
+ conf->spilled_vgprs, conf->private_mem_vgprs);
}
-static const char *si_get_shader_name(struct si_shader *shader,
- unsigned processor)
+const char *si_get_shader_name(struct si_shader *shader, unsigned processor)
{
switch (processor) {
case PIPE_SHADER_VERTEX:
void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
struct pipe_debug_callback *debug, unsigned processor,
- FILE *file)
+ FILE *file, bool check_debug_option)
{
- if (file != stderr ||
+ if (!check_debug_option ||
r600_can_dump_shader(&sscreen->b, processor))
si_dump_shader_key(processor, &shader->key, file);
- if (file != stderr && shader->binary.llvm_ir_string) {
+ if (!check_debug_option && shader->binary.llvm_ir_string) {
fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
si_get_shader_name(shader, processor));
fprintf(file, "%s\n", shader->binary.llvm_ir_string);
}
- if (file != stderr ||
+ if (!check_debug_option ||
(r600_can_dump_shader(&sscreen->b, processor) &&
!(sscreen->b.debug_flags & DBG_NO_ASM))) {
fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));
fprintf(file, "\n");
}
- si_shader_dump_stats(sscreen, &shader->config,
- shader->selector ? shader->selector->info.num_inputs : 0,
- si_get_shader_binary_size(shader), debug, processor,
- file);
+ si_shader_dump_stats(sscreen, shader, debug, processor, file,
+ check_debug_option);
}
int si_compile_llvm(struct si_screen *sscreen,
if (!(sscreen->b.debug_flags & (DBG_NO_IR | DBG_PREOPT_IR))) {
fprintf(stderr, "%s LLVM IR:\n\n", name);
- LLVMDumpModule(mod);
+ ac_dump_module(mod);
fprintf(stderr, "\n");
}
}
struct si_shader_context ctx;
struct si_shader *shader;
struct gallivm_state *gallivm = &ctx.gallivm;
- struct lp_build_tgsi_context *bld_base = &ctx.soa.bld_base;
+ LLVMBuilderRef builder;
+ struct lp_build_tgsi_context *bld_base = &ctx.bld_base;
struct lp_build_context *uint = &bld_base->uint_bld;
struct si_shader_output_values *outputs;
struct tgsi_shader_info *gsinfo = &gs_selector->info;
si_init_shader_ctx(&ctx, sscreen, shader, tm);
ctx.type = PIPE_SHADER_VERTEX;
- create_meta_data(&ctx);
+ builder = gallivm->builder;
+
create_function(&ctx);
preload_ring_buffers(&ctx);
args[7] = uint->one; /* SLC */
args[8] = uint->zero; /* TFE */
- /* Fetch vertex data from GSVS ring */
+ /* Fetch the vertex stream ID. */
+ LLVMValueRef stream_id;
+
+ if (gs_selector->so.num_outputs)
+ stream_id = unpack_param(&ctx, ctx.param_streamout_config, 24, 2);
+ else
+ stream_id = uint->zero;
+
+ /* Fill in output information. */
for (i = 0; i < gsinfo->num_outputs; ++i) {
- unsigned chan;
+ outputs[i].semantic_name = gsinfo->output_semantic_name[i];
+ outputs[i].semantic_index = gsinfo->output_semantic_index[i];
- outputs[i].name = gsinfo->output_semantic_name[i];
- outputs[i].sid = gsinfo->output_semantic_index[i];
+ for (int chan = 0; chan < 4; chan++) {
+ outputs[i].vertex_stream[chan] =
+ (gsinfo->output_streams[i] >> (2 * chan)) & 3;
+ }
+ }
- for (chan = 0; chan < 4; chan++) {
- args[2] = lp_build_const_int32(gallivm,
- (i * 4 + chan) *
- gs_selector->gs_max_out_vertices * 16 * 4);
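+ /* Build a switch over the vertex stream ID: stream 0 also does the
+ * VS exports, while the other streams only feed streamout (see the
+ * per-stream blocks below).
+ */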
+ LLVMBasicBlockRef end_bb;
+ LLVMValueRef switch_inst;
+
+ end_bb = LLVMAppendBasicBlockInContext(gallivm->context, ctx.main_fn, "end");
+ switch_inst = LLVMBuildSwitch(builder, stream_id, end_bb, 4);
+
+ for (int stream = 0; stream < 4; stream++) {
+ LLVMBasicBlockRef bb;
+ unsigned offset;
+
+ if (!gsinfo->num_stream_output_components[stream])
+ continue;
+
+ if (stream > 0 && !gs_selector->so.num_outputs)
+ continue;
- outputs[i].values[chan] =
- LLVMBuildBitCast(gallivm->builder,
+ bb = LLVMInsertBasicBlockInContext(gallivm->context, end_bb, "out");
+ LLVMAddCase(switch_inst, lp_build_const_int32(gallivm, stream), bb);
+ LLVMPositionBuilderAtEnd(builder, bb);
+
+ /* Fetch vertex data from GSVS ring */
+ offset = 0;
+ for (i = 0; i < gsinfo->num_outputs; ++i) {
+ for (unsigned chan = 0; chan < 4; chan++) {
+ if (!(gsinfo->output_usagemask[i] & (1 << chan)) ||
+ outputs[i].vertex_stream[chan] != stream) {
+ outputs[i].values[chan] = ctx.bld_base.base.undef;
+ continue;
+ }
+
+ args[2] = lp_build_const_int32(
+ gallivm,
+ offset * gs_selector->gs_max_out_vertices * 16 * 4);
+ offset++;
+
+ outputs[i].values[chan] =
+ LLVMBuildBitCast(gallivm->builder,
lp_build_intrinsic(gallivm->builder,
"llvm.SI.buffer.load.dword.i32.i32",
ctx.i32, args, 9,
LP_FUNC_ATTR_READONLY),
ctx.f32, "");
+ }
+ }
+
+ /* Streamout and exports. */
+ if (gs_selector->so.num_outputs) {
+ si_llvm_emit_streamout(&ctx, outputs,
+ gsinfo->num_outputs,
+ stream);
}
+
+ if (stream == 0)
+ si_llvm_export_vs(bld_base, outputs, gsinfo->num_outputs);
+
+ LLVMBuildBr(builder, end_bb);
}
- si_llvm_export_vs(bld_base, outputs, gsinfo->num_outputs);
+ LLVMPositionBuilderAtEnd(builder, end_bb);
LLVMBuildRetVoid(gallivm->builder);
/* Dump LLVM IR before any optimization passes */
if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
- LLVMDumpModule(bld_base->base.gallivm->module);
+ ac_dump_module(bld_base->base.gallivm->module);
si_llvm_finalize_module(&ctx,
r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_GEOMETRY));
if (r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
fprintf(stderr, "GS Copy Shader:\n");
si_shader_dump(sscreen, ctx.shader, debug,
- PIPE_SHADER_GEOMETRY, stderr);
+ PIPE_SHADER_GEOMETRY, stderr, true);
r = si_shader_binary_upload(sscreen, ctx.shader);
}
fprintf(f, " part.vs.epilog.export_prim_id = %u\n", key->part.vs.epilog.export_prim_id);
fprintf(f, " as_es = %u\n", key->as_es);
fprintf(f, " as_ls = %u\n", key->as_ls);
- fprintf(f, " mono.vs.fix_fetch = 0x%x\n", key->mono.vs.fix_fetch);
+
+ fprintf(f, " mono.vs.fix_fetch = {");
+ for (i = 0; i < SI_MAX_ATTRIBS; i++)
+ fprintf(f, !i ? "%u" : ", %u", key->mono.vs.fix_fetch[i]);
+ fprintf(f, "}\n");
break;
case PIPE_SHADER_TESS_CTRL:
fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
+ fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
default:
assert(0);
}
+
+ if ((shader == PIPE_SHADER_GEOMETRY ||
+ shader == PIPE_SHADER_TESS_EVAL ||
+ shader == PIPE_SHADER_VERTEX) &&
+ !key->as_es && !key->as_ls) {
+ fprintf(f, " opt.hw_vs.kill_outputs = 0x%"PRIx64"\n", key->opt.hw_vs.kill_outputs);
+ fprintf(f, " opt.hw_vs.kill_outputs2 = 0x%x\n", key->opt.hw_vs.kill_outputs2);
+ fprintf(f, " opt.hw_vs.clip_disable = %u\n", key->opt.hw_vs.clip_disable);
+ }
}
static void si_init_shader_ctx(struct si_shader_context *ctx,
(shader && shader->selector) ? &shader->selector->info : NULL,
(shader && shader->selector) ? shader->selector->tokens : NULL);
- bld_base = &ctx->soa.bld_base;
+ bld_base = &ctx->bld_base;
bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
bld_base->op_actions[TGSI_OPCODE_INTERP_CENTROID] = interp_action;
exports.num = 0;
- if (shader->key.as_es || shader->key.as_ls)
+ if (ctx->type == PIPE_SHADER_FRAGMENT ||
+ ctx->type == PIPE_SHADER_COMPUTE ||
+ shader->key.as_es ||
+ shader->key.as_ls)
return;
/* Process all LLVM instructions. */
}
}
+static void si_count_scratch_private_memory(struct si_shader_context *ctx)
+{
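+ /* Count the dwords of alloca'd (scratch) memory per thread. This is
+ * reported as "Private memory VGPRs" in the shader stats. */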
+ ctx->shader->config.private_mem_vgprs = 0;
+
+ /* Process all LLVM instructions. */
+ LLVMBasicBlockRef bb = LLVMGetFirstBasicBlock(ctx->main_fn);
+ while (bb) {
+ LLVMValueRef next = LLVMGetFirstInstruction(bb);
+
+ while (next) {
+ LLVMValueRef inst = next;
+ next = LLVMGetNextInstruction(next);
+
+ if (LLVMGetInstructionOpcode(inst) != LLVMAlloca)
+ continue;
+
+ LLVMTypeRef type = LLVMGetElementType(LLVMTypeOf(inst));
+ /* No idea why LLVM aligns allocas to 4 elements. */
+ unsigned alignment = LLVMGetAlignment(inst);
+ unsigned dw_size = align(llvm_get_type_size(type) / 4, alignment);
+ ctx->shader->config.private_mem_vgprs += dw_size;
+ }
+ bb = LLVMGetNextBasicBlock(bb);
+ }
+}
+
static bool si_compile_tgsi_main(struct si_shader_context *ctx,
struct si_shader *shader)
{
struct si_shader_selector *sel = shader->selector;
- struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
+ struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
switch (ctx->type) {
case PIPE_SHADER_VERTEX:
return false;
}
- create_meta_data(ctx);
create_function(ctx);
preload_ring_buffers(ctx);
si_init_shader_ctx(&ctx, sscreen, shader, tm);
ctx.separate_prolog = !is_monolithic;
- memset(shader->info.vs_output_param_offset, 0xff,
+ memset(shader->info.vs_output_param_offset, EXP_PARAM_UNDEFINED,
sizeof(shader->info.vs_output_param_offset));
shader->info.uses_instanceid = sel->info.uses_instanceid;
- bld_base = &ctx.soa.bld_base;
+ bld_base = &ctx.bld_base;
ctx.load_system_value = declare_system_value;
if (!si_compile_tgsi_main(&ctx, shader)) {
/* Dump LLVM IR before any optimization passes */
if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
r600_can_dump_shader(&sscreen->b, ctx.type))
- LLVMDumpModule(mod);
+ ac_dump_module(mod);
si_llvm_finalize_module(&ctx,
r600_extra_shader_checks(&sscreen->b, ctx.type));
- /* Post-optimization transformations. */
+ /* Post-optimization transformations and analysis. */
si_eliminate_const_vs_outputs(&ctx);
+ if ((debug && debug->debug_message) ||
+ r600_can_dump_shader(&sscreen->b, ctx.type))
+ si_count_scratch_private_memory(&ctx);
+
/* Compile to bytecode. */
r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
mod, debug, ctx.type, "TGSI shader");
* LLVM 3.9svn has this bug.
*/
if (sel->type == PIPE_SHADER_COMPUTE) {
- unsigned *props = sel->info.properties;
unsigned wave_size = 64;
unsigned max_vgprs = 256;
unsigned max_sgprs = sscreen->b.chip_class >= VI ? 800 : 512;
unsigned max_sgprs_per_wave = 128;
- unsigned max_block_threads;
-
- if (props[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH])
- max_block_threads = props[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
- props[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
- props[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
- else
- max_block_threads = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
-
+ unsigned max_block_threads = si_get_max_workgroup_size(shader);
unsigned min_waves_per_cu = DIV_ROUND_UP(max_block_threads, wave_size);
unsigned min_waves_per_simd = DIV_ROUND_UP(min_waves_per_cu, 4);
union si_shader_part_key *key)
{
struct gallivm_state *gallivm = &ctx->gallivm;
- struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
+ struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
LLVMTypeRef params[5];
int num_params, i;
union si_shader_part_key *key)
{
struct gallivm_state *gallivm = &ctx->gallivm;
- struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
+ struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
LLVMTypeRef params[16];
LLVMValueRef func;
int last_sgpr, num_params;
interp[1] = LLVMBuildExtractValue(gallivm->builder, ret,
interp_vgpr + 1, "");
interp_ij = lp_build_gather_values(gallivm, interp, 2);
- interp_ij = LLVMBuildBitCast(gallivm->builder, interp_ij,
- ctx->v2i32, "");
}
/* Use the absolute location of the input. */
union si_shader_part_key *key)
{
struct gallivm_state *gallivm = &ctx->gallivm;
- struct lp_build_tgsi_context *bld_base = &ctx->soa.bld_base;
+ struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
LLVMTypeRef params[16+8*4+3];
LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
int last_sgpr, num_params, i;
return true;
}
-static void si_fix_num_sgprs(struct si_shader *shader)
+void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
+ unsigned *lds_size)
+{
+ /* SPI barrier management bug:
+ * Make sure we have at least 4k of LDS in use to avoid the bug.
+ * It applies to workgroup sizes of more than one wavefront.
+ */
+ if (sscreen->b.family == CHIP_BONAIRE ||
+ sscreen->b.family == CHIP_KABINI ||
+ sscreen->b.family == CHIP_MULLINS)
+ *lds_size = MAX2(*lds_size, 8);
+}
+
+static void si_fix_resource_usage(struct si_screen *sscreen,
+ struct si_shader *shader)
{
unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
+
+ if (shader->selector->type == PIPE_SHADER_COMPUTE &&
+ si_get_max_workgroup_size(shader) > 64) {
+ si_multiwave_lds_size_workaround(sscreen,
+ &shader->config.lds_size);
+ }
}
int si_shader_create(struct si_screen *sscreen, LLVMTargetMachineRef tm,
struct pipe_debug_callback *debug)
{
struct si_shader_selector *sel = shader->selector;
- struct si_shader *mainp = sel->main_shader_part;
+ struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
int r;
/* LS, ES, VS are compiled on demand if the main part hasn't been
}
}
- si_fix_num_sgprs(shader);
+ si_fix_resource_usage(sscreen, shader);
si_shader_dump(sscreen, shader, debug, sel->info.processor,
- stderr);
+ stderr, true);
/* Upload. */
r = si_shader_binary_upload(sscreen, shader);