#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_tgsi.h"
#include "gallivm/lp_bld_arit.h"
+#include "gallivm/lp_bld_flow.h"
#include "radeon_llvm.h"
#include "radeon_llvm_emit.h"
#include "util/u_memory.h"
struct tgsi_token * tokens;
struct si_pipe_shader *shader;
unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
+ int param_streamout_config;
+ int param_streamout_write_index;
+ int param_streamout_offset[4];
+ int param_vertex_id;
+ int param_instance_id;
LLVMValueRef const_md;
- LLVMValueRef const_resource;
+ LLVMValueRef const_resource[NUM_CONST_BUFFERS];
#if HAVE_LLVM >= 0x0304
LLVMValueRef ddxy_lds;
#endif
- LLVMValueRef *constants;
+ LLVMValueRef *constants[NUM_CONST_BUFFERS];
LLVMValueRef *resources;
LLVMValueRef *samplers;
+ LLVMValueRef so_buffers[4];
};
static struct si_shader_context * si_shader_context(
{
struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
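+ /* The descriptor pointer now points to an array, so the GEP needs two
+ * indices: a constant 0 to step through the pointer and 'offset' to select
+ * the array element. */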
+ LLVMValueRef indices[2] = {
+ LLVMConstInt(LLVMInt64TypeInContext(base->gallivm->context), 0, false),
+ offset
+ };
LLVMValueRef computed_ptr = LLVMBuildGEP(
- base->gallivm->builder, base_ptr, &offset, 1, "");
+ base->gallivm->builder, base_ptr, indices, 2, "");
LLVMValueRef result = LLVMBuildLoad(base->gallivm->builder, computed_ptr, "");
LLVMSetMetadata(result, 1, si_shader_ctx->const_md);
return result;
}
-static LLVMValueRef get_instance_index(
+static LLVMValueRef get_instance_index_for_fetch(
struct radeon_llvm_context * radeon_bld,
unsigned divisor)
{
+ struct si_shader_context *si_shader_ctx =
+ si_shader_context(&radeon_bld->soa.bld_base);
struct gallivm_state * gallivm = radeon_bld->soa.bld_base.base.gallivm;
- LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_INSTANCE_ID);
+ LLVMValueRef result = LLVMGetParam(radeon_bld->main_fn,
+ si_shader_ctx->param_instance_id);
result = LLVMBuildAdd(gallivm->builder, result, LLVMGetParam(
radeon_bld->main_fn, SI_PARAM_START_INSTANCE), "");
if (divisor) {
/* Build index from instance ID, start instance and divisor */
si_shader_ctx->shader->shader.uses_instanceid = true;
- buffer_index = get_instance_index(&si_shader_ctx->radeon_bld, divisor);
+ buffer_index = get_instance_index_for_fetch(&si_shader_ctx->radeon_bld, divisor);
} else {
/* Load the buffer index, which is always stored in VGPR0
* for Vertex Shaders */
- buffer_index = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_VERTEX_ID);
+ buffer_index = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
+ si_shader_ctx->param_vertex_id);
}
vec4_type = LLVMVectorType(base->elem_type, 4);
unsigned index,
const struct tgsi_full_declaration *decl)
{
-
+ struct si_shader_context *si_shader_ctx =
+ si_shader_context(&radeon_bld->soa.bld_base);
LLVMValueRef value = 0;
switch (decl->Semantic.Name) {
case TGSI_SEMANTIC_INSTANCEID:
- value = get_instance_index(radeon_bld, 1);
+ value = LLVMGetParam(radeon_bld->main_fn,
+ si_shader_ctx->param_instance_id);
break;
case TGSI_SEMANTIC_VERTEXID:
- value = LLVMGetParam(radeon_bld->main_fn, SI_PARAM_VERTEX_ID);
+ value = LLVMGetParam(radeon_bld->main_fn,
+ si_shader_ctx->param_vertex_id);
break;
default:
struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
struct lp_build_context * base = &bld_base->base;
const struct tgsi_ind_register *ireg = &reg->Indirect;
- unsigned idx;
+ unsigned buf, idx;
LLVMValueRef args[2];
LLVMValueRef addr;
return lp_build_gather_values(bld_base->base.gallivm, values, 4);
}
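+ /* A 2D constant register (CONST[buf][idx]) selects the constant buffer
+ * via its Dimension index; 1D registers use buffer 0. */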
+ buf = reg->Register.Dimension ? reg->Dimension.Index : 0;
idx = reg->Register.Index * 4 + swizzle;
+
if (!reg->Register.Indirect)
- return bitcast(bld_base, type, si_shader_ctx->constants[idx]);
+ return bitcast(bld_base, type, si_shader_ctx->constants[buf][idx]);
- args[0] = si_shader_ctx->const_resource;
+ args[0] = si_shader_ctx->const_resource[buf];
args[1] = lp_build_const_int32(base->gallivm, idx * 4);
addr = si_shader_ctx->radeon_bld.soa.addr[ireg->Index][ireg->Swizzle];
addr = LLVMBuildLoad(base->gallivm->builder, addr, "load addr reg");
if (si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_NEVER) {
LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][3];
+ LLVMValueRef alpha_ref = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
+ SI_PARAM_ALPHA_REF);
+
LLVMValueRef alpha_pass =
lp_build_cmp(&bld_base->base,
si_shader_ctx->shader->key.ps.alpha_func,
LLVMBuildLoad(gallivm->builder, out_ptr, ""),
- lp_build_const_float(gallivm, si_shader_ctx->shader->key.ps.alpha_ref));
+ alpha_ref);
LLVMValueRef arg =
lp_build_select(&bld_base->base,
alpha_pass,
LLVMValueRef out_elts[4];
LLVMValueRef base_elt;
LLVMValueRef ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
- LLVMValueRef const_resource = build_indexed_load(si_shader_ctx, ptr, uint->one);
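+ /* Index past the NUM_PIPE_CONST_BUFFERS user buffers to reach the
+ * driver's internal constant buffer. */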
+ LLVMValueRef constbuf_index = lp_build_const_int32(base->gallivm, NUM_PIPE_CONST_BUFFERS);
+ LLVMValueRef const_resource = build_indexed_load(si_shader_ctx, ptr, constbuf_index);
for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
LLVMValueRef out_ptr = si_shader_ctx->radeon_bld.soa.outputs[index][chan];
}
}
+static void si_dump_streamout(struct pipe_stream_output_info *so)
+{
+ unsigned i;
+
+ if (so->num_outputs)
+ fprintf(stderr, "STREAMOUT\n");
+
+ for (i = 0; i < so->num_outputs; i++) {
+ unsigned mask = ((1 << so->output[i].num_components) - 1) <<
+ so->output[i].start_component;
+ fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
+ i, so->output[i].output_buffer,
+ so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
+ so->output[i].register_index,
+ mask & 1 ? "x" : "",
+ mask & 2 ? "y" : "",
+ mask & 4 ? "z" : "",
+ mask & 8 ? "w" : "");
+ }
+}
+
+/* TBUFFER_STORE_FORMAT_{X,XY,XYZ,XYZW} <- the suffix is selected by num_channels=1..4.
+ * The type of vdata must be one of i32 (num_channels=1), v2i32 (num_channels=2),
+ * or v4i32 (num_channels=3,4). */
+static void build_tbuffer_store(struct si_shader_context *shader,
+ LLVMValueRef rsrc,
+ LLVMValueRef vdata,
+ unsigned num_channels,
+ LLVMValueRef vaddr,
+ LLVMValueRef soffset,
+ unsigned inst_offset,
+ unsigned dfmt,
+ unsigned nfmt,
+ unsigned offen,
+ unsigned idxen,
+ unsigned glc,
+ unsigned slc,
+ unsigned tfe)
+{
+ struct gallivm_state *gallivm = &shader->radeon_bld.gallivm;
+ LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
+ LLVMValueRef args[] = {
+ rsrc,
+ vdata,
+ LLVMConstInt(i32, num_channels, 0),
+ vaddr,
+ soffset,
+ LLVMConstInt(i32, inst_offset, 0),
+ LLVMConstInt(i32, dfmt, 0),
+ LLVMConstInt(i32, nfmt, 0),
+ LLVMConstInt(i32, offen, 0),
+ LLVMConstInt(i32, idxen, 0),
+ LLVMConstInt(i32, glc, 0),
+ LLVMConstInt(i32, slc, 0),
+ LLVMConstInt(i32, tfe, 0)
+ };
+
+ /* The intrinsic is overloaded; we need to add a type suffix for overloading to work. */
+ unsigned func = CLAMP(num_channels, 1, 3) - 1;
+ const char *types[] = {"i32", "v2i32", "v4i32"};
+ char name[256];
+ snprintf(name, sizeof(name), "llvm.SI.tbuffer.store.%s", types[func]);
+
+ lp_build_intrinsic(gallivm->builder, name,
+ LLVMVoidTypeInContext(gallivm->context),
+ args, Elements(args));
+}
+
+static void build_streamout_store(struct si_shader_context *shader,
+ LLVMValueRef rsrc,
+ LLVMValueRef vdata,
+ unsigned num_channels,
+ LLVMValueRef vaddr,
+ LLVMValueRef soffset,
+ unsigned inst_offset)
+{
+ static unsigned dfmt[] = {
+ V_008F0C_BUF_DATA_FORMAT_32,
+ V_008F0C_BUF_DATA_FORMAT_32_32,
+ V_008F0C_BUF_DATA_FORMAT_32_32_32,
+ V_008F0C_BUF_DATA_FORMAT_32_32_32_32
+ };
+ assert(num_channels >= 1 && num_channels <= 4);
+
+ build_tbuffer_store(shader, rsrc, vdata, num_channels, vaddr, soffset,
+ inst_offset, dfmt[num_channels-1],
+ V_008F0C_BUF_NUM_FORMAT_UINT, 1, 0, 1, 1, 0);
+}
+
+/* On SI, the vertex shader is responsible for writing streamout data
+ * to buffers. */
+static void si_llvm_emit_streamout(struct si_shader_context *shader)
+{
+ struct pipe_stream_output_info *so = &shader->shader->selector->so;
+ struct gallivm_state *gallivm = &shader->radeon_bld.gallivm;
+ LLVMBuilderRef builder = gallivm->builder;
+ int i, j;
+ struct lp_build_if_state if_ctx;
+
+ LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
+
+ LLVMValueRef so_param =
+ LLVMGetParam(shader->radeon_bld.main_fn,
+ shader->param_streamout_config);
+
+ /* Get bits [22:16], i.e. (so_param >> 16) & 127; */
+ LLVMValueRef so_vtx_count =
+ LLVMBuildAnd(builder,
+ LLVMBuildLShr(builder, so_param,
+ LLVMConstInt(i32, 16, 0), ""),
+ LLVMConstInt(i32, 127, 0), "");
+
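+ /* tid = index of this thread within the wavefront. */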
+ LLVMValueRef tid = build_intrinsic(builder, "llvm.SI.tid", i32,
+ NULL, 0, LLVMReadNoneAttribute);
+
+ /* can_emit = tid < so_vtx_count; */
+ LLVMValueRef can_emit =
+ LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");
+
+ /* Emit the streamout code conditionally. This actually avoids
+ * out-of-bounds buffer access. The hw tells us via the SGPR
+ * (so_vtx_count) which threads are allowed to emit streamout data. */
+ lp_build_if(&if_ctx, gallivm, can_emit);
+ {
+ /* The buffer offset is computed as follows:
+ * ByteOffset = streamout_offset[buffer_id]*4 +
+ * (streamout_write_index + thread_id)*stride[buffer_id] +
+ * attrib_offset
+ */
+
+ LLVMValueRef so_write_index =
+ LLVMGetParam(shader->radeon_bld.main_fn,
+ shader->param_streamout_write_index);
+
+ /* Compute (streamout_write_index + thread_id). */
+ so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");
+
+ /* Compute the write offset for each enabled buffer. */
+ LLVMValueRef so_write_offset[4] = {};
+ for (i = 0; i < 4; i++) {
+ if (!so->stride[i])
+ continue;
+
+ LLVMValueRef so_offset = LLVMGetParam(shader->radeon_bld.main_fn,
+ shader->param_streamout_offset[i]);
+ so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(i32, 4, 0), "");
+
+ so_write_offset[i] = LLVMBuildMul(builder, so_write_index,
+ LLVMConstInt(i32, so->stride[i]*4, 0), "");
+ so_write_offset[i] = LLVMBuildAdd(builder, so_write_offset[i], so_offset, "");
+ }
+
+ LLVMValueRef (*outputs)[TGSI_NUM_CHANNELS] = shader->radeon_bld.soa.outputs;
+
+ /* Write streamout data. */
+ for (i = 0; i < so->num_outputs; i++) {
+ unsigned buf_idx = so->output[i].output_buffer;
+ unsigned reg = so->output[i].register_index;
+ unsigned start = so->output[i].start_component;
+ unsigned num_comps = so->output[i].num_components;
+ LLVMValueRef out[4];
+
+ assert(num_comps && num_comps <= 4);
+ if (!num_comps || num_comps > 4)
+ continue;
+
+ /* Load the output as int. */
+ for (j = 0; j < num_comps; j++) {
+ out[j] = LLVMBuildLoad(builder, outputs[reg][start+j], "");
+ out[j] = LLVMBuildBitCast(builder, out[j], i32, "");
+ }
+
+ /* Pack the output. */
+ LLVMValueRef vdata = NULL;
+
+ switch (num_comps) {
+ case 1: /* as i32 */
+ vdata = out[0];
+ break;
+ case 2: /* as v2i32 */
+ case 3: /* as v4i32 (aligned to 4) */
+ case 4: /* as v4i32 */
+ vdata = LLVMGetUndef(LLVMVectorType(i32, util_next_power_of_two(num_comps)));
+ for (j = 0; j < num_comps; j++) {
+ vdata = LLVMBuildInsertElement(builder, vdata, out[j],
+ LLVMConstInt(i32, j, 0), "");
+ }
+ break;
+ }
+
+ build_streamout_store(shader, shader->so_buffers[buf_idx],
+ vdata, num_comps,
+ so_write_offset[buf_idx],
+ LLVMConstInt(i32, 0, 0),
+ so->output[i].dst_offset*4);
+ }
+ }
+ lp_build_endif(&if_ctx);
+}
+
/* XXX: This is partially implemented for VS only at this point. It is not complete */
static void si_llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
LLVMValueRef last_args[9] = { 0 };
LLVMValueRef pos_args[4][9] = { { 0 } };
unsigned semantic_name;
- unsigned color_count = 0;
unsigned param_count = 0;
- int depth_index = -1, stencil_index = -1;
+ int depth_index = -1, stencil_index = -1, psize_index = -1, edgeflag_index = -1;
int i;
+ if (si_shader_ctx->shader->selector->so.num_outputs) {
+ si_llvm_emit_streamout(si_shader_ctx);
+ }
+
while (!tgsi_parse_end_of_tokens(parse)) {
struct tgsi_full_declaration *d =
&parse->FullToken.FullDeclaration;
/* Select the correct target */
switch(semantic_name) {
case TGSI_SEMANTIC_PSIZE:
- shader->vs_out_misc_write = 1;
- shader->vs_out_point_size = 1;
- target = V_008DFC_SQ_EXP_POS + 1;
- break;
+ shader->vs_out_misc_write = true;
+ shader->vs_out_point_size = true;
+ psize_index = index;
+ continue;
+ case TGSI_SEMANTIC_EDGEFLAG:
+ shader->vs_out_misc_write = true;
+ shader->vs_out_edgeflag = true;
+ edgeflag_index = index;
+ continue;
case TGSI_SEMANTIC_POSITION:
if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
target = V_008DFC_SQ_EXP_POS;
shader->output[i].param_offset = param_count;
param_count++;
} else {
- target = V_008DFC_SQ_EXP_MRT + color_count;
+ target = V_008DFC_SQ_EXP_MRT + shader->output[i].sid;
if (si_shader_ctx->shader->key.ps.alpha_to_one) {
si_alpha_to_one(bld_base, index);
}
- if (color_count == 0 &&
+ if (shader->output[i].sid == 0 &&
si_shader_ctx->shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
si_alpha_test(bld_base, index);
-
- color_count++;
}
break;
case TGSI_SEMANTIC_CLIPDIST:
LLVMVoidTypeInContext(base->gallivm->context),
args, 9);
}
-
}
if (semantic_name == TGSI_SEMANTIC_CLIPDIST) {
if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
unsigned pos_idx = 0;
+ /* We need to add the position output manually if it's missing. */
+ if (!pos_args[0][0]) {
+ pos_args[0][0] = lp_build_const_int32(base->gallivm, 0xf); /* writemask */
+ pos_args[0][1] = uint->zero; /* EXEC mask */
+ pos_args[0][2] = uint->zero; /* last export? */
+ pos_args[0][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS);
+ pos_args[0][4] = uint->zero; /* COMPR flag */
+ pos_args[0][5] = base->zero; /* X */
+ pos_args[0][6] = base->zero; /* Y */
+ pos_args[0][7] = base->zero; /* Z */
+ pos_args[0][8] = base->one; /* W */
+ }
+
+ /* Write the misc vector (point size, edgeflag, layer, viewport). */
+ if (shader->vs_out_misc_write) {
+ pos_args[1][0] = lp_build_const_int32(base->gallivm, /* writemask */
+ shader->vs_out_point_size |
+ (shader->vs_out_edgeflag << 1));
+ pos_args[1][1] = uint->zero; /* EXEC mask */
+ pos_args[1][2] = uint->zero; /* last export? */
+ pos_args[1][3] = lp_build_const_int32(base->gallivm, V_008DFC_SQ_EXP_POS + 1);
+ pos_args[1][4] = uint->zero; /* COMPR flag */
+ pos_args[1][5] = base->zero; /* X */
+ pos_args[1][6] = base->zero; /* Y */
+ pos_args[1][7] = base->zero; /* Z */
+ pos_args[1][8] = base->zero; /* W */
+
+ if (shader->vs_out_point_size) {
+ pos_args[1][5] = LLVMBuildLoad(base->gallivm->builder,
+ si_shader_ctx->radeon_bld.soa.outputs[psize_index][0], "");
+ }
+
+ if (shader->vs_out_edgeflag) {
+ LLVMValueRef output = LLVMBuildLoad(base->gallivm->builder,
+ si_shader_ctx->radeon_bld.soa.outputs[edgeflag_index][0], "");
+
+ /* The output is a float, but the hw expects an integer
+ * with the first bit containing the edge flag. */
+ output = LLVMBuildFPToUI(base->gallivm->builder, output,
+ bld_base->uint_bld.elem_type, "");
+
+ output = lp_build_min(&bld_base->int_bld, output, bld_base->int_bld.one);
+
+ /* The LLVM intrinsic expects a float. */
+ pos_args[1][6] = LLVMBuildBitCast(base->gallivm->builder, output,
+ base->elem_type, "");
+ }
+ }
+
for (i = 0; i < 4; i++)
if (pos_args[i][0])
shader->nr_pos_exports++;
const struct tgsi_full_instruction * inst = emit_data->inst;
unsigned opcode = inst->Instruction.Opcode;
unsigned target = inst->Texture.Texture;
- unsigned sampler_src, sampler_index;
LLVMValueRef coords[4];
LLVMValueRef address[16];
- LLVMValueRef sample_index_rewrite = NULL;
- LLVMValueRef sample_chan = NULL;
int ref_pos;
unsigned num_coords = tgsi_util_get_texture_coord_dim(target, &ref_pos);
unsigned count = 0;
unsigned chan;
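+ /* The sampler/resource operand is the last source register of the
+ * texture instruction. */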
+ unsigned sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
+ unsigned sampler_index = emit_data->inst->Src[sampler_src].Register.Index;
+
+ if (target == TGSI_TEXTURE_BUFFER) {
+ LLVMTypeRef i128 = LLVMIntTypeInContext(gallivm->context, 128);
+ LLVMTypeRef v2i128 = LLVMVectorType(i128, 2);
+ LLVMTypeRef i8 = LLVMInt8TypeInContext(gallivm->context);
+ LLVMTypeRef v16i8 = LLVMVectorType(i8, 16);
+
+ /* Truncate v32i8 to v16i8. */
+ LLVMValueRef res = si_shader_ctx->resources[sampler_index];
+ res = LLVMBuildBitCast(gallivm->builder, res, v2i128, "");
+ res = LLVMBuildExtractElement(gallivm->builder, res, bld_base->uint_bld.zero, "");
+ res = LLVMBuildBitCast(gallivm->builder, res, v16i8, "");
+
+ emit_data->dst_type = LLVMVectorType(bld_base->base.elem_type, 4);
+ emit_data->args[0] = res;
+ emit_data->args[1] = bld_base->uint_bld.zero;
+ emit_data->args[2] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, 0);
+ emit_data->arg_count = 3;
+ return;
+ }
/* Fetch and project texture coordinates */
coords[3] = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_W);
if (num_coords > 2)
address[count++] = coords[2];
- /* Pack LOD */
+ /* Pack LOD or sample index */
if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXF)
address[count++] = coords[3];
"");
}
- sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;
- sampler_index = emit_data->inst->Src[sampler_src].Register.Index;
-
/* Adjust the sample index according to FMASK.
*
* For uncompressed MSAA surfaces, FMASK should return 0x76543210,
target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
struct lp_build_context *uint_bld = &bld_base->uint_bld;
struct lp_build_emit_data txf_emit_data = *emit_data;
- LLVMValueRef txf_address[16];
+ LLVMValueRef txf_address[4];
unsigned txf_count = count;
- memcpy(txf_address, address, sizeof(address));
+ memcpy(txf_address, address, sizeof(txf_address));
+
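+ /* Zero the address components that are unused by the FMASK fetch. */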
+ if (target == TGSI_TEXTURE_2D_MSAA) {
+ txf_address[2] = bld_base->uint_bld.zero;
+ }
+ txf_address[3] = bld_base->uint_bld.zero;
/* Pad to a power-of-two size. */
while (txf_count < util_next_power_of_two(txf_count))
LLVMInt32TypeInContext(bld_base->base.gallivm->context), 4);
txf_emit_data.args[0] = lp_build_gather_values(gallivm, txf_address, txf_count);
txf_emit_data.args[1] = si_shader_ctx->resources[FMASK_TEX_OFFSET + sampler_index];
- txf_emit_data.args[2] = lp_build_const_int32(bld_base->base.gallivm, target);
+ txf_emit_data.args[2] = lp_build_const_int32(bld_base->base.gallivm,
+ target == TGSI_TEXTURE_2D_MSAA ? TGSI_TEXTURE_2D : TGSI_TEXTURE_2D_ARRAY);
txf_emit_data.arg_count = 3;
build_tex_intrinsic(&txf_action, bld_base, &txf_emit_data);
/* Initialize some constants. */
- if (target == TGSI_TEXTURE_2D_MSAA) {
- sample_chan = LLVMConstInt(uint_bld->elem_type, 2, 0);
- } else {
- sample_chan = LLVMConstInt(uint_bld->elem_type, 3, 0);
- }
-
LLVMValueRef four = LLVMConstInt(uint_bld->elem_type, 4, 0);
LLVMValueRef F = LLVMConstInt(uint_bld->elem_type, 0xF, 0);
txf_emit_data.output[0],
uint_bld->zero, "");
- LLVMValueRef sample_index =
- LLVMBuildExtractElement(gallivm->builder,
- txf_emit_data.args[0],
- sample_chan, "");
+ unsigned sample_chan = target == TGSI_TEXTURE_2D_MSAA ? 2 : 3;
LLVMValueRef sample_index4 =
- LLVMBuildMul(gallivm->builder, sample_index, four, "");
+ LLVMBuildMul(gallivm->builder, address[sample_chan], four, "");
LLVMValueRef shifted_fmask =
LLVMBuildLShr(gallivm->builder, fmask, sample_index4, "");
LLVMBuildICmp(gallivm->builder, LLVMIntNE,
fmask_word1, uint_bld->zero, "");
- sample_index_rewrite =
+ /* Replace the MSAA sample index. */
+ address[sample_chan] =
LLVMBuildSelect(gallivm->builder, word1_is_nonzero,
- final_sample, sample_index, "");
+ final_sample, address[sample_chan], "");
}
/* Resource */
assert(inst->Texture.NumOffsets == 1);
- address[0] =
- lp_build_add(uint_bld, address[0],
- bld->immediates[off->Index][off->SwizzleX]);
- if (num_coords > 1)
+ switch (target) {
+ case TGSI_TEXTURE_3D:
+ address[2] = lp_build_add(uint_bld, address[2],
+ bld->immediates[off->Index][off->SwizzleZ]);
+ /* fall through */
+ case TGSI_TEXTURE_2D:
+ case TGSI_TEXTURE_SHADOW2D:
+ case TGSI_TEXTURE_RECT:
+ case TGSI_TEXTURE_SHADOWRECT:
+ case TGSI_TEXTURE_2D_ARRAY:
+ case TGSI_TEXTURE_SHADOW2D_ARRAY:
address[1] =
lp_build_add(uint_bld, address[1],
- bld->immediates[off->Index][off->SwizzleY]);
- if (num_coords > 2)
- address[2] =
- lp_build_add(uint_bld, address[2],
- bld->immediates[off->Index][off->SwizzleZ]);
+ bld->immediates[off->Index][off->SwizzleY]);
+ /* fall through */
+ case TGSI_TEXTURE_1D:
+ case TGSI_TEXTURE_SHADOW1D:
+ case TGSI_TEXTURE_1D_ARRAY:
+ case TGSI_TEXTURE_SHADOW1D_ARRAY:
+ address[0] =
+ lp_build_add(uint_bld, address[0],
+ bld->immediates[off->Index][off->SwizzleX]);
+ break;
+ /* texture offsets do not apply to other texture targets */
+ }
}
emit_data->dst_type = LLVMVectorType(
address[count++] = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));
emit_data->args[0] = lp_build_gather_values(gallivm, address, count);
-
- /* Replace the MSAA sample index if needed. */
- if (sample_index_rewrite) {
- emit_data->args[0] =
- LLVMBuildInsertElement(gallivm->builder, emit_data->args[0],
- sample_index_rewrite, sample_chan, "");
- }
}
static void build_tex_intrinsic(const struct lp_build_tgsi_action * action,
struct lp_build_emit_data * emit_data)
{
struct lp_build_context * base = &bld_base->base;
- char intr_name[23];
+ char intr_name[127];
+
+ if (emit_data->inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
+ emit_data->output[emit_data->chan] = build_intrinsic(
+ base->gallivm->builder,
+ "llvm.SI.vs.load.input", emit_data->dst_type,
+ emit_data->args, emit_data->arg_count,
+ LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
+ return;
+ }
sprintf(intr_name, "%sv%ui32", action->intr_name,
LLVMGetVectorSize(LLVMTypeOf(emit_data->args[0])));
{
struct si_shader_context *si_shader_ctx = si_shader_context(bld_base);
const struct tgsi_full_instruction *inst = emit_data->inst;
+ struct gallivm_state *gallivm = bld_base->base.gallivm;
+
+ if (inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
+ LLVMTypeRef i32 = LLVMInt32TypeInContext(gallivm->context);
+ LLVMTypeRef v8i32 = LLVMVectorType(i32, 8);
+
+ /* Read the size from the buffer descriptor directly. */
+ LLVMValueRef size = si_shader_ctx->resources[inst->Src[1].Register.Index];
+ size = LLVMBuildBitCast(gallivm->builder, size, v8i32, "");
+ size = LLVMBuildExtractElement(gallivm->builder, size,
+ lp_build_const_int32(gallivm, 2), "");
+ emit_data->args[0] = size;
+ return;
+ }
/* Mip level */
emit_data->args[0] = lp_build_emit_fetch(bld_base, inst, 0, TGSI_CHAN_X);
4);
}
+static void build_txq_intrinsic(const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ if (emit_data->inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
+ /* Just return the buffer size. */
+ emit_data->output[emit_data->chan] = emit_data->args[0];
+ return;
+ }
+
+ build_tgsi_intrinsic_nomem(action, bld_base, emit_data);
+}
+
#if HAVE_LLVM >= 0x0304
static void si_llvm_emit_ddxy(
static const struct lp_build_tgsi_action txq_action = {
.fetch_args = txq_fetch_args,
- .emit = build_tgsi_intrinsic_nomem,
+ .emit = build_txq_intrinsic,
.intr_name = "llvm.SI.resinfo"
};
{
struct lp_build_tgsi_context *bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
struct gallivm_state *gallivm = bld_base->base.gallivm;
- LLVMTypeRef params[20], f32, i8, i32, v2i32, v3i32;
- unsigned i;
+ LLVMTypeRef params[21], f32, i8, i32, v2i32, v3i32;
+ unsigned i, last_sgpr, num_params;
i8 = LLVMInt8TypeInContext(gallivm->context);
i32 = LLVMInt32TypeInContext(gallivm->context);
v2i32 = LLVMVectorType(i32, 2);
v3i32 = LLVMVectorType(i32, 3);
- params[SI_PARAM_CONST] = LLVMPointerType(LLVMVectorType(i8, 16), CONST_ADDR_SPACE);
- params[SI_PARAM_SAMPLER] = params[SI_PARAM_CONST];
- params[SI_PARAM_RESOURCE] = LLVMPointerType(LLVMVectorType(i8, 32), CONST_ADDR_SPACE);
-
- if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
- params[SI_PARAM_VERTEX_BUFFER] = params[SI_PARAM_SAMPLER];
+ params[SI_PARAM_CONST] = LLVMPointerType(
+ LLVMArrayType(LLVMVectorType(i8, 16), NUM_CONST_BUFFERS), CONST_ADDR_SPACE);
+ /* We assume at most 16 textures per program at the moment.
+ * This will probably need to be changed to support bindless textures. */
+ params[SI_PARAM_SAMPLER] = LLVMPointerType(
+ LLVMArrayType(LLVMVectorType(i8, 16), NUM_SAMPLER_STATES), CONST_ADDR_SPACE);
+ params[SI_PARAM_RESOURCE] = LLVMPointerType(
+ LLVMArrayType(LLVMVectorType(i8, 32), NUM_SAMPLER_VIEWS), CONST_ADDR_SPACE);
+
+ switch (si_shader_ctx->type) {
+ case TGSI_PROCESSOR_VERTEX:
+ params[SI_PARAM_VERTEX_BUFFER] = params[SI_PARAM_CONST];
+ params[SI_PARAM_SO_BUFFER] = params[SI_PARAM_CONST];
params[SI_PARAM_START_INSTANCE] = i32;
- params[SI_PARAM_VERTEX_ID] = i32;
- params[SI_PARAM_DUMMY_0] = i32;
- params[SI_PARAM_DUMMY_1] = i32;
- params[SI_PARAM_INSTANCE_ID] = i32;
- radeon_llvm_create_func(&si_shader_ctx->radeon_bld, params, 9);
+ num_params = SI_PARAM_START_INSTANCE+1;
- } else {
+ /* The locations of the other parameters are assigned dynamically. */
+
+ /* Streamout SGPRs. */
+ if (si_shader_ctx->shader->selector->so.num_outputs) {
+ params[si_shader_ctx->param_streamout_config = num_params++] = i32;
+ params[si_shader_ctx->param_streamout_write_index = num_params++] = i32;
+ }
+ /* A streamout buffer offset is loaded if the stride is non-zero. */
+ for (i = 0; i < 4; i++) {
+ if (!si_shader_ctx->shader->selector->so.stride[i])
+ continue;
+
+ params[si_shader_ctx->param_streamout_offset[i] = num_params++] = i32;
+ }
+
+ last_sgpr = num_params-1;
+
+ /* VGPRs */
+ params[si_shader_ctx->param_vertex_id = num_params++] = i32;
+ params[num_params++] = i32; /* unused */
+ params[num_params++] = i32; /* unused */
+ params[si_shader_ctx->param_instance_id = num_params++] = i32;
+ break;
+
+ case TGSI_PROCESSOR_FRAGMENT:
+ params[SI_PARAM_ALPHA_REF] = f32;
params[SI_PARAM_PRIM_MASK] = i32;
+ last_sgpr = SI_PARAM_PRIM_MASK;
params[SI_PARAM_PERSP_SAMPLE] = v2i32;
params[SI_PARAM_PERSP_CENTER] = v2i32;
params[SI_PARAM_PERSP_CENTROID] = v2i32;
params[SI_PARAM_ANCILLARY] = f32;
params[SI_PARAM_SAMPLE_COVERAGE] = f32;
params[SI_PARAM_POS_FIXED_PT] = f32;
- radeon_llvm_create_func(&si_shader_ctx->radeon_bld, params, 20);
+ num_params = SI_PARAM_POS_FIXED_PT+1;
+ break;
+
+ default:
+ assert(0 && "unimplemented shader");
+ return;
}
+ assert(num_params <= Elements(params));
+ radeon_llvm_create_func(&si_shader_ctx->radeon_bld, params, num_params);
radeon_llvm_shader_type(si_shader_ctx->radeon_bld.main_fn, si_shader_ctx->type);
- for (i = SI_PARAM_CONST; i <= SI_PARAM_VERTEX_BUFFER; ++i) {
- LLVMValueRef P = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, i);
- LLVMAddAttribute(P, LLVMInRegAttribute);
- }
- if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
- LLVMValueRef P = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
- SI_PARAM_START_INSTANCE);
- LLVMAddAttribute(P, LLVMInRegAttribute);
+ for (i = 0; i <= last_sgpr; ++i) {
+ LLVMValueRef P = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, i);
+ switch (i) {
+ default:
+ LLVMAddAttribute(P, LLVMInRegAttribute);
+ break;
+#if HAVE_LLVM >= 0x0304
+ /* We tell LLVM that array inputs are passed by value to allow the Sinking
+ * pass to move loads. Inputs are constant so this is fine. */
+ case SI_PARAM_CONST:
+ case SI_PARAM_SAMPLER:
+ case SI_PARAM_RESOURCE:
+ LLVMAddAttribute(P, LLVMByValAttribute);
+ break;
+#endif
+ }
}
#if HAVE_LLVM >= 0x0304
struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
struct gallivm_state * gallivm = bld_base->base.gallivm;
const struct tgsi_shader_info * info = bld_base->info;
+ unsigned buf;
+ LLVMValueRef ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
- unsigned i, num_const = info->file_max[TGSI_FILE_CONSTANT] + 1;
-
- LLVMValueRef ptr;
+ for (buf = 0; buf < NUM_CONST_BUFFERS; buf++) {
+ unsigned i, num_const = info->const_file_max[buf] + 1;
- if (num_const == 0)
- return;
+ if (num_const == 0)
+ continue;
- /* Allocate space for the constant values */
- si_shader_ctx->constants = CALLOC(num_const * 4, sizeof(LLVMValueRef));
-
- /* Load the resource descriptor */
- ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn, SI_PARAM_CONST);
- si_shader_ctx->const_resource = build_indexed_load(si_shader_ctx, ptr, bld_base->uint_bld.zero);
-
- /* Load the constants, we rely on the code sinking to do the rest */
- for (i = 0; i < num_const * 4; ++i) {
- LLVMValueRef args[2] = {
- si_shader_ctx->const_resource,
- lp_build_const_int32(gallivm, i * 4)
- };
- si_shader_ctx->constants[i] = build_intrinsic(gallivm->builder, "llvm.SI.load.const",
- bld_base->base.elem_type, args, 2, LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
+ /* Allocate space for the constant values */
+ si_shader_ctx->constants[buf] = CALLOC(num_const * 4, sizeof(LLVMValueRef));
+
+ /* Load the resource descriptor */
+ si_shader_ctx->const_resource[buf] =
+ build_indexed_load(si_shader_ctx, ptr, lp_build_const_int32(gallivm, buf));
+
+ /* Load the constants, we rely on the code sinking to do the rest */
+ for (i = 0; i < num_const * 4; ++i) {
+ LLVMValueRef args[2] = {
+ si_shader_ctx->const_resource[buf],
+ lp_build_const_int32(gallivm, i * 4)
+ };
+ si_shader_ctx->constants[buf][i] =
+ build_intrinsic(gallivm->builder, "llvm.SI.load.const",
+ bld_base->base.elem_type, args, 2,
+ LLVMReadNoneAttribute | LLVMNoUnwindAttribute);
+ }
}
}
}
}
+static void preload_streamout_buffers(struct si_shader_context *si_shader_ctx)
+{
+ struct lp_build_tgsi_context * bld_base = &si_shader_ctx->radeon_bld.soa.bld_base;
+ struct gallivm_state * gallivm = bld_base->base.gallivm;
+ unsigned i;
+
+ if (!si_shader_ctx->shader->selector->so.num_outputs)
+ return;
+
+ LLVMValueRef buf_ptr = LLVMGetParam(si_shader_ctx->radeon_bld.main_fn,
+ SI_PARAM_SO_BUFFER);
+
+ /* Load the resources, we rely on the code sinking to do the rest */
+ for (i = 0; i < 4; ++i) {
+ if (si_shader_ctx->shader->selector->so.stride[i]) {
+ LLVMValueRef offset = lp_build_const_int32(gallivm, i);
+
+ si_shader_ctx->so_buffers[i] = build_indexed_load(si_shader_ctx, buf_ptr, offset);
+ }
+ }
+}
+
int si_compile_llvm(struct r600_context *rctx, struct si_pipe_shader *shader,
LLVMModuleRef mod)
{
unsigned i;
uint32_t *ptr;
- bool dump;
struct radeon_llvm_binary binary;
-
- dump = debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE);
-
+ bool dump = r600_can_dump_shader(&rctx->screen->b,
+ shader->selector ? shader->selector->tokens : NULL);
memset(&binary, 0, sizeof(binary));
radeon_llvm_compile(mod, &binary,
- r600_get_llvm_processor_name(rctx->screen->family), dump);
- if (dump) {
+ r600_get_llvm_processor_name(rctx->screen->b.family), dump);
+ if (dump && ! binary.disassembled) {
fprintf(stderr, "SI CODE:\n");
for (i = 0; i < binary.code_size; i+=4 ) {
fprintf(stderr, "%02x%02x%02x%02x\n", binary.code[i + 3],
}
/* copy new shader */
- si_resource_reference(&shader->bo, NULL);
- shader->bo = si_resource_create_custom(rctx->context.screen, PIPE_USAGE_IMMUTABLE,
+ r600_resource_reference(&shader->bo, NULL);
+ shader->bo = r600_resource_create_custom(rctx->b.b.screen, PIPE_USAGE_IMMUTABLE,
binary.code_size);
if (shader->bo == NULL) {
return -ENOMEM;
}
- ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
+ ptr = (uint32_t*)rctx->b.ws->buffer_map(shader->bo->cs_buf, rctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE);
if (0 /*R600_BIG_ENDIAN*/) {
for (i = 0; i < binary.code_size / 4; ++i) {
ptr[i] = util_bswap32(*(uint32_t*)(binary.code + i*4));
} else {
memcpy(ptr, binary.code, binary.code_size);
}
- rctx->ws->buffer_unmap(shader->bo->cs_buf);
+ rctx->b.ws->buffer_unmap(shader->bo->cs_buf);
free(binary.code);
free(binary.config);
struct tgsi_shader_info shader_info;
struct lp_build_tgsi_context * bld_base;
LLVMModuleRef mod;
- bool dump;
int r = 0;
-
- dump = debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE);
+ bool dump = r600_can_dump_shader(&rctx->screen->b, shader->selector->tokens);
assert(shader->shader.noutput == 0);
assert(shader->shader.ninterp == 0);
create_function(&si_shader_ctx);
preload_constants(&si_shader_ctx);
preload_samplers(&si_shader_ctx);
+ preload_streamout_buffers(&si_shader_ctx);
shader->shader.nr_cbufs = rctx->framebuffer.nr_cbufs;
* conversion fails. */
if (dump) {
tgsi_dump(sel->tokens, 0);
+ si_dump_streamout(&sel->so);
}
if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
- FREE(si_shader_ctx.constants);
+ for (int i = 0; i < NUM_CONST_BUFFERS; i++)
+ FREE(si_shader_ctx.constants[i]);
FREE(si_shader_ctx.resources);
FREE(si_shader_ctx.samplers);
return -EINVAL;
radeon_llvm_dispose(&si_shader_ctx.radeon_bld);
tgsi_parse_free(&si_shader_ctx.parse);
- FREE(si_shader_ctx.constants);
+ for (int i = 0; i < NUM_CONST_BUFFERS; i++)
+ FREE(si_shader_ctx.constants[i]);
FREE(si_shader_ctx.resources);
FREE(si_shader_ctx.samplers);
void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
{
- si_resource_reference(&shader->bo, NULL);
+ r600_resource_reference(&shader->bo, NULL);
}