#include "ac_binary.h"
#include "ac_llvm_util.h"
+#include "ac_exp_param.h"
#include "si_shader_internal.h"
#include "si_pipe.h"
#include "sid.h"
static void si_init_shader_ctx(struct si_shader_context *ctx,
struct si_screen *sscreen,
- struct si_shader *shader,
LLVMTargetMachineRef tm);
static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
struct lp_build_tgsi_context *bld_base,
struct lp_build_emit_data *emit_data);
-static void si_dump_shader_key(unsigned shader, struct si_shader_key *key,
+static void si_dump_shader_key(unsigned processor, struct si_shader *shader,
FILE *f);
+static unsigned llvm_get_type_size(LLVMTypeRef type);
+
static void si_build_vs_prolog_function(struct si_shader_context *ctx,
union si_shader_part_key *key);
static void si_build_vs_epilog_function(struct si_shader_context *ctx,
{
switch (ctx->type) {
case PIPE_SHADER_TESS_CTRL:
- return unpack_param(ctx, SI_PARAM_REL_IDS, 0, 8);
+ return unpack_param(ctx, ctx->param_tcs_rel_ids, 0, 8);
case PIPE_SHADER_TESS_EVAL:
return LLVMGetParam(ctx->main_fn,
static LLVMValueRef
get_tcs_in_patch_stride(struct si_shader_context *ctx)
{
- if (ctx->type == PIPE_SHADER_VERTEX)
- return unpack_param(ctx, SI_PARAM_LS_OUT_LAYOUT, 0, 13);
- else if (ctx->type == PIPE_SHADER_TESS_CTRL)
- return unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 0, 13);
- else {
- assert(0);
- return NULL;
- }
+ /* LS and HS now read the input patch stride from the same VS state
+  * SGPR, so the per-stage switch is gone: 13 bits starting at bit 8
+  * hold the stride (presumably in dwords -- confirm against where
+  * param_vs_state_bits is packed on the state-emit side). */
+ return unpack_param(ctx, ctx->param_vs_state_bits, 8, 13);
}
static LLVMValueRef
get_tcs_out_patch_stride(struct si_shader_context *ctx)
{
- return unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 0, 13);
+ /* Output patch stride is kept in the low 13 bits of the TCS output
+  * LDS layout SGPR. */
+ return unpack_param(ctx, ctx->param_tcs_out_lds_layout, 0, 13);
}
static LLVMValueRef
{
return lp_build_mul_imm(&ctx->bld_base.uint_bld,
unpack_param(ctx,
- SI_PARAM_TCS_OUT_OFFSETS,
+ ctx->param_tcs_out_lds_offsets,
0, 16),
4);
}
{
return lp_build_mul_imm(&ctx->bld_base.uint_bld,
unpack_param(ctx,
- SI_PARAM_TCS_OUT_OFFSETS,
+ ctx->param_tcs_out_lds_offsets,
16, 16),
4);
}
LLVMValueRef input[3];
/* Load the T list */
- t_list_ptr = LLVMGetParam(ctx->main_fn, SI_PARAM_VERTEX_BUFFERS);
+ t_list_ptr = LLVMGetParam(ctx->main_fn, ctx->param_vertex_buffers);
t_offset = LLVMConstInt(ctx->i32, input_index, 0);
ctx->param_vertex_index0 +
input_index);
- fix_fetch = ctx->shader->key.mono.vs.fix_fetch[input_index];
+ fix_fetch = ctx->shader->key.mono.vs_fix_fetch[input_index];
/* Do multiple loads for special formats. */
switch (fix_fetch) {
ctx->param_vs_prim_id);
case PIPE_SHADER_TESS_CTRL:
return LLVMGetParam(ctx->main_fn,
- SI_PARAM_PATCH_ID);
+ ctx->param_tcs_patch_id);
case PIPE_SHADER_TESS_EVAL:
return LLVMGetParam(ctx->main_fn,
ctx->param_tes_patch_id);
case PIPE_SHADER_GEOMETRY:
return LLVMGetParam(ctx->main_fn,
- SI_PARAM_PRIMITIVE_ID);
+ ctx->param_gs_prim_id);
default:
assert(0);
return ctx->i32_0;
LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
LLVMValueRef param_stride, constant16;
- vertices_per_patch = unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 9, 6);
- num_patches = unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 0, 9);
+ vertices_per_patch = unpack_param(ctx, ctx->param_tcs_offchip_layout, 9, 6);
+ num_patches = unpack_param(ctx, ctx->param_tcs_offchip_layout, 0, 9);
total_vertices = LLVMBuildMul(gallivm->builder, vertices_per_patch,
num_patches, "");
if (!vertex_index) {
LLVMValueRef patch_data_offset =
- unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 16, 16);
+ unpack_param(ctx, ctx->param_tcs_offchip_layout, 16, 16);
base_addr = LLVMBuildAdd(gallivm->builder, base_addr,
patch_data_offset, "");
struct si_shader_context *ctx = si_shader_context(bld_base);
LLVMValueRef dw_addr, stride;
- stride = unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 13, 8);
+ stride = unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
dw_addr = get_tcs_in_current_patch_offset(ctx);
dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
LLVMValueRef dw_addr, stride;
if (reg->Register.Dimension) {
- stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
+ stride = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 13, 8);
dw_addr = get_tcs_out_current_patch_offset(ctx);
dw_addr = get_dw_address(ctx, NULL, reg, stride, dw_addr);
} else {
LLVMValueRef rw_buffers, buffer, base, addr;
rw_buffers = LLVMGetParam(ctx->main_fn,
- SI_PARAM_RW_BUFFERS);
+ ctx->param_rw_buffers);
buffer = ac_build_indexed_load_const(&ctx->ac, rw_buffers,
LLVMConstInt(ctx->i32, SI_HS_RING_TESS_OFFCHIP, 0));
- base = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
+ base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
addr = get_tcs_tes_buffer_address_from_reg(ctx, NULL, reg);
return buffer_load(bld_base, type, swizzle, buffer, base, addr, true);
}
if (reg->Register.Dimension) {
- stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
+ stride = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 13, 8);
dw_addr = get_tcs_out_current_patch_offset(ctx);
dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
skip_lds_store = !sh_info->reads_pervertex_outputs;
}
rw_buffers = LLVMGetParam(ctx->main_fn,
- SI_PARAM_RW_BUFFERS);
+ ctx->param_rw_buffers);
buffer = ac_build_indexed_load_const(&ctx->ac, rw_buffers,
LLVMConstInt(ctx->i32, SI_HS_RING_TESS_OFFCHIP, 0));
- base = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
+ base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
buf_addr = get_tcs_tes_buffer_address_from_reg(ctx, reg, NULL);
/* Get the vertex offset parameter */
vtx_offset_param = reg->Dimension.Index;
if (vtx_offset_param < 2) {
- vtx_offset_param += SI_PARAM_VTX0_OFFSET;
+ vtx_offset_param += ctx->param_gs_vtx0_offset;
} else {
assert(vtx_offset_param < 6);
- vtx_offset_param += SI_PARAM_VTX2_OFFSET - 2;
+ vtx_offset_param += ctx->param_gs_vtx2_offset - 2;
}
vtx_offset = lp_build_mul_imm(uint,
LLVMGetParam(ctx->main_fn,
struct lp_build_context *uint_bld = &ctx->bld_base.uint_bld;
struct gallivm_state *gallivm = &ctx->gallivm;
LLVMBuilderRef builder = gallivm->builder;
- LLVMValueRef desc = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
+ LLVMValueRef desc = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
LLVMValueRef buf_index = LLVMConstInt(ctx->i32, SI_PS_CONST_SAMPLE_POSITIONS, 0);
LLVMValueRef resource = ac_build_indexed_load_const(&ctx->ac, desc, buf_index);
LLVMGetParam(ctx->main_fn,
ctx->param_vertex_id),
LLVMGetParam(ctx->main_fn,
- SI_PARAM_BASE_VERTEX), "");
+ ctx->param_base_vertex), "");
break;
case TGSI_SEMANTIC_VERTEXID_NOBASE:
- value = LLVMGetParam(ctx->main_fn,
- ctx->param_vertex_id);
+ /* Unused. Clarify the meaning in indexed vs. non-indexed
+ * draws if this is ever used again. */
+ assert(false);
break;
case TGSI_SEMANTIC_BASEVERTEX:
- value = LLVMGetParam(ctx->main_fn,
- SI_PARAM_BASE_VERTEX);
+ {
+ /* For non-indexed draws, the base vertex set by the driver
+ * (for direct draws) or the CP (for indirect draws) is the
+ * first vertex ID, but GLSL expects 0 to be returned.
+ */
+ LLVMValueRef vs_state = LLVMGetParam(ctx->main_fn, ctx->param_vs_state_bits);
+ LLVMValueRef indexed;
+
+ indexed = LLVMBuildLShr(gallivm->builder, vs_state, ctx->i32_1, "");
+ indexed = LLVMBuildTrunc(gallivm->builder, indexed, ctx->i1, "");
+
+ value = LLVMBuildSelect(gallivm->builder, indexed,
+ LLVMGetParam(ctx->main_fn, ctx->param_base_vertex),
+ ctx->i32_0, "");
break;
+ }
case TGSI_SEMANTIC_BASEINSTANCE:
- value = LLVMGetParam(ctx->main_fn,
- SI_PARAM_START_INSTANCE);
+ value = LLVMGetParam(ctx->main_fn, ctx->param_start_instance);
break;
case TGSI_SEMANTIC_DRAWID:
- value = LLVMGetParam(ctx->main_fn,
- SI_PARAM_DRAWID);
+ value = LLVMGetParam(ctx->main_fn, ctx->param_draw_id);
break;
case TGSI_SEMANTIC_INVOCATIONID:
if (ctx->type == PIPE_SHADER_TESS_CTRL)
- value = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
+ value = unpack_param(ctx, ctx->param_tcs_rel_ids, 8, 5);
else if (ctx->type == PIPE_SHADER_GEOMETRY)
value = LLVMGetParam(ctx->main_fn,
- SI_PARAM_GS_INSTANCE_ID);
+ ctx->param_gs_instance_id);
else
assert(!"INVOCATIONID not implemented");
break;
case TGSI_SEMANTIC_VERTICESIN:
if (ctx->type == PIPE_SHADER_TESS_CTRL)
- value = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 26, 6);
+ value = unpack_param(ctx, ctx->param_tcs_out_lds_layout, 26, 6);
else if (ctx->type == PIPE_SHADER_TESS_EVAL)
- value = unpack_param(ctx, SI_PARAM_TCS_OFFCHIP_LAYOUT, 9, 7);
+ value = unpack_param(ctx, ctx->param_tcs_offchip_layout, 9, 7);
else
assert(!"invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
break;
int param = si_shader_io_get_unique_index(decl->Semantic.Name, 0);
rw_buffers = LLVMGetParam(ctx->main_fn,
- SI_PARAM_RW_BUFFERS);
+ ctx->param_rw_buffers);
buffer = ac_build_indexed_load_const(&ctx->ac, rw_buffers,
LLVMConstInt(ctx->i32, SI_HS_RING_TESS_OFFCHIP, 0));
- base = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
+ base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
addr = get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), NULL,
LLVMConstInt(ctx->i32, param, 0));
int i, offset;
slot = LLVMConstInt(ctx->i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
- buf = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
+ buf = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
buf = ac_build_indexed_load_const(&ctx->ac, buf, slot);
offset = decl->Semantic.Name == TGSI_SEMANTIC_DEFAULT_TESSINNER_SI ? 4 : 0;
}
break;
+ case TGSI_SEMANTIC_SUBGROUP_SIZE:
+ value = LLVMConstInt(ctx->i32, 64, 0);
+ break;
+
+ case TGSI_SEMANTIC_SUBGROUP_INVOCATION:
+ value = ac_get_thread_id(&ctx->ac);
+ break;
+
+ case TGSI_SEMANTIC_SUBGROUP_EQ_MASK:
+ {
+ LLVMValueRef id = ac_get_thread_id(&ctx->ac);
+ id = LLVMBuildZExt(gallivm->builder, id, ctx->i64, "");
+ value = LLVMBuildShl(gallivm->builder, LLVMConstInt(ctx->i64, 1, 0), id, "");
+ value = LLVMBuildBitCast(gallivm->builder, value, ctx->v2i32, "");
+ break;
+ }
+
+ case TGSI_SEMANTIC_SUBGROUP_GE_MASK:
+ case TGSI_SEMANTIC_SUBGROUP_GT_MASK:
+ case TGSI_SEMANTIC_SUBGROUP_LE_MASK:
+ case TGSI_SEMANTIC_SUBGROUP_LT_MASK:
+ {
+ LLVMValueRef id = ac_get_thread_id(&ctx->ac);
+ if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_GT_MASK ||
+ decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK) {
+ /* All bits set except LSB */
+ value = LLVMConstInt(ctx->i64, -2, 0);
+ } else {
+ /* All bits set */
+ value = LLVMConstInt(ctx->i64, -1, 0);
+ }
+ id = LLVMBuildZExt(gallivm->builder, id, ctx->i64, "");
+ value = LLVMBuildShl(gallivm->builder, value, id, "");
+ if (decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LE_MASK ||
+ decl->Semantic.Name == TGSI_SEMANTIC_SUBGROUP_LT_MASK)
+ value = LLVMBuildNot(gallivm->builder, value, "");
+ value = LLVMBuildBitCast(gallivm->builder, value, ctx->v2i32, "");
+ break;
+ }
+
default:
assert(!"unknown system value");
return;
static LLVMValueRef load_const_buffer_desc(struct si_shader_context *ctx, int i)
{
LLVMValueRef list_ptr = LLVMGetParam(ctx->main_fn,
- SI_PARAM_CONST_BUFFERS);
+ ctx->param_const_buffers);
return ac_build_indexed_load_const(&ctx->ac, list_ptr,
LLVMConstInt(ctx->i32, i, 0));
idx = reg->Register.Index * 4 + swizzle;
if (reg->Register.Dimension && reg->Dimension.Indirect) {
- LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, SI_PARAM_CONST_BUFFERS);
+ LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_const_buffers);
LLVMValueRef index;
index = get_bounded_indirect_index(ctx, ®->DimIndirect,
reg->Dimension.Index,
unsigned chan;
unsigned const_chan;
LLVMValueRef base_elt;
- LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
+ LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
SI_VS_CONST_CLIP_PLANES, 0);
LLVMValueRef const_resource = ac_build_indexed_load_const(&ctx->ac, ptr, constbuf_index);
LLVMValueRef so_write_offset[4] = {};
LLVMValueRef so_buffers[4];
LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
- SI_PARAM_RW_BUFFERS);
+ ctx->param_rw_buffers);
for (i = 0; i < 4; i++) {
if (!so->stride[i])
LLVMValueRef lds_vertex_stride, lds_vertex_offset, lds_base;
uint64_t inputs;
- invocation_id = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
+ invocation_id = unpack_param(ctx, ctx->param_tcs_rel_ids, 8, 5);
- rw_buffers = LLVMGetParam(ctx->main_fn, SI_PARAM_RW_BUFFERS);
+ rw_buffers = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
buffer = ac_build_indexed_load_const(&ctx->ac, rw_buffers,
LLVMConstInt(ctx->i32, SI_HS_RING_TESS_OFFCHIP, 0));
- buffer_offset = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
+ buffer_offset = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
- lds_vertex_stride = unpack_param(ctx, SI_PARAM_TCS_IN_LAYOUT, 13, 8);
+ lds_vertex_stride = unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
lds_vertex_offset = LLVMBuildMul(gallivm->builder, invocation_id,
lds_vertex_stride, "");
lds_base = get_tcs_in_current_patch_offset(ctx);
lds_base = LLVMBuildAdd(gallivm->builder, lds_base, lds_vertex_offset, "");
- inputs = ctx->shader->key.mono.tcs.inputs_to_copy;
+ inputs = ctx->shader->key.mono.ff_tcs_inputs_to_copy;
while (inputs) {
unsigned i = u_bit_scan64(&inputs);
/* Get the buffer. */
rw_buffers = LLVMGetParam(ctx->main_fn,
- SI_PARAM_RW_BUFFERS);
+ ctx->param_rw_buffers);
buffer = ac_build_indexed_load_const(&ctx->ac, rw_buffers,
LLVMConstInt(ctx->i32, SI_HS_RING_TESS_FACTOR, 0));
/* Get the offset. */
tf_base = LLVMGetParam(ctx->main_fn,
- SI_PARAM_TESS_FACTOR_OFFSET);
+ ctx->param_tcs_factor_offset);
byteoffset = LLVMBuildMul(gallivm->builder, rel_patch_id,
LLVMConstInt(ctx->i32, 4 * stride, 0), "");
buf = ac_build_indexed_load_const(&ctx->ac, rw_buffers,
LLVMConstInt(ctx->i32, SI_HS_RING_TESS_OFFCHIP, 0));
- base = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
+ base = LLVMGetParam(ctx->main_fn, ctx->param_tcs_offchip_offset);
param_outer = si_shader_io_get_unique_index(
TGSI_SEMANTIC_TESSOUTER, 0);
si_copy_tcs_inputs(bld_base);
rel_patch_id = get_rel_patch_id(ctx);
- invocation_id = unpack_param(ctx, SI_PARAM_REL_IDS, 8, 5);
+ invocation_id = unpack_param(ctx, ctx->param_tcs_rel_ids, 8, 5);
tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);
/* Return epilog parameters from this function. */
/* RW_BUFFERS pointer */
rw_buffers = LLVMGetParam(ctx->main_fn,
- SI_PARAM_RW_BUFFERS);
+ ctx->param_rw_buffers);
rw_buffers = LLVMBuildPtrToInt(builder, rw_buffers, ctx->i64, "");
rw_buffers = LLVMBuildBitCast(builder, rw_buffers, ctx->v2i32, "");
rw0 = LLVMBuildExtractElement(builder, rw_buffers,
/* Tess offchip and factor buffer soffset are after user SGPRs. */
offchip_layout = LLVMGetParam(ctx->main_fn,
- SI_PARAM_TCS_OFFCHIP_LAYOUT);
- offchip_soffset = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
+ ctx->param_tcs_offchip_layout);
+ offchip_soffset = LLVMGetParam(ctx->main_fn,
+ ctx->param_tcs_offchip_offset);
tf_soffset = LLVMGetParam(ctx->main_fn,
- SI_PARAM_TESS_FACTOR_OFFSET);
+ ctx->param_tcs_factor_offset);
ret = LLVMBuildInsertValue(builder, ret, offchip_layout,
- SI_SGPR_TCS_OFFCHIP_LAYOUT, "");
+ GFX6_SGPR_TCS_OFFCHIP_LAYOUT, "");
ret = LLVMBuildInsertValue(builder, ret, offchip_soffset,
- SI_TCS_NUM_USER_SGPR, "");
+ GFX6_TCS_NUM_USER_SGPR, "");
ret = LLVMBuildInsertValue(builder, ret, tf_soffset,
- SI_TCS_NUM_USER_SGPR + 1, "");
+ GFX6_TCS_NUM_USER_SGPR + 1, "");
/* VGPRs */
rel_patch_id = bitcast(bld_base, TGSI_TYPE_FLOAT, rel_patch_id);
invocation_id = bitcast(bld_base, TGSI_TYPE_FLOAT, invocation_id);
tf_lds_offset = bitcast(bld_base, TGSI_TYPE_FLOAT, tf_lds_offset);
- vgpr = SI_TCS_NUM_USER_SGPR + 2;
+ vgpr = GFX6_TCS_NUM_USER_SGPR + 2;
ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");
ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
LLVMValueRef vertex_id = LLVMGetParam(ctx->main_fn,
ctx->param_rel_auto_id);
LLVMValueRef vertex_dw_stride =
- unpack_param(ctx, SI_PARAM_LS_OUT_LAYOUT, 13, 8);
+ unpack_param(ctx, ctx->param_vs_state_bits, 24, 8);
LLVMValueRef base_dw_addr = LLVMBuildMul(gallivm->builder, vertex_id,
vertex_dw_stride, "");
LLVMValueRef *out_ptr = ctx->outputs[i];
unsigned name = info->output_semantic_name[i];
unsigned index = info->output_semantic_index[i];
+
+ /* The ARB_shader_viewport_layer_array spec contains the
+ * following issue:
+ *
+ * 2) What happens if gl_ViewportIndex or gl_Layer is
+ * written in the vertex shader and a geometry shader is
+ * present?
+ *
+ * RESOLVED: The value written by the last vertex processing
+ * stage is used. If the last vertex processing stage
+ * (vertex, tessellation evaluation or geometry) does not
+ * statically assign to gl_ViewportIndex or gl_Layer, index
+ * or layer zero is assumed.
+ *
+ * So writes to those outputs in VS-as-LS are simply ignored.
+ */
+ if (name == TGSI_SEMANTIC_LAYER ||
+ name == TGSI_SEMANTIC_VIEWPORT_INDEX)
+ continue;
+
int param = si_shader_io_get_unique_index(name, index);
LLVMValueRef dw_addr = LLVMBuildAdd(gallivm->builder, base_dw_addr,
LLVMConstInt(ctx->i32, param * 4, 0), "");
struct si_shader_context *ctx = si_shader_context(bld_base);
ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
- LLVMGetParam(ctx->main_fn, SI_PARAM_GS_WAVE_ID));
+ LLVMGetParam(ctx->main_fn, ctx->param_gs_wave_id));
}
static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context *bld_base)
if (!cond) {
/* The state is in the first bit of the user SGPR. */
cond = LLVMGetParam(ctx->main_fn,
- SI_PARAM_VS_STATE_BITS);
+ ctx->param_vs_state_bits);
cond = LLVMBuildTrunc(gallivm->builder, cond,
ctx->i1, "");
lp_build_if(&if_ctx, gallivm, cond);
/* Prevent optimizations (at least of memory accesses) across the current
* point in the program by emitting empty inline assembly that is marked as
* having side effects.
+ *
+ * Optionally, a value can be passed through the inline assembly to prevent
+ * LLVM from hoisting calls to ReadNone functions.
*/
-#if 0 /* unused currently */
-static void emit_optimization_barrier(struct si_shader_context *ctx)
+static void emit_optimization_barrier(struct si_shader_context *ctx,
+ LLVMValueRef *pvgpr)
{
+ /* Give every barrier a unique inline-asm comment string, so that
+  * LLVM cannot consider two barriers identical and merge/CSE them. */
+ static int counter = 0;
+
LLVMBuilderRef builder = ctx->gallivm.builder;
- LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
- LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, "", "", true, false);
- LLVMBuildCall(builder, inlineasm, NULL, 0, "");
+ char code[16];
+
+ snprintf(code, sizeof(code), "; %d", p_atomic_inc_return(&counter));
+
+ if (!pvgpr) {
+ LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
+ LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "", true, false);
+ LLVMBuildCall(builder, inlineasm, NULL, 0, "");
+ } else {
+ /* "=v,0" ties the i32 VGPR result to the first input operand, so
+  * the value has to flow through the (empty) asm and LLVM must
+  * preserve the dependency -- see LLVM inline-asm constraint docs. */
+ LLVMTypeRef ftype = LLVMFunctionType(ctx->i32, &ctx->i32, 1, false);
+ LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "=v,0", true, false);
+ LLVMValueRef vgpr = *pvgpr;
+ LLVMTypeRef vgpr_type = LLVMTypeOf(vgpr);
+ unsigned vgpr_size = llvm_get_type_size(vgpr_type);
+ LLVMValueRef vgpr0;
+
+ assert(vgpr_size % 4 == 0);
+
+ /* Only the first 32-bit element is actually routed through the
+  * asm; the rest of the value is reassembled around it. */
+ vgpr = LLVMBuildBitCast(builder, vgpr, LLVMVectorType(ctx->i32, vgpr_size / 4), "");
+ vgpr0 = LLVMBuildExtractElement(builder, vgpr, ctx->i32_0, "");
+ vgpr0 = LLVMBuildCall(builder, inlineasm, &vgpr0, 1, "");
+ vgpr = LLVMBuildInsertElement(builder, vgpr, vgpr0, ctx->i32_0, "");
+ vgpr = LLVMBuildBitCast(builder, vgpr, vgpr_type, "");
+
+ *pvgpr = vgpr;
+ }
}
-#endif
/* Combine these with & instead of |. */
#define NOOP_WAITCNT 0xf7f
{
LLVMValueRef index;
LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
- SI_PARAM_SHADER_BUFFERS);
+ ctx->param_shader_buffers);
if (!reg->Register.Indirect)
index = LLVMConstInt(ctx->i32, reg->Register.Index, 0);
{
struct si_shader_context *ctx = si_shader_context(bld_base);
LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
- SI_PARAM_IMAGES);
+ ctx->param_images);
LLVMValueRef index;
bool dcc_off = is_store;
static LLVMValueRef image_fetch_coords(
struct lp_build_tgsi_context *bld_base,
const struct tgsi_full_instruction *inst,
- unsigned src)
+ unsigned src, LLVMValueRef desc)
{
struct si_shader_context *ctx = si_shader_context(bld_base);
struct gallivm_state *gallivm = &ctx->gallivm;
coords[chan] = tmp;
}
- /* 1D textures are allocated and used as 2D on GFX9. */
if (ctx->screen->b.chip_class >= GFX9) {
+ /* 1D textures are allocated and used as 2D on GFX9. */
if (target == TGSI_TEXTURE_1D) {
coords[1] = ctx->i32_0;
num_coords++;
} else if (target == TGSI_TEXTURE_1D_ARRAY) {
coords[2] = coords[1];
coords[1] = ctx->i32_0;
+ num_coords++;
+ } else if (target == TGSI_TEXTURE_2D) {
+ /* The hw can't bind a slice of a 3D image as a 2D
+ * image, because it ignores BASE_ARRAY if the target
+ * is 3D. The workaround is to read BASE_ARRAY and set
+ * it as the 3rd address operand for all 2D images.
+ */
+ LLVMValueRef first_layer, const5, mask;
+
+ const5 = LLVMConstInt(ctx->i32, 5, 0);
+ mask = LLVMConstInt(ctx->i32, S_008F24_BASE_ARRAY(~0), 0);
+ first_layer = LLVMBuildExtractElement(builder, desc, const5, "");
+ first_layer = LLVMBuildAnd(builder, first_layer, mask, "");
+
+ coords[2] = first_layer;
+ num_coords++;
}
}
LLVMValueRef coords;
image_fetch_rsrc(bld_base, &inst->Src[0], false, target, &rsrc);
- coords = image_fetch_coords(bld_base, inst, 1);
+ coords = image_fetch_coords(bld_base, inst, 1, rsrc);
if (target == TGSI_TEXTURE_BUFFER) {
buffer_append_args(ctx, emit_data, rsrc, coords,
*/
bool force_glc = ctx->screen->b.chip_class == SI;
- coords = image_fetch_coords(bld_base, inst, 0);
+ image_fetch_rsrc(bld_base, &memory, true, target, &rsrc);
+ coords = image_fetch_coords(bld_base, inst, 0, rsrc);
if (target == TGSI_TEXTURE_BUFFER) {
- image_fetch_rsrc(bld_base, &memory, true, target, &rsrc);
buffer_append_args(ctx, emit_data, rsrc, coords,
ctx->i32_0, false, force_glc);
} else {
emit_data->args[1] = coords;
- image_fetch_rsrc(bld_base, &memory, true, target,
- &emit_data->args[2]);
+ emit_data->args[2] = rsrc;
emit_data->args[3] = LLVMConstInt(ctx->i32, 15, 0); /* dmask */
emit_data->arg_count = 4;
LLVMValueRef coords;
image_fetch_rsrc(bld_base, &inst->Src[0], true, target, &rsrc);
- coords = image_fetch_coords(bld_base, inst, 1);
+ coords = image_fetch_coords(bld_base, inst, 1, rsrc);
if (target == TGSI_TEXTURE_BUFFER) {
buffer_append_args(ctx, emit_data, rsrc, coords,
LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr, LLVMValueRef *fmask_ptr)
{
struct si_shader_context *ctx = si_shader_context(bld_base);
- LLVMValueRef list = LLVMGetParam(ctx->main_fn, SI_PARAM_SAMPLERS);
+ LLVMValueRef list = LLVMGetParam(ctx->main_fn, ctx->param_samplers);
const struct tgsi_full_instruction *inst = emit_data->inst;
const struct tgsi_full_src_register *reg;
unsigned target = inst->Texture.Texture;
LLVMConstInt(ctx->i32, LLVMIntNE, 0)
};
- if (LLVMTypeOf(value) != ctx->i32)
- args[0] = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
+ /* We currently have no other way to prevent LLVM from lifting the icmp
+ * calls to a dominating basic block.
+ */
+ emit_optimization_barrier(ctx, &args[0]);
+
+ if (LLVMTypeOf(args[0]) != ctx->i32)
+ args[0] = LLVMBuildBitCast(gallivm->builder, args[0], ctx->i32, "");
return lp_build_intrinsic(gallivm->builder,
"llvm.amdgcn.icmp.i32",
LLVMBuildSExt(gallivm->builder, tmp, ctx->i32, "");
}
+/* Emit TGSI ballot: build a wave-wide bitmask from src0.x and return it
+ * as two i32s (low dword in output[0], high dword in output[1]). */
+static void ballot_emit(
+ const struct lp_build_tgsi_action *action,
+ struct lp_build_tgsi_context *bld_base,
+ struct lp_build_emit_data *emit_data)
+{
+ struct si_shader_context *ctx = si_shader_context(bld_base);
+ LLVMBuilderRef builder = ctx->gallivm.builder;
+ LLVMValueRef tmp;
+
+ tmp = lp_build_emit_fetch(bld_base, emit_data->inst, 0, TGSI_CHAN_X);
+ tmp = si_emit_ballot(ctx, tmp);
+ /* Split the 64-bit mask into <2 x i32> so each dword can be stored
+  * to its own output channel. */
+ tmp = LLVMBuildBitCast(builder, tmp, ctx->v2i32, "");
+
+ emit_data->output[0] = LLVMBuildExtractElement(builder, tmp, ctx->i32_0, "");
+ emit_data->output[1] = LLVMBuildExtractElement(builder, tmp, ctx->i32_1, "");
+}
+
+/* Fetch operands for READ_INVOC: args[0] is the value to read (current
+ * destination channel of src0), args[1] is the source lane index. */
+static void read_invoc_fetch_args(
+ struct lp_build_tgsi_context *bld_base,
+ struct lp_build_emit_data *emit_data)
+{
+ emit_data->args[0] = lp_build_emit_fetch(bld_base, emit_data->inst,
+ 0, emit_data->src_chan);
+
+ /* Always read the source invocation (= lane) from the X channel. */
+ emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
+ 1, TGSI_CHAN_X);
+ emit_data->arg_count = 2;
+}
+
+/* Emit a read-lane style intrinsic (the exact intrinsic name comes from
+ * action->intr_name) on the operands fetched by read_invoc_fetch_args. */
+static void read_lane_emit(
+ const struct lp_build_tgsi_action *action,
+ struct lp_build_tgsi_context *bld_base,
+ struct lp_build_emit_data *emit_data)
+{
+ struct si_shader_context *ctx = si_shader_context(bld_base);
+ LLVMBuilderRef builder = ctx->gallivm.builder;
+
+ /* We currently have no other way to prevent LLVM from lifting the icmp
+ * calls to a dominating basic block.
+ */
+ emit_optimization_barrier(ctx, &emit_data->args[0]);
+
+ /* The intrinsic takes i32 operands; bitcast everything accordingly. */
+ for (unsigned i = 0; i < emit_data->arg_count; ++i) {
+ emit_data->args[i] = LLVMBuildBitCast(builder, emit_data->args[i],
+ ctx->i32, "");
+ }
+
+ emit_data->output[emit_data->chan] =
+ ac_build_intrinsic(&ctx->ac, action->intr_name,
+ ctx->i32, emit_data->args, emit_data->arg_count,
+ AC_FUNC_ATTR_READNONE |
+ AC_FUNC_ATTR_CONVERGENT);
+}
+
static unsigned si_llvm_get_stream(struct lp_build_tgsi_context *bld_base,
struct lp_build_emit_data *emit_data)
{
struct gallivm_state *gallivm = &ctx->gallivm;
struct lp_build_if_state if_state;
LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
- SI_PARAM_GS2VS_OFFSET);
+ ctx->param_gs2vs_offset);
LLVMValueRef gs_next_vertex;
LLVMValueRef can_emit, kill;
unsigned chan, offset;
/* Signal vertex emission */
ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
- LLVMGetParam(ctx->main_fn, SI_PARAM_GS_WAVE_ID));
+ LLVMGetParam(ctx->main_fn, ctx->param_gs_wave_id));
if (!use_kill)
lp_build_endif(&if_state);
}
/* Signal primitive cut */
stream = si_llvm_get_stream(bld_base, emit_data);
ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
- LLVMGetParam(ctx->main_fn, SI_PARAM_GS_WAVE_ID));
+ LLVMGetParam(ctx->main_fn, ctx->param_gs_wave_id));
}
static void si_llvm_emit_barrier(const struct lp_build_tgsi_action *action,
struct si_shader *shader = ctx->shader;
LLVMTypeRef params[SI_NUM_PARAMS + SI_MAX_ATTRIBS], v3i32;
LLVMTypeRef returns[16+32*4];
- unsigned i, last_sgpr, num_params, num_return_sgprs;
+ unsigned i, last_sgpr, num_params = 0, num_return_sgprs;
unsigned num_returns = 0;
unsigned num_prolog_vgprs = 0;
v3i32 = LLVMVectorType(ctx->i32, 3);
- params[SI_PARAM_RW_BUFFERS] = const_array(ctx->v16i8, SI_NUM_RW_BUFFERS);
- params[SI_PARAM_CONST_BUFFERS] = const_array(ctx->v16i8, SI_NUM_CONST_BUFFERS);
- params[SI_PARAM_SAMPLERS] = const_array(ctx->v8i32, SI_NUM_SAMPLERS);
- params[SI_PARAM_IMAGES] = const_array(ctx->v8i32, SI_NUM_IMAGES);
- params[SI_PARAM_SHADER_BUFFERS] = const_array(ctx->v4i32, SI_NUM_SHADER_BUFFERS);
+ params[ctx->param_rw_buffers = num_params++] =
+ const_array(ctx->v16i8, SI_NUM_RW_BUFFERS);
+ params[ctx->param_const_buffers = num_params++] =
+ const_array(ctx->v16i8, SI_NUM_CONST_BUFFERS);
+ params[ctx->param_samplers = num_params++] =
+ const_array(ctx->v8i32, SI_NUM_SAMPLERS);
+ params[ctx->param_images = num_params++] =
+ const_array(ctx->v8i32, SI_NUM_IMAGES);
+ params[ctx->param_shader_buffers = num_params++] =
+ const_array(ctx->v4i32, SI_NUM_SHADER_BUFFERS);
switch (ctx->type) {
case PIPE_SHADER_VERTEX:
- params[SI_PARAM_VERTEX_BUFFERS] = const_array(ctx->v16i8, SI_MAX_ATTRIBS);
- params[SI_PARAM_BASE_VERTEX] = ctx->i32;
- params[SI_PARAM_START_INSTANCE] = ctx->i32;
- params[SI_PARAM_DRAWID] = ctx->i32;
- num_params = SI_PARAM_DRAWID+1;
+ params[ctx->param_vertex_buffers = num_params++] =
+ const_array(ctx->v16i8, SI_NUM_VERTEX_BUFFERS);
+ params[ctx->param_base_vertex = num_params++] = ctx->i32;
+ params[ctx->param_start_instance = num_params++] = ctx->i32;
+ params[ctx->param_draw_id = num_params++] = ctx->i32;
+ params[ctx->param_vs_state_bits = num_params++] = ctx->i32;
if (shader->key.as_es) {
params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
} else if (shader->key.as_ls) {
- params[SI_PARAM_LS_OUT_LAYOUT] = ctx->i32;
- num_params = SI_PARAM_LS_OUT_LAYOUT+1;
+ /* no extra parameters */
} else {
- if (shader->is_gs_copy_shader) {
- num_params = SI_PARAM_RW_BUFFERS+1;
- } else {
- params[SI_PARAM_VS_STATE_BITS] = ctx->i32;
- num_params = SI_PARAM_VS_STATE_BITS+1;
- }
+ if (shader->is_gs_copy_shader)
+ num_params = ctx->param_rw_buffers + 1;
/* The locations of the other parameters are assigned dynamically. */
declare_streamout_params(ctx, &shader->selector->so,
break;
case PIPE_SHADER_TESS_CTRL:
- params[SI_PARAM_TCS_OFFCHIP_LAYOUT] = ctx->i32;
- params[SI_PARAM_TCS_OUT_OFFSETS] = ctx->i32;
- params[SI_PARAM_TCS_OUT_LAYOUT] = ctx->i32;
- params[SI_PARAM_TCS_IN_LAYOUT] = ctx->i32;
- params[ctx->param_oc_lds = SI_PARAM_TCS_OC_LDS] = ctx->i32;
- params[SI_PARAM_TESS_FACTOR_OFFSET] = ctx->i32;
- last_sgpr = SI_PARAM_TESS_FACTOR_OFFSET;
+ params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
+ params[ctx->param_tcs_out_lds_offsets = num_params++] = ctx->i32;
+ params[ctx->param_tcs_out_lds_layout = num_params++] = ctx->i32;
+ params[ctx->param_vs_state_bits = num_params++] = ctx->i32;
+ params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
+ params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
+ last_sgpr = num_params - 1;
/* VGPRs */
- params[SI_PARAM_PATCH_ID] = ctx->i32;
- params[SI_PARAM_REL_IDS] = ctx->i32;
- num_params = SI_PARAM_REL_IDS+1;
+ params[ctx->param_tcs_patch_id = num_params++] = ctx->i32;
+ params[ctx->param_tcs_rel_ids = num_params++] = ctx->i32;
- /* SI_PARAM_TCS_OC_LDS and PARAM_TESS_FACTOR_OFFSET are
+ /* param_tcs_offchip_offset and param_tcs_factor_offset are
* placed after the user SGPRs.
*/
- for (i = 0; i < SI_TCS_NUM_USER_SGPR + 2; i++)
+ for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
returns[num_returns++] = ctx->i32; /* SGPRs */
for (i = 0; i < 3; i++)
break;
case PIPE_SHADER_TESS_EVAL:
- params[SI_PARAM_TCS_OFFCHIP_LAYOUT] = ctx->i32;
- num_params = SI_PARAM_TCS_OFFCHIP_LAYOUT+1;
+ params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
if (shader->key.as_es) {
- params[ctx->param_oc_lds = num_params++] = ctx->i32;
+ params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
params[num_params++] = ctx->i32;
params[ctx->param_es2gs_offset = num_params++] = ctx->i32;
} else {
params[num_params++] = ctx->i32;
declare_streamout_params(ctx, &shader->selector->so,
params, ctx->i32, &num_params);
- params[ctx->param_oc_lds = num_params++] = ctx->i32;
+ params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
}
last_sgpr = num_params - 1;
break;
case PIPE_SHADER_GEOMETRY:
- params[SI_PARAM_GS2VS_OFFSET] = ctx->i32;
- params[SI_PARAM_GS_WAVE_ID] = ctx->i32;
- last_sgpr = SI_PARAM_GS_WAVE_ID;
+ params[ctx->param_gs2vs_offset = num_params++] = ctx->i32;
+ params[ctx->param_gs_wave_id = num_params++] = ctx->i32;
+ last_sgpr = num_params - 1;
/* VGPRs */
- params[SI_PARAM_VTX0_OFFSET] = ctx->i32;
- params[SI_PARAM_VTX1_OFFSET] = ctx->i32;
- params[SI_PARAM_PRIMITIVE_ID] = ctx->i32;
- params[SI_PARAM_VTX2_OFFSET] = ctx->i32;
- params[SI_PARAM_VTX3_OFFSET] = ctx->i32;
- params[SI_PARAM_VTX4_OFFSET] = ctx->i32;
- params[SI_PARAM_VTX5_OFFSET] = ctx->i32;
- params[SI_PARAM_GS_INSTANCE_ID] = ctx->i32;
- num_params = SI_PARAM_GS_INSTANCE_ID+1;
+ params[ctx->param_gs_vtx0_offset = num_params++] = ctx->i32;
+ params[ctx->param_gs_vtx1_offset = num_params++] = ctx->i32;
+ params[ctx->param_gs_prim_id = num_params++] = ctx->i32;
+ params[ctx->param_gs_vtx2_offset = num_params++] = ctx->i32;
+ params[ctx->param_gs_vtx3_offset = num_params++] = ctx->i32;
+ params[ctx->param_gs_vtx4_offset = num_params++] = ctx->i32;
+ params[ctx->param_gs_vtx5_offset = num_params++] = ctx->i32;
+ params[ctx->param_gs_instance_id = num_params++] = ctx->i32;
break;
case PIPE_SHADER_FRAGMENT:
LLVMBuilderRef builder = gallivm->builder;
LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
- SI_PARAM_RW_BUFFERS);
+ ctx->param_rw_buffers);
if ((ctx->type == PIPE_SHADER_VERTEX &&
ctx->shader->key.as_es) ||
if (shader->prolog)
size += shader->prolog->binary.code_size;
+ if (shader->previous_stage)
+ size += shader->previous_stage->binary.code_size;
if (shader->epilog)
size += shader->epilog->binary.code_size;
return size;
{
const struct ac_shader_binary *prolog =
shader->prolog ? &shader->prolog->binary : NULL;
+ const struct ac_shader_binary *previous_stage =
+ shader->previous_stage ? &shader->previous_stage->binary : NULL;
const struct ac_shader_binary *epilog =
shader->epilog ? &shader->epilog->binary : NULL;
const struct ac_shader_binary *mainb = &shader->binary;
unsigned char *ptr;
assert(!prolog || !prolog->rodata_size);
- assert((!prolog && !epilog) || !mainb->rodata_size);
+ assert(!previous_stage || !previous_stage->rodata_size);
+ assert((!prolog && !previous_stage && !epilog) || !mainb->rodata_size);
assert(!epilog || !epilog->rodata_size);
/* GFX9 can fetch at most 128 bytes past the end of the shader.
/* Upload. */
ptr = sscreen->b.ws->buffer_map(shader->bo->buf, NULL,
- PIPE_TRANSFER_READ_WRITE);
+ PIPE_TRANSFER_READ_WRITE |
+ PIPE_TRANSFER_UNSYNCHRONIZED);
if (prolog) {
util_memcpy_cpu_to_le32(ptr, prolog->code, prolog->code_size);
ptr += prolog->code_size;
}
+ if (previous_stage) {
+ util_memcpy_cpu_to_le32(ptr, previous_stage->code,
+ previous_stage->code_size);
+ ptr += previous_stage->code_size;
+ }
util_memcpy_cpu_to_le32(ptr, mainb->code, mainb->code_size);
ptr += mainb->code_size;
{
if (!check_debug_option ||
r600_can_dump_shader(&sscreen->b, processor))
- si_dump_shader_key(processor, &shader->key, file);
+ si_dump_shader_key(processor, shader, file);
if (!check_debug_option && shader->binary.llvm_ir_string) {
fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
if (shader->prolog)
si_shader_dump_disassembly(&shader->prolog->binary,
debug, "prolog", file);
+ if (shader->previous_stage)
+ si_shader_dump_disassembly(&shader->previous_stage->binary,
+ debug, "previous stage", file);
si_shader_dump_disassembly(&shader->binary, debug, "main", file);
shader->selector = gs_selector;
shader->is_gs_copy_shader = true;
- si_init_shader_ctx(&ctx, sscreen, shader, tm);
+ si_init_shader_ctx(&ctx, sscreen, tm);
+ ctx.shader = shader;
ctx.type = PIPE_SHADER_VERTEX;
builder = gallivm->builder;
return shader;
}
-static void si_dump_shader_key(unsigned shader, struct si_shader_key *key,
+static void si_dump_shader_key_vs(struct si_shader_key *key,
+ struct si_vs_prolog_bits *prolog,
+ const char *prefix, FILE *f)
+{
+ fprintf(f, " %s.instance_divisors = {", prefix);
+ for (int i = 0; i < ARRAY_SIZE(prolog->instance_divisors); i++) {
+ fprintf(f, !i ? "%u" : ", %u",
+ prolog->instance_divisors[i]);
+ }
+ fprintf(f, "}\n");
+
+ fprintf(f, " mono.vs.fix_fetch = {");
+ for (int i = 0; i < SI_MAX_ATTRIBS; i++)
+ fprintf(f, !i ? "%u" : ", %u", key->mono.vs_fix_fetch[i]);
+ fprintf(f, "}\n");
+}
+
+static void si_dump_shader_key(unsigned processor, struct si_shader *shader,
FILE *f)
{
- int i;
+ struct si_shader_key *key = &shader->key;
fprintf(f, "SHADER KEY\n");
- switch (shader) {
+ switch (processor) {
case PIPE_SHADER_VERTEX:
- fprintf(f, " part.vs.prolog.instance_divisors = {");
- for (i = 0; i < ARRAY_SIZE(key->part.vs.prolog.instance_divisors); i++)
- fprintf(f, !i ? "%u" : ", %u",
- key->part.vs.prolog.instance_divisors[i]);
- fprintf(f, "}\n");
- fprintf(f, " part.vs.epilog.export_prim_id = %u\n", key->part.vs.epilog.export_prim_id);
+ si_dump_shader_key_vs(key, &key->part.vs.prolog,
+ "part.vs.prolog", f);
fprintf(f, " as_es = %u\n", key->as_es);
fprintf(f, " as_ls = %u\n", key->as_ls);
-
- fprintf(f, " mono.vs.fix_fetch = {");
- for (i = 0; i < SI_MAX_ATTRIBS; i++)
- fprintf(f, !i ? "%u" : ", %u", key->mono.vs.fix_fetch[i]);
- fprintf(f, "}\n");
+ fprintf(f, " part.vs.epilog.export_prim_id = %u\n",
+ key->part.vs.epilog.export_prim_id);
break;
case PIPE_SHADER_TESS_CTRL:
+ if (shader->selector->screen->b.chip_class >= GFX9) {
+ si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
+ "part.tcs.ls_prolog", f);
+ }
fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
- fprintf(f, " mono.tcs.inputs_to_copy = 0x%"PRIx64"\n", key->mono.tcs.inputs_to_copy);
+ fprintf(f, " mono.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.ff_tcs_inputs_to_copy);
break;
case PIPE_SHADER_TESS_EVAL:
assert(0);
}
- if ((shader == PIPE_SHADER_GEOMETRY ||
- shader == PIPE_SHADER_TESS_EVAL ||
- shader == PIPE_SHADER_VERTEX) &&
+ if ((processor == PIPE_SHADER_GEOMETRY ||
+ processor == PIPE_SHADER_TESS_EVAL ||
+ processor == PIPE_SHADER_VERTEX) &&
!key->as_es && !key->as_ls) {
fprintf(f, " opt.hw_vs.kill_outputs = 0x%"PRIx64"\n", key->opt.hw_vs.kill_outputs);
fprintf(f, " opt.hw_vs.kill_outputs2 = 0x%x\n", key->opt.hw_vs.kill_outputs2);
static void si_init_shader_ctx(struct si_shader_context *ctx,
struct si_screen *sscreen,
- struct si_shader *shader,
LLVMTargetMachineRef tm)
{
struct lp_build_tgsi_context *bld_base;
struct lp_build_tgsi_action tmpl = {};
- si_llvm_context_init(ctx, sscreen, shader, tm,
- (shader && shader->selector) ? &shader->selector->info : NULL,
- (shader && shader->selector) ? shader->selector->tokens : NULL);
+ si_llvm_context_init(ctx, sscreen, tm);
bld_base = &ctx->bld_base;
bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
bld_base->op_actions[TGSI_OPCODE_VOTE_ALL].emit = vote_all_emit;
bld_base->op_actions[TGSI_OPCODE_VOTE_ANY].emit = vote_any_emit;
bld_base->op_actions[TGSI_OPCODE_VOTE_EQ].emit = vote_eq_emit;
+ bld_base->op_actions[TGSI_OPCODE_BALLOT].emit = ballot_emit;
+ bld_base->op_actions[TGSI_OPCODE_READ_FIRST].intr_name = "llvm.amdgcn.readfirstlane";
+ bld_base->op_actions[TGSI_OPCODE_READ_FIRST].emit = read_lane_emit;
+ bld_base->op_actions[TGSI_OPCODE_READ_INVOC].intr_name = "llvm.amdgcn.readlane";
+ bld_base->op_actions[TGSI_OPCODE_READ_INVOC].fetch_args = read_invoc_fetch_args;
+ bld_base->op_actions[TGSI_OPCODE_READ_INVOC].emit = read_lane_emit;
bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_llvm_emit_vertex;
bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_llvm_emit_primitive;
bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;
}
-#define EXP_TARGET (HAVE_LLVM >= 0x0500 ? 0 : 3)
-#define EXP_OUT0 (HAVE_LLVM >= 0x0500 ? 2 : 5)
-
-/* Return true if the PARAM export has been eliminated. */
-static bool si_eliminate_const_output(struct si_shader_context *ctx,
- LLVMValueRef inst, unsigned offset)
-{
- struct si_shader *shader = ctx->shader;
- unsigned num_outputs = shader->selector->info.num_outputs;
- unsigned i, default_val; /* SPI_PS_INPUT_CNTL_i.DEFAULT_VAL */
- bool is_zero[4] = {}, is_one[4] = {};
-
- for (i = 0; i < 4; i++) {
- LLVMBool loses_info;
- LLVMValueRef p = LLVMGetOperand(inst, EXP_OUT0 + i);
-
- /* It's a constant expression. Undef outputs are eliminated too. */
- if (LLVMIsUndef(p)) {
- is_zero[i] = true;
- is_one[i] = true;
- } else if (LLVMIsAConstantFP(p)) {
- double a = LLVMConstRealGetDouble(p, &loses_info);
-
- if (a == 0)
- is_zero[i] = true;
- else if (a == 1)
- is_one[i] = true;
- else
- return false; /* other constant */
- } else
- return false;
- }
-
- /* Only certain combinations of 0 and 1 can be eliminated. */
- if (is_zero[0] && is_zero[1] && is_zero[2])
- default_val = is_zero[3] ? 0 : 1;
- else if (is_one[0] && is_one[1] && is_one[2])
- default_val = is_zero[3] ? 2 : 3;
- else
- return false;
-
- /* The PARAM export can be represented as DEFAULT_VAL. Kill it. */
- LLVMInstructionEraseFromParent(inst);
-
- /* Change OFFSET to DEFAULT_VAL. */
- for (i = 0; i < num_outputs; i++) {
- if (shader->info.vs_output_param_offset[i] == offset) {
- shader->info.vs_output_param_offset[i] =
- EXP_PARAM_DEFAULT_VAL_0000 + default_val;
- break;
- }
- }
- return true;
-}
-
-struct si_vs_exports {
- unsigned num;
- unsigned offset[SI_MAX_VS_OUTPUTS];
- LLVMValueRef inst[SI_MAX_VS_OUTPUTS];
-};
-
static void si_eliminate_const_vs_outputs(struct si_shader_context *ctx)
{
struct si_shader *shader = ctx->shader;
struct tgsi_shader_info *info = &shader->selector->info;
- LLVMBasicBlockRef bb;
- struct si_vs_exports exports;
- bool removed_any = false;
-
- exports.num = 0;
if (ctx->type == PIPE_SHADER_FRAGMENT ||
ctx->type == PIPE_SHADER_COMPUTE ||
shader->key.as_ls)
return;
- /* Process all LLVM instructions. */
- bb = LLVMGetFirstBasicBlock(ctx->main_fn);
- while (bb) {
- LLVMValueRef inst = LLVMGetFirstInstruction(bb);
-
- while (inst) {
- LLVMValueRef cur = inst;
- inst = LLVMGetNextInstruction(inst);
-
- if (LLVMGetInstructionOpcode(cur) != LLVMCall)
- continue;
-
- LLVMValueRef callee = lp_get_called_value(cur);
-
- if (!lp_is_function(callee))
- continue;
-
- const char *name = LLVMGetValueName(callee);
- unsigned num_args = LLVMCountParams(callee);
-
- /* Check if this is an export instruction. */
- if ((num_args != 9 && num_args != 8) ||
- (strcmp(name, "llvm.SI.export") &&
- strcmp(name, "llvm.amdgcn.exp.f32")))
- continue;
-
- LLVMValueRef arg = LLVMGetOperand(cur, EXP_TARGET);
- unsigned target = LLVMConstIntGetZExtValue(arg);
-
- if (target < V_008DFC_SQ_EXP_PARAM)
- continue;
-
- target -= V_008DFC_SQ_EXP_PARAM;
-
- /* Eliminate constant value PARAM exports. */
- if (si_eliminate_const_output(ctx, cur, target)) {
- removed_any = true;
- } else {
- exports.offset[exports.num] = target;
- exports.inst[exports.num] = cur;
- exports.num++;
- }
- }
- bb = LLVMGetNextBasicBlock(bb);
- }
-
- /* Remove holes in export memory due to removed PARAM exports.
- * This is done by renumbering all PARAM exports.
- */
- if (removed_any) {
- ubyte current_offset[SI_MAX_VS_OUTPUTS];
- unsigned new_count = 0;
- unsigned out, i;
-
- /* Make a copy of the offsets. We need the old version while
- * we are modifying some of them. */
- assert(sizeof(current_offset) ==
- sizeof(shader->info.vs_output_param_offset));
- memcpy(current_offset, shader->info.vs_output_param_offset,
- sizeof(current_offset));
-
- for (i = 0; i < exports.num; i++) {
- unsigned offset = exports.offset[i];
-
- for (out = 0; out < info->num_outputs; out++) {
- if (current_offset[out] != offset)
- continue;
-
- LLVMSetOperand(exports.inst[i], EXP_TARGET,
- LLVMConstInt(ctx->i32,
- V_008DFC_SQ_EXP_PARAM + new_count, 0));
- shader->info.vs_output_param_offset[out] = new_count;
- new_count++;
- break;
- }
- }
- shader->info.nr_param_exports = new_count;
- }
+ ac_eliminate_const_vs_outputs(&ctx->ac,
+ ctx->main_fn,
+ shader->info.vs_output_param_offset,
+ info->num_outputs,
+ &shader->info.nr_param_exports);
}
static void si_count_scratch_private_memory(struct si_shader_context *ctx)
/**
* Compute the VS prolog key, which contains all the information needed to
* build the VS prolog function, and set shader->info bits where needed.
+ *
+ * \param info Shader info of the vertex shader.
+ * \param num_input_sgprs Number of input SGPRs for the vertex shader.
+ * \param prolog_key Key of the VS prolog
+ * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
+ * \param key Output shader part key.
*/
-static void si_get_vs_prolog_key(struct si_shader *shader,
+static void si_get_vs_prolog_key(const struct tgsi_shader_info *info,
+ unsigned num_input_sgprs,
+ const struct si_vs_prolog_bits *prolog_key,
+ struct si_shader *shader_out,
union si_shader_part_key *key)
{
- struct tgsi_shader_info *info = &shader->selector->info;
-
memset(key, 0, sizeof(*key));
- key->vs_prolog.states = shader->key.part.vs.prolog;
- key->vs_prolog.num_input_sgprs = shader->info.num_input_sgprs;
+ key->vs_prolog.states = *prolog_key;
+ key->vs_prolog.num_input_sgprs = num_input_sgprs;
key->vs_prolog.last_input = MAX2(1, info->num_inputs) - 1;
/* Set the instanceID flag. */
for (unsigned i = 0; i < info->num_inputs; i++)
if (key->vs_prolog.states.instance_divisors[i])
- shader->info.uses_instanceid = true;
+ shader_out->info.uses_instanceid = true;
}
/**
{
struct si_shader_selector *sel = shader->selector;
struct si_shader_context ctx;
- LLVMModuleRef mod;
int r = -1;
/* Dump TGSI code before doing TGSI->LLVM conversion in case the
si_dump_streamout(&sel->so);
}
- si_init_shader_ctx(&ctx, sscreen, shader, tm);
+ si_init_shader_ctx(&ctx, sscreen, tm);
+ si_llvm_context_set_tgsi(&ctx, shader);
ctx.separate_prolog = !is_monolithic;
- memset(shader->info.vs_output_param_offset, EXP_PARAM_UNDEFINED,
+ memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
sizeof(shader->info.vs_output_param_offset));
shader->info.uses_instanceid = sel->info.uses_instanceid;
bool need_prolog;
bool need_epilog;
- need_prolog = sel->info.num_inputs;
+ need_prolog = sel->vs_needs_prolog;
need_epilog = !shader->key.as_es && !shader->key.as_ls;
parts[need_prolog ? 1 : 0] = ctx.main_fn;
if (need_prolog) {
union si_shader_part_key prolog_key;
- si_get_vs_prolog_key(shader, &prolog_key);
+ si_get_vs_prolog_key(&sel->info,
+ shader->info.num_input_sgprs,
+ &shader->key.part.vs.prolog,
+ shader, &prolog_key);
si_build_vs_prolog_function(&ctx, &prolog_key);
parts[0] = ctx.main_fn;
}
si_build_wrapper_function(&ctx, parts, need_prolog ? 3 : 2, need_prolog ? 1 : 0);
}
- mod = ctx.gallivm.module;
-
/* Dump LLVM IR before any optimization passes */
if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
r600_can_dump_shader(&sscreen->b, ctx.type))
- ac_dump_module(mod);
+ LLVMDumpModule(ctx.gallivm.module);
si_llvm_finalize_module(&ctx,
r600_extra_shader_checks(&sscreen->b, ctx.type));
/* Compile to bytecode. */
r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
- mod, debug, ctx.type, "TGSI shader");
+ ctx.gallivm.module, debug, ctx.type, "TGSI shader");
si_llvm_dispose(&ctx);
if (r) {
fprintf(stderr, "LLVM failed to compile shader\n");
struct si_shader_context ctx;
struct gallivm_state *gallivm = &ctx.gallivm;
- si_init_shader_ctx(&ctx, sscreen, &shader, tm);
+ si_init_shader_ctx(&ctx, sscreen, tm);
+ ctx.shader = &shader;
ctx.type = type;
switch (type) {
LLVMBuildRetVoid(gallivm->builder);
}
+static bool si_get_vs_prolog(struct si_screen *sscreen,
+ LLVMTargetMachineRef tm,
+ struct si_shader *shader,
+ struct pipe_debug_callback *debug,
+ struct si_shader *main_part,
+ const struct si_vs_prolog_bits *key)
+{
+ struct si_shader_selector *vs = main_part->selector;
+
+ /* The prolog is a no-op if there are no inputs. */
+ if (!vs->vs_needs_prolog)
+ return true;
+
+ /* Get the prolog. */
+ union si_shader_part_key prolog_key;
+ si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
+ key, shader, &prolog_key);
+
+ shader->prolog =
+ si_get_shader_part(sscreen, &sscreen->vs_prologs,
+ PIPE_SHADER_VERTEX, true, &prolog_key, tm,
+ debug, si_build_vs_prolog_function,
+ "Vertex Shader Prolog");
+ return shader->prolog != NULL;
+}
+
/**
* Create & compile a vertex shader epilog. This a helper used by VS and TES.
*/
struct si_shader *shader,
struct pipe_debug_callback *debug)
{
- struct tgsi_shader_info *info = &shader->selector->info;
- union si_shader_part_key prolog_key;
-
- /* Get the prolog. */
- si_get_vs_prolog_key(shader, &prolog_key);
-
- /* The prolog is a no-op if there are no inputs. */
- if (info->num_inputs) {
- shader->prolog =
- si_get_shader_part(sscreen, &sscreen->vs_prologs,
- PIPE_SHADER_VERTEX, true,
- &prolog_key, tm, debug,
- si_build_vs_prolog_function,
- "Vertex Shader Prolog");
- if (!shader->prolog)
- return false;
- }
+ if (!si_get_vs_prolog(sscreen, tm, shader, debug, shader,
+ &shader->key.part.vs.prolog))
+ return false;
/* Get the epilog. */
if (!shader->key.as_es && !shader->key.as_ls &&
struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
LLVMTypeRef params[16];
LLVMValueRef func;
- int last_sgpr, num_params;
+ int last_sgpr, num_params = 0;
/* Declare inputs. Only RW_BUFFERS and TESS_FACTOR_OFFSET are used. */
- params[SI_PARAM_RW_BUFFERS] = const_array(ctx->v16i8, SI_NUM_RW_BUFFERS);
- params[SI_PARAM_CONST_BUFFERS] = ctx->i64;
- params[SI_PARAM_SAMPLERS] = ctx->i64;
- params[SI_PARAM_IMAGES] = ctx->i64;
- params[SI_PARAM_SHADER_BUFFERS] = ctx->i64;
- params[SI_PARAM_TCS_OFFCHIP_LAYOUT] = ctx->i32;
- params[SI_PARAM_TCS_OUT_OFFSETS] = ctx->i32;
- params[SI_PARAM_TCS_OUT_LAYOUT] = ctx->i32;
- params[SI_PARAM_TCS_IN_LAYOUT] = ctx->i32;
- params[ctx->param_oc_lds = SI_PARAM_TCS_OC_LDS] = ctx->i32;
- params[SI_PARAM_TESS_FACTOR_OFFSET] = ctx->i32;
- last_sgpr = SI_PARAM_TESS_FACTOR_OFFSET;
- num_params = last_sgpr + 1;
+ params[ctx->param_rw_buffers = num_params++] =
+ const_array(ctx->v16i8, SI_NUM_RW_BUFFERS);
+ params[ctx->param_const_buffers = num_params++] = ctx->i64;
+ params[ctx->param_samplers = num_params++] = ctx->i64;
+ params[ctx->param_images = num_params++] = ctx->i64;
+ params[ctx->param_shader_buffers = num_params++] = ctx->i64;
+ params[ctx->param_tcs_offchip_layout = num_params++] = ctx->i32;
+ params[ctx->param_tcs_out_lds_offsets = num_params++] = ctx->i32;
+ params[ctx->param_tcs_out_lds_layout = num_params++] = ctx->i32;
+ params[ctx->param_vs_state_bits = num_params++] = ctx->i32;
+ params[ctx->param_tcs_offchip_offset = num_params++] = ctx->i32;
+ params[ctx->param_tcs_factor_offset = num_params++] = ctx->i32;
+ last_sgpr = num_params - 1;
params[num_params++] = ctx->i32; /* patch index within the wave (REL_PATCH_ID) */
params[num_params++] = ctx->i32; /* invocation ID within the patch */
struct si_shader *shader,
struct pipe_debug_callback *debug)
{
- union si_shader_part_key epilog_key;
+ if (sscreen->b.chip_class >= GFX9) {
+ struct si_shader *ls_main_part =
+ shader->key.part.tcs.ls->main_shader_part_ls;
+
+ if (!si_get_vs_prolog(sscreen, tm, shader, debug, ls_main_part,
+ &shader->key.part.tcs.ls_prolog))
+ return false;
+
+ shader->previous_stage = ls_main_part;
+ }
/* Get the epilog. */
+ union si_shader_part_key epilog_key;
memset(&epilog_key, 0, sizeof(epilog_key));
epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
LLVMTypeRef params[16+8*4+3];
LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
- int last_sgpr, num_params, i;
+ int last_sgpr, num_params = 0, i;
struct si_ps_exports exp = {};
/* Declare input SGPRs. */
- params[SI_PARAM_RW_BUFFERS] = ctx->i64;
- params[SI_PARAM_CONST_BUFFERS] = ctx->i64;
- params[SI_PARAM_SAMPLERS] = ctx->i64;
- params[SI_PARAM_IMAGES] = ctx->i64;
- params[SI_PARAM_SHADER_BUFFERS] = ctx->i64;
+ params[ctx->param_rw_buffers = num_params++] = ctx->i64;
+ params[ctx->param_const_buffers = num_params++] = ctx->i64;
+ params[ctx->param_samplers = num_params++] = ctx->i64;
+ params[ctx->param_images = num_params++] = ctx->i64;
+ params[ctx->param_shader_buffers = num_params++] = ctx->i64;
+ assert(num_params == SI_PARAM_ALPHA_REF);
params[SI_PARAM_ALPHA_REF] = ctx->f32;
last_sgpr = SI_PARAM_ALPHA_REF;
shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
shader->prolog->config.num_vgprs);
}
+ if (shader->previous_stage) {
+ shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
+ shader->previous_stage->config.num_sgprs);
+ shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
+ shader->previous_stage->config.num_vgprs);
+ shader->config.spilled_sgprs =
+ MAX2(shader->config.spilled_sgprs,
+ shader->previous_stage->config.spilled_sgprs);
+ shader->config.spilled_vgprs =
+ MAX2(shader->config.spilled_vgprs,
+ shader->previous_stage->config.spilled_vgprs);
+ shader->config.private_mem_vgprs =
+ MAX2(shader->config.private_mem_vgprs,
+ shader->previous_stage->config.private_mem_vgprs);
+ shader->config.scratch_bytes_per_wave =
+ MAX2(shader->config.scratch_bytes_per_wave,
+ shader->previous_stage->config.scratch_bytes_per_wave);
+ shader->info.uses_instanceid |=
+ shader->previous_stage->info.uses_instanceid;
+ }
if (shader->epilog) {
shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
shader->epilog->config.num_sgprs);