struct lp_build_tgsi_context *bld_base,
struct lp_build_emit_data *emit_data);
-static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
- FILE *f);
+static void si_dump_shader_key(const struct si_shader *shader, FILE *f);
static void si_build_vs_prolog_function(struct si_shader_context *ctx,
union si_shader_part_key *key);
return false;
}
-static bool is_merged_shader(struct si_shader_context *ctx)
+/** Whether the shader runs as a combination of multiple API shaders */
+static bool is_multi_part_shader(struct si_shader_context *ctx)
{
if (ctx->screen->info.chip_class <= GFX8)
return false;
ctx->type == PIPE_SHADER_GEOMETRY;
}
+/** Whether the shader runs on a merged HW stage (LSHS or ESGS) */
+static bool is_merged_shader(struct si_shader_context *ctx)
+{
+ return ctx->shader->key.as_ngg || is_multi_part_shader(ctx);
+}
+
void si_init_function_info(struct si_function_info *fninfo)
{
fninfo->num_params = 0;
assert(!"invalid generic index");
return 0;
- case TGSI_SEMANTIC_PSIZE:
- return SI_MAX_IO_GENERIC + 1;
- case TGSI_SEMANTIC_CLIPDIST:
- assert(index <= 1);
- return SI_MAX_IO_GENERIC + 2 + index;
case TGSI_SEMANTIC_FOG:
- return SI_MAX_IO_GENERIC + 4;
- case TGSI_SEMANTIC_LAYER:
- return SI_MAX_IO_GENERIC + 5;
- case TGSI_SEMANTIC_VIEWPORT_INDEX:
- return SI_MAX_IO_GENERIC + 6;
- case TGSI_SEMANTIC_PRIMID:
- return SI_MAX_IO_GENERIC + 7;
+ return SI_MAX_IO_GENERIC + 1;
case TGSI_SEMANTIC_COLOR:
assert(index < 2);
- return SI_MAX_IO_GENERIC + 8 + index;
+ return SI_MAX_IO_GENERIC + 2 + index;
case TGSI_SEMANTIC_BCOLOR:
assert(index < 2);
/* If it's a varying, COLOR and BCOLOR alias. */
if (is_varying)
- return SI_MAX_IO_GENERIC + 8 + index;
+ return SI_MAX_IO_GENERIC + 2 + index;
else
- return SI_MAX_IO_GENERIC + 10 + index;
+ return SI_MAX_IO_GENERIC + 4 + index;
case TGSI_SEMANTIC_TEXCOORD:
assert(index < 8);
- STATIC_ASSERT(SI_MAX_IO_GENERIC + 12 + 8 <= 63);
- return SI_MAX_IO_GENERIC + 12 + index;
+ return SI_MAX_IO_GENERIC + 6 + index;
+
+ /* These are rarely used between LS and HS or ES and GS. */
+ case TGSI_SEMANTIC_CLIPDIST:
+ assert(index < 2);
+ return SI_MAX_IO_GENERIC + 6 + 8 + index;
case TGSI_SEMANTIC_CLIPVERTEX:
- return 63;
+ return SI_MAX_IO_GENERIC + 6 + 8 + 2;
+ case TGSI_SEMANTIC_PSIZE:
+ return SI_MAX_IO_GENERIC + 6 + 8 + 3;
+
+ /* These can't be written by LS, HS, or ES. */
+ case TGSI_SEMANTIC_LAYER:
+ return SI_MAX_IO_GENERIC + 6 + 8 + 4;
+ case TGSI_SEMANTIC_VIEWPORT_INDEX:
+ return SI_MAX_IO_GENERIC + 6 + 8 + 5;
+ case TGSI_SEMANTIC_PRIMID:
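+ /* The resulting index must fit into 64-bit output masks such as
+  * opt.kill_outputs, hence the bound checked by the STATIC_ASSERT
+  * below. */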
+ STATIC_ASSERT(SI_MAX_IO_GENERIC + 6 + 8 + 6 <= 63);
+ return SI_MAX_IO_GENERIC + 6 + 8 + 6;
default:
fprintf(stderr, "invalid semantic name = %u\n", semantic_name);
assert(!"invalid semantic name");
tmp = ac_build_opencoded_load_format(
&ctx->ac, fix_fetch.u.log_size, fix_fetch.u.num_channels_m1 + 1,
fix_fetch.u.format, fix_fetch.u.reverse, !opencode,
- t_list, vertex_index, ctx->ac.i32_0, ctx->ac.i32_0,
- false, false, true);
+ t_list, vertex_index, ctx->ac.i32_0, ctx->ac.i32_0, 0, true);
for (unsigned i = 0; i < 4; ++i)
out[i] = LLVMBuildExtractElement(ctx->ac.builder, tmp, LLVMConstInt(ctx->i32, i, false), "");
return;
for (unsigned i = 0; i < num_fetches; ++i) {
LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);
fetches[i] = ac_build_buffer_load_format(&ctx->ac, t_list, vertex_index, voffset,
- channels_per_fetch, false, true);
+ channels_per_fetch, 0, true);
}
if (num_fetches == 1 && channels_per_fetch > 1) {
if (swizzle == ~0) {
value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
- 0, 1, 0, can_speculate, false);
+ 0, ac_glc, can_speculate, false);
return LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
}
if (!llvm_type_is_64bit(ctx, type)) {
value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
- 0, 1, 0, can_speculate, false);
+ 0, ac_glc, can_speculate, false);
value = LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
return LLVMBuildExtractElement(ctx->ac.builder, value,
}
value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
- swizzle * 4, 1, 0, can_speculate, false);
+ swizzle * 4, ac_glc, can_speculate, false);
value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
- swizzle * 4 + 4, 1, 0, can_speculate, false);
+ swizzle * 4 + 4, ac_glc, can_speculate, false);
return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
}
LLVMConstInt(ctx->i32, tf_offset, 0), "");
}
+ uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+ S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+ S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
+
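+ /* GFX10 descriptors use a single combined FORMAT field instead of
+  * NUM_FORMAT/DATA_FORMAT and must set RESOURCE_LEVEL=1; OOB_SELECT
+  * picks the out-of-bounds check mode. */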
+ if (ctx->screen->info.chip_class >= GFX10)
+ rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
+ S_008F0C_OOB_SELECT(3) |
+ S_008F0C_RESOURCE_LEVEL(1);
+ else
+ rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+
LLVMValueRef desc[4];
desc[0] = addr;
desc[1] = LLVMConstInt(ctx->i32,
S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);
desc[2] = LLVMConstInt(ctx->i32, 0xffffffff, 0);
- desc[3] = LLVMConstInt(ctx->i32,
- S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
- S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
- S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
- S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
- S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
- S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32), 0);
+ desc[3] = LLVMConstInt(ctx->i32, rsrc3, false);
return ac_build_gather_values(&ctx->ac, desc, 4);
}
if (reg->Register.WriteMask != 0xF && !is_tess_factor) {
ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
buf_addr, base,
- 4 * chan_index, 1, 0, false);
+ 4 * chan_index, ac_glc, false);
}
/* Write tess factors into VGPRs for the epilog. */
LLVMValueRef value = ac_build_gather_values(&ctx->ac,
values, 4);
ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buf_addr,
- base, 0, 1, 0, false);
+ base, 0, ac_glc, false);
}
}
ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
addr, base,
4 * buffer_store_offset,
- 1, 0, false);
+ ac_glc, false);
}
/* Write tess factors into VGPRs for the epilog. */
LLVMValueRef value = ac_build_gather_values(&ctx->ac,
values, 4);
ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, addr,
- base, 0, 1, 0, false);
+ base, 0, ac_glc, false);
}
}
soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle) * 256, 0);
value = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1, ctx->i32_0,
- vtx_offset, soffset, 0, 1, 0, true, false);
+ vtx_offset, soffset, 0, ac_glc, true, false);
if (llvm_type_is_64bit(ctx, type)) {
LLVMValueRef value2;
soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle + 1) * 256, 0);
value2 = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1,
ctx->i32_0, vtx_offset, soffset,
- 0, 1, 0, true, false);
+ 0, ac_glc, true, false);
return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
}
return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
LLVMValueRef offset)
{
return ac_build_buffer_load(&ctx->ac, resource, 1, NULL, offset, NULL,
- 0, 0, 0, true, true);
+ 0, 0, true, true);
}
static LLVMValueRef load_sample_position(struct ac_shader_abi *abi, LLVMValueRef sample_id)
break;
case TGSI_SEMANTIC_INVOCATIONID:
- if (ctx->type == PIPE_SHADER_TESS_CTRL)
+ if (ctx->type == PIPE_SHADER_TESS_CTRL) {
value = unpack_llvm_param(ctx, ctx->abi.tcs_rel_ids, 8, 5);
- else if (ctx->type == PIPE_SHADER_GEOMETRY)
- value = ctx->abi.gs_invocation_id;
- else
+ } else if (ctx->type == PIPE_SHADER_GEOMETRY) {
+ if (ctx->screen->info.chip_class >= GFX10) {
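+ /* Only the low 7 bits of this VGPR hold the
+  * invocation ID on GFX10. */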
+ value = LLVMBuildAnd(ctx->ac.builder,
+ ctx->abi.gs_invocation_id,
+ LLVMConstInt(ctx->i32, 127, 0), "");
+ } else {
+ value = ctx->abi.gs_invocation_id;
+ }
+ } else {
assert(!"INVOCATIONID not implemented");
+ }
break;
case TGSI_SEMANTIC_POSITION:
desc1 = LLVMConstInt(ctx->i32,
S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);
+ uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+ S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+ S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
+
+ if (ctx->screen->info.chip_class >= GFX10)
+ rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
+ S_008F0C_OOB_SELECT(3) |
+ S_008F0C_RESOURCE_LEVEL(1);
+ else
+ rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+
LLVMValueRef desc_elems[] = {
desc0,
desc1,
LLVMConstInt(ctx->i32, (sel->info.const_file_max[0] + 1) * 16, 0),
- LLVMConstInt(ctx->i32,
- S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
- S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
- S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
- S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
- S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
- S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32), 0)
+ LLVMConstInt(ctx->i32, rsrc3, false)
};
return ac_build_gather_values(&ctx->ac, desc_elems, 4);
break;
case V_028714_SPI_SHADER_32_AR:
- args->enabled_channels = 0x9; /* writemask */
- args->out[0] = values[0];
- args->out[3] = values[3];
+ if (ctx->screen->info.chip_class >= GFX10) {
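+ /* GFX10 packs R and A into the first two channels
+  * instead of channels 0 and 3. */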
+ args->enabled_channels = 0x3; /* writemask */
+ args->out[0] = values[0];
+ args->out[1] = values[3];
+ } else {
+ args->enabled_channels = 0x9; /* writemask */
+ args->out[0] = values[0];
+ args->out[3] = values[3];
+ }
break;
case V_028714_SPI_SHADER_FP16_ABGR:
}
}
-static void emit_streamout_output(struct si_shader_context *ctx,
- LLVMValueRef const *so_buffers,
- LLVMValueRef const *so_write_offsets,
- struct pipe_stream_output *stream_out,
- struct si_shader_output_values *shader_out)
+void si_emit_streamout_output(struct si_shader_context *ctx,
+ LLVMValueRef const *so_buffers,
+ LLVMValueRef const *so_write_offsets,
+ struct pipe_stream_output *stream_out,
+ struct si_shader_output_values *shader_out)
{
unsigned buf_idx = stream_out->output_buffer;
unsigned start = stream_out->start_component;
vdata, num_comps,
so_write_offsets[buf_idx],
ctx->i32_0,
- stream_out->dst_offset * 4, 1, 1, false);
+ stream_out->dst_offset * 4, ac_glc | ac_slc, false);
}
/**
if (stream != so->output[i].stream)
continue;
- emit_streamout_output(ctx, so_buffers, so_write_offset,
- &so->output[i], &outputs[reg]);
+ si_emit_streamout_output(ctx, so_buffers, so_write_offset,
+ &so->output[i], &outputs[reg]);
}
}
lp_build_endif(&if_ctx);
/* Write the misc vector (point size, edgeflag, layer, viewport). */
if (shader->selector->info.writes_psize ||
- shader->selector->info.writes_edgeflag ||
+ shader->selector->pos_writes_edgeflag ||
shader->selector->info.writes_viewport_index ||
shader->selector->info.writes_layer) {
pos_args[1].enabled_channels = shader->selector->info.writes_psize |
- (shader->selector->info.writes_edgeflag << 1) |
+ (shader->selector->pos_writes_edgeflag << 1) |
(shader->selector->info.writes_layer << 2);
pos_args[1].valid_mask = 0; /* EXEC mask */
if (shader->selector->info.writes_psize)
pos_args[1].out[0] = psize_value;
- if (shader->selector->info.writes_edgeflag) {
+ if (shader->selector->pos_writes_edgeflag) {
/* The output is a float, but the hw expects an integer
* with the first bit containing the edge flag. */
edgeflag_value = LLVMBuildFPToUI(ctx->ac.builder,
if (pos_args[i].out[0])
shader->info.nr_pos_exports++;
+ /* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
+ * Setting valid_mask=1 prevents it and has no other effect.
+ */
+ if (ctx->screen->info.family == CHIP_NAVI10 ||
+ ctx->screen->info.family == CHIP_NAVI12 ||
+ ctx->screen->info.family == CHIP_NAVI14)
+ pos_args[0].valid_mask = 1;
+
pos_idx = 0;
for (i = 0; i < 4; i++) {
if (!pos_args[i].out[0])
LLVMValueRef value = lshs_lds_load(bld_base, ctx->ac.i32, ~0, lds_ptr);
ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buffer_addr,
- buffer_offset, 0, 1, 0, false);
+ buffer_offset, 0, ac_glc, false);
}
}
ac_build_buffer_store_dword(&ctx->ac, buffer,
LLVMConstInt(ctx->i32, 0x80000000, 0),
1, ctx->i32_0, tf_base,
- offset, 1, 0, false);
+ offset, ac_glc, false);
offset += 4;
}
/* Store the tessellation factors. */
ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
MIN2(stride, 4), byteoffset, tf_base,
- offset, 1, 0, false);
+ offset, ac_glc, false);
offset += 16;
if (vec1)
ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
stride - 4, byteoffset, tf_base,
- offset, 1, 0, false);
+ offset, ac_glc, false);
/* Store the tess factors into the offchip buffer if TES reads them. */
if (shader->key.part.tcs.epilog.tes_reads_tess_factors) {
ac_build_buffer_store_dword(&ctx->ac, buf, outer_vec,
outer_comps, tf_outer_offset,
- base, 0, 1, 0, false);
+ base, 0, ac_glc, false);
if (inner_comps) {
param_inner = si_shader_io_get_unique_index_patch(
TGSI_SEMANTIC_TESSINNER, 0);
ac_build_gather_values(&ctx->ac, inner, inner_comps);
ac_build_buffer_store_dword(&ctx->ac, buf, inner_vec,
inner_comps, tf_inner_offset,
- base, 0, 1, 0, false);
+ base, 0, ac_glc, false);
}
}
/* Pass GS inputs from ES to GS on GFX9. */
static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
{
+ LLVMBuilderRef builder = ctx->ac.builder;
LLVMValueRef ret = ctx->return_value;
ret = si_insert_input_ptr(ctx, ret, 0, 0);
ret = si_insert_input_ptr(ctx, ret, 1, 1);
- ret = si_insert_input_ret(ctx, ret, ctx->param_gs2vs_offset, 2);
+ if (ctx->shader->key.as_ngg)
+ ret = LLVMBuildInsertValue(builder, ret, ctx->gs_tg_info, 2, "");
+ else
+ ret = si_insert_input_ret(ctx, ret, ctx->param_gs2vs_offset, 2);
ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);
ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);
ret = si_insert_input_ptr(ctx, ret,
ctx->param_bindless_samplers_and_images,
8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
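+ /* The NGG code also reads the VS state bits in the GS part
+  * on GFX10 (e.g. for the output primitive type). */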
+ if (ctx->screen->info.chip_class >= GFX10) {
+ ret = si_insert_input_ptr(ctx, ret, ctx->param_vs_state_bits,
+ 8 + SI_SGPR_VS_STATE_BITS);
+ }
unsigned vgpr;
if (ctx->type == PIPE_SHADER_VERTEX)
ctx->esgs_ring,
out_val, 1, NULL, soffset,
(4 * param + chan) * 4,
- 1, 1, true);
+ ac_glc | ac_slc, true);
}
}
static void emit_gs_epilogue(struct si_shader_context *ctx)
{
+ if (ctx->shader->key.as_ngg) {
+ gfx10_ngg_gs_emit_epilogue(ctx);
+ return;
+ }
+
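+ /* Make sure previous stores are visible before signalling GS_DONE. */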
+ if (ctx->screen->info.chip_class >= GFX10)
+ LLVMBuildFence(ctx->ac.builder, LLVMAtomicOrderingRelease, false, "");
+
ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
si_get_gs_wave_id(ctx));
}
}
- if (ctx->shader->selector->so.num_outputs)
+ if (ctx->ac.chip_class <= GFX9 &&
+ ctx->shader->selector->so.num_outputs)
si_llvm_emit_streamout(ctx, outputs, i, 0);
/* Export PrimitiveID. */
struct si_shader_context *ctx = si_shader_context(bld_base);
LLVMValueRef src0 = lp_build_emit_fetch(bld_base, emit_data->inst, 0, 0);
unsigned flags = LLVMConstIntGetZExtValue(src0);
- unsigned waitcnt = NOOP_WAITCNT;
+ unsigned wait_flags = 0;
if (flags & TGSI_MEMBAR_THREAD_GROUP)
- waitcnt &= VM_CNT & LGKM_CNT;
+ wait_flags |= AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE;
if (flags & (TGSI_MEMBAR_ATOMIC_BUFFER |
TGSI_MEMBAR_SHADER_BUFFER |
TGSI_MEMBAR_SHADER_IMAGE))
- waitcnt &= VM_CNT;
+ wait_flags |= AC_WAIT_VLOAD | AC_WAIT_VSTORE;
if (flags & TGSI_MEMBAR_SHARED)
- waitcnt &= LGKM_CNT;
+ wait_flags |= AC_WAIT_LGKM;
- if (waitcnt != NOOP_WAITCNT)
- ac_build_waitcnt(&ctx->ac, waitcnt);
+ ac_build_waitcnt(&ctx->ac, wait_flags);
}
static void clock_emit(
LLVMValueRef *addrs)
{
struct si_shader_context *ctx = si_shader_context_from_abi(abi);
+
+ if (ctx->shader->key.as_ngg) {
+ gfx10_ngg_gs_emit_vertex(ctx, stream, addrs);
+ return;
+ }
+
struct tgsi_shader_info *info = &ctx->shader->selector->info;
struct si_shader *shader = ctx->shader;
struct lp_build_if_state if_state;
ctx->gsvs_ring[stream],
out_val, 1,
voffset, soffset, 0,
- 1, 1, true);
+ ac_glc | ac_slc, true);
}
}
{
struct si_shader_context *ctx = si_shader_context_from_abi(abi);
+ if (ctx->shader->key.as_ngg) {
+ LLVMBuildStore(ctx->ac.builder, ctx->ac.i32_0, ctx->gs_curprim_verts[stream]);
+ return;
+ }
+
/* Signal primitive cut */
ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
si_get_gs_wave_id(ctx));
*/
if (ctx->screen->info.chip_class == GFX6 &&
ctx->type == PIPE_SHADER_TESS_CTRL) {
- ac_build_waitcnt(&ctx->ac, LGKM_CNT & VM_CNT);
+ ac_build_waitcnt(&ctx->ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
return;
}
struct pipe_stream_output_info *so,
struct si_function_info *fninfo)
{
- int i;
+ if (ctx->ac.chip_class >= GFX10)
+ return;
/* Streamout SGPRs. */
if (so->num_outputs) {
ctx->param_streamout_write_index = add_arg(fninfo, ARG_SGPR, ctx->ac.i32);
}
/* A streamout buffer offset is loaded if the stride is non-zero. */
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
if (!so->stride[i])
continue;
static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
{
switch (shader->selector->type) {
+ case PIPE_SHADER_VERTEX:
+ case PIPE_SHADER_TESS_EVAL:
+ return shader->key.as_ngg ? 128 : 0;
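+ /* NGG subgroups can use barriers across up to 128 threads;
+  * 0 means no workgroup (single-wave) semantics are needed. */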
+
case PIPE_SHADER_TESS_CTRL:
/* Return this so that LLVM doesn't remove s_barrier
* instructions on chips where we use s_barrier. */
- return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 64;
+ return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 0;
case PIPE_SHADER_GEOMETRY:
- return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 64;
+ return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 0;
case PIPE_SHADER_COMPUTE:
break; /* see below */
add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.vertex_id);
if (shader->key.as_ls) {
ctx->param_rel_auto_id = add_arg(fninfo, ARG_VGPR, ctx->i32);
+ if (ctx->screen->info.chip_class >= GFX10) {
+ add_arg(fninfo, ARG_VGPR, ctx->i32); /* user VGPR */
+ add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.instance_id);
+ } else {
+ add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.instance_id);
+ add_arg(fninfo, ARG_VGPR, ctx->i32); /* unused */
+ }
+ } else if (ctx->screen->info.chip_class == GFX10 &&
+ !shader->is_gs_copy_shader) {
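+ /* GFX10 uses a fixed VGPR layout here:
+  * VertexID, user VGPR, user VGPR, InstanceID. */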
+ add_arg(fninfo, ARG_VGPR, ctx->i32); /* user VGPR */
+ add_arg(fninfo, ARG_VGPR, ctx->i32); /* user VGPR */
add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.instance_id);
} else {
add_arg_assign(fninfo, ARG_VGPR, ctx->i32, &ctx->abi.instance_id);
ctx->param_vs_prim_id = add_arg(fninfo, ARG_VGPR, ctx->i32);
+ add_arg(fninfo, ARG_VGPR, ctx->i32); /* unused */
}
- add_arg(fninfo, ARG_VGPR, ctx->i32); /* unused */
if (!shader->is_gs_copy_shader) {
/* Vertex load indices. */
if (ctx->screen->info.chip_class >= GFX9) {
if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
- else if (shader->key.as_es || type == PIPE_SHADER_GEOMETRY)
+ else if (shader->key.as_es || shader->key.as_ngg || type == PIPE_SHADER_GEOMETRY)
type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
}
/* SPI_SHADER_USER_DATA_ADDR_LO/HI_GS */
declare_per_stage_desc_pointers(ctx, &fninfo,
ctx->type == PIPE_SHADER_GEOMETRY);
- ctx->param_gs2vs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
+
+ if (ctx->shader->key.as_ngg)
+ add_arg_assign(&fninfo, ARG_SGPR, ctx->i32, &ctx->gs_tg_info);
+ else
+ ctx->param_gs2vs_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
+
ctx->param_merged_wave_info = add_arg(&fninfo, ARG_SGPR, ctx->i32);
ctx->param_tcs_offchip_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
ctx->param_merged_scratch_offset = add_arg(&fninfo, ARG_SGPR, ctx->i32);
add_arg(&fninfo, ARG_SGPR, ctx->i32); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
declare_global_desc_pointers(ctx, &fninfo);
- declare_per_stage_desc_pointers(ctx, &fninfo,
- (ctx->type == PIPE_SHADER_VERTEX ||
- ctx->type == PIPE_SHADER_TESS_EVAL));
+ if (ctx->type != PIPE_SHADER_VERTEX || !vs_blit_property) {
+ declare_per_stage_desc_pointers(ctx, &fninfo,
+ (ctx->type == PIPE_SHADER_VERTEX ||
+ ctx->type == PIPE_SHADER_TESS_EVAL));
+ }
+
if (ctx->type == PIPE_SHADER_VERTEX) {
- declare_vs_specific_input_sgprs(ctx, &fninfo);
+ if (vs_blit_property)
+ declare_vs_blit_inputs(ctx, &fninfo, vs_blit_property);
+ else
+ declare_vs_specific_input_sgprs(ctx, &fninfo);
} else {
ctx->param_vs_state_bits = add_arg(&fninfo, ARG_SGPR, ctx->i32);
ctx->param_tcs_offchip_layout = add_arg(&fninfo, ARG_SGPR, ctx->i32);
declare_tes_input_vgprs(ctx, &fninfo);
}
- if (ctx->type == PIPE_SHADER_VERTEX ||
- ctx->type == PIPE_SHADER_TESS_EVAL) {
+ if (ctx->shader->key.as_es &&
+ (ctx->type == PIPE_SHADER_VERTEX ||
+ ctx->type == PIPE_SHADER_TESS_EVAL)) {
unsigned num_user_sgprs;
if (ctx->type == PIPE_SHADER_VERTEX)
}
}
+/* Ensure that the ESGS ring is declared.
+ *
+ * We declare it with 64KB alignment as a hint that the
+ * pointer value will always be 0.
+ */
+static void declare_esgs_ring(struct si_shader_context *ctx)
+{
+ if (ctx->esgs_ring)
+ return;
+
+ assert(!LLVMGetNamedGlobal(ctx->ac.module, "esgs_ring"));
+
+ ctx->esgs_ring = LLVMAddGlobalInAddressSpace(
+ ctx->ac.module, LLVMArrayType(ctx->i32, 0),
+ "esgs_ring",
+ AC_ADDR_SPACE_LDS);
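+ /* External linkage lets ac_rtld assign the final LDS offset. */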
+ LLVMSetLinkage(ctx->esgs_ring, LLVMExternalLinkage);
+ LLVMSetAlignment(ctx->esgs_ring, 64 * 1024);
+}
+
/**
* Load ESGS and GSVS ring buffer resource descriptors and save the variables
* for later use.
ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
} else {
if (USE_LDS_SYMBOLS && HAVE_LLVM >= 0x0900) {
- /* Declare the ESGS ring as an explicit LDS symbol.
- * For monolithic shaders, we declare the ring only once.
- *
- * We declare it with 64KB alignment as a hint that the
- * pointer value will always be 0.
- */
- ctx->esgs_ring = LLVMAddGlobalInAddressSpace(
- ctx->ac.module, LLVMArrayType(ctx->i32, 0),
- "esgs_ring",
- AC_ADDR_SPACE_LDS);
- LLVMSetAlignment(ctx->esgs_ring, 64 * 1024);
+ /* Declare the ESGS ring as an explicit LDS symbol. */
+ declare_esgs_ring(ctx);
} else {
ac_declare_lds_as_pointer(&ctx->ac);
ctx->esgs_ring = ctx->ac.lds;
ring = LLVMBuildInsertElement(builder, ring,
LLVMConstInt(ctx->i32, num_records, 0),
LLVMConstInt(ctx->i32, 2, 0), "");
+
+ uint32_t rsrc3 =
+ S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+ S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+ S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
+ S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
+ S_008F0C_ADD_TID_ENABLE(1);
+
+ if (ctx->ac.chip_class >= GFX10) {
+ rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
+ S_008F0C_OOB_SELECT(2) |
+ S_008F0C_RESOURCE_LEVEL(1);
+ } else {
+ rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
+ S_008F0C_ELEMENT_SIZE(1); /* element_size = 4 (bytes) */
+ }
+
ring = LLVMBuildInsertElement(builder, ring,
- LLVMConstInt(ctx->i32,
- S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
- S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
- S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
- S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
- S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
- S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
- S_008F0C_ELEMENT_SIZE(1) | /* element_size = 4 (bytes) */
- S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
- S_008F0C_ADD_TID_ENABLE(1),
- 0),
+ LLVMConstInt(ctx->i32, rsrc3, false),
LLVMConstInt(ctx->i32, 3, 0), "");
ctx->gsvs_ring[stream] = ring;
struct ac_rtld_binary *rtld)
{
const struct si_shader_selector *sel = shader->selector;
- enum pipe_shader_type shader_type = sel ? sel->type : PIPE_SHADER_COMPUTE;
const char *part_elfs[5];
size_t part_sizes[5];
unsigned num_parts = 0;
#undef add_part
- struct ac_rtld_symbol lds_symbols[1];
+ struct ac_rtld_symbol lds_symbols[2];
unsigned num_lds_symbols = 0;
- if (sel && screen->info.chip_class >= GFX9 &&
- sel->type == PIPE_SHADER_GEOMETRY && !shader->is_gs_copy_shader) {
+ if (sel && screen->info.chip_class >= GFX9 && !shader->is_gs_copy_shader &&
+ (sel->type == PIPE_SHADER_GEOMETRY || shader->key.as_ngg)) {
/* We add this symbol even on LLVM <= 8 to ensure that
* shader->config.lds_size is set correctly below.
*/
sym->align = 64 * 1024;
}
+ if (shader->key.as_ngg && sel->type == PIPE_SHADER_GEOMETRY) {
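+ /* Resolves the external "ngg_emit" LDS symbol that the NGG GS
+  * declares for buffering emitted vertices. */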
+ struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
+ sym->name = "ngg_emit";
+ sym->size = shader->ngg.ngg_emit_size * 4;
+ sym->align = 4;
+ }
+
bool ok = ac_rtld_open(rtld, (struct ac_rtld_open_info){
.info = &screen->info,
.options = {
.halt_at_entry = screen->options.halt_shaders,
},
- .shader_type = tgsi_processor_to_shader_stage(shader_type),
+ .shader_type = tgsi_processor_to_shader_stage(sel->type),
.num_parts = num_parts,
.elf_ptrs = part_elfs,
.elf_sizes = part_sizes,
return rtld.rx_size;
}
-
static bool si_get_external_symbol(void *data, const char *name, uint64_t *value)
{
uint64_t *scratch_va = data;
static void si_shader_dump_disassembly(struct si_screen *screen,
const struct si_shader_binary *binary,
+ enum pipe_shader_type shader_type,
struct pipe_debug_callback *debug,
const char *name, FILE *file)
{
if (!ac_rtld_open(&rtld_binary, (struct ac_rtld_open_info){
.info = &screen->info,
+ .shader_type = tgsi_processor_to_shader_stage(shader_type),
.num_parts = 1,
.elf_ptrs = &binary->elf_buffer,
.elf_sizes = &binary->elf_size }))
DIV_ROUND_UP(max_workgroup_size, 64);
}
break;
+ default:;
}
/* Compute the per-SIMD wave counts. */
const struct ac_shader_config *conf = &shader->config;
if (screen->options.debug_disassembly)
- si_shader_dump_disassembly(screen, &shader->binary, debug, "main", NULL);
+ si_shader_dump_disassembly(screen, &shader->binary,
+ shader->selector->type,
+ debug, "main", NULL);
pipe_debug_message(debug, SHADER_INFO,
"Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
static void si_shader_dump_stats(struct si_screen *sscreen,
struct si_shader *shader,
- unsigned processor,
FILE *file,
bool check_debug_option)
{
const struct ac_shader_config *conf = &shader->config;
if (!check_debug_option ||
- si_can_dump_shader(sscreen, processor)) {
- if (processor == PIPE_SHADER_FRAGMENT) {
+ si_can_dump_shader(sscreen, shader->selector->type)) {
+ if (shader->selector->type == PIPE_SHADER_FRAGMENT) {
fprintf(file, "*** SHADER CONFIG ***\n"
"SPI_PS_INPUT_ADDR = 0x%04x\n"
"SPI_PS_INPUT_ENA = 0x%04x\n",
}
}
-const char *si_get_shader_name(const struct si_shader *shader, unsigned processor)
+const char *si_get_shader_name(const struct si_shader *shader)
{
- switch (processor) {
+ switch (shader->selector->type) {
case PIPE_SHADER_VERTEX:
if (shader->key.as_es)
return "Vertex Shader as ES";
return "Vertex Shader as LS";
else if (shader->key.opt.vs_as_prim_discard_cs)
return "Vertex Shader as Primitive Discard CS";
+ else if (shader->key.as_ngg)
+ return "Vertex Shader as ESGS";
else
return "Vertex Shader as VS";
case PIPE_SHADER_TESS_CTRL:
case PIPE_SHADER_TESS_EVAL:
if (shader->key.as_es)
return "Tessellation Evaluation Shader as ES";
+ else if (shader->key.as_ngg)
+ return "Tessellation Evaluation Shader as ESGS";
else
return "Tessellation Evaluation Shader as VS";
case PIPE_SHADER_GEOMETRY:
}
void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
- struct pipe_debug_callback *debug, unsigned processor,
+ struct pipe_debug_callback *debug,
FILE *file, bool check_debug_option)
{
+ enum pipe_shader_type shader_type = shader->selector->type;
+
if (!check_debug_option ||
- si_can_dump_shader(sscreen, processor))
- si_dump_shader_key(processor, shader, file);
+ si_can_dump_shader(sscreen, shader_type))
+ si_dump_shader_key(shader, file);
if (!check_debug_option && shader->binary.llvm_ir_string) {
if (shader->previous_stage &&
shader->previous_stage->binary.llvm_ir_string) {
fprintf(file, "\n%s - previous stage - LLVM IR:\n\n",
- si_get_shader_name(shader, processor));
+ si_get_shader_name(shader));
fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
}
fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
- si_get_shader_name(shader, processor));
+ si_get_shader_name(shader));
fprintf(file, "%s\n", shader->binary.llvm_ir_string);
}
if (!check_debug_option ||
- (si_can_dump_shader(sscreen, processor) &&
+ (si_can_dump_shader(sscreen, shader_type) &&
!(sscreen->debug_flags & DBG(NO_ASM)))) {
- fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));
+ fprintf(file, "\n%s:\n", si_get_shader_name(shader));
if (shader->prolog)
si_shader_dump_disassembly(sscreen, &shader->prolog->binary,
- debug, "prolog", file);
+ shader_type, debug, "prolog", file);
if (shader->previous_stage)
si_shader_dump_disassembly(sscreen, &shader->previous_stage->binary,
- debug, "previous stage", file);
+ shader_type, debug, "previous stage", file);
if (shader->prolog2)
si_shader_dump_disassembly(sscreen, &shader->prolog2->binary,
- debug, "prolog2", file);
+ shader_type, debug, "prolog2", file);
- si_shader_dump_disassembly(sscreen, &shader->binary, debug, "main", file);
+ si_shader_dump_disassembly(sscreen, &shader->binary, shader_type, debug, "main", file);
if (shader->epilog)
si_shader_dump_disassembly(sscreen, &shader->epilog->binary,
- debug, "epilog", file);
+ shader_type, debug, "epilog", file);
fprintf(file, "\n");
}
- si_shader_dump_stats(sscreen, shader, processor, file,
- check_debug_option);
+ si_shader_dump_stats(sscreen, shader, file, check_debug_option);
}
static int si_compile_llvm(struct si_screen *sscreen,
struct ac_llvm_compiler *compiler,
LLVMModuleRef mod,
struct pipe_debug_callback *debug,
- unsigned processor,
+ enum pipe_shader_type shader_type,
const char *name,
bool less_optimized)
{
unsigned count = p_atomic_inc_return(&sscreen->num_compilations);
- if (si_can_dump_shader(sscreen, processor)) {
+ if (si_can_dump_shader(sscreen, shader_type)) {
fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
if (!(sscreen->debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
struct ac_rtld_binary rtld;
if (!ac_rtld_open(&rtld, (struct ac_rtld_open_info){
.info = &sscreen->info,
+ .shader_type = tgsi_processor_to_shader_stage(shader_type),
.num_parts = 1,
.elf_ptrs = &binary->elf_buffer,
.elf_sizes = &binary->elf_size }))
/* Fetch the vertex stream ID. */
LLVMValueRef stream_id;
- if (gs_selector->so.num_outputs)
+ if (ctx.ac.chip_class <= GFX9 && gs_selector->so.num_outputs)
stream_id = si_unpack_param(&ctx, ctx.param_streamout_config, 24, 2);
else
stream_id = ctx.i32_0;
ac_build_buffer_load(&ctx.ac,
ctx.gsvs_ring[0], 1,
ctx.i32_0, voffset,
- soffset, 0, 1, 1,
+ soffset, 0, ac_glc | ac_slc,
true, false);
}
}
/* Streamout and exports. */
- if (gs_selector->so.num_outputs) {
+ if (ctx.ac.chip_class <= GFX9 && gs_selector->so.num_outputs) {
si_llvm_emit_streamout(&ctx, outputs,
gsinfo->num_outputs,
stream);
"GS Copy Shader", false) == 0) {
if (si_can_dump_shader(sscreen, PIPE_SHADER_GEOMETRY))
fprintf(stderr, "GS Copy Shader:\n");
- si_shader_dump(sscreen, ctx.shader, debug,
- PIPE_SHADER_GEOMETRY, stderr, true);
+ si_shader_dump(sscreen, ctx.shader, debug, stderr, true);
if (!ctx.shader->config.scratch_bytes_per_wave)
ok = si_shader_binary_upload(sscreen, ctx.shader, 0);
fprintf(f, "}\n");
}
-static void si_dump_shader_key(unsigned processor, const struct si_shader *shader,
- FILE *f)
+static void si_dump_shader_key(const struct si_shader *shader, FILE *f)
{
const struct si_shader_key *key = &shader->key;
+ enum pipe_shader_type shader_type = shader->selector->type;
fprintf(f, "SHADER KEY\n");
- switch (processor) {
+ switch (shader_type) {
case PIPE_SHADER_VERTEX:
si_dump_shader_key_vs(key, &key->part.vs.prolog,
"part.vs.prolog", f);
assert(0);
}
- if ((processor == PIPE_SHADER_GEOMETRY ||
- processor == PIPE_SHADER_TESS_EVAL ||
- processor == PIPE_SHADER_VERTEX) &&
+ if ((shader_type == PIPE_SHADER_GEOMETRY ||
+ shader_type == PIPE_SHADER_TESS_EVAL ||
+ shader_type == PIPE_SHADER_VERTEX) &&
!key->as_es && !key->as_ls) {
fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
else if (shader->key.opt.vs_as_prim_discard_cs)
ctx->abi.emit_outputs = si_llvm_emit_prim_discard_cs_epilogue;
+ else if (shader->key.as_ngg)
+ ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
else
ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
bld_base->emit_epilogue = si_tgsi_emit_epilogue;
ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
if (shader->key.as_es)
ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
+ else if (shader->key.as_ngg)
+ ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
else
ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
bld_base->emit_epilogue = si_tgsi_emit_epilogue;
create_function(ctx);
preload_ring_buffers(ctx);
+ if (ctx->type == PIPE_SHADER_TESS_CTRL &&
+ sel->tcs_info.tessfactors_are_def_in_all_invocs) {
+ for (unsigned i = 0; i < 6; i++) {
+ ctx->invoc0_tess_factors[i] =
+ ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
+ }
+ }
+
+ if (ctx->type == PIPE_SHADER_GEOMETRY) {
+ for (unsigned i = 0; i < 4; i++) {
+ ctx->gs_next_vertex[i] =
+ ac_build_alloca(&ctx->ac, ctx->i32, "");
+ }
+ if (shader->key.as_ngg) {
+ for (unsigned i = 0; i < 4; ++i) {
+ ctx->gs_curprim_verts[i] =
+ lp_build_alloca(&ctx->gallivm, ctx->ac.i32, "");
+ ctx->gs_generated_prims[i] =
+ lp_build_alloca(&ctx->gallivm, ctx->ac.i32, "");
+ }
+
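+ /* The NGG streamout path needs a larger scratch area. */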
+ unsigned scratch_size = 8;
+ if (sel->so.num_outputs)
+ scratch_size = 44;
+
+ LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, scratch_size);
+ ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
+ ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
+ LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(ai32));
+ LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
+
+ ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(ctx->ac.module,
+ LLVMArrayType(ctx->i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
+ LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
+ LLVMSetAlignment(ctx->gs_ngg_emit, 4);
+ }
+ }
+
+ if (shader->key.as_ngg && ctx->type != PIPE_SHADER_GEOMETRY) {
+ /* Unconditionally declare scratch space base for streamout and
+ * vertex compaction. Whether space is actually allocated is
+ * determined during linking / PM4 creation.
+ *
+ * Add an extra dword per vertex to ensure an odd stride, which
+ * avoids bank conflicts for SoA accesses.
+ */
+ declare_esgs_ring(ctx);
+
+ /* This is really only needed when streamout and/or vertex
+ * compaction is enabled.
+ */
+ LLVMTypeRef asi32 = LLVMArrayType(ctx->i32, 8);
+ ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
+ asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
+ LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(asi32));
+ LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
+ }
+
/* For GFX9 merged shaders:
* - Set EXEC for the first shader. If the prolog is present, set
* EXEC there instead.
*
* For monolithic merged shaders, the first shader is wrapped in an
* if-block together with its prolog in si_build_wrapper_function.
+ *
+ * NGG vertex and tess eval shaders running as the last
+ * vertex/geometry stage handle execution explicitly using
+ * if-statements.
*/
if (ctx->screen->info.chip_class >= GFX9) {
if (!shader->is_monolithic &&
si_init_exec_from_input(ctx,
ctx->param_merged_wave_info, 0);
} else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
- ctx->type == PIPE_SHADER_GEOMETRY) {
- if (!shader->is_monolithic)
+ ctx->type == PIPE_SHADER_GEOMETRY ||
+ shader->key.as_ngg) {
+ LLVMValueRef num_threads;
+ bool nested_barrier;
+
+ if (!shader->is_monolithic ||
+ (ctx->type == PIPE_SHADER_TESS_EVAL &&
+ shader->key.as_ngg))
ac_init_exec_full_mask(&ctx->ac);
- LLVMValueRef num_threads = si_unpack_param(ctx, ctx->param_merged_wave_info, 8, 8);
+ if (ctx->type == PIPE_SHADER_TESS_CTRL ||
+ ctx->type == PIPE_SHADER_GEOMETRY) {
+ if (ctx->type == PIPE_SHADER_GEOMETRY && shader->key.as_ngg) {
+ gfx10_ngg_gs_emit_prologue(ctx);
+ nested_barrier = false;
+ } else {
+ nested_barrier = true;
+ }
+
+ /* Number of patches / primitives */
+ num_threads = si_unpack_param(ctx, ctx->param_merged_wave_info, 8, 8);
+ } else {
+ /* Number of vertices */
+ num_threads = si_unpack_param(ctx, ctx->param_merged_wave_info, 0, 8);
+ nested_barrier = false;
+ }
+
LLVMValueRef ena =
LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
ac_get_thread_id(&ctx->ac), num_threads, "");
lp_build_if(&ctx->merged_wrap_if_state, &ctx->gallivm, ena);
- /* The barrier must execute for all shaders in a
- * threadgroup.
- *
- * Execute the barrier inside the conditional block,
- * so that empty waves can jump directly to s_endpgm,
- * which will also signal the barrier.
- *
- * If the shader is TCS and the TCS epilog is present
- * and contains a barrier, it will wait there and then
- * reach s_endpgm.
- */
- si_llvm_emit_barrier(NULL, bld_base, NULL);
- }
- }
-
- if (ctx->type == PIPE_SHADER_TESS_CTRL &&
- sel->tcs_info.tessfactors_are_def_in_all_invocs) {
- for (unsigned i = 0; i < 6; i++) {
- ctx->invoc0_tess_factors[i] =
- ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
- }
- }
-
- if (ctx->type == PIPE_SHADER_GEOMETRY) {
- int i;
- for (i = 0; i < 4; i++) {
- ctx->gs_next_vertex[i] =
- ac_build_alloca(&ctx->ac, ctx->i32, "");
+ if (nested_barrier) {
+ /* Execute a barrier before the second shader in
+ * a merged shader.
+ *
+ * Execute the barrier inside the conditional block,
+ * so that empty waves can jump directly to s_endpgm,
+ * which will also signal the barrier.
+ *
+ * This is possible on GFX9 because an empty wave
+ * for the second shader does not participate in
+ * the epilogue. With NGG, empty waves may still
+ * be required to export data (e.g. GS output vertices),
+ * so we cannot let them exit early.
+ *
+ * If the shader is TCS and the TCS epilog is present
+ * and contains a barrier, it will wait there and then
+ * reach s_endpgm.
+ */
+ si_llvm_emit_barrier(NULL, bld_base, NULL);
+ }
}
}
key->vs_prolog.last_input = MAX2(1, info->num_inputs) - 1;
key->vs_prolog.as_ls = shader_out->key.as_ls;
key->vs_prolog.as_es = shader_out->key.as_es;
+ key->vs_prolog.as_ngg = shader_out->key.as_ngg;
if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
key->vs_prolog.as_ls = 1;
} else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
key->vs_prolog.as_es = 1;
key->vs_prolog.num_merged_next_stage_vgprs = 5;
+ } else if (shader_out->key.as_ngg) {
+ key->vs_prolog.num_merged_next_stage_vgprs = 5;
}
/* Enable loading the InstanceID VGPR. */
/* Merged shaders are executed conditionally depending
* on the number of enabled threads passed in the input SGPRs. */
- if (is_merged_shader(ctx) && part == 0) {
+ if (is_multi_part_shader(ctx) && part == 0) {
LLVMValueRef ena, count = initial[3];
count = LLVMBuildAnd(builder, count,
ret = ac_build_call(&ctx->ac, parts[part], in, num_params);
- if (is_merged_shader(ctx) &&
+ if (is_multi_part_shader(ctx) &&
part + 1 == next_shader_first_part) {
lp_build_endif(&if_state);
/* Dump TGSI code before doing TGSI->LLVM conversion in case the
* conversion fails. */
- if (si_can_dump_shader(sscreen, sel->info.processor) &&
+ if (si_can_dump_shader(sscreen, sel->type) &&
!(sscreen->debug_flags & DBG(NO_TGSI))) {
if (sel->tokens)
tgsi_dump(sel->tokens, 0);
memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
gs_prolog_key.gs_prolog.is_monolithic = true;
+ gs_prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
si_build_gs_prolog_function(&ctx, &gs_prolog_key);
gs_prolog = ctx.main_fn;
/* Compile to bytecode. */
r = si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler,
ctx.ac.module, debug, ctx.type,
- si_get_shader_name(shader, ctx.type),
+ si_get_shader_name(shader),
si_should_optimize_less(compiler, shader->selector));
si_llvm_dispose(&ctx);
if (r) {
result->key = *key;
struct si_shader shader = {};
- struct si_shader_context ctx;
-
- si_init_shader_ctx(&ctx, sscreen, compiler);
- ctx.shader = &shader;
- ctx.type = type;
switch (type) {
case PIPE_SHADER_VERTEX:
shader.key.as_ls = key->vs_prolog.as_ls;
shader.key.as_es = key->vs_prolog.as_es;
+ shader.key.as_ngg = key->vs_prolog.as_ngg;
break;
case PIPE_SHADER_TESS_CTRL:
assert(!prolog);
break;
case PIPE_SHADER_GEOMETRY:
assert(prolog);
+ shader.key.as_ngg = key->gs_prolog.as_ngg;
break;
case PIPE_SHADER_FRAGMENT:
if (prolog)
unreachable("bad shader part");
}
+ struct si_shader_context ctx;
+ si_init_shader_ctx(&ctx, sscreen, compiler);
+ ctx.shader = &shader;
+ ctx.type = type;
+
build(&ctx, key);
/* Compile. */
}
unsigned vertex_id_vgpr = first_vs_vgpr;
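+ /* GFX10 uses a fixed input VGPR layout with InstanceID always
+  * at offset 3 (see the VGPR declarations). */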
- unsigned instance_id_vgpr = first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1);
+ unsigned instance_id_vgpr =
+ ctx->screen->info.chip_class >= GFX10 ?
+ first_vs_vgpr + 3 :
+ first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1);
ctx->abi.vertex_id = input_vgprs[vertex_id_vgpr];
ctx->abi.instance_id = input_vgprs[instance_id_vgpr];
key->vs_prolog.num_input_sgprs + i, "");
}
+ struct lp_build_if_state wrap_if_state;
+ LLVMValueRef original_ret = ret;
+ bool wrapped = false;
+
+ if (key->vs_prolog.is_monolithic && key->vs_prolog.as_ngg) {
+ LLVMValueRef num_threads;
+ LLVMValueRef ena;
+
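+ /* SGPR 3 contains the merged wave info; bits [7:0] hold
+  * the number of vertices in the wave. */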
+ num_threads = si_unpack_param(ctx, 3, 0, 8);
+ ena = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
+ ac_get_thread_id(&ctx->ac), num_threads, "");
+ lp_build_if(&wrap_if_state, &ctx->gallivm, ena);
+ wrapped = true;
+ }
+
/* Compute vertex load indices from instance divisors. */
LLVMValueRef instance_divisor_constbuf = NULL;
fninfo.num_params + i, "");
}
+ if (wrapped) {
+ lp_build_endif(&wrap_if_state);
+
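+ /* Merge the two paths: threads that were disabled keep the
+  * original, input-only return value. */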
+ LLVMValueRef values[2] = {
+ ret,
+ original_ret
+ };
+ LLVMBasicBlockRef bbs[2] = {
+ wrap_if_state.true_block,
+ wrap_if_state.entry_block
+ };
+ ret = ac_build_phi(&ctx->ac, LLVMTypeOf(ret), 2, values, bbs);
+ }
+
si_llvm_build_ret(ctx, ret);
}
/* Create the function. */
si_create_function(ctx, "tcs_epilog", NULL, 0, &fninfo,
- ctx->screen->info.chip_class >= GFX7 ? 128 : 64);
+ ctx->screen->info.chip_class >= GFX7 ? 128 : 0);
ac_declare_lds_as_pointer(&ctx->ac);
func = ctx->main_fn;
union si_shader_part_key prolog_key;
memset(&prolog_key, 0, sizeof(prolog_key));
prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
+ prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
PIPE_SHADER_GEOMETRY, true,
/* LS, ES, VS are compiled on demand if the main part hasn't been
* compiled for that stage.
*
+ * GS is compiled on demand if the main part hasn't been compiled
+ * for the chosen NGG mode.
+ *
* Vertex shaders are compiled on demand when a vertex fetch
* workaround must be applied.
*/
shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
shader->info.num_input_vgprs);
break;
+ default:;
}
/* Update SGPR and VGPR counts. */
si_calculate_max_simd_waves(shader);
}
- if (sscreen->info.chip_class >= GFX9 && sel->type == PIPE_SHADER_GEOMETRY)
+ if (shader->key.as_ngg) {
+ assert(!shader->key.as_es && !shader->key.as_ls);
+ gfx10_ngg_calculate_subgroup_info(shader);
+ } else if (sscreen->info.chip_class >= GFX9 && sel->type == PIPE_SHADER_GEOMETRY) {
gfx9_get_gs_info(shader->previous_stage_sel, sel, &shader->gs_info);
+ }
si_fix_resource_usage(sscreen, shader);
- si_shader_dump(sscreen, shader, debug, sel->info.processor,
- stderr, true);
+ si_shader_dump(sscreen, shader, debug, stderr, true);
/* Upload. */
if (!si_shader_binary_upload(sscreen, shader, 0)) {