#include "util/u_memory.h"
#include "util/u_prim.h"
+#include "ac_llvm_cull.h"
static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
{
}
void gfx10_ngg_build_export_prim(struct si_shader_context *ctx,
- LLVMValueRef user_edgeflags[3])
+ LLVMValueRef user_edgeflags[3],
+ LLVMValueRef prim_passthrough)
{
- if (gfx10_is_ngg_passthrough(ctx->shader)) {
+ LLVMBuilderRef builder = ctx->ac.builder;
+
+ if (gfx10_is_ngg_passthrough(ctx->shader) ||
+ ctx->shader->key.opt.ngg_culling) {
ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 6001);
{
struct ac_ngg_prim prim = {};
- prim.passthrough = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset);
+ if (prim_passthrough)
+ prim.passthrough = prim_passthrough;
+ else
+ prim.passthrough = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset);
+
+ /* This is only used with NGG culling, which returns the NGG
+ * passthrough prim export encoding.
+ */
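+ /* In the passthrough encoding, vertex index i occupies the 9 bits starting
+ * at bit i*10 and its edge flag is bit 9 + i*10 (SI_NGG_PRIM_EDGE_FLAG_BITS).
+ * The loop below clears the flag bit of every edge the shader disabled;
+ * all other bits pass through the final AND unchanged.
+ */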
+ if (ctx->shader->selector->info.writes_edgeflag) {
+ unsigned all_bits_no_edgeflags = ~SI_NGG_PRIM_EDGE_FLAG_BITS;
+ LLVMValueRef edgeflags = LLVMConstInt(ctx->i32, all_bits_no_edgeflags, 0);
+
+ unsigned num_vertices;
+ ngg_get_vertices_per_prim(ctx, &num_vertices);
+
+ for (unsigned i = 0; i < num_vertices; i++) {
+ unsigned shift = 9 + i*10;
+ LLVMValueRef edge;
+
+ edge = LLVMBuildLoad(builder, user_edgeflags[i], "");
+ edge = LLVMBuildZExt(builder, edge, ctx->i32, "");
+ edge = LLVMBuildShl(builder, edge, LLVMConstInt(ctx->i32, shift, 0), "");
+ edgeflags = LLVMBuildOr(builder, edgeflags, edge, "");
+ }
+ prim.passthrough = LLVMBuildAnd(builder, prim.passthrough, edgeflags, "");
+ }
+
ac_build_export_prim(&ctx->ac, &prim);
}
ac_build_endif(&ctx->ac, 6001);
}
}
+/* LDS layout of ES vertex data for NGG culling. */
+enum {
+ /* Byte 0: Boolean ES thread accepted (unculled) flag, later reused for
+ * the old ES thread ID. Accepted ES threads store their old thread ID
+ * at the LDS slot of their new (compacted) thread ID; after compaction,
+ * each ES thread loads it from there to copy input VGPRs from the old thread.
+ * Byte 1: New ES thread ID, loaded by GS to prepare the prim export value.
+ * Byte 2: TES rel patch ID
+ * Byte 3: Unused
+ */
+ lds_byte0_accept_flag = 0,
+ lds_byte0_old_thread_id = 0,
+ lds_byte1_new_thread_id,
+ lds_byte2_tes_rel_patch_id,
+ lds_byte3_unused,
+
+ lds_packed_data = 0, /* lds_byteN_... */
+
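+ /* The following values are dword indices into the per-vertex LDS slot
+ * (see ngg_nogs_vertex_size for the resulting slot sizes).
+ */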
+ lds_pos_x,
+ lds_pos_y,
+ lds_pos_z,
+ lds_pos_w,
+ lds_pos_x_div_w,
+ lds_pos_y_div_w,
+ /* If VS: */
+ lds_vertex_id,
+ lds_instance_id, /* optional */
+ /* If TES: */
+ lds_tes_u = lds_vertex_id,
+ lds_tes_v = lds_instance_id,
+ lds_tes_patch_id, /* optional */
+};
+
+static LLVMValueRef si_build_gep_i8(struct si_shader_context *ctx,
+ LLVMValueRef ptr, unsigned byte_index)
+{
+ assert(byte_index < 4);
+ LLVMTypeRef pi8 = LLVMPointerType(ctx->i8, AC_ADDR_SPACE_LDS);
+ LLVMValueRef index = LLVMConstInt(ctx->i32, byte_index, 0);
+
+ return LLVMBuildGEP(ctx->ac.builder,
+ LLVMBuildPointerCast(ctx->ac.builder, ptr, pi8, ""),
+ &index, 1, "");
+}
+
static unsigned ngg_nogs_vertex_size(struct si_shader *shader)
{
unsigned lds_vertex_size = 0;
shader->key.mono.u.vs_export_prim_id)
lds_vertex_size = MAX2(lds_vertex_size, 1);
+ if (shader->key.opt.ngg_culling) {
+ if (shader->selector->type == PIPE_SHADER_VERTEX) {
+ STATIC_ASSERT(lds_instance_id + 1 == 9);
+ lds_vertex_size = MAX2(lds_vertex_size, 9);
+ } else {
+ assert(shader->selector->type == PIPE_SHADER_TESS_EVAL);
+
+ if (shader->selector->info.uses_primid ||
+ shader->key.mono.u.vs_export_prim_id) {
+ STATIC_ASSERT(lds_tes_patch_id + 2 == 11);
+ lds_vertex_size = MAX2(lds_vertex_size, 11);
+ } else {
+ STATIC_ASSERT(lds_tes_v + 1 == 9);
+ lds_vertex_size = MAX2(lds_vertex_size, 9);
+ }
+ }
+ }
+
return lds_vertex_size;
}
return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
}
+static void load_bitmasks_2x64(struct si_shader_context *ctx,
+ LLVMValueRef lds_ptr, unsigned dw_offset,
+ LLVMValueRef mask[2], LLVMValueRef *total_bitcount)
+{
+ LLVMBuilderRef builder = ctx->ac.builder;
+ LLVMValueRef ptr64 = LLVMBuildPointerCast(builder, lds_ptr,
+ LLVMPointerType(LLVMArrayType(ctx->i64, 2),
+ AC_ADDR_SPACE_LDS), "");
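+ /* gs_ngg_scratch holds one 32-bit accept mask per wave (up to 4 waves),
+ * so two i64 loads cover the whole 128-bit mask.
+ */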
+ for (unsigned i = 0; i < 2; i++) {
+ LLVMValueRef index = LLVMConstInt(ctx->i32, dw_offset / 2 + i, 0);
+ mask[i] = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ptr64, index), "");
+ }
+
+ /* We get better code if we don't use the 128-bit bitcount. */
+ *total_bitcount = LLVMBuildAdd(builder, ac_build_bit_count(&ctx->ac, mask[0]),
+ ac_build_bit_count(&ctx->ac, mask[1]), "");
+}
+
+/**
+ * Given a total thread count, update total and per-wave thread counts in input SGPRs
+ * and return the per-wave thread count.
+ *
+ * \param new_num_threads Total thread count on the input, per-wave thread count on the output.
+ * \param tg_info tg_info SGPR value
+ * \param tg_info_num_bits the bit size of thread count field in tg_info
+ * \param tg_info_shift the bit offset of the thread count field in tg_info
+ * \param wave_info merged_wave_info SGPR value
+ * \param wave_info_num_bits the bit size of thread count field in merged_wave_info
+ * \param wave_info_shift the bit offset of the thread count field in merged_wave_info
+ */
+static void update_thread_counts(struct si_shader_context *ctx,
+ LLVMValueRef *new_num_threads,
+ LLVMValueRef *tg_info,
+ unsigned tg_info_num_bits,
+ unsigned tg_info_shift,
+ LLVMValueRef *wave_info,
+ unsigned wave_info_num_bits,
+ unsigned wave_info_shift)
+{
+ LLVMBuilderRef builder = ctx->ac.builder;
+
+ /* Update the total thread count. */
+ unsigned tg_info_mask = ~(u_bit_consecutive(0, tg_info_num_bits) << tg_info_shift);
+ *tg_info = LLVMBuildAnd(builder, *tg_info,
+ LLVMConstInt(ctx->i32, tg_info_mask, 0), "");
+ *tg_info = LLVMBuildOr(builder, *tg_info,
+ LLVMBuildShl(builder, *new_num_threads,
+ LLVMConstInt(ctx->i32, tg_info_shift, 0), ""), "");
+
+ /* Update the per-wave thread count. */
+ LLVMValueRef prev_threads = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
+ LLVMConstInt(ctx->i32, ctx->ac.wave_size, 0), "");
+ *new_num_threads = LLVMBuildSub(builder, *new_num_threads, prev_threads, "");
+ *new_num_threads = ac_build_imax(&ctx->ac, *new_num_threads, ctx->i32_0);
+ *new_num_threads = ac_build_imin(&ctx->ac, *new_num_threads,
+ LLVMConstInt(ctx->i32, ctx->ac.wave_size, 0));
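+ /* Example, assuming wave_size = 32 and a total of 50 threads:
+ * wave 0 keeps 32 threads, wave 1 keeps 50 - 32 = 18, waves 2-3 keep 0.
+ */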
+ unsigned wave_info_mask = ~(u_bit_consecutive(0, wave_info_num_bits) << wave_info_shift);
+ *wave_info = LLVMBuildAnd(builder, *wave_info,
+ LLVMConstInt(ctx->i32, wave_info_mask, 0), "");
+ *wave_info = LLVMBuildOr(builder, *wave_info,
+ LLVMBuildShl(builder, *new_num_threads,
+ LLVMConstInt(ctx->i32, wave_info_shift, 0), ""), "");
+}
+
+/**
+ * Cull primitives for NGG VS or TES, then compact vertices, which happens
+ * before the VS or TES main function. Return values for the main function.
+ * The position is also kept in LDS, so that the main shader part can
+ * reload it instead of computing it twice.
+ */
+void gfx10_emit_ngg_culling_epilogue_4x_wave32(struct ac_shader_abi *abi,
+ unsigned max_outputs,
+ LLVMValueRef *addrs)
+{
+ struct si_shader_context *ctx = si_shader_context_from_abi(abi);
+ struct si_shader *shader = ctx->shader;
+ struct si_shader_selector *sel = shader->selector;
+ struct si_shader_info *info = &sel->info;
+ LLVMBuilderRef builder = ctx->ac.builder;
+
+ assert(shader->key.opt.ngg_culling);
+ assert(shader->key.as_ngg);
+ assert(sel->type == PIPE_SHADER_VERTEX ||
+ (sel->type == PIPE_SHADER_TESS_EVAL && !shader->key.as_es));
+
+ LLVMValueRef position[4] = {};
+ for (unsigned i = 0; i < info->num_outputs; i++) {
+ switch (info->output_semantic_name[i]) {
+ case TGSI_SEMANTIC_POSITION:
+ for (unsigned j = 0; j < 4; j++) {
+ position[j] = LLVMBuildLoad(ctx->ac.builder,
+ addrs[4 * i + j], "");
+ }
+ break;
+ }
+ }
+ assert(position[0]);
+
+ /* Store Position.XYZW into LDS. */
+ LLVMValueRef es_vtxptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
+ for (unsigned chan = 0; chan < 4; chan++) {
+ LLVMBuildStore(builder, ac_to_integer(&ctx->ac, position[chan]),
+ ac_build_gep0(&ctx->ac, es_vtxptr,
+ LLVMConstInt(ctx->i32, lds_pos_x + chan, 0)));
+ }
+ /* Store Position.XY / W into LDS. */
+ for (unsigned chan = 0; chan < 2; chan++) {
+ LLVMValueRef val = ac_build_fdiv(&ctx->ac, position[chan], position[3]);
+ LLVMBuildStore(builder, ac_to_integer(&ctx->ac, val),
+ ac_build_gep0(&ctx->ac, es_vtxptr,
+ LLVMConstInt(ctx->i32, lds_pos_x_div_w + chan, 0)));
+ }
+
+ /* Store VertexID and InstanceID. ES threads will have to load them
+ * from LDS after vertex compaction and use them instead of their own
+ * system values.
+ */
+ bool uses_instance_id = false;
+ bool uses_tes_prim_id = false;
+ LLVMValueRef packed_data = ctx->i32_0;
+
+ if (ctx->type == PIPE_SHADER_VERTEX) {
+ uses_instance_id = sel->info.uses_instanceid ||
+ shader->key.part.vs.prolog.instance_divisor_is_one ||
+ shader->key.part.vs.prolog.instance_divisor_is_fetched;
+
+ LLVMBuildStore(builder, ctx->abi.vertex_id,
+ ac_build_gep0(&ctx->ac, es_vtxptr,
+ LLVMConstInt(ctx->i32, lds_vertex_id, 0)));
+ if (uses_instance_id) {
+ LLVMBuildStore(builder, ctx->abi.instance_id,
+ ac_build_gep0(&ctx->ac, es_vtxptr,
+ LLVMConstInt(ctx->i32, lds_instance_id, 0)));
+ }
+ } else {
+ uses_tes_prim_id = sel->info.uses_primid ||
+ shader->key.mono.u.vs_export_prim_id;
+
+ assert(ctx->type == PIPE_SHADER_TESS_EVAL);
+ LLVMBuildStore(builder, ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, ctx->tes_u)),
+ ac_build_gep0(&ctx->ac, es_vtxptr,
+ LLVMConstInt(ctx->i32, lds_tes_u, 0)));
+ LLVMBuildStore(builder, ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, ctx->tes_v)),
+ ac_build_gep0(&ctx->ac, es_vtxptr,
+ LLVMConstInt(ctx->i32, lds_tes_v, 0)));
+ packed_data = LLVMBuildShl(builder, ac_get_arg(&ctx->ac, ctx->tes_rel_patch_id),
+ LLVMConstInt(ctx->i32, lds_byte2_tes_rel_patch_id * 8, 0), "");
+ if (uses_tes_prim_id) {
+ LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.tes_patch_id),
+ ac_build_gep0(&ctx->ac, es_vtxptr,
+ LLVMConstInt(ctx->i32, lds_tes_patch_id, 0)));
+ }
+ }
+ /* Initialize the packed data. */
+ LLVMBuildStore(builder, packed_data,
+ ac_build_gep0(&ctx->ac, es_vtxptr,
+ LLVMConstInt(ctx->i32, lds_packed_data, 0)));
+ ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
+
+ LLVMValueRef tid = ac_get_thread_id(&ctx->ac);
+
+ /* Initialize the last 3 gs_ngg_scratch dwords to 0, because we may have fewer
+ * than 4 waves, but we always read all 4 values. This is where the thread
+ * bitmasks of unculled threads will be stored.
+ *
+ * gs_ngg_scratch layout: esmask[0..3]
+ */
+ ac_build_ifcc(&ctx->ac,
+ LLVMBuildICmp(builder, LLVMIntULT, get_thread_id_in_tg(ctx),
+ LLVMConstInt(ctx->i32, 3, 0), ""), 16101);
+ {
+ LLVMValueRef index = LLVMBuildAdd(builder, tid, ctx->i32_1, "");
+ LLVMBuildStore(builder, ctx->i32_0,
+ ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, index));
+ }
+ ac_build_endif(&ctx->ac, 16101);
+ ac_build_s_barrier(&ctx->ac);
+
+ /* The hardware requires that there are no holes between unculled vertices,
+ * which means we have to pack ES threads, i.e. reduce the ES thread count
+ * and move ES input VGPRs to lower threads. The upside is that varyings
+ * are only fetched and computed for unculled vertices.
+ *
+ * Vertex compaction in GS threads:
+ *
+ * Part 1: Compute the surviving vertex mask in GS threads:
+ * - Compute 4 32-bit surviving vertex masks in LDS. (max 4 waves)
+ * - In GS, notify ES threads whether the vertex survived.
+ * - Barrier
+ * - ES threads will create the mask and store it in LDS.
+ * - Barrier
+ * - Each GS thread loads the vertex masks from LDS.
+ *
+ * Part 2: Compact ES threads in GS threads:
+ * - Compute the prefix sum for all 3 vertices from the masks. These are the new
+ * thread IDs for each vertex within the primitive.
+ * - Write the value of the old thread ID into the LDS address of the new thread ID.
+ * The ES thread will load the old thread ID and use it to load the position, VertexID,
+ * and InstanceID.
+ * - Update vertex indices and null flag in the GS input VGPRs.
+ * - Barrier
+ *
+ * Part 3: Update input SGPRs and VGPRs:
+ * - For all waves, update per-wave thread counts in input SGPRs.
+ * - In ES threads, update the ES input VGPRs (VertexID, InstanceID, TES inputs).
+ */
+
+ LLVMValueRef vtxindex[] = {
+ si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16),
+ si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16),
+ si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16),
+ };
+ LLVMValueRef gs_vtxptr[] = {
+ ngg_nogs_vertex_ptr(ctx, vtxindex[0]),
+ ngg_nogs_vertex_ptr(ctx, vtxindex[1]),
+ ngg_nogs_vertex_ptr(ctx, vtxindex[2]),
+ };
+ es_vtxptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
+
+ LLVMValueRef gs_accepted = ac_build_alloca(&ctx->ac, ctx->i32, "");
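+ /* ac_build_alloca zero-initializes the slot, so threads that skip the
+ * culling block below leave gs_accepted = 0 (not accepted).
+ */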
+
+ /* Do culling in GS threads. */
+ ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 16002);
+ {
+ /* Load positions. */
+ LLVMValueRef pos[3][4] = {};
+ for (unsigned vtx = 0; vtx < 3; vtx++) {
+ for (unsigned chan = 0; chan < 4; chan++) {
+ unsigned index;
+ if (chan == 0 || chan == 1)
+ index = lds_pos_x_div_w + chan;
+ else if (chan == 3)
+ index = lds_pos_w;
+ else
+ continue;
+
+ LLVMValueRef addr = ac_build_gep0(&ctx->ac, gs_vtxptr[vtx],
+ LLVMConstInt(ctx->i32, index, 0));
+ pos[vtx][chan] = LLVMBuildLoad(builder, addr, "");
+ pos[vtx][chan] = ac_to_float(&ctx->ac, pos[vtx][chan]);
+ }
+ }
+
+ /* Load the viewport state for small prim culling. */
+ LLVMValueRef vp = ac_build_load_invariant(&ctx->ac,
+ ac_get_arg(&ctx->ac, ctx->small_prim_cull_info),
+ ctx->i32_0);
+ vp = LLVMBuildBitCast(builder, vp, ctx->v4f32, "");
+ LLVMValueRef vp_scale[2], vp_translate[2];
+ vp_scale[0] = ac_llvm_extract_elem(&ctx->ac, vp, 0);
+ vp_scale[1] = ac_llvm_extract_elem(&ctx->ac, vp, 1);
+ vp_translate[0] = ac_llvm_extract_elem(&ctx->ac, vp, 2);
+ vp_translate[1] = ac_llvm_extract_elem(&ctx->ac, vp, 3);
+
+ /* Get the small prim filter precision. */
+ LLVMValueRef small_prim_precision = si_unpack_param(ctx, ctx->vs_state_bits, 7, 4);
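+ /* The 4 unpacked bits are the low bits of an IEEE-754 exponent; OR'ing in
+ * 0x70 and shifting into the exponent field (bit 23) reconstructs a
+ * power-of-two precision value between 2^-15 and 1.0 using only integer ops.
+ */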
+ small_prim_precision = LLVMBuildOr(builder, small_prim_precision,
+ LLVMConstInt(ctx->i32, 0x70, 0), "");
+ small_prim_precision = LLVMBuildShl(builder, small_prim_precision,
+ LLVMConstInt(ctx->i32, 23, 0), "");
+ small_prim_precision = LLVMBuildBitCast(builder, small_prim_precision, ctx->f32, "");
+
+ /* Execute culling code. */
+ struct ac_cull_options options = {};
+ options.cull_front = shader->key.opt.ngg_culling & SI_NGG_CULL_FRONT_FACE;
+ options.cull_back = shader->key.opt.ngg_culling & SI_NGG_CULL_BACK_FACE;
+ options.cull_view_xy = shader->key.opt.ngg_culling & SI_NGG_CULL_VIEW_SMALLPRIMS;
+ options.cull_small_prims = options.cull_view_xy;
+ options.cull_zero_area = options.cull_front || options.cull_back;
+ options.cull_w = true;
+
+ /* Tell ES threads whether their vertex survived. */
+ ac_build_ifcc(&ctx->ac, ac_cull_triangle(&ctx->ac, pos, ctx->i1true,
+ vp_scale, vp_translate,
+ small_prim_precision, &options), 16003);
+ {
+ LLVMBuildStore(builder, ctx->ac.i32_1, gs_accepted);
+ for (unsigned vtx = 0; vtx < 3; vtx++) {
+ LLVMBuildStore(builder, ctx->ac.i8_1,
+ si_build_gep_i8(ctx, gs_vtxptr[vtx], lds_byte0_accept_flag));
+ }
+ }
+ ac_build_endif(&ctx->ac, 16003);
+ }
+ ac_build_endif(&ctx->ac, 16002);
+ ac_build_s_barrier(&ctx->ac);
+
+ gs_accepted = LLVMBuildLoad(builder, gs_accepted, "");
+
+ LLVMValueRef es_accepted = ac_build_alloca(&ctx->ac, ctx->i1, "");
+
+ /* Convert the per-vertex flag to a thread bitmask in ES threads and store it in LDS. */
+ ac_build_ifcc(&ctx->ac, si_is_es_thread(ctx), 16007);
+ {
+ LLVMValueRef es_accepted_flag =
+ LLVMBuildLoad(builder,
+ si_build_gep_i8(ctx, es_vtxptr, lds_byte0_accept_flag), "");
+
+ LLVMValueRef es_accepted_bool = LLVMBuildICmp(builder, LLVMIntNE,
+ es_accepted_flag, ctx->ac.i8_0, "");
+ LLVMValueRef es_mask = ac_get_i1_sgpr_mask(&ctx->ac, es_accepted_bool);
+
+ LLVMBuildStore(builder, es_accepted_bool, es_accepted);
+
+ ac_build_ifcc(&ctx->ac, LLVMBuildICmp(builder, LLVMIntEQ,
+ tid, ctx->i32_0, ""), 16008);
+ {
+ LLVMBuildStore(builder, es_mask,
+ ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
+ get_wave_id_in_tg(ctx)));
+ }
+ ac_build_endif(&ctx->ac, 16008);
+ }
+ ac_build_endif(&ctx->ac, 16007);
+ ac_build_s_barrier(&ctx->ac);
+
+ /* Load the vertex masks and compute the new ES thread count. */
+ LLVMValueRef es_mask[2], new_num_es_threads, kill_wave;
+ load_bitmasks_2x64(ctx, ctx->gs_ngg_scratch, 0, es_mask, &new_num_es_threads);
+ new_num_es_threads = ac_build_readlane_no_opt_barrier(&ctx->ac, new_num_es_threads, NULL);
+
+ /* ES threads compute their prefix sum, which is the new ES thread ID.
+ * Then they write the value of the old thread ID into the LDS address
+ * of the new thread ID. It will be used to load input VGPRs from
+ * the old thread's LDS location.
+ */
+ ac_build_ifcc(&ctx->ac, LLVMBuildLoad(builder, es_accepted, ""), 16009);
+ {
+ LLVMValueRef old_id = get_thread_id_in_tg(ctx);
+ LLVMValueRef new_id = ac_prefix_bitcount_2x64(&ctx->ac, es_mask, old_id);
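+ /* new_id = number of accepted vertices with a smaller thread ID
+ * (exclusive prefix popcount), i.e. the compacted thread index.
+ */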
+
+ LLVMBuildStore(builder, LLVMBuildTrunc(builder, old_id, ctx->i8, ""),
+ si_build_gep_i8(ctx, ngg_nogs_vertex_ptr(ctx, new_id),
+ lds_byte0_old_thread_id));
+ LLVMBuildStore(builder, LLVMBuildTrunc(builder, new_id, ctx->i8, ""),
+ si_build_gep_i8(ctx, es_vtxptr, lds_byte1_new_thread_id));
+ }
+ ac_build_endif(&ctx->ac, 16009);
+
+ /* Kill waves that have inactive threads. */
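+ /* A wave is dead when the index of its first thread is already beyond
+ * both the compacted ES thread count and the primitive count, i.e.
+ * max(num_es_threads, num_prims) <= wave_id * wave_size.
+ */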
+ kill_wave = LLVMBuildICmp(builder, LLVMIntULE,
+ ac_build_imax(&ctx->ac, new_num_es_threads, ngg_get_prim_cnt(ctx)),
+ LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
+ LLVMConstInt(ctx->i32, ctx->ac.wave_size, 0), ""), "");
+ ac_build_ifcc(&ctx->ac, kill_wave, 19202);
+ {
+ /* If we are killing wave 0, report via GS_ALLOC_REQ that there are
+ * no vertices and no primitives in this threadgroup.
+ */
+ ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
+ ctx->i32_0, ctx->i32_0);
+ ac_build_s_endpgm(&ctx->ac);
+ }
+ ac_build_endif(&ctx->ac, 19202);
+ ac_build_s_barrier(&ctx->ac);
+
+ /* Send the final vertex and primitive counts. */
+ ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
+ new_num_es_threads, ngg_get_prim_cnt(ctx));
+
+ /* Update thread counts in SGPRs. */
+ LLVMValueRef new_gs_tg_info = ac_get_arg(&ctx->ac, ctx->gs_tg_info);
+ LLVMValueRef new_merged_wave_info = ac_get_arg(&ctx->ac, ctx->merged_wave_info);
+
+ /* This also converts the thread count from the total count to the per-wave count. */
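+ /* gs_tg_info keeps the total ES vertex count in a 9-bit field at bit 12;
+ * merged_wave_info keeps this wave's ES thread count in bits [7:0].
+ */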
+ update_thread_counts(ctx, &new_num_es_threads, &new_gs_tg_info, 9, 12,
+ &new_merged_wave_info, 8, 0);
+
+ /* Update vertex indices in VGPR0 (same format as NGG passthrough). */
+ LLVMValueRef new_vgpr0 = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
+
+ /* Set the null flag at the beginning (culled), and then
+ * overwrite it for accepted primitives.
+ */
+ LLVMBuildStore(builder, LLVMConstInt(ctx->i32, 1u << 31, 0), new_vgpr0);
+
+ /* Get vertex indices after vertex compaction. */
+ ac_build_ifcc(&ctx->ac, LLVMBuildTrunc(builder, gs_accepted, ctx->i1, ""), 16011);
+ {
+ struct ac_ngg_prim prim = {};
+ prim.num_vertices = 3;
+ prim.isnull = ctx->i1false;
+
+ for (unsigned vtx = 0; vtx < 3; vtx++) {
+ prim.index[vtx] =
+ LLVMBuildLoad(builder,
+ si_build_gep_i8(ctx, gs_vtxptr[vtx],
+ lds_byte1_new_thread_id), "");
+ prim.index[vtx] = LLVMBuildZExt(builder, prim.index[vtx], ctx->i32, "");
+ prim.edgeflag[vtx] = ngg_get_initial_edgeflag(ctx, vtx);
+ }
+
+ /* Set the new GS input VGPR. */
+ LLVMBuildStore(builder, ac_pack_prim_export(&ctx->ac, &prim), new_vgpr0);
+ }
+ ac_build_endif(&ctx->ac, 16011);
+
+ if (gfx10_ngg_export_prim_early(shader))
+ gfx10_ngg_build_export_prim(ctx, NULL, LLVMBuildLoad(builder, new_vgpr0, ""));
+
+ /* Set the new ES input VGPRs. */
+ LLVMValueRef es_data[4];
+ LLVMValueRef old_thread_id = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
+
+ for (unsigned i = 0; i < 4; i++)
+ es_data[i] = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
+
+ ac_build_ifcc(&ctx->ac, LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, tid,
+ new_num_es_threads, ""), 16012);
+ {
+ LLVMValueRef old_id, old_es_vtxptr, tmp;
+
+ /* Load ES input VGPRs from the ES thread before compaction. */
+ old_id = LLVMBuildLoad(builder,
+ si_build_gep_i8(ctx, es_vtxptr, lds_byte0_old_thread_id), "");
+ old_id = LLVMBuildZExt(builder, old_id, ctx->i32, "");
+
+ LLVMBuildStore(builder, old_id, old_thread_id);
+ old_es_vtxptr = ngg_nogs_vertex_ptr(ctx, old_id);
+
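+ /* es_data[0..1] hold VertexID/InstanceID for VS, or the TES u/v
+ * coordinates, which share the same LDS dwords (lds_tes_u == lds_vertex_id).
+ */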
+ for (unsigned i = 0; i < 2; i++) {
+ tmp = LLVMBuildLoad(builder,
+ ac_build_gep0(&ctx->ac, old_es_vtxptr,
+ LLVMConstInt(ctx->i32, lds_vertex_id + i, 0)), "");
+ LLVMBuildStore(builder, tmp, es_data[i]);
+ }
+
+ if (ctx->type == PIPE_SHADER_TESS_EVAL) {
+ tmp = LLVMBuildLoad(builder,
+ si_build_gep_i8(ctx, old_es_vtxptr,
+ lds_byte2_tes_rel_patch_id), "");
+ tmp = LLVMBuildZExt(builder, tmp, ctx->i32, "");
+ LLVMBuildStore(builder, tmp, es_data[2]);
+
+ if (uses_tes_prim_id) {
+ tmp = LLVMBuildLoad(builder,
+ ac_build_gep0(&ctx->ac, old_es_vtxptr,
+ LLVMConstInt(ctx->i32, lds_tes_patch_id, 0)), "");
+ LLVMBuildStore(builder, tmp, es_data[3]);
+ }
+ }
+ }
+ ac_build_endif(&ctx->ac, 16012);
+
+ /* Return values for the main function. */
+ LLVMValueRef ret = ctx->return_value;
+ LLVMValueRef val;
+
+ ret = LLVMBuildInsertValue(ctx->ac.builder, ret, new_gs_tg_info, 2, "");
+ ret = LLVMBuildInsertValue(ctx->ac.builder, ret, new_merged_wave_info, 3, "");
+ if (ctx->type == PIPE_SHADER_TESS_EVAL)
+ ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 4);
+
+ ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers,
+ 8 + SI_SGPR_RW_BUFFERS);
+ ret = si_insert_input_ptr(ctx, ret,
+ ctx->bindless_samplers_and_images,
+ 8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
+ ret = si_insert_input_ptr(ctx, ret,
+ ctx->const_and_shader_buffers,
+ 8 + SI_SGPR_CONST_AND_SHADER_BUFFERS);
+ ret = si_insert_input_ptr(ctx, ret,
+ ctx->samplers_and_images,
+ 8 + SI_SGPR_SAMPLERS_AND_IMAGES);
+ ret = si_insert_input_ptr(ctx, ret, ctx->vs_state_bits,
+ 8 + SI_SGPR_VS_STATE_BITS);
+
+ if (ctx->type == PIPE_SHADER_VERTEX) {
+ ret = si_insert_input_ptr(ctx, ret, ctx->args.base_vertex,
+ 8 + SI_SGPR_BASE_VERTEX);
+ ret = si_insert_input_ptr(ctx, ret, ctx->args.start_instance,
+ 8 + SI_SGPR_START_INSTANCE);
+ ret = si_insert_input_ptr(ctx, ret, ctx->args.draw_id,
+ 8 + SI_SGPR_DRAWID);
+ ret = si_insert_input_ptr(ctx, ret, ctx->vertex_buffers,
+ 8 + SI_VS_NUM_USER_SGPR);
+ } else {
+ assert(ctx->type == PIPE_SHADER_TESS_EVAL);
+ ret = si_insert_input_ptr(ctx, ret, ctx->tcs_offchip_layout,
+ 8 + SI_SGPR_TES_OFFCHIP_LAYOUT);
+ ret = si_insert_input_ptr(ctx, ret, ctx->tes_offchip_addr,
+ 8 + SI_SGPR_TES_OFFCHIP_ADDR);
+ }
+
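+ /* The first returned VGPR follows the 8 system SGPRs and the user SGPRs.
+ * For VS, the cull shader declares one extra user SGPR (the vertex buffer
+ * pointer), hence the +1 below.
+ */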
+ unsigned vgpr;
+ if (ctx->type == PIPE_SHADER_VERTEX)
+ vgpr = 8 + GFX9_VSGS_NUM_USER_SGPR + 1;
+ else
+ vgpr = 8 + GFX9_TESGS_NUM_USER_SGPR;
+
+ val = LLVMBuildLoad(builder, new_vgpr0, "");
+ ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val),
+ vgpr++, "");
+ vgpr++; /* gs_vtx23_offset */
+
+ ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_prim_id, vgpr++);
+ ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_invocation_id, vgpr++);
+ vgpr++; /* gs_vtx45_offset */
+
+ if (ctx->type == PIPE_SHADER_VERTEX) {
+ val = LLVMBuildLoad(builder, es_data[0], "");
+ ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val),
+ vgpr++, ""); /* VGPR5 - VertexID */
+ vgpr += 2;
+ if (uses_instance_id) {
+ val = LLVMBuildLoad(builder, es_data[1], "");
+ ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val),
+ vgpr++, ""); /* VGPR8 - InstanceID */
+ } else {
+ vgpr++;
+ }
+ } else {
+ assert(ctx->type == PIPE_SHADER_TESS_EVAL);
+ unsigned num_vgprs = uses_tes_prim_id ? 4 : 3;
+ for (unsigned i = 0; i < num_vgprs; i++) {
+ val = LLVMBuildLoad(builder, es_data[i], "");
+ ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val),
+ vgpr++, "");
+ }
+ if (num_vgprs == 3)
+ vgpr++;
+ }
+ /* Return the old thread ID. */
+ val = LLVMBuildLoad(builder, old_thread_id, "");
+ ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++, "");
+
+ /* These two also use LDS. */
+ if (sel->info.writes_edgeflag ||
+ (ctx->type == PIPE_SHADER_VERTEX && shader->key.mono.u.vs_export_prim_id))
+ ac_build_s_barrier(&ctx->ac);
+
+ ctx->return_value = ret;
+}
+
/**
* Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
*/
}
bool unterminated_es_if_block =
- gfx10_is_ngg_passthrough(ctx->shader) &&
+ !sel->so.num_outputs &&
+ !sel->info.writes_edgeflag &&
!ctx->screen->use_ngg_streamout && /* no query buffer */
(ctx->type != PIPE_SHADER_VERTEX ||
!ctx->shader->key.mono.u.vs_export_prim_id);
LLVMValueRef is_gs_thread = si_is_gs_thread(ctx);
LLVMValueRef is_es_thread = si_is_es_thread(ctx);
- LLVMValueRef vtxindex[] = {
- si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16),
- si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16),
- si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16),
- };
+ LLVMValueRef vtxindex[3];
+
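+ /* With NGG culling, the cull shader already packed all three vertex
+ * indices into gs_vtx01_offset using the passthrough layout (9 bits each
+ * at bits 0/10/20), so unpack them from there.
+ */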
+ if (ctx->shader->key.opt.ngg_culling) {
+ vtxindex[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 9);
+ vtxindex[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 10, 9);
+ vtxindex[2] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 20, 9);
+ } else {
+ vtxindex[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16);
+ vtxindex[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16);
+ vtxindex[2] = si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16);
+ }
/* Determine the number of vertices per primitive. */
unsigned num_vertices;
/* Build the primitive export. */
if (!gfx10_ngg_export_prim_early(ctx->shader)) {
assert(!unterminated_es_if_block);
- gfx10_ngg_build_export_prim(ctx, user_edgeflags);
+ gfx10_ngg_build_export_prim(ctx, user_edgeflags, NULL);
}
/* Export per-vertex data (positions and parameters). */
/* Unconditionally (re-)load the values for proper SSA form. */
for (i = 0; i < info->num_outputs; i++) {
- for (unsigned j = 0; j < 4; j++) {
- outputs[i].values[j] =
- LLVMBuildLoad(builder,
- addrs[4 * i + j],
- "");
+ /* If the NGG cull shader part computed the position, don't
+ * use the position from the current shader part. Instead,
+ * load it from LDS.
+ */
+ if (info->output_semantic_name[i] == TGSI_SEMANTIC_POSITION &&
+ ctx->shader->key.opt.ngg_culling) {
+ vertex_ptr = ngg_nogs_vertex_ptr(ctx,
+ ac_get_arg(&ctx->ac, ctx->ngg_old_thread_id));
+
+ for (unsigned j = 0; j < 4; j++) {
+ tmp = LLVMConstInt(ctx->i32, lds_pos_x + j, 0);
+ tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
+ tmp = LLVMBuildLoad(builder, tmp, "");
+ outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
+ }
+ } else {
+ for (unsigned j = 0; j < 4; j++) {
+ outputs[i].values[j] =
+ LLVMBuildLoad(builder,
+ addrs[4 * i + j], "");
+ }
}
}
}
static void declare_vs_input_vgprs(struct si_shader_context *ctx,
- unsigned *num_prolog_vgprs)
+ unsigned *num_prolog_vgprs,
+ bool ngg_cull_shader)
{
struct si_shader *shader = ctx->shader;
}
if (!shader->is_gs_copy_shader) {
+ if (shader->key.opt.ngg_culling && !ngg_cull_shader) {
+ ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
+ &ctx->ngg_old_thread_id);
+ }
+
/* Vertex load indices. */
if (shader->selector->info.num_inputs) {
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
}
}
-static void declare_tes_input_vgprs(struct si_shader_context *ctx)
+static void declare_tes_input_vgprs(struct si_shader_context *ctx, bool ngg_cull_shader)
{
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_u);
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_v);
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->tes_rel_patch_id);
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tes_patch_id);
+
+ if (ctx->shader->key.opt.ngg_culling && !ngg_cull_shader) {
+ ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
+ &ctx->ngg_old_thread_id);
+ }
}
enum {
ac_add_arg(args, file, registers, type, arg);
}
-void si_create_function(struct si_shader_context *ctx)
+void si_create_function(struct si_shader_context *ctx, bool ngg_cull_shader)
{
struct si_shader *shader = ctx->shader;
LLVMTypeRef returns[AC_MAX_ARGS];
declare_vs_blit_inputs(ctx, vs_blit_property);
/* VGPRs */
- declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
+ declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
break;
}
}
/* VGPRs */
- declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
+ declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
/* Return values */
if (shader->key.opt.vs_as_prim_discard_cs) {
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
if (ctx->type == PIPE_SHADER_VERTEX) {
- declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
+ declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
/* LS return values are inputs to the TCS main shader part. */
for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
- ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */
+ ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
+ &ctx->small_prim_cull_info); /* SPI_SHADER_PGM_LO_GS << 8 */
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
declare_global_desc_pointers(ctx);
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx45_offset);
if (ctx->type == PIPE_SHADER_VERTEX) {
- declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
+ declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
} else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
- declare_tes_input_vgprs(ctx);
+ declare_tes_input_vgprs(ctx, ngg_cull_shader);
}
- if (ctx->shader->key.as_es &&
+ if ((ctx->shader->key.as_es || ngg_cull_shader) &&
(ctx->type == PIPE_SHADER_VERTEX ||
ctx->type == PIPE_SHADER_TESS_EVAL)) {
- unsigned num_user_sgprs;
+ unsigned num_user_sgprs, num_vgprs;
+ /* For the NGG cull shader, add 1 SGPR to hold the vertex buffer pointer. */
if (ctx->type == PIPE_SHADER_VERTEX)
- num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR;
+ num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR + ngg_cull_shader;
else
num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
+ /* The NGG cull shader has to return all 9 VGPRs + the old thread ID.
+ *
+ * The normal merged ESGS shader only has to return the 5 VGPRs
+ * for the GS stage.
+ */
+ num_vgprs = ngg_cull_shader ? 10 : 5;
+
/* ES return values are inputs to GS. */
for (i = 0; i < 8 + num_user_sgprs; i++)
returns[num_returns++] = ctx->i32; /* SGPRs */
- for (i = 0; i < 5; i++)
+ for (i = 0; i < num_vgprs; i++)
returns[num_returns++] = ctx->f32; /* VGPRs */
}
break;
}
/* VGPRs */
- declare_tes_input_vgprs(ctx);
+ declare_tes_input_vgprs(ctx, ngg_cull_shader);
break;
case PIPE_SHADER_GEOMETRY:
return;
}
- si_llvm_create_func(ctx, "main", returns, num_returns,
- si_get_max_workgroup_size(shader));
+ si_llvm_create_func(ctx, ngg_cull_shader ? "ngg_cull_main" : "main",
+ returns, num_returns, si_get_max_workgroup_size(shader));
/* Reserve register locations for VGPR inputs the PS prolog may need. */
if (ctx->type == PIPE_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
!key->as_es && !key->as_ls) {
fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
+ if (shader_type != PIPE_SHADER_GEOMETRY)
+ fprintf(f, " opt.ngg_culling = 0x%x\n", key->opt.ngg_culling);
}
}
}
static bool si_build_main_function(struct si_shader_context *ctx,
- struct nir_shader *nir, bool free_nir)
+ struct nir_shader *nir, bool free_nir,
+ bool ngg_cull_shader)
{
struct si_shader *shader = ctx->shader;
struct si_shader_selector *sel = shader->selector;
ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
else if (shader->key.opt.vs_as_prim_discard_cs)
ctx->abi.emit_outputs = si_llvm_emit_prim_discard_cs_epilogue;
+ else if (ngg_cull_shader)
+ ctx->abi.emit_outputs = gfx10_emit_ngg_culling_epilogue_4x_wave32;
else if (shader->key.as_ngg)
ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
else
if (shader->key.as_es)
ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
+ else if (ngg_cull_shader)
+ ctx->abi.emit_outputs = gfx10_emit_ngg_culling_epilogue_4x_wave32;
else if (shader->key.as_ngg)
ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
else
return false;
}
- si_create_function(ctx);
+ si_create_function(ctx, ngg_cull_shader);
if (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)
si_preload_esgs_ring(ctx);
if (sel->so.num_outputs)
scratch_size = 44;
+ assert(!ctx->gs_ngg_scratch);
LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, scratch_size);
ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
/* This is really only needed when streamout and / or vertex
* compaction is enabled.
*/
- if (sel->so.num_outputs && !ctx->gs_ngg_scratch) {
+ if (!ctx->gs_ngg_scratch &&
+ (sel->so.num_outputs || shader->key.opt.ngg_culling)) {
LLVMTypeRef asi32 = LLVMArrayType(ctx->i32, 8);
ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
if (!shader->is_monolithic ||
(ctx->type == PIPE_SHADER_TESS_EVAL &&
- (shader->key.as_ngg && !shader->key.as_es)))
+ shader->key.as_ngg && !shader->key.as_es &&
+ !shader->key.opt.ngg_culling))
ac_init_exec_full_mask(&ctx->ac);
if ((ctx->type == PIPE_SHADER_VERTEX ||
ctx->type == PIPE_SHADER_TESS_EVAL) &&
- shader->key.as_ngg && !shader->key.as_es) {
+ shader->key.as_ngg && !shader->key.as_es &&
+ !shader->key.opt.ngg_culling) {
gfx10_ngg_build_sendmsg_gs_alloc_req(ctx);
/* Build the primitive export at the beginning
* of the shader if possible.
*/
if (gfx10_ngg_export_prim_early(shader))
- gfx10_ngg_build_export_prim(ctx, NULL);
+ gfx10_ngg_build_export_prim(ctx, NULL, NULL);
}
if (ctx->type == PIPE_SHADER_TESS_CTRL ||
*
* \param info Shader info of the vertex shader.
* \param num_input_sgprs Number of input SGPRs for the vertex shader.
+ * \param ngg_cull_shader Whether the prolog is built for the NGG cull shader part rather than the main shader part.
* \param prolog_key Key of the VS prolog
* \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
* \param key Output shader part key.
*/
static void si_get_vs_prolog_key(const struct si_shader_info *info,
unsigned num_input_sgprs,
+ bool ngg_cull_shader,
const struct si_vs_prolog_bits *prolog_key,
struct si_shader *shader_out,
union si_shader_part_key *key)
key->vs_prolog.as_es = shader_out->key.as_es;
key->vs_prolog.as_ngg = shader_out->key.as_ngg;
+ if (!ngg_cull_shader)
+ key->vs_prolog.has_ngg_cull_inputs = !!shader_out->key.opt.ngg_culling;
+
if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
key->vs_prolog.as_ls = 1;
key->vs_prolog.num_merged_next_stage_vgprs = 2;
shader->info.uses_instanceid = sel->info.uses_instanceid;
- if (!si_build_main_function(&ctx, nir, free_nir)) {
+ LLVMValueRef ngg_cull_main_fn = NULL;
+ if (ctx.shader->key.opt.ngg_culling) {
+ if (!si_build_main_function(&ctx, nir, false, true)) {
+ si_llvm_dispose(&ctx);
+ return -1;
+ }
+ ngg_cull_main_fn = ctx.main_fn;
+ ctx.main_fn = NULL;
+ /* Re-set the IR. */
+ si_llvm_context_set_ir(&ctx, shader);
+ }
+
+ if (!si_build_main_function(&ctx, nir, free_nir, false)) {
si_llvm_dispose(&ctx);
return -1;
}
if (shader->is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
- LLVMValueRef parts[2];
+ LLVMValueRef parts[4];
+ unsigned num_parts = 0;
bool need_prolog = si_vs_needs_prolog(sel, &shader->key.part.vs.prolog);
-
- parts[1] = ctx.main_fn;
+ LLVMValueRef main_fn = ctx.main_fn;
+
+ if (ngg_cull_main_fn) {
+ if (need_prolog) {
+ union si_shader_part_key prolog_key;
+ si_get_vs_prolog_key(&sel->info,
+ shader->info.num_input_sgprs,
+ true,
+ &shader->key.part.vs.prolog,
+ shader, &prolog_key);
+ prolog_key.vs_prolog.is_monolithic = true;
+ si_build_vs_prolog_function(&ctx, &prolog_key);
+ parts[num_parts++] = ctx.main_fn;
+ }
+ parts[num_parts++] = ngg_cull_main_fn;
+ }
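+ /* Note: when culling is enabled, the VS prolog is built twice: once in
+ * front of the cull shader part and once in front of the main part, each
+ * with its own key (the second one expects the extra cull-shader inputs).
+ */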
if (need_prolog) {
union si_shader_part_key prolog_key;
si_get_vs_prolog_key(&sel->info,
shader->info.num_input_sgprs,
+ false,
&shader->key.part.vs.prolog,
shader, &prolog_key);
prolog_key.vs_prolog.is_monolithic = true;
si_build_vs_prolog_function(&ctx, &prolog_key);
- parts[0] = ctx.main_fn;
+ parts[num_parts++] = ctx.main_fn;
}
+ parts[num_parts++] = main_fn;
- si_build_wrapper_function(&ctx, parts + !need_prolog,
- 1 + need_prolog, need_prolog, 0);
+ si_build_wrapper_function(&ctx, parts, num_parts,
+ need_prolog ? 1 : 0, 0);
if (ctx.shader->key.opt.vs_as_prim_discard_cs)
si_build_prim_discard_compute_shader(&ctx);
+ } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_EVAL &&
+ ngg_cull_main_fn) {
+ LLVMValueRef parts[2];
+
+ parts[0] = ngg_cull_main_fn;
+ parts[1] = ctx.main_fn;
+
+ si_build_wrapper_function(&ctx, parts, 2, 0, 0);
} else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
if (sscreen->info.chip_class >= GFX9) {
struct si_shader_selector *ls = shader->key.part.tcs.ls;
shader_ls.is_monolithic = true;
si_llvm_context_set_ir(&ctx, &shader_ls);
- if (!si_build_main_function(&ctx, nir, free_nir)) {
+ if (!si_build_main_function(&ctx, nir, free_nir, false)) {
si_llvm_dispose(&ctx);
return -1;
}
union si_shader_part_key vs_prolog_key;
si_get_vs_prolog_key(&ls->info,
shader_ls.info.num_input_sgprs,
+ false,
&shader->key.part.tcs.ls_prolog,
shader, &vs_prolog_key);
vs_prolog_key.vs_prolog.is_monolithic = true;
shader_es.is_monolithic = true;
si_llvm_context_set_ir(&ctx, &shader_es);
- if (!si_build_main_function(&ctx, nir, free_nir)) {
+ if (!si_build_main_function(&ctx, nir, free_nir, false)) {
si_llvm_dispose(&ctx);
return -1;
}
union si_shader_part_key vs_prolog_key;
si_get_vs_prolog_key(&es->info,
shader_es.info.num_input_sgprs,
+ false,
&shader->key.part.gs.vs_prolog,
shader, &vs_prolog_key);
vs_prolog_key.vs_prolog.is_monolithic = true;
LLVMValueRef ret, func;
int num_returns, i;
unsigned first_vs_vgpr = key->vs_prolog.num_merged_next_stage_vgprs;
- unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4;
+ unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4 +
+ (key->vs_prolog.has_ngg_cull_inputs ? 1 : 0);
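+ /* has_ngg_cull_inputs accounts for the extra VGPR (the old thread ID
+ * returned by the NGG cull shader) that the main shader part receives.
+ */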
struct ac_arg input_sgpr_param[key->vs_prolog.num_input_sgprs];
- struct ac_arg input_vgpr_param[9];
- LLVMValueRef input_vgprs[9];
+ struct ac_arg input_vgpr_param[13];
+ LLVMValueRef input_vgprs[13];
unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs +
num_input_vgprs;
unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;
/* Get the prolog. */
union si_shader_part_key prolog_key;
- si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
+ si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs, false,
key, shader, &prolog_key);
shader->prolog =