radeonsi/gfx10: generate geometry shaders for NGG
Author:     Nicolai Hähnle <nicolai.haehnle@amd.com>
AuthorDate: Wed, 23 May 2018 20:20:15 +0000 (22:20 +0200)
Commit:     Marek Olšák <marek.olsak@amd.com>
CommitDate: Wed, 3 Jul 2019 19:51:12 +0000 (15:51 -0400)
Acked-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
src/gallium/drivers/radeonsi/gfx10_shader_ngg.c
src/gallium/drivers/radeonsi/si_shader.c
src/gallium/drivers/radeonsi/si_shader_internal.h
src/gallium/drivers/radeonsi/si_state_shaders.c

index f5774b217ef1804dd7ec5b7685ec03d22b21cd3e..014fe1f96c92259ff8afa222f709e59a7d61b89a 100644
--- a/src/gallium/drivers/radeonsi/gfx10_shader_ngg.c
+++ b/src/gallium/drivers/radeonsi/gfx10_shader_ngg.c
 #include "sid.h"
 
 #include "util/u_memory.h"
+#include "util/u_prim.h"
 
 static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
 {
        return si_unpack_param(ctx, ctx->param_merged_wave_info, 24, 4);
 }
 
+static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
+{
+       return si_unpack_param(ctx, ctx->param_merged_wave_info, 28, 4);
+}
+
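+/* Flat thread index within the threadgroup (wave id * 64 + lane). */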
+static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
+{
+       LLVMBuilderRef builder = ctx->ac.builder;
+       LLVMValueRef tmp;
+       tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
+                          LLVMConstInt(ctx->ac.i32, 64, false), "");
+       return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
+}
+
 static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
 {
        return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
@@ -263,3 +278,376 @@ void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
 
        FREE(outputs);
 }
+
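+/* Reinterpret the gs_ngg_emit LDS area as an unbounded array of per-vertex
+ * records of type {[4 * num_outputs x i32], [4 x i8]}: the output values
+ * followed by one primitive-flag byte per stream.
+ */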
+static LLVMValueRef
+ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
+{
+       const struct si_shader_selector *sel = ctx->shader->selector;
+       const struct tgsi_shader_info *info = &sel->info;
+
+       LLVMTypeRef elements[2] = {
+               LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
+               LLVMArrayType(ctx->ac.i8, 4),
+       };
+       LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
+       type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
+       return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
+}
+
+/**
+ * Return a pointer to the LDS storage reserved for the N'th vertex, where N
+ * is in emit order; that is:
+ * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
+ * - during vertex emit, i.e. while the API GS shader invocation is running,
+ *   N = threadidx * gs_max_out_vertices + emitidx
+ *
+ * Goals of the LDS memory layout:
+ * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
+ *    in uniform control flow
+ * 2. Eliminate bank conflicts on read for export if, additionally, there is no
+ *    culling
+ * 3. Agnostic to the number of waves (since we don't know it before compiling)
+ * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
+ * 5. Avoid wasting memory.
+ *
+ * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
+ * layout, elimination of bank conflicts requires that each vertex occupy an
+ * odd number of dwords. We use the additional dword to store the output stream
+ * index as well as a flag to indicate whether this vertex ends a primitive
+ * for rasterization.
+ *
+ * Swizzling is required to satisfy points 1 and 2 simultaneously.
+ *
+ * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
+ * Indices are swizzled in groups of 32, which ensures point 1 without
+ * disturbing point 2.
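+ * For example, with gs_max_out_vertices = 4 (so write_stride_2exp = 2),
+ * vertex 37 (thread 9, emitidx 1) is stored at index 37 ^ ((37 >> 5) & 3) = 36,
+ * which still lies within thread 9's slot range [36, 40).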
+ *
+ * \return an LDS pointer to type {[N x i32], [4 x i8]}
+ */
+static LLVMValueRef
+ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
+{
+       struct si_shader_selector *sel = ctx->shader->selector;
+       LLVMBuilderRef builder = ctx->ac.builder;
+       LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);
+
+       /* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
+       unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
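+       /* XOR the low write_stride_2exp bits of the index with bits taken from
+        * vertexidx / 32. Since only the low bits change and gs_max_out_vertices
+        * is a multiple of 2^write_stride_2exp, the swizzled index stays within
+        * the owning thread's slot range.
+        */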
+       if (write_stride_2exp) {
+               LLVMValueRef row =
+                       LLVMBuildLShr(builder, vertexidx,
+                                     LLVMConstInt(ctx->ac.i32, 5, false), "");
+               LLVMValueRef swizzle =
+                       LLVMBuildAnd(builder, row,
+                                    LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
+                                                 false), "");
+               vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
+       }
+
+       return ac_build_gep0(&ctx->ac, storage, vertexidx);
+}
+
+static LLVMValueRef
+ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
+                      LLVMValueRef emitidx)
+{
+       struct si_shader_selector *sel = ctx->shader->selector;
+       LLVMBuilderRef builder = ctx->ac.builder;
+       LLVMValueRef tmp;
+
+       tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
+       tmp = LLVMBuildMul(builder, tmp, gsthread, "");
+       const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
+       return ngg_gs_vertex_ptr(ctx, vertexidx);
+}
+
+void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
+                             unsigned stream,
+                             LLVMValueRef *addrs)
+{
+       const struct si_shader_selector *sel = ctx->shader->selector;
+       const struct tgsi_shader_info *info = &sel->info;
+       LLVMBuilderRef builder = ctx->ac.builder;
+       struct lp_build_if_state if_state;
+       LLVMValueRef tmp;
+       const LLVMValueRef vertexidx =
+               LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
+
+       /* If this thread has already emitted the declared maximum number of
+        * vertices, skip the write: excessive vertex emissions are not
+        * supposed to have any effect.
+        */
+       const LLVMValueRef can_emit =
+               LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
+                             LLVMConstInt(ctx->i32, sel->gs_max_out_vertices, false), "");
+
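+       /* Bump the per-stream vertex counter; the select keeps the old value
+        * when the emit is dropped.
+        */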
+       tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
+       tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
+       LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);
+
+       lp_build_if(&if_state, &ctx->gallivm, can_emit);
+
+       const LLVMValueRef vertexptr =
+               ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
+       unsigned out_idx = 0;
+       for (unsigned i = 0; i < info->num_outputs; i++) {
+               for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
+                       if (!(info->output_usagemask[i] & (1 << chan)) ||
+                           ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
+                               continue;
+
+                       LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
+                       LLVMValueRef gep_idx[3] = {
+                               ctx->ac.i32_0, /* implied C-style array */
+                               ctx->ac.i32_0, /* first entry of struct */
+                               LLVMConstInt(ctx->ac.i32, out_idx, false),
+                       };
+                       LLVMValueRef ptr = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
+
+                       out_val = ac_to_integer(&ctx->ac, out_val);
+                       LLVMBuildStore(builder, out_val, ptr);
+               }
+       }
+       assert(out_idx * 4 == sel->gsvs_vertex_size);
+
+       /* Determine and store whether this vertex completed a primitive. */
+       const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");
+
+       tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
+       const LLVMValueRef iscompleteprim =
+               LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");
+
+       tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
+       LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);
+
+       LLVMValueRef gep_idx[3] = {
+               ctx->ac.i32_0, /* implied C-style array */
+               ctx->ac.i32_1, /* second struct entry */
+               LLVMConstInt(ctx->ac.i32, stream, false),
+       };
+       const LLVMValueRef primflagptr =
+               LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
+
+       tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
+       LLVMBuildStore(builder, tmp, primflagptr);
+
+       lp_build_endif(&if_state);
+}
+
+void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
+{
+       const struct si_shader_selector *sel = ctx->shader->selector;
+       const struct tgsi_shader_info *info = &sel->info;
+       const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
+       LLVMBuilderRef builder = ctx->ac.builder;
+       LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
+       LLVMValueRef tmp, tmp2;
+
+       /* Zero out remaining (non-emitted) primitive flags.
+        *
+        * Note: Alternatively, we could pass the relevant gs_next_vertex to
+        *       the emit threads via LDS. This is likely worse in the expected
+        *       typical case where each GS thread emits the full set of
+        *       vertices.
+        */
+       for (unsigned stream = 0; stream < 4; ++stream) {
+               if (!info->num_stream_output_components[stream])
+                       continue;
+
+               const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);
+
+               ac_build_bgnloop(&ctx->ac, 5100);
+
+               const LLVMValueRef vertexidx =
+                       LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
+               tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
+                       LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
+               ac_build_ifcc(&ctx->ac, tmp, 5101);
+               ac_build_break(&ctx->ac);
+               ac_build_endif(&ctx->ac, 5101);
+
+               tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
+               LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);
+
+               tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
+               LLVMValueRef gep_idx[3] = {
+                       ctx->ac.i32_0, /* implied C-style array */
+                       ctx->ac.i32_1, /* second entry of struct */
+                       LLVMConstInt(ctx->ac.i32, stream, false),
+               };
+               tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
+               LLVMBuildStore(builder, i8_0, tmp);
+
+               ac_build_endloop(&ctx->ac, 5100);
+       }
+
+       lp_build_endif(&ctx->merged_wrap_if_state);
+
+       ac_build_s_barrier(&ctx->ac);
+
+       const LLVMValueRef tid = get_thread_id_in_tg(ctx);
+       LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);
+
+       /* TODO: streamout */
+
+       /* TODO: culling */
+
+       /* Determine vertex liveness: a vertex is live if it belongs to a
+        * completed primitive, i.e. if any primitive whose final vertex lies in
+        * [tid, tid + verts_per_prim - 1] has its completeness flag set.
+        */
+       LLVMValueRef vertliveptr = lp_build_alloca(&ctx->gallivm, ctx->ac.i1, "vertexlive");
+
+       tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
+       ac_build_ifcc(&ctx->ac, tmp, 5120);
+       {
+               for (unsigned i = 0; i < verts_per_prim; ++i) {
+                       const LLVMValueRef primidx =
+                               LLVMBuildAdd(builder, tid,
+                                            LLVMConstInt(ctx->ac.i32, i, false), "");
+
+                       if (i > 0) {
+                               tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
+                               ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
+                       }
+
+                       /* Load primitive liveness */
+                       tmp = ngg_gs_vertex_ptr(ctx, primidx);
+                       LLVMValueRef gep_idx[3] = {
+                               ctx->ac.i32_0, /* implicit C-style array */
+                               ctx->ac.i32_1, /* second value of struct */
+                               ctx->ac.i32_0, /* stream 0 */
+                       };
+                       tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
+                       tmp = LLVMBuildLoad(builder, tmp, "");
+                       const LLVMValueRef primlive =
+                               LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
+
+                       tmp = LLVMBuildLoad(builder, vertliveptr, "");
+                       tmp = LLVMBuildOr(builder, tmp, primlive, "");
+                       LLVMBuildStore(builder, tmp, vertliveptr);
+
+                       if (i > 0)
+                               ac_build_endif(&ctx->ac, 5121 + i);
+               }
+       }
+       ac_build_endif(&ctx->ac, 5120);
+
+       /* Workgroup-wide scan of vertex liveness: an exclusive prefix sum plus
+        * a reduction (total count) across all waves in the threadgroup. */
+       LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
+       struct ac_wg_scan vertlive_scan = {};
+       vertlive_scan.op = nir_op_iadd;
+       vertlive_scan.enable_reduce = true;
+       vertlive_scan.enable_exclusive = true;
+       vertlive_scan.src = vertlive;
+       vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->i32_0);
+       vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
+       vertlive_scan.numwaves = get_tgsize(ctx);
+       vertlive_scan.maxwaves = 8;
+
+       ac_build_wg_scan(&ctx->ac, &vertlive_scan);
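+       /* result_reduce is the total number of live vertices in the threadgroup;
+        * result_exclusive is a live vertex's index after compaction.
+        */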
+
+       /* Skip all exports (including index exports) when possible. At least on
+        * early gfx10 revisions this is also to avoid hangs.
+        */
+       LLVMValueRef have_exports =
+               LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
+       num_emit_threads =
+               LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");
+
+       /* Allocate export space. Send this message as early as possible, to
+        * hide the latency of the SQ <-> SPI roundtrip.
+        *
+        * Note: We could consider compacting primitives for export as well.
+        *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
+        *       prim data per clock and skips null primitives at no additional
+        *       cost. So compacting primitives can only be beneficial when
+        *       there are 4 or more contiguous null primitives in the export
+        *       (in the common case of single-dword prim exports).
+        */
+       build_sendmsg_gs_alloc_req(ctx, vertlive_scan.result_reduce, num_emit_threads);
+
+       /* Set up the reverse vertex compaction permutation. We re-use stream 1
+        * of the primitive liveness flags, relying on the fact that each
+        * threadgroup can have at most 256 threads. */
+       ac_build_ifcc(&ctx->ac, vertlive, 5130);
+       {
+               tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
+               LLVMValueRef gep_idx[3] = {
+                       ctx->ac.i32_0, /* implicit C-style array */
+                       ctx->ac.i32_1, /* second value of struct */
+                       ctx->ac.i32_1, /* stream 1 */
+               };
+               tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
+               tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
+               LLVMBuildStore(builder, tmp2, tmp);
+       }
+       ac_build_endif(&ctx->ac, 5130);
+
+       ac_build_s_barrier(&ctx->ac);
+
+       /* Export primitive data */
+       tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
+       ac_build_ifcc(&ctx->ac, tmp, 5140);
+       {
+               struct ngg_prim prim = {};
+               prim.num_vertices = verts_per_prim;
+
+               tmp = ngg_gs_vertex_ptr(ctx, tid);
+               LLVMValueRef gep_idx[3] = {
+                       ctx->ac.i32_0, /* implicit C-style array */
+                       ctx->ac.i32_1, /* second value of struct */
+                       ctx->ac.i32_0, /* primflag */
+               };
+               tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
+               tmp = LLVMBuildLoad(builder, tmp, "");
+               prim.isnull = LLVMBuildICmp(builder, LLVMIntEQ, tmp,
+                                           LLVMConstInt(ctx->ac.i8, 0, false), "");
+
+               for (unsigned i = 0; i < verts_per_prim; ++i) {
+                       prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
+                               LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
+                       prim.edgeflag[i] = ctx->ac.i1false;
+               }
+
+               build_export_prim(ctx, &prim);
+       }
+       ac_build_endif(&ctx->ac, 5140);
+
+       /* Export position and parameter data */
+       tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
+       ac_build_ifcc(&ctx->ac, tmp, 5145);
+       {
+               struct si_shader_output_values *outputs = NULL;
+               outputs = MALLOC(info->num_outputs * sizeof(outputs[0]));
+
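+               /* The stream-1 byte of slot tid holds the pre-compaction index
+                * of the vertex that was compacted to position tid; load it and
+                * read the output values from that vertex's storage.
+                */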
+               tmp = ngg_gs_vertex_ptr(ctx, tid);
+               LLVMValueRef gep_idx[3] = {
+                       ctx->ac.i32_0, /* implicit C-style array */
+                       ctx->ac.i32_1, /* second value of struct */
+                       ctx->ac.i32_1, /* stream 1: source data index */
+               };
+               tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
+               tmp = LLVMBuildLoad(builder, tmp, "");
+               tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
+               const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);
+
+               unsigned out_idx = 0;
+               gep_idx[1] = ctx->ac.i32_0;
+               for (unsigned i = 0; i < info->num_outputs; i++) {
+                       outputs[i].semantic_name = info->output_semantic_name[i];
+                       outputs[i].semantic_index = info->output_semantic_index[i];
+
+                       for (unsigned j = 0; j < 4; j++, out_idx++) {
+                               gep_idx[2] = LLVMConstInt(ctx->ac.i32, out_idx, false);
+                               tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
+                               tmp = LLVMBuildLoad(builder, tmp, "");
+                               outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
+                               outputs[i].vertex_stream[j] =
+                                       (info->output_streams[i] >> (2 * j)) & 3;
+                       }
+               }
+
+               si_llvm_export_vs(ctx, outputs, info->num_outputs);
+
+               FREE(outputs);
+       }
+       ac_build_endif(&ctx->ac, 5145);
+}
index 2ab1833579ec782e129e7fb7bcaacb86d23badbd..cc05b33ae1b5be9355aa007adf9b967514babade 100644
--- a/src/gallium/drivers/radeonsi/si_shader.c
+++ b/src/gallium/drivers/radeonsi/si_shader.c
@@ -3401,11 +3401,15 @@ static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
 /* Pass GS inputs from ES to GS on GFX9. */
 static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
 {
+       LLVMBuilderRef builder = ctx->ac.builder;
        LLVMValueRef ret = ctx->return_value;
 
        ret = si_insert_input_ptr(ctx, ret, 0, 0);
        ret = si_insert_input_ptr(ctx, ret, 1, 1);
-       ret = si_insert_input_ret(ctx, ret, ctx->param_gs2vs_offset, 2);
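+       /* For NGG, pass gs_tg_info in place of the GS->VS ring offset, which is
+        * only used by the legacy GS path.
+        */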
+       if (ctx->shader->key.as_ngg)
+               ret = LLVMBuildInsertValue(builder, ret, ctx->gs_tg_info, 2, "");
+       else
+               ret = si_insert_input_ret(ctx, ret, ctx->param_gs2vs_offset, 2);
        ret = si_insert_input_ret(ctx, ret, ctx->param_merged_wave_info, 3);
        ret = si_insert_input_ret(ctx, ret, ctx->param_merged_scratch_offset, 5);
 
@@ -3555,6 +3559,11 @@ static LLVMValueRef si_get_gs_wave_id(struct si_shader_context *ctx)
 
 static void emit_gs_epilogue(struct si_shader_context *ctx)
 {
+       if (ctx->shader->key.as_ngg) {
+               gfx10_ngg_gs_emit_epilogue(ctx);
+               return;
+       }
+
        ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
                         si_get_gs_wave_id(ctx));
 
@@ -4192,6 +4201,12 @@ static void si_llvm_emit_vertex(struct ac_shader_abi *abi,
                                LLVMValueRef *addrs)
 {
        struct si_shader_context *ctx = si_shader_context_from_abi(abi);
+
+       if (ctx->shader->key.as_ngg) {
+               gfx10_ngg_gs_emit_vertex(ctx, stream, addrs);
+               return;
+       }
+
        struct tgsi_shader_info *info = &ctx->shader->selector->info;
        struct si_shader *shader = ctx->shader;
        struct lp_build_if_state if_state;
@@ -4284,6 +4299,11 @@ static void si_llvm_emit_primitive(struct ac_shader_abi *abi,
 {
        struct si_shader_context *ctx = si_shader_context_from_abi(abi);
 
+       if (ctx->shader->key.as_ngg) {
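+               /* The NGG path does not use the GS cut message; ending the
+                * primitive just resets the running vertex count for this stream.
+                */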
+               LLVMBuildStore(ctx->ac.builder, ctx->ac.i32_0, ctx->gs_curprim_verts[stream]);
+               return;
+       }
+
        /* Signal primitive cut */
        ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
                         si_get_gs_wave_id(ctx));
@@ -6087,11 +6107,27 @@ static bool si_compile_tgsi_main(struct si_shader_context *ctx)
        }
 
        if (ctx->type == PIPE_SHADER_GEOMETRY) {
-               int i;
-               for (i = 0; i < 4; i++) {
+               for (unsigned i = 0; i < 4; i++) {
                        ctx->gs_next_vertex[i] =
                                ac_build_alloca(&ctx->ac, ctx->i32, "");
                }
+               if (shader->key.as_ngg) {
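+                       /* Number of vertices emitted so far into the current
+                        * (not yet completed) output primitive, per stream.
+                        */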
+                       for (unsigned i = 0; i < 4; ++i) {
+                               ctx->gs_curprim_verts[i] =
+                                       lp_build_alloca(&ctx->gallivm, ctx->ac.i32, "");
+                       }
+
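+                       /* 8 dwords of LDS scratch for the workgroup scans in
+                        * gfx10_ngg_gs_emit_epilogue (maxwaves = 8).
+                        */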
+                       LLVMTypeRef a8i32 = LLVMArrayType(ctx->i32, 8);
+                       ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
+                               a8i32, "ngg_scratch", AC_ADDR_SPACE_LDS);
+                       LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(a8i32));
+                       LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
+
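+                       /* Per-vertex output storage written by
+                        * gfx10_ngg_gs_emit_vertex; declared as a zero-length
+                        * array with external linkage, since its real size is
+                        * not fixed here.
+                        */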
+                       ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(ctx->ac.module,
+                               LLVMArrayType(ctx->i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
+                       LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
+                       LLVMSetAlignment(ctx->gs_ngg_emit, 4);
+               }
        }
 
        if (sel->force_correct_derivs_after_kill) {
index 5419a7312b1c60975793e4d4d466f79b396d036f..55f32c66117da59ca56d6505f9f0ed203af01ef6 100644
--- a/src/gallium/drivers/radeonsi/si_shader_internal.h
+++ b/src/gallium/drivers/radeonsi/si_shader_internal.h
@@ -213,6 +213,9 @@ struct si_shader_context {
 
        LLVMValueRef invoc0_tess_factors[6]; /* outer[4], inner[2] */
        LLVMValueRef gs_next_vertex[4];
+       LLVMValueRef gs_curprim_verts[4];
+       LLVMValueRef gs_ngg_emit;
+       LLVMValueRef gs_ngg_scratch;
        LLVMValueRef postponed_kill;
        LLVMValueRef return_value;
 
@@ -382,5 +385,9 @@ LLVMValueRef si_unpack_param(struct si_shader_context *ctx,
 void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
                             unsigned max_outputs,
                             LLVMValueRef *addrs);
+void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
+                             unsigned stream,
+                             LLVMValueRef *addrs);
+void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx);
 
 #endif
index 2537dd90b5a6463f18891f4812a8175f131889e6..27835811cb7cdeda0bad6c17b78ea092cd5750f0 100644
--- a/src/gallium/drivers/radeonsi/si_state_shaders.c
+++ b/src/gallium/drivers/radeonsi/si_state_shaders.c
@@ -2386,7 +2386,11 @@ static void si_init_shader_selector_async(void *job, int thread_index)
                }
        }
 
-       /* The GS copy shader is always pre-compiled. */
+       /* The GS copy shader is always pre-compiled.
+        *
+        * TODO-GFX10: We could compile the GS copy shader on demand, since it
+        * is only used in the (rare) non-NGG case.
+        */
        if (sel->type == PIPE_SHADER_GEOMETRY) {
                sel->gs_copy_shader = si_generate_gs_copy_shader(sscreen, compiler, sel, debug);
                if (!sel->gs_copy_shader) {