/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_shader_internal.h"
#include "si_pipe.h"
#include "sid.h"

#include "util/u_memory.h"
#include "util/u_prim.h"
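
/*
 * Note: the layout of the merged wave info SGPR assumed by the helpers below
 * is inferred from the si_unpack_param() calls in this file:
 * - bits 0..7:   number of ES (vertex) threads in the wave
 * - bits 8..15:  number of GS (primitive) threads in the wave
 * - bits 24..27: wave index within the threadgroup
 * - bits 28..31: number of waves in the threadgroup
 */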

static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->param_merged_wave_info, 24, 4);
}

static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->param_merged_wave_info, 28, 4);
}

static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
			   LLVMConstInt(ctx->ac.i32, 64, false), "");
	return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}
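
/*
 * gs_tg_info packs the per-threadgroup totals; as extracted by the two
 * helpers below, bits 12..20 hold the vertex count and bits 22..30 hold
 * the primitive count of the subgroup.
 */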

static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 12, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}

static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 22, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}

static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
{
	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
					    ctx->param_rw_buffers);

	return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
				     LLVMConstInt(ctx->i32, GFX10_GS_QUERY_BUF, false));
}

/* Send GS Alloc Req message from the first wave of the group to SPI.
 * Message payload is:
 * - bits 0..10: vertices in group
 * - bits 12..22: primitives in group
 */
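/*
 * Worked example of the packing done below (hypothetical counts): with
 * 96 vertices and 32 primitives in the group, the message payload is
 * (32 << 12) | 96 = 0x20060.
 */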
static void build_sendmsg_gs_alloc_req(struct si_shader_context *ctx,
				       LLVMValueRef vtx_cnt,
				       LLVMValueRef prim_cnt)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5020);

	tmp = LLVMBuildShl(builder, prim_cnt, LLVMConstInt(ctx->ac.i32, 12, false),"");
	tmp = LLVMBuildOr(builder, tmp, vtx_cnt, "");
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_ALLOC_REQ, tmp);

	ac_build_endif(&ctx->ac, 5020);
}

struct ngg_prim {
	unsigned num_vertices;
	LLVMValueRef isnull;
	LLVMValueRef index[3];
	LLVMValueRef edgeflag[3];
};
static void build_export_prim(struct si_shader_context *ctx,
			      const struct ngg_prim *prim)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	struct ac_export_args args;
	LLVMValueRef tmp;

	tmp = LLVMBuildZExt(builder, prim->isnull, ctx->ac.i32, "");
	args.out[0] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 31, false), "");

	for (unsigned i = 0; i < prim->num_vertices; ++i) {
		tmp = LLVMBuildShl(builder, prim->index[i],
				   LLVMConstInt(ctx->ac.i32, 10 * i, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
		tmp = LLVMBuildZExt(builder, prim->edgeflag[i], ctx->ac.i32, "");
		tmp = LLVMBuildShl(builder, tmp,
				   LLVMConstInt(ctx->ac.i32, 10 * i + 9, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
	}

	args.out[0] = LLVMBuildBitCast(builder, args.out[0], ctx->ac.f32, "");
	args.out[1] = LLVMGetUndef(ctx->ac.f32);
	args.out[2] = LLVMGetUndef(ctx->ac.f32);
	args.out[3] = LLVMGetUndef(ctx->ac.f32);

	args.target = V_008DFC_SQ_EXP_PRIM;
	args.enabled_channels = 1;
	args.done = true;
	args.valid_mask = false;
	args.compr = false;

	ac_build_export(&ctx->ac, &args);
}

/**
 * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
 */
void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
			     unsigned max_outputs,
			     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct si_shader_output_values *outputs = NULL;
	LLVMBuilderRef builder = ctx->ac.builder;
	struct lp_build_if_state if_state;
	LLVMValueRef tmp;

	assert(!ctx->shader->is_gs_copy_shader);
	assert(info->num_outputs <= max_outputs);

	outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));

	for (unsigned i = 0; i < info->num_outputs; i++) {
		outputs[i].semantic_name = info->output_semantic_name[i];
		outputs[i].semantic_index = info->output_semantic_index[i];

		/* This is used only by streamout. */
		for (unsigned j = 0; j < 4; j++) {
			outputs[i].values[j] =
				LLVMBuildLoad(builder,
					      addrs[4 * i + j],
					      "");
			outputs[i].vertex_stream[j] =
				(info->output_streams[i] >> (2 * j)) & 3;
		}
	}

	lp_build_endif(&ctx->merged_wrap_if_state);

	LLVMValueRef prims_in_wave = si_unpack_param(ctx, ctx->param_merged_wave_info, 8, 8);
	LLVMValueRef vtx_in_wave = si_unpack_param(ctx, ctx->param_merged_wave_info, 0, 8);
	LLVMValueRef is_gs_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), prims_in_wave, "");
	LLVMValueRef is_es_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), vtx_in_wave, "");
	LLVMValueRef vtxindex[] = {
		si_unpack_param(ctx, ctx->param_gs_vtx01_offset, 0, 16),
		si_unpack_param(ctx, ctx->param_gs_vtx01_offset, 16, 16),
		si_unpack_param(ctx, ctx->param_gs_vtx23_offset, 0, 16),
	};
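
	/* The ES vertex indices of the current input primitive arrive packed
	 * as two 16-bit values per 32-bit shader argument; the unpacking
	 * above extracts the three indices used for the primitive export. */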

	/* Determine the number of vertices per primitive. */
	unsigned num_vertices;
	LLVMValueRef num_vertices_val;

	if (ctx->type == PIPE_SHADER_VERTEX) {
		if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) {
			/* Blits always use axis-aligned rectangles with 3 vertices. */
			num_vertices = 3;
			num_vertices_val = LLVMConstInt(ctx->i32, 3, 0);
		} else {
			/* Extract OUTPRIM field. */
			tmp = si_unpack_param(ctx, ctx->param_vs_state_bits, 2, 2);
			num_vertices_val = LLVMBuildAdd(builder, tmp, ctx->i32_1, "");
			num_vertices = 3; /* TODO: optimize for points & lines */
		}
	} else {
		assert(ctx->type == PIPE_SHADER_TESS_EVAL);

		if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
			num_vertices = 1;
		else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
			num_vertices = 2;
		else
			num_vertices = 3;

		num_vertices_val = LLVMConstInt(ctx->i32, num_vertices, false);
	}

	/* TODO: streamout */

	/* TODO: primitive culling */

	build_sendmsg_gs_alloc_req(ctx, ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));

	/* Update query buffer */
	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5030);
	tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac), ctx->ac.i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5031);
	{
		LLVMValueRef args[] = {
			ngg_get_prim_cnt(ctx),
			ngg_get_query_buf(ctx),
			LLVMConstInt(ctx->i32, 16, false), /* offset of stream[0].generated_primitives */
			ctx->i32_0, /* soffset */
			ctx->i32_0, /* cachepolicy */
		};

		/* TODO: should this be 64-bit atomics? */
		ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
				   ctx->i32, args, 5, 0);
	}
	ac_build_endif(&ctx->ac, 5031);
	ac_build_endif(&ctx->ac, 5030);

	/* Export primitive data to the index buffer. Format is:
	 * - bits 0..8: index 0
	 * - bit 9: edge flag 0
	 * - bits 10..18: index 1
	 * - bit 19: edge flag 1
	 * - bits 20..28: index 2
	 * - bit 29: edge flag 2
	 * - bit 31: null primitive (skip)
	 *
	 * For the first version, we will always build up all three indices
	 * independent of the primitive type. The additional garbage data
	 * shouldn't hurt.
	 *
	 * TODO: culling depends on the primitive type, so can have some
	 * interaction here.
	 */
	lp_build_if(&if_state, &ctx->gallivm, is_gs_thread);
	{
		struct ngg_prim prim = {};

		prim.num_vertices = num_vertices;
		prim.isnull = ctx->ac.i1false;
		memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

		for (unsigned i = 0; i < num_vertices; ++i) {
			tmp = LLVMBuildLShr(builder, ctx->abi.gs_invocation_id,
					    LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
			prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
		}

		build_export_prim(ctx, &prim);
	}
	lp_build_endif(&if_state);

	/* Export per-vertex data (positions and parameters). */
	lp_build_if(&if_state, &ctx->gallivm, is_es_thread);
	{
		unsigned i;

		/* Unconditionally (re-)load the values for proper SSA form. */
		for (i = 0; i < info->num_outputs; i++) {
			for (unsigned j = 0; j < 4; j++) {
				outputs[i].values[j] =
					LLVMBuildLoad(builder,
						      addrs[4 * i + j],
						      "");
			}
		}

		/* TODO: Vertex shaders have to get PrimitiveID from GS VGPRs. */
		if (ctx->type == PIPE_SHADER_TESS_EVAL &&
		    ctx->shader->key.mono.u.vs_export_prim_id) {
			outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
			outputs[i].semantic_index = 0;
			outputs[i].values[0] = ac_to_float(&ctx->ac, si_get_primitive_id(ctx, 0));
			for (unsigned j = 1; j < 4; j++)
				outputs[i].values[j] = LLVMGetUndef(ctx->f32);

			memset(outputs[i].vertex_stream, 0,
			       sizeof(outputs[i].vertex_stream));

			i++;
		}

		si_llvm_export_vs(ctx, outputs, i);
	}
	lp_build_endif(&if_state);

	FREE(outputs);
}

static LLVMValueRef
ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;

	LLVMTypeRef elements[2] = {
		LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
		LLVMArrayType(ctx->ac.i8, 4),
	};
	LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
	type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
	return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}

/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive
 * for rasterization.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
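/*
 * Illustration of the swizzle performed below (hypothetical numbers): with
 * gs_max_out_vertices == 4, write_stride_2exp == 2, so vertex index 32 lies
 * in row 32 >> 5 = 1 and is XOR'd with (1 & 3) = 1, i.e. stored in slot 33.
 */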
static LLVMValueRef
ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

	/* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
	unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
	if (write_stride_2exp) {
		LLVMValueRef row =
			LLVMBuildLShr(builder, vertexidx,
				      LLVMConstInt(ctx->ac.i32, 5, false), "");
		LLVMValueRef swizzle =
			LLVMBuildAnd(builder, row,
				     LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
						  false), "");
		vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
	}

	return ac_build_gep0(&ctx->ac, storage, vertexidx);
}

static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
		       LLVMValueRef emitidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
	tmp = LLVMBuildMul(builder, tmp, gsthread, "");
	const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
	return ngg_gs_vertex_ptr(ctx, vertexidx);
}
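
/*
 * Store one emitted GS vertex: write its enabled output components into the
 * vertex's LDS slot and record, in the per-stream primitive flag byte,
 * whether this vertex completes a primitive.
 */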
void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
			      unsigned stream,
			      LLVMValueRef *addrs)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	struct lp_build_if_state if_state;
	LLVMValueRef tmp;

	const LLVMValueRef vertexidx =
		LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 */
	const LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
			      LLVMConstInt(ctx->i32, sel->gs_max_out_vertices, false), "");

	tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
	tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
	LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

	lp_build_if(&if_state, &ctx->gallivm, can_emit);

	const LLVMValueRef vertexptr =
		ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
	unsigned out_idx = 0;
	for (unsigned i = 0; i < info->num_outputs; i++) {
		for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implied C-style array */
				ctx->ac.i32_0, /* first entry of struct */
				LLVMConstInt(ctx->ac.i32, out_idx, false),
			};
			LLVMValueRef ptr = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			LLVMBuildStore(builder, out_val, ptr);
		}
	}
	assert(out_idx * 4 == sel->gsvs_vertex_size);

	/* Determine and store whether this vertex completed a primitive. */
	const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

	tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
	const LLVMValueRef iscompleteprim =
		LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

	tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
	LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_1, /* second struct entry */
		LLVMConstInt(ctx->ac.i32, stream, false),
	};
	const LLVMValueRef primflagptr =
		LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

	tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
	LLVMBuildStore(builder, tmp, primflagptr);

	tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
	tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
	LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);

	lp_build_endif(&if_state);
}

void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
{
	/* Zero out the part of LDS scratch that is used to accumulate the
	 * per-stream generated primitive count.
	 */
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->i32, 4, false), "");
	ac_build_ifcc(&ctx->ac, tmp, 5090);
	{
		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
		LLVMBuildStore(builder, ctx->i32_0, ptr);
	}
	ac_build_endif(&ctx->ac, 5090);

	ac_build_s_barrier(&ctx->ac);
}
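
/*
 * GS threadgroup epilogue: zero the primitive flags of vertices that were
 * never emitted, accumulate the per-stream generated-primitive counts,
 * compact the surviving vertices, and export primitive and vertex data.
 */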

void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;
	const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
	LLVMValueRef tmp, tmp2;

	/* Zero out remaining (non-emitted) primitive flags.
	 *
	 * Note: Alternatively, we could pass the relevant gs_next_vertex to
	 *       the emit threads via LDS. This is likely worse in the expected
	 *       typical case where each GS thread emits the full set of
	 *       vertices.
	 */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

		ac_build_bgnloop(&ctx->ac, 5100);

		const LLVMValueRef vertexidx =
			LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
		tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
			LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5101);
		ac_build_break(&ctx->ac);
		ac_build_endif(&ctx->ac, 5101);

		tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
		LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

		tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implied C-style array */
			ctx->ac.i32_1, /* second entry of struct */
			LLVMConstInt(ctx->ac.i32, stream, false),
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		LLVMBuildStore(builder, i8_0, tmp);

		ac_build_endloop(&ctx->ac, 5100);
	}

	/* Accumulate generated primitives counts across the entire threadgroup. */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		LLVMValueRef numprims =
			LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
		numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, 64);

		tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5105);
		{
			LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
					   ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
							 LLVMConstInt(ctx->i32, stream, false)),
					   numprims, LLVMAtomicOrderingMonotonic, false);
		}
		ac_build_endif(&ctx->ac, 5105);
	}

	lp_build_endif(&ctx->merged_wrap_if_state);

	ac_build_s_barrier(&ctx->ac);

	const LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);

	/* TODO: streamout */

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->i32, 4, false), "");
	ac_build_ifcc(&ctx->ac, tmp, 5110);
	{
		LLVMValueRef offset;
		tmp = tid;
		if (sel->so.num_outputs)
			tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->i32, 3, false), "");
		offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 32, false), "");
		if (sel->so.num_outputs) {
			tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->i32, 2, false), "");
			tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 8, false), "");
			offset = LLVMBuildAdd(builder, offset, tmp, "");
		}

		tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
		LLVMValueRef args[] = {
			tmp,
			ngg_get_query_buf(ctx),
			offset,
			LLVMConstInt(ctx->i32, 16, false), /* soffset */
			ctx->i32_0, /* cachepolicy */
		};
		ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
				   ctx->i32, args, 5, 0);
	}
	ac_build_endif(&ctx->ac, 5110);

	/* Determine vertex liveness. */
	LLVMValueRef vertliveptr = lp_build_alloca(&ctx->gallivm, ctx->ac.i1, "vertexlive");

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5120);
	{
		for (unsigned i = 0; i < verts_per_prim; ++i) {
			const LLVMValueRef primidx =
				LLVMBuildAdd(builder, tid,
					     LLVMConstInt(ctx->ac.i32, i, false), "");

			if (i > 0) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
				ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
			}

			/* Load primitive liveness */
			tmp = ngg_gs_vertex_ptr(ctx, primidx);
			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implicit C-style array */
				ctx->ac.i32_1, /* second value of struct */
				ctx->ac.i32_0, /* stream 0 */
			};
			tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
			tmp = LLVMBuildLoad(builder, tmp, "");
			const LLVMValueRef primlive =
				LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

			tmp = LLVMBuildLoad(builder, vertliveptr, "");
			tmp = LLVMBuildOr(builder, tmp, primlive, "");
			LLVMBuildStore(builder, tmp, vertliveptr);

			if (i > 0)
				ac_build_endif(&ctx->ac, 5121 + i);
		}
	}
	ac_build_endif(&ctx->ac, 5120);
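
	/* Compaction: the exclusive-add scan below gives each surviving vertex
	 * its densely packed slot, while the reduce result is the total number
	 * of vertices to export. */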

	/* Inclusive scan addition across the current wave. */
	LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
	struct ac_wg_scan vertlive_scan = {};
	vertlive_scan.op = nir_op_iadd;
	vertlive_scan.enable_reduce = true;
	vertlive_scan.enable_exclusive = true;
	vertlive_scan.src = vertlive;
	vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->i32_0);
	vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
	vertlive_scan.numwaves = get_tgsize(ctx);
	vertlive_scan.maxwaves = 8;

	ac_build_wg_scan(&ctx->ac, &vertlive_scan);

	/* Skip all exports (including index exports) when possible. At least on
	 * early gfx10 revisions this is also to avoid hangs.
	 */
	LLVMValueRef have_exports =
		LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
	num_emit_threads =
		LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

	/* Allocate export space. Send this message as early as possible, to
	 * hide the latency of the SQ <-> SPI roundtrip.
	 *
	 * Note: We could consider compacting primitives for export as well.
	 *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
	 *       prim data per clock and skips null primitives at no additional
	 *       cost. So compacting primitives can only be beneficial when
	 *       there are 4 or more contiguous null primitives in the export
	 *       (in the common case of single-dword prim exports).
	 */
	build_sendmsg_gs_alloc_req(ctx, vertlive_scan.result_reduce, num_emit_threads);

	/* Setup the reverse vertex compaction permutation. We re-use stream 1
	 * of the primitive liveness flags, relying on the fact that each
	 * threadgroup can have at most 256 threads. */
	ac_build_ifcc(&ctx->ac, vertlive, 5130);
	{
		tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_1, /* stream 1 */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
		LLVMBuildStore(builder, tmp2, tmp);
	}
	ac_build_endif(&ctx->ac, 5130);

	ac_build_s_barrier(&ctx->ac);

	/* Export primitive data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5140);
	{
		struct ngg_prim prim = {};

		prim.num_vertices = verts_per_prim;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_0, /* primflag */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		prim.isnull = LLVMBuildICmp(builder, LLVMIntEQ, tmp,
					    LLVMConstInt(ctx->ac.i8, 0, false), "");

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
				LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
			prim.edgeflag[i] = ctx->ac.i1false;
		}

		build_export_prim(ctx, &prim);
	}
	ac_build_endif(&ctx->ac, 5140);

	/* Export position and parameter data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
	ac_build_ifcc(&ctx->ac, tmp, 5145);
	{
		struct si_shader_output_values *outputs = NULL;
		outputs = MALLOC(info->num_outputs * sizeof(outputs[0]));

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_1, /* stream 1: source data index */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
		const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

		unsigned out_idx = 0;
		gep_idx[1] = ctx->ac.i32_0;
		for (unsigned i = 0; i < info->num_outputs; i++) {
			outputs[i].semantic_name = info->output_semantic_name[i];
			outputs[i].semantic_index = info->output_semantic_index[i];

			for (unsigned j = 0; j < 4; j++, out_idx++) {
				gep_idx[2] = LLVMConstInt(ctx->ac.i32, out_idx, false);
				tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
				tmp = LLVMBuildLoad(builder, tmp, "");
				outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
				outputs[i].vertex_stream[j] =
					(info->output_streams[i] >> (2 * j)) & 3;
			}
		}

		si_llvm_export_vs(ctx, outputs, info->num_outputs);

		FREE(outputs);
	}
	ac_build_endif(&ctx->ac, 5145);
}
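
/*
 * Clamp the number of GS prims per subgroup to what max_esverts ES vertices
 * can supply: beyond the first primitive, each additional primitive consumes
 * at least one new vertex (two when adjacency primitives are used).
 */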
static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
				     unsigned min_verts_per_prim, bool use_adjacency)
{
	unsigned max_reuse = max_esverts - min_verts_per_prim;
	if (use_adjacency)
		max_reuse /= 2;
	*max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}

/**
 * Determine subgroup information like maximum number of vertices and prims.
 *
 * This happens before the shader is uploaded, since LDS relocations during
 * upload depend on the subgroup size.
 */
void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
{
	const struct si_shader_selector *gs_sel = shader->selector;
	const struct si_shader_selector *es_sel =
		shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
	const enum pipe_shader_type gs_type = gs_sel->type;
	const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
	/* TODO: Specialize for known primitive type without GS. */
	const unsigned input_prim = gs_type == PIPE_SHADER_GEOMETRY ?
		gs_sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM] :
		PIPE_PRIM_TRIANGLES;
	const bool use_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
				   input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
	const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
	const unsigned min_verts_per_prim =
		gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space.
	 *
	 * Streamout can increase the ESGS buffer size later on, so be more
	 * conservative with streamout and use 4K dwords. This may be suboptimal.
	 *
	 * Otherwise, use the limit of 7K dwords. The reason is that we need
	 * to leave some headroom for the max_esverts increase at the end.
	 *
	 * TODO: We should really take the shader's internal LDS use into
	 *       account. The linker will fail if the size is greater than
	 *       8K dwords.
	 */
	const unsigned max_lds_size = (gs_sel->so.num_outputs ? 4 : 7) * 1024 - 128;
	const unsigned target_lds_size = max_lds_size;
	unsigned esvert_lds_size = 0;
	unsigned gsprim_lds_size = 0;

	/* All these are per subgroup: */
	bool max_vert_out_per_gs_instance = false;
	unsigned max_esverts_base = 256;
	unsigned max_gsprims_base = 128; /* default prim group size clamp */

	/* Hardware has the following non-natural restrictions on the value
	 * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of
	 * the input primitive:
	 * - at most 252 for any line input primitive type
	 * - at most 251 for any quad input primitive type
	 * - at most 251 for triangle strips with adjacency (this happens to
	 *   be the natural limit for triangle *lists* with adjacency)
	 */
	max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

	if (gs_type == PIPE_SHADER_GEOMETRY) {
		unsigned max_out_verts_per_gsprim =
			gs_sel->gs_max_out_vertices * gs_num_invocations;

		if (max_out_verts_per_gsprim <= 256) {
			if (max_out_verts_per_gsprim) {
				max_gsprims_base = MIN2(max_gsprims_base,
							256 / max_out_verts_per_gsprim);
			}
		} else {
			/* Use special multi-cycling mode in which each GS
			 * instance gets its own subgroup. Does not work with
			 * tessellation. */
			max_vert_out_per_gs_instance = true;
			max_gsprims_base = 1;
			max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
		}

		esvert_lds_size = es_sel->esgs_itemsize / 4;
		gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
	} else {
		/* TODO: This needs to be adjusted once LDS use for compaction
		 * after culling is implemented. */
	}

	unsigned max_gsprims = max_gsprims_base;
	unsigned max_esverts = max_esverts_base;

	if (esvert_lds_size)
		max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
	if (gsprim_lds_size)
		max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

	max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
	clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
	assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

	if (esvert_lds_size || gsprim_lds_size) {
		/* Now that we have a rough proportionality between esverts
		 * and gsprims based on the primitive type, scale both of them
		 * down simultaneously based on required LDS space.
		 *
		 * We could be smarter about this if we knew how much vertex
		 * reuse to expect.
		 */
		unsigned lds_total = max_esverts * esvert_lds_size +
				     max_gsprims * gsprim_lds_size;
		if (lds_total > target_lds_size) {
			max_esverts = max_esverts * target_lds_size / lds_total;
			max_gsprims = max_gsprims * target_lds_size / lds_total;

			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		}
	}

	/* Round up towards full wave sizes for better ALU utilization. */
	if (!max_vert_out_per_gs_instance) {
		const unsigned wavesize = 64;
		unsigned orig_max_esverts;
		unsigned orig_max_gsprims;
		do {
			orig_max_esverts = max_esverts;
			orig_max_gsprims = max_gsprims;

			max_esverts = align(max_esverts, wavesize);
			max_esverts = MIN2(max_esverts, max_esverts_base);
			if (esvert_lds_size)
				max_esverts = MIN2(max_esverts,
						   (max_lds_size - max_gsprims * gsprim_lds_size) /
						   esvert_lds_size);
			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

			max_gsprims = align(max_gsprims, wavesize);
			max_gsprims = MIN2(max_gsprims, max_gsprims_base);
			if (gsprim_lds_size)
				max_gsprims = MIN2(max_gsprims,
						   (max_lds_size - max_esverts * esvert_lds_size) /
						   gsprim_lds_size);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		} while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
	}

	/* Hardware restriction: minimum value of max_esverts */
	max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);

	unsigned max_out_vertices =
		max_vert_out_per_gs_instance ? gs_sel->gs_max_out_vertices :
		gs_type == PIPE_SHADER_GEOMETRY ?
		max_gsprims * gs_num_invocations * gs_sel->gs_max_out_vertices :
		max_esverts;
	assert(max_out_vertices <= 256);

	unsigned prim_amp_factor = 1;
	if (gs_type == PIPE_SHADER_GEOMETRY) {
		/* Number of output primitives per GS input primitive after
		 * GS instancing. */
		prim_amp_factor = gs_sel->gs_max_out_vertices;
	}

	/* The GE only checks against the maximum number of ES verts after
	 * allocating a full GS primitive. So we need to ensure that whenever
	 * this check passes, there is enough space for a full primitive without
	 * vertex reuse.
	 */
	shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
	shader->ngg.max_gsprims = max_gsprims;
	shader->ngg.max_out_verts = max_out_vertices;
	shader->ngg.prim_amp_factor = prim_amp_factor;
	shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;

	shader->gs_info.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
	shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;

	assert(shader->ngg.hw_max_esverts >= 24); /* HW limitation */
}