/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_shader_internal.h"

#include "util/u_memory.h"
#include "util/u_prim.h"

static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
}

static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->merged_wave_info, 28, 4);
}

static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
			   LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
	return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}

static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->gs_tg_info, 12, 9);
}

static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->gs_tg_info, 22, 9);
}

static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->gs_tg_info, 0, 12);
}

static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
{
	LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);

	return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
				     LLVMConstInt(ctx->i32, GFX10_GS_QUERY_BUF, false));
}

static LLVMValueRef ngg_get_initial_edgeflag(struct si_shader_context *ctx, unsigned index)
{
	if (ctx->type == PIPE_SHADER_VERTEX) {
		LLVMValueRef tmp;
		tmp = LLVMBuildLShr(ctx->ac.builder,
				    ac_get_arg(&ctx->ac, ctx->args.gs_invocation_id),
				    LLVMConstInt(ctx->ac.i32, 8 + index, false), "");
		return LLVMBuildTrunc(ctx->ac.builder, tmp, ctx->ac.i1, "");
	}
	return ctx->i1false;
}

/**
 * Return the number of vertices as a constant in \p num_vertices,
 * and return a more precise value as LLVMValueRef from the function.
 */
static LLVMValueRef ngg_get_vertices_per_prim(struct si_shader_context *ctx,
					      unsigned *num_vertices)
{
	const struct si_shader_info *info = &ctx->shader->selector->info;

	if (ctx->type == PIPE_SHADER_VERTEX) {
		if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
			/* Blits always use axis-aligned rectangles with 3 vertices. */
			*num_vertices = 3;
			return LLVMConstInt(ctx->i32, 3, 0);
		} else {
			/* We always build up all three indices for the prim export
			 * independent of the primitive type. The additional garbage
			 * data shouldn't hurt. This number doesn't matter with
			 * NGG passthrough.
			 */
			*num_vertices = 3;

			/* Extract OUTPRIM field. */
			LLVMValueRef num = si_unpack_param(ctx, ctx->vs_state_bits, 2, 2);
			return LLVMBuildAdd(ctx->ac.builder, num, ctx->i32_1, "");
		}
	} else {
		assert(ctx->type == PIPE_SHADER_TESS_EVAL);

		if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
			*num_vertices = 1;
		else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
			*num_vertices = 2;
		else
			*num_vertices = 3;

		return LLVMConstInt(ctx->i32, *num_vertices, false);
	}
}

bool gfx10_ngg_export_prim_early(struct si_shader *shader)
{
	struct si_shader_selector *sel = shader->selector;

	assert(shader->key.as_ngg && !shader->key.as_es);

	return sel->type != PIPE_SHADER_GEOMETRY &&
	       !sel->info.writes_edgeflag;
}

void gfx10_ngg_build_sendmsg_gs_alloc_req(struct si_shader_context *ctx)
{
	ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
				      ngg_get_vtx_cnt(ctx),
				      ngg_get_prim_cnt(ctx));
}

void gfx10_ngg_build_export_prim(struct si_shader_context *ctx,
				 LLVMValueRef user_edgeflags[3])
{
	if (gfx10_is_ngg_passthrough(ctx->shader)) {
		ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 6001);
		{
			struct ac_ngg_prim prim = {};

			prim.passthrough = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset);
			ac_build_export_prim(&ctx->ac, &prim);
		}
		ac_build_endif(&ctx->ac, 6001);
		return;
	}

	ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 6001);
	{
		struct ac_ngg_prim prim = {};

		ngg_get_vertices_per_prim(ctx, &prim.num_vertices);

		prim.isnull = ctx->ac.i1false;
		prim.index[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16);
		prim.index[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16);
		prim.index[2] = si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16);

		for (unsigned i = 0; i < prim.num_vertices; ++i) {
			prim.edgeflag[i] = ngg_get_initial_edgeflag(ctx, i);

			if (ctx->shader->selector->info.writes_edgeflag) {
				LLVMValueRef edge;

				edge = LLVMBuildLoad(ctx->ac.builder, user_edgeflags[i], "");
				edge = LLVMBuildAnd(ctx->ac.builder, prim.edgeflag[i], edge, "");
				prim.edgeflag[i] = edge;
			}
		}

		ac_build_export_prim(&ctx->ac, &prim);
	}
	ac_build_endif(&ctx->ac, 6001);
}

static void build_streamout_vertex(struct si_shader_context *ctx,
				   LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
				   unsigned stream, LLVMValueRef offset_vtx,
				   LLVMValueRef vertexptr)
{
	struct si_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef offset[4] = {};
	LLVMValueRef tmp;

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (!wg_offset_dw[buffer])
			continue;

		tmp = LLVMBuildMul(builder, offset_vtx,
				   LLVMConstInt(ctx->i32, so->stride[buffer], false), "");
		tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
		offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->i32, 2, false), "");
	}

	for (unsigned i = 0; i < so->num_outputs; ++i) {
		if (so->output[i].stream != stream)
			continue;

		unsigned reg = so->output[i].register_index;
		struct si_shader_output_values out;
		out.semantic_name = info->output_semantic_name[reg];
		out.semantic_index = info->output_semantic_index[reg];

		for (unsigned comp = 0; comp < 4; comp++) {
			tmp = ac_build_gep0(&ctx->ac, vertexptr,
					    LLVMConstInt(ctx->i32, 4 * reg + comp, false));
			out.values[comp] = LLVMBuildLoad(builder, tmp, "");
			out.vertex_stream[comp] =
				(info->output_streams[reg] >> (2 * comp)) & 3;
		}

		si_emit_streamout_output(ctx, so_buffer, offset, &so->output[i], &out);
	}
}

struct ngg_streamout {
	LLVMValueRef num_vertices;

	/* per-thread data */
	LLVMValueRef prim_enable[4]; /* i1 per stream */
	LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */

	LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
};

/**
 * Build streamout logic.
 *
 * Writes number of emitted primitives to gs_ngg_scratch[4:8].
 *
 * Clobbers gs_ngg_scratch[8:].
 */
static void build_streamout(struct si_shader_context *ctx,
			    struct ngg_streamout *nggso)
{
	struct si_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp, tmp2;
	LLVMValueRef i32_2 = LLVMConstInt(ctx->i32, 2, false);
	LLVMValueRef i32_4 = LLVMConstInt(ctx->i32, 4, false);
	LLVMValueRef i32_8 = LLVMConstInt(ctx->i32, 8, false);
	LLVMValueRef so_buffer[4] = {};
	unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
				    (nggso->vertices[2] ? 1 : 0);
	LLVMValueRef prim_stride_dw[4] = {};
	LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->i32);
	int stream_for_buffer[4] = { -1, -1, -1, -1 };
	unsigned bufmask_for_stream[4] = {};
	bool isgs = ctx->type == PIPE_SHADER_GEOMETRY;
	unsigned scratch_emit_base = isgs ? 4 : 0;
	LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->i32_0;
	unsigned scratch_offset_base = isgs ? 8 : 4;
	LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;

	ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);

	/* Determine the mapping of streamout buffers to vertex streams. */
	for (unsigned i = 0; i < so->num_outputs; ++i) {
		unsigned buf = so->output[i].output_buffer;
		unsigned stream = so->output[i].stream;
		assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
		stream_for_buffer[buf] = stream;
		bufmask_for_stream[stream] |= 1 << buf;
	}

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (stream_for_buffer[buffer] == -1)
			continue;

		assert(so->stride[buffer]);

		tmp = LLVMConstInt(ctx->i32, so->stride[buffer], false);
		prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
		prim_stride_dw_vgpr = ac_build_writelane(
			&ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
			LLVMConstInt(ctx->i32, buffer, false));

		so_buffer[buffer] = ac_build_load_to_sgpr(
			&ctx->ac, buf_ptr,
			LLVMConstInt(ctx->i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
	}

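	/* Only the first wave of the threadgroup updates the global streamout
	 * offsets below; the other waves pick the results up from LDS scratch
	 * after the barrier further down.
	 */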
	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5200);
	{
		LLVMTypeRef gdsptr = LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GDS);
		LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->i32_0, gdsptr, "");

		/* Advance the streamout offsets in GDS. */
		LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
		LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");

		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5210);
		{
			if (isgs) {
				tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
				tmp = LLVMBuildLoad(builder, tmp, "");
			} else {
				tmp = ac_build_writelane(&ctx->ac, ctx->i32_0,
							 ngg_get_prim_cnt(ctx), ctx->i32_0);
			}
			LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);

			unsigned swizzle[4];
			int unused_stream = -1;
			for (unsigned stream = 0; stream < 4; ++stream) {
				if (!info->num_stream_output_components[stream]) {
					unused_stream = stream;
					break;
				}
			}
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] >= 0) {
					swizzle[buffer] = stream_for_buffer[buffer];
				} else {
					assert(unused_stream >= 0);
					swizzle[buffer] = unused_stream;
				}
			}

			tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
						    swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
			tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");

			LLVMValueRef args[] = {
				LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
				tmp,
				ctx->i32_0, // ordering
				ctx->i32_0, // scope
				ctx->ac.i1false, // isVolatile
				LLVMConstInt(ctx->i32, 4 << 24, false), // OA index
				ctx->ac.i1true, // wave release
				ctx->ac.i1true, // wave done
			};
			tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
						 ctx->i32, args, ARRAY_SIZE(args), 0);

			/* Keep offsets in a VGPR for quick retrieval via readlane by
			 * the first wave for bounds checking, and also store in LDS
			 * for retrieval by all waves later. */
			LLVMBuildStore(builder, tmp, offsets_vgpr);

			tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					    scratch_offset_basev, "");
			tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
			LLVMBuildStore(builder, tmp, tmp2);
		}
		ac_build_endif(&ctx->ac, 5210);

		/* Determine the max emit per buffer. This is done via the SALU, in part
		 * because LLVM can't generate divide-by-multiply if we try to do this
		 * via VALU with one lane per buffer.
		 */
		LLVMValueRef max_emit[4] = {};
		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] == -1)
				continue;

			LLVMValueRef bufsize_dw =
				LLVMBuildLShr(builder,
					      LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
					      i32_2, "");

			tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
			LLVMValueRef offset_dw =
				ac_build_readlane(&ctx->ac, tmp,
						  LLVMConstInt(ctx->i32, buffer, false));

			tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
			tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");

			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
			max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->i32_0, tmp, "");
		}

		/* Determine the number of emitted primitives per stream and fixup the
		 * GDS counter if necessary.
		 *
		 * This is complicated by the fact that a single stream can emit to
		 * multiple buffers (but luckily not vice versa).
		 */
		LLVMValueRef emit_vgpr = ctx->i32_0;

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
			LLVMValueRef generated =
				ac_build_readlane(&ctx->ac, tmp,
						  LLVMConstInt(ctx->i32, stream, false));

			LLVMValueRef emit = generated;
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] == stream)
					emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
			}

			emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
						       LLVMConstInt(ctx->i32, stream, false));

			/* Fixup the offset using a plain GDS atomic if we overflowed. */
			tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
			ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
			tmp = LLVMBuildLShr(builder,
					    LLVMConstInt(ctx->i32, bufmask_for_stream[stream], false),
					    ac_get_thread_id(&ctx->ac), "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			ac_build_ifcc(&ctx->ac, tmp, 5222);
			{
				tmp = LLVMBuildSub(builder, generated, emit, "");
				tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
				tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
				LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
						   LLVMAtomicOrderingMonotonic, false);
			}
			ac_build_endif(&ctx->ac, 5222);
			ac_build_endif(&ctx->ac, 5221);
		}

		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5225);
		{
			tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					   scratch_emit_basev, "");
			tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
			LLVMBuildStore(builder, emit_vgpr, tmp);
		}
		ac_build_endif(&ctx->ac, 5225);
	}
	ac_build_endif(&ctx->ac, 5200);

	/* Determine the workgroup-relative per-thread / primitive offset into
	 * the streamout buffers */
	struct ac_wg_scan primemit_scan[4] = {};

	if (isgs) {
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			primemit_scan[stream].enable_exclusive = true;
			primemit_scan[stream].op = nir_op_iadd;
			primemit_scan[stream].src = nggso->prim_enable[stream];
			primemit_scan[stream].scratch =
				ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
					      LLVMConstInt(ctx->i32, 12 + 8 * stream, false));
			primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
			primemit_scan[stream].numwaves = get_tgsize(ctx);
			primemit_scan[stream].maxwaves = 8;
			ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
		}
	}

	ac_build_s_barrier(&ctx->ac);

	/* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
	LLVMValueRef wgoffset_dw[4] = {};

	{
		LLVMValueRef scratch_vgpr;

		tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
		scratch_vgpr = LLVMBuildLoad(builder, tmp, "");

		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] >= 0) {
				wgoffset_dw[buffer] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_offset_base + buffer, false));
			}
		}

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (info->num_stream_output_components[stream]) {
				nggso->emit[stream] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_emit_base + stream, false));
			}
		}
	}

	/* Write out primitive data */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		if (isgs) {
			ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
		} else {
			primemit_scan[stream].result_exclusive = tid;
		}

		tmp = LLVMBuildICmp(builder, LLVMIntULT,
				    primemit_scan[stream].result_exclusive,
				    nggso->emit[stream], "");
		tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
		ac_build_ifcc(&ctx->ac, tmp, 5240);
		{
			LLVMValueRef offset_vtx =
				LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
					     nggso->num_vertices, "");

			for (unsigned i = 0; i < max_num_vertices; ++i) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT,
						    LLVMConstInt(ctx->i32, i, false),
						    nggso->num_vertices, "");
				ac_build_ifcc(&ctx->ac, tmp, 5241);
				build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
						       stream, offset_vtx, nggso->vertices[i]);
				ac_build_endif(&ctx->ac, 5241);
				offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->i32_1, "");
			}
		}
		ac_build_endif(&ctx->ac, 5240);
	}
}

static unsigned ngg_nogs_vertex_size(struct si_shader *shader)
{
	unsigned lds_vertex_size = 0;

	/* The edgeflag is always stored in the last element that's also
	 * used for padding to reduce LDS bank conflicts. */
	if (shader->selector->so.num_outputs)
		lds_vertex_size = 4 * shader->selector->info.num_outputs + 1;
	if (shader->selector->info.writes_edgeflag)
		lds_vertex_size = MAX2(lds_vertex_size, 1);

	/* LDS size for passing data from GS to ES.
	 * GS stores Primitive IDs into LDS at the address corresponding
	 * to the ES thread of the provoking vertex. All ES threads
	 * load and export PrimitiveID for their thread.
	 */
	if (shader->selector->type == PIPE_SHADER_VERTEX &&
	    shader->key.mono.u.vs_export_prim_id)
		lds_vertex_size = MAX2(lds_vertex_size, 1);

	return lds_vertex_size;
}

/**
 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
 * for the vertex outputs.
 */
static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx,
					LLVMValueRef vtxid)
{
	/* The extra dword is used to avoid LDS bank conflicts. */
	unsigned vertex_size = ngg_nogs_vertex_size(ctx->shader);
	LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, vertex_size);
	LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
	LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
	return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
}

/**
 * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
 */
void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
			     unsigned max_outputs,
			     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_selector *sel = ctx->shader->selector;
	struct si_shader_info *info = &sel->info;
	struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp, tmp2;

	assert(!ctx->shader->is_gs_copy_shader);
	assert(info->num_outputs <= max_outputs);

	LLVMValueRef vertex_ptr = NULL;

	if (sel->so.num_outputs || sel->info.writes_edgeflag)
		vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));

= 0; i
< info
->num_outputs
; i
++) {
599 outputs
[i
].semantic_name
= info
->output_semantic_name
[i
];
600 outputs
[i
].semantic_index
= info
->output_semantic_index
[i
];
602 for (unsigned j
= 0; j
< 4; j
++) {
603 outputs
[i
].vertex_stream
[j
] =
604 (info
->output_streams
[i
] >> (2 * j
)) & 3;
606 /* TODO: we may store more outputs than streamout needs,
607 * but streamout performance isn't that important.
609 if (sel
->so
.num_outputs
) {
610 tmp
= ac_build_gep0(&ctx
->ac
, vertex_ptr
,
611 LLVMConstInt(ctx
->i32
, 4 * i
+ j
, false));
612 tmp2
= LLVMBuildLoad(builder
, addrs
[4 * i
+ j
], "");
613 tmp2
= ac_to_integer(&ctx
->ac
, tmp2
);
614 LLVMBuildStore(builder
, tmp2
, tmp
);
		/* Store the edgeflag at the end (if streamout is enabled) */
		if (info->output_semantic_name[i] == TGSI_SEMANTIC_EDGEFLAG &&
		    sel->info.writes_edgeflag) {
			LLVMValueRef edgeflag = LLVMBuildLoad(builder, addrs[4 * i], "");
			/* The output is a float, but the hw expects a 1-bit integer. */
			edgeflag = LLVMBuildFPToUI(ctx->ac.builder, edgeflag, ctx->i32, "");
			edgeflag = ac_build_umin(&ctx->ac, edgeflag, ctx->i32_1);

			tmp = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
			tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
			LLVMBuildStore(builder, edgeflag, tmp);
		}
	}

	ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

	LLVMValueRef is_gs_thread = si_is_gs_thread(ctx);
	LLVMValueRef is_es_thread = si_is_es_thread(ctx);
	LLVMValueRef vtxindex[] = {
		si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16),
		si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16),
		si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16),
	};

	/* Determine the number of vertices per primitive. */
	unsigned num_vertices;
	LLVMValueRef num_vertices_val = ngg_get_vertices_per_prim(ctx, &num_vertices);

	LLVMValueRef emitted_prims = NULL;

	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = num_vertices_val;
		nggso.prim_enable[0] = is_gs_thread;

		for (unsigned i = 0; i < num_vertices; ++i)
			nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);

		build_streamout(ctx, &nggso);
		emitted_prims = nggso.emit[0];
	}

[3] = {};
664 if (sel
->info
.writes_edgeflag
) {
665 /* Streamout already inserted the barrier, so don't insert it again. */
666 if (!sel
->so
.num_outputs
)
667 ac_build_s_barrier(&ctx
->ac
);
669 ac_build_ifcc(&ctx
->ac
, is_gs_thread
, 5400);
670 /* Load edge flags from ES threads and store them into VGPRs in GS threads. */
671 for (unsigned i
= 0; i
< num_vertices
; i
++) {
672 tmp
= ngg_nogs_vertex_ptr(ctx
, vtxindex
[i
]);
673 tmp2
= LLVMConstInt(ctx
->i32
, ngg_nogs_vertex_size(ctx
->shader
) - 1, 0);
674 tmp
= ac_build_gep0(&ctx
->ac
, tmp
, tmp2
);
675 tmp
= LLVMBuildLoad(builder
, tmp
, "");
676 tmp
= LLVMBuildTrunc(builder
, tmp
, ctx
->i1
, "");
678 user_edgeflags
[i
] = ac_build_alloca_undef(&ctx
->ac
, ctx
->i1
, "");
679 LLVMBuildStore(builder
, tmp
, user_edgeflags
[i
]);
681 ac_build_endif(&ctx
->ac
, 5400);
	/* Copy Primitive IDs from GS threads to the LDS address corresponding
	 * to the ES thread of the provoking vertex.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX &&
	    ctx->shader->key.mono.u.vs_export_prim_id) {
		/* Streamout and edge flags use LDS. Make it idle, so that we can reuse it. */
		if (sel->so.num_outputs || sel->info.writes_edgeflag)
			ac_build_s_barrier(&ctx->ac);

		ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
		/* Extract the PROVOKING_VTX_INDEX field. */
		LLVMValueRef provoking_vtx_in_prim =
			si_unpack_param(ctx, ctx->vs_state_bits, 4, 2);

		/* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
		LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
		LLVMValueRef provoking_vtx_index =
			LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");
		LLVMValueRef vertex_ptr = ngg_nogs_vertex_ptr(ctx, provoking_vtx_index);

		LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.gs_prim_id),
			       ac_build_gep0(&ctx->ac, vertex_ptr, ctx->i32_0));
		ac_build_endif(&ctx->ac, 5400);
	}

	/* Update query buffer */
	if (ctx->screen->use_ngg_streamout &&
	    !info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
		tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
		tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
		ac_build_ifcc(&ctx->ac, tmp, 5029); /* if (STREAMOUT_QUERY_ENABLED) */
		tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5030);
		tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
				    sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5031);
		{
			LLVMValueRef args[] = {
				ngg_get_prim_cnt(ctx),
				ngg_get_query_buf(ctx),
				LLVMConstInt(ctx->i32, 16, false), /* offset of stream[0].generated_primitives */
				ctx->i32_0, /* soffset */
				ctx->i32_0, /* cachepolicy */
			};

			if (sel->so.num_outputs) {
				args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->i32_1);
				args[2] = ac_build_writelane(&ctx->ac, args[2],
							     LLVMConstInt(ctx->i32, 24, false), ctx->i32_1);
			}

			/* TODO: should this be 64-bit atomics? */
			ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
					   ctx->i32, args, 5, 0);
		}
		ac_build_endif(&ctx->ac, 5031);
		ac_build_endif(&ctx->ac, 5030);
		ac_build_endif(&ctx->ac, 5029);
	}

	/* Build the primitive export. */
	if (!gfx10_ngg_export_prim_early(ctx->shader))
		gfx10_ngg_build_export_prim(ctx, user_edgeflags);

	/* Export per-vertex data (positions and parameters). */
	ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
	{
		unsigned i;

		/* Unconditionally (re-)load the values for proper SSA form. */
		for (i = 0; i < info->num_outputs; i++) {
			for (unsigned j = 0; j < 4; j++) {
				outputs[i].values[j] =
					LLVMBuildLoad(builder,
						      addrs[4 * i + j],
						      "");
			}
		}

		if (ctx->shader->key.mono.u.vs_export_prim_id) {
			outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
			outputs[i].semantic_index = 0;

			if (ctx->type == PIPE_SHADER_VERTEX) {
				/* Wait for GS stores to finish. */
				ac_build_s_barrier(&ctx->ac);

				tmp = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
				tmp = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
				outputs[i].values[0] = LLVMBuildLoad(builder, tmp, "");
			} else {
				assert(ctx->type == PIPE_SHADER_TESS_EVAL);
				outputs[i].values[0] = si_get_primitive_id(ctx, 0);
			}

			outputs[i].values[0] = ac_to_float(&ctx->ac, outputs[i].values[0]);
			for (unsigned j = 1; j < 4; j++)
				outputs[i].values[j] = LLVMGetUndef(ctx->f32);

			memset(outputs[i].vertex_stream, 0,
			       sizeof(outputs[i].vertex_stream));
			i++;
		}

		si_llvm_export_vs(ctx, outputs, i);
	}
	ac_build_endif(&ctx->ac, 6002);
}

static LLVMValueRef
ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct si_shader_info *info = &sel->info;

	LLVMTypeRef elements[2] = {
		LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
		LLVMArrayType(ctx->ac.i8, 4),
	};
	LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
	type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
	return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}

/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
static LLVMValueRef
ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

	/* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
	unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
	if (write_stride_2exp) {
		LLVMValueRef row =
			LLVMBuildLShr(builder, vertexidx,
				      LLVMConstInt(ctx->ac.i32, 5, false), "");
		LLVMValueRef swizzle =
			LLVMBuildAnd(builder, row,
				     LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
						  false), "");
		vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
	}

	return ac_build_gep0(&ctx->ac, storage, vertexidx);
}

static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
		       LLVMValueRef emitidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
	tmp = LLVMBuildMul(builder, tmp, gsthread, "");
	const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
	return ngg_gs_vertex_ptr(ctx, vertexidx);
}

static LLVMValueRef
ngg_gs_get_emit_output_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
			   unsigned out_idx)
{
	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_0, /* first struct entry */
		LLVMConstInt(ctx->ac.i32, out_idx, false),
	};
	return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}

static LLVMValueRef
ngg_gs_get_emit_primflag_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
			     unsigned stream)
{
	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_1, /* second struct entry */
		LLVMConstInt(ctx->ac.i32, stream, false),
	};
	return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}

void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
			      unsigned stream,
			      LLVMValueRef *addrs)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct si_shader_info *info = &sel->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	const LLVMValueRef vertexidx =
		LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 */
	const LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
			      LLVMConstInt(ctx->i32, sel->gs_max_out_vertices, false), "");

	tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
	tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
	LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

	ac_build_ifcc(&ctx->ac, can_emit, 9001);

	const LLVMValueRef vertexptr =
		ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
	unsigned out_idx = 0;
	for (unsigned i = 0; i < info->num_outputs; i++) {
		for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
			out_val = ac_to_integer(&ctx->ac, out_val);
			LLVMBuildStore(builder, out_val,
				       ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx));
		}
	}
	assert(out_idx * 4 == sel->gsvs_vertex_size);

	/* Determine and store whether this vertex completed a primitive. */
	const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

	tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
	const LLVMValueRef iscompleteprim =
		LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

	/* Since the geometry shader emits triangle strips, we need to
	 * track which primitive is odd and swap vertex indices to get
	 * the correct vertex order.
	 */
	LLVMValueRef is_odd = ctx->i1false;
	if (stream == 0 && u_vertices_per_prim(sel->gs_output_prim) == 3) {
		tmp = LLVMBuildAnd(builder, curverts, ctx->i32_1, "");
		is_odd = LLVMBuildICmp(builder, LLVMIntEQ, tmp, ctx->i32_1, "");
	}

	tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
	LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

	/* The per-vertex primitive flag encoding:
	 *   bit 0: whether this vertex finishes a primitive
	 *   bit 1: whether the primitive is odd (if we are emitting triangle strips)
	 */
	tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
	tmp = LLVMBuildOr(builder, tmp,
			  LLVMBuildShl(builder,
				       LLVMBuildZExt(builder, is_odd, ctx->ac.i8, ""),
				       ctx->ac.i8_1, ""), "");
	LLVMBuildStore(builder, tmp, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream));

	tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
	tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
	LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);

	ac_build_endif(&ctx->ac, 9001);
}

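/* Worked example for the primitive flag byte written above: a stream-0
 * triangle strip that emits vertices v0, v1, v2, v3 stores 0x0 for v0 and v1,
 * 0x1 for v2 (completes an even primitive) and 0x3 for v3 (completes an odd
 * one, so the epilogue later swaps indices to preserve the winding order).
 */
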
void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
{
	/* Zero out the part of LDS scratch that is used to accumulate the
	 * per-stream generated primitive count.
	 */
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->i32, 4, false), "");
	ac_build_ifcc(&ctx->ac, tmp, 5090);
	{
		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
		LLVMBuildStore(builder, ctx->i32_0, ptr);
	}
	ac_build_endif(&ctx->ac, 5090);

	ac_build_s_barrier(&ctx->ac);
}

void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct si_shader_info *info = &sel->info;
	const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
	LLVMValueRef tmp, tmp2;

	/* Zero out remaining (non-emitted) primitive flags.
	 *
	 * Note: Alternatively, we could pass the relevant gs_next_vertex to
	 *       the emit threads via LDS. This is likely worse in the expected
	 *       typical case where each GS thread emits the full set of
	 *       vertices.
	 */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

		ac_build_bgnloop(&ctx->ac, 5100);

		const LLVMValueRef vertexidx =
			LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
		tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
				    LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5101);
		ac_build_break(&ctx->ac);
		ac_build_endif(&ctx->ac, 5101);

		tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
		LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

		tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
		LLVMBuildStore(builder, i8_0, ngg_gs_get_emit_primflag_ptr(ctx, tmp, stream));

		ac_build_endloop(&ctx->ac, 5100);
	}

	/* Accumulate generated primitives counts across the entire threadgroup. */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		LLVMValueRef numprims =
			LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
		numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);

		tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5105);
		{
			LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
					   ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
							 LLVMConstInt(ctx->i32, stream, false)),
					   numprims, LLVMAtomicOrderingMonotonic, false);
		}
		ac_build_endif(&ctx->ac, 5105);
	}

	ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

	ac_build_s_barrier(&ctx->ac);

	const LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);

	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = LLVMConstInt(ctx->i32, verts_per_prim, false);

		LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream), "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
			nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
		}

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			tmp = LLVMBuildSub(builder, tid,
					   LLVMConstInt(ctx->i32, verts_per_prim - i - 1, false), "");
			tmp = ngg_gs_vertex_ptr(ctx, tmp);
			nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
		}

		build_streamout(ctx, &nggso);
	}

	/* Write shader query data. */
	if (ctx->screen->use_ngg_streamout) {
		tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
		tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
		ac_build_ifcc(&ctx->ac, tmp, 5109); /* if (STREAMOUT_QUERY_ENABLED) */
		unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
		tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
				    LLVMConstInt(ctx->i32, num_query_comps, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5110);
		{
			LLVMValueRef offset;
			tmp = tid;
			if (sel->so.num_outputs)
				tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->i32, 3, false), "");
			offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 32, false), "");
			if (sel->so.num_outputs) {
				tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->i32, 2, false), "");
				tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 8, false), "");
				offset = LLVMBuildAdd(builder, offset, tmp, "");
			}

			tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
			LLVMValueRef args[] = {
				tmp,
				ngg_get_query_buf(ctx),
				offset,
				LLVMConstInt(ctx->i32, 16, false), /* soffset */
				ctx->i32_0, /* cachepolicy */
			};
			ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
					   ctx->i32, args, 5, 0);
		}
		ac_build_endif(&ctx->ac, 5110);
		ac_build_endif(&ctx->ac, 5109);
	}

	/* Determine vertex liveness. */
	LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5120);
	{
		for (unsigned i = 0; i < verts_per_prim; ++i) {
			const LLVMValueRef primidx =
				LLVMBuildAdd(builder, tid,
					     LLVMConstInt(ctx->ac.i32, i, false), "");

			if (i > 0) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
				ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
			}

			/* Load primitive liveness */
			tmp = ngg_gs_vertex_ptr(ctx, primidx);
			tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
			const LLVMValueRef primlive =
				LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

			tmp = LLVMBuildLoad(builder, vertliveptr, "");
			tmp = LLVMBuildOr(builder, tmp, primlive, ""),
			LLVMBuildStore(builder, tmp, vertliveptr);

			if (i > 0)
				ac_build_endif(&ctx->ac, 5121 + i);
		}
	}
	ac_build_endif(&ctx->ac, 5120);

	/* Inclusive scan addition across the current wave. */
	LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
	struct ac_wg_scan vertlive_scan = {};
	vertlive_scan.op = nir_op_iadd;
	vertlive_scan.enable_reduce = true;
	vertlive_scan.enable_exclusive = true;
	vertlive_scan.src = vertlive;
	vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->i32_0);
	vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
	vertlive_scan.numwaves = get_tgsize(ctx);
	vertlive_scan.maxwaves = 8;

	ac_build_wg_scan(&ctx->ac, &vertlive_scan);

	/* Skip all exports (including index exports) when possible. At least on
	 * early gfx10 revisions this is also to avoid hangs.
	 */
	LLVMValueRef have_exports =
		LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
	num_emit_threads =
		LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

	/* Allocate export space. Send this message as early as possible, to
	 * hide the latency of the SQ <-> SPI roundtrip.
	 *
	 * Note: We could consider compacting primitives for export as well.
	 *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
	 *       prim data per clock and skips null primitives at no additional
	 *       cost. So compacting primitives can only be beneficial when
	 *       there are 4 or more contiguous null primitives in the export
	 *       (in the common case of single-dword prim exports).
	 */
	ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
				      vertlive_scan.result_reduce, num_emit_threads);

	/* Setup the reverse vertex compaction permutation. We re-use stream 1
	 * of the primitive liveness flags, relying on the fact that each
	 * threadgroup can have at most 256 threads. */
	ac_build_ifcc(&ctx->ac, vertlive, 5130);
	{
		tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
		tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
		LLVMBuildStore(builder, tmp2, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1));
	}
	ac_build_endif(&ctx->ac, 5130);

	ac_build_s_barrier(&ctx->ac);

	/* Export primitive data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5140);
	{
		LLVMValueRef flags;
		struct ac_ngg_prim prim = {};
		prim.num_vertices = verts_per_prim;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		flags = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
		prim.isnull = LLVMBuildNot(builder, LLVMBuildTrunc(builder, flags, ctx->i1, ""), "");

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
						     LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
			prim.edgeflag[i] = ctx->ac.i1false;
		}

		/* Geometry shaders output triangle strips, but NGG expects triangles.
		 * We need to change the vertex order for odd triangles to get correct
		 * front/back facing by swapping 2 vertex indices, but we also have to
		 * keep the provoking vertex in the same place.
		 *
		 * If the first vertex is provoking, swap index 1 and 2.
		 * If the last vertex is provoking, swap index 0 and 1.
		 */
		if (verts_per_prim == 3) {
			LLVMValueRef is_odd = LLVMBuildLShr(builder, flags, ctx->ac.i8_1, "");
			is_odd = LLVMBuildTrunc(builder, is_odd, ctx->i1, "");
			LLVMValueRef flatshade_first =
				LLVMBuildICmp(builder, LLVMIntEQ,
					      si_unpack_param(ctx, ctx->vs_state_bits, 4, 2),
					      ctx->i32_0, "");

			struct ac_ngg_prim in = prim;
			prim.index[0] = LLVMBuildSelect(builder, flatshade_first,
							in.index[0],
							LLVMBuildSelect(builder, is_odd,
									in.index[1], in.index[0], ""), "");
			prim.index[1] = LLVMBuildSelect(builder, flatshade_first,
							LLVMBuildSelect(builder, is_odd,
									in.index[2], in.index[1], ""),
							LLVMBuildSelect(builder, is_odd,
									in.index[0], in.index[1], ""), "");
			prim.index[2] = LLVMBuildSelect(builder, flatshade_first,
							LLVMBuildSelect(builder, is_odd,
									in.index[1], in.index[2], ""),
							in.index[2], "");
		}

		ac_build_export_prim(&ctx->ac, &prim);
	}
	ac_build_endif(&ctx->ac, 5140);

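	/* The position/parameter export below undoes the compaction set up
	 * above: each surviving thread reads the thread id that was stored in
	 * its stream-1 primflag byte and uses it to locate the original,
	 * uncompacted vertex in LDS.
	 */
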
	/* Export position and parameter data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
	ac_build_ifcc(&ctx->ac, tmp, 5145);
	{
		struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1), "");
		tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
		const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

		unsigned out_idx = 0;
		for (unsigned i = 0; i < info->num_outputs; i++) {
			outputs[i].semantic_name = info->output_semantic_name[i];
			outputs[i].semantic_index = info->output_semantic_index[i];

			for (unsigned j = 0; j < 4; j++, out_idx++) {
				tmp = ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx);
				tmp = LLVMBuildLoad(builder, tmp, "");
				outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
				outputs[i].vertex_stream[j] =
					(info->output_streams[i] >> (2 * j)) & 3;
			}
		}

		si_llvm_export_vs(ctx, outputs, info->num_outputs);
	}
	ac_build_endif(&ctx->ac, 5145);
}

static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
				     unsigned min_verts_per_prim, bool use_adjacency)
{
	unsigned max_reuse = max_esverts - min_verts_per_prim;
	if (use_adjacency)
		max_reuse /= 2;
	*max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}

/**
 * Determine subgroup information like maximum number of vertices and prims.
 *
 * This happens before the shader is uploaded, since LDS relocations during
 * upload depend on the subgroup size.
 */
void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
{
	const struct si_shader_selector *gs_sel = shader->selector;
	const struct si_shader_selector *es_sel =
		shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
	const enum pipe_shader_type gs_type = gs_sel->type;
	const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
	const unsigned input_prim = si_get_input_prim(gs_sel);
	const bool use_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
				   input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
	const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
	const unsigned min_verts_per_prim =
		gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space.
	 *
	 * TODO: We should really take the shader's internal LDS use into
	 *       account. The linker will fail if the size is greater than
	 *       what it can handle.
	 */
	const unsigned max_lds_size = 8 * 1024 - 768;
	const unsigned target_lds_size = max_lds_size;
	unsigned esvert_lds_size = 0;
	unsigned gsprim_lds_size = 0;

	/* All these are per subgroup: */
	bool max_vert_out_per_gs_instance = false;
	unsigned max_esverts_base = 128;
	unsigned max_gsprims_base = 128; /* default prim group size clamp */

	/* Hardware has the following non-natural restrictions on the value
	 * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of the draw:
	 *  - at most 252 for any line input primitive type
	 *  - at most 251 for any quad input primitive type
	 *  - at most 251 for triangle strips with adjacency (this happens to
	 *    be the natural limit for triangle *lists* with adjacency)
	 */
	max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

	if (gs_type == PIPE_SHADER_GEOMETRY) {
		unsigned max_out_verts_per_gsprim =
			gs_sel->gs_max_out_vertices * gs_num_invocations;

		if (max_out_verts_per_gsprim <= 256) {
			if (max_out_verts_per_gsprim) {
				max_gsprims_base = MIN2(max_gsprims_base,
							256 / max_out_verts_per_gsprim);
			}
		} else {
			/* Use special multi-cycling mode in which each GS
			 * instance gets its own subgroup. Does not work with
			 * tessellation. */
			max_vert_out_per_gs_instance = true;
			max_gsprims_base = 1;
			max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
		}

		esvert_lds_size = es_sel->esgs_itemsize / 4;
		gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
	} else {
		/* LDS size for passing data from ES to GS. */
		esvert_lds_size = ngg_nogs_vertex_size(shader);
	}

	unsigned max_gsprims = max_gsprims_base;
	unsigned max_esverts = max_esverts_base;

	if (esvert_lds_size)
		max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
	if (gsprim_lds_size)
		max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

	max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
	clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
	assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

	if (esvert_lds_size || gsprim_lds_size) {
		/* Now that we have a rough proportionality between esverts
		 * and gsprims based on the primitive type, scale both of them
		 * down simultaneously based on required LDS space.
		 *
		 * We could be smarter about this if we knew how much vertex
		 * reuse to expect.
		 */
		unsigned lds_total = max_esverts * esvert_lds_size +
				     max_gsprims * gsprim_lds_size;
		if (lds_total > target_lds_size) {
			max_esverts = max_esverts * target_lds_size / lds_total;
			max_gsprims = max_gsprims * target_lds_size / lds_total;

			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		}
	}

	/* Round up towards full wave sizes for better ALU utilization. */
	if (!max_vert_out_per_gs_instance) {
		const unsigned wavesize = gs_sel->screen->ge_wave_size;
		unsigned orig_max_esverts;
		unsigned orig_max_gsprims;
		do {
			orig_max_esverts = max_esverts;
			orig_max_gsprims = max_gsprims;

			max_esverts = align(max_esverts, wavesize);
			max_esverts = MIN2(max_esverts, max_esverts_base);
			if (esvert_lds_size)
				max_esverts = MIN2(max_esverts,
						   (max_lds_size - max_gsprims * gsprim_lds_size) /
						   esvert_lds_size);
			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

			max_gsprims = align(max_gsprims, wavesize);
			max_gsprims = MIN2(max_gsprims, max_gsprims_base);
			if (gsprim_lds_size)
				max_gsprims = MIN2(max_gsprims,
						   (max_lds_size - max_esverts * esvert_lds_size) /
						   gsprim_lds_size);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		} while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
	}

	/* Hardware restriction: minimum value of max_esverts */
	max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);

	unsigned max_out_vertices =
		max_vert_out_per_gs_instance ? gs_sel->gs_max_out_vertices :
		gs_type == PIPE_SHADER_GEOMETRY ?
		max_gsprims * gs_num_invocations * gs_sel->gs_max_out_vertices :
		max_esverts;
	assert(max_out_vertices <= 256);

	unsigned prim_amp_factor = 1;
	if (gs_type == PIPE_SHADER_GEOMETRY) {
		/* Number of output primitives per GS input primitive after
		 * GS instancing. */
		prim_amp_factor = gs_sel->gs_max_out_vertices;
	}

	/* The GE only checks against the maximum number of ES verts after
	 * allocating a full GS primitive. So we need to ensure that whenever
	 * this check passes, there is enough space for a full primitive without
	 * vertex reuse.
	 */
	shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
	shader->ngg.max_gsprims = max_gsprims;
	shader->ngg.max_out_verts = max_out_vertices;
	shader->ngg.prim_amp_factor = prim_amp_factor;
	shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;

	shader->gs_info.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
	shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;

	assert(shader->ngg.hw_max_esverts >= 24); /* HW limitation */
}