/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_shader_internal.h"

#include "util/u_memory.h"
#include "util/u_prim.h"
static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
}

static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->merged_wave_info, 28, 4);
}
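/* Note (assumption based on the two unpack calls above): merged_wave_info
 * packs the index of this wave within the threadgroup in bits [24:27] and
 * the number of waves in the threadgroup in bits [28:31]. */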
static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
			   LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
	return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}
static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->gs_tg_info, 12, 9);
}

static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->gs_tg_info, 22, 9);
}

static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->gs_tg_info, 0, 12);
}
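/* Layout of gs_tg_info as implied by the unpack calls above (assumption):
 * bits [0:11] hold the ordered id used for streamout ordering, bits [12:20]
 * the subgroup vertex count, and bits [22:30] the subgroup primitive count. */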
static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
{
	LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);

	return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
				     LLVMConstInt(ctx->i32, GFX10_GS_QUERY_BUF, false));
}
static LLVMValueRef ngg_get_initial_edgeflag(struct si_shader_context *ctx, unsigned index)
{
	if (ctx->type == PIPE_SHADER_VERTEX) {
		LLVMValueRef tmp;
		tmp = LLVMBuildLShr(ctx->ac.builder,
				    ac_get_arg(&ctx->ac, ctx->args.gs_invocation_id),
				    LLVMConstInt(ctx->ac.i32, 8 + index, false), "");
		return LLVMBuildTrunc(ctx->ac.builder, tmp, ctx->ac.i1, "");
	}
	return ctx->i1false;
}
/**
 * Return the number of vertices as a constant in \p num_vertices,
 * and return a more precise value as LLVMValueRef from the function.
 */
static LLVMValueRef ngg_get_vertices_per_prim(struct si_shader_context *ctx,
					      unsigned *num_vertices)
{
	const struct si_shader_info *info = &ctx->shader->selector->info;

	if (ctx->type == PIPE_SHADER_VERTEX) {
		if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
			/* Blits always use axis-aligned rectangles with 3 vertices. */
			*num_vertices = 3;
			return LLVMConstInt(ctx->i32, 3, 0);
		} else {
			/* We always build up all three indices for the prim export
			 * independent of the primitive type. The additional garbage
			 * data shouldn't hurt. This number doesn't matter with
			 * NGG passthrough.
			 */
			*num_vertices = 3;

			/* Extract OUTPRIM field. */
			LLVMValueRef num = si_unpack_param(ctx, ctx->vs_state_bits, 2, 2);
			return LLVMBuildAdd(ctx->ac.builder, num, ctx->i32_1, "");
		}
	} else {
		assert(ctx->type == PIPE_SHADER_TESS_EVAL);

		if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
			*num_vertices = 1;
		else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
			*num_vertices = 2;
		else
			*num_vertices = 3;

		return LLVMConstInt(ctx->i32, *num_vertices, false);
	}
}
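/* Note on the OUTPRIM field above (assumption): vs_state_bits[2:3] encodes
 * the output primitive type such that field + 1 equals the vertex count
 * (0 -> points, 1 -> lines, 2 -> triangles). */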
static void build_streamout_vertex(struct si_shader_context *ctx,
				   LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
				   unsigned stream, LLVMValueRef offset_vtx,
				   LLVMValueRef vertexptr)
{
	struct si_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef offset[4] = {};
	LLVMValueRef tmp;

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (!wg_offset_dw[buffer])
			continue;

		tmp = LLVMBuildMul(builder, offset_vtx,
				   LLVMConstInt(ctx->i32, so->stride[buffer], false), "");
		tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
		offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->i32, 2, false), "");
	}

	for (unsigned i = 0; i < so->num_outputs; ++i) {
		if (so->output[i].stream != stream)
			continue;

		unsigned reg = so->output[i].register_index;
		struct si_shader_output_values out;
		out.semantic_name = info->output_semantic_name[reg];
		out.semantic_index = info->output_semantic_index[reg];

		for (unsigned comp = 0; comp < 4; comp++) {
			tmp = ac_build_gep0(&ctx->ac, vertexptr,
					    LLVMConstInt(ctx->i32, 4 * reg + comp, false));
			out.values[comp] = LLVMBuildLoad(builder, tmp, "");
			out.vertex_stream[comp] =
				(info->output_streams[reg] >> (2 * comp)) & 3;
		}

		si_emit_streamout_output(ctx, so_buffer, offset, &so->output[i], &out);
	}
}
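/* The byte offset computed above is (wg_offset_dw[buffer] +
 * offset_vtx * stride[buffer]) << 2, i.e. a dword offset converted to bytes;
 * e.g. a vertex at dword offset 10 lands at byte offset 40. */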
struct ngg_streamout {
	LLVMValueRef num_vertices;

	/* per-thread data */
	LLVMValueRef prim_enable[4]; /* i1 per stream */
	LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */

	/* Output */
	LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
};

/**
 * Build streamout logic.
 *
 * Implies a barrier.
 *
 * Writes number of emitted primitives to gs_ngg_scratch[4:8].
 *
 * Clobbers gs_ngg_scratch[8:].
 */
static void build_streamout(struct si_shader_context *ctx,
			    struct ngg_streamout *nggso)
{
	struct si_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp, tmp2;
	LLVMValueRef i32_2 = LLVMConstInt(ctx->i32, 2, false);
	LLVMValueRef i32_4 = LLVMConstInt(ctx->i32, 4, false);
	LLVMValueRef i32_8 = LLVMConstInt(ctx->i32, 8, false);
	LLVMValueRef so_buffer[4] = {};
	unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
				    (nggso->vertices[2] ? 1 : 0);
	LLVMValueRef prim_stride_dw[4] = {};
	LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->i32);
	int stream_for_buffer[4] = { -1, -1, -1, -1 };
	unsigned bufmask_for_stream[4] = {};
	bool isgs = ctx->type == PIPE_SHADER_GEOMETRY;
	unsigned scratch_emit_base = isgs ? 4 : 0;
	LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->i32_0;
	unsigned scratch_offset_base = isgs ? 8 : 4;
	LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;

	ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);
	/* Determine the mapping of streamout buffers to vertex streams. */
	for (unsigned i = 0; i < so->num_outputs; ++i) {
		unsigned buf = so->output[i].output_buffer;
		unsigned stream = so->output[i].stream;
		assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
		stream_for_buffer[buf] = stream;
		bufmask_for_stream[stream] |= 1 << buf;
	}

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (stream_for_buffer[buffer] == -1)
			continue;

		assert(so->stride[buffer]);

		tmp = LLVMConstInt(ctx->i32, so->stride[buffer], false);
		prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
		prim_stride_dw_vgpr = ac_build_writelane(
			&ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
			LLVMConstInt(ctx->i32, buffer, false));

		so_buffer[buffer] = ac_build_load_to_sgpr(
			&ctx->ac, buf_ptr,
			LLVMConstInt(ctx->i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
	}
	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5200);
	{
		LLVMTypeRef gdsptr = LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GDS);
		LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->i32_0, gdsptr, "");

		/* Advance the streamout offsets in GDS. */
		LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
		LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");

		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5210);
		{
			if (isgs) {
				tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
				tmp = LLVMBuildLoad(builder, tmp, "");
			} else {
				tmp = ac_build_writelane(&ctx->ac, ctx->i32_0,
						ngg_get_prim_cnt(ctx), ctx->i32_0);
			}
			LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);

			unsigned swizzle[4];
			int unused_stream = -1;
			for (unsigned stream = 0; stream < 4; ++stream) {
				if (!info->num_stream_output_components[stream]) {
					unused_stream = stream;
					break;
				}
			}
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] >= 0) {
					swizzle[buffer] = stream_for_buffer[buffer];
				} else {
					assert(unused_stream >= 0);
					swizzle[buffer] = unused_stream;
				}
			}

			tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
				swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
			tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");

			LLVMValueRef args[] = {
				LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
				tmp,
				ctx->i32_0, // ordering
				ctx->i32_0, // scope
				ctx->ac.i1false, // isVolatile
				LLVMConstInt(ctx->i32, 4 << 24, false), // OA index
				ctx->ac.i1true, // wave release
				ctx->ac.i1true, // wave done
			};
			tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
						 ctx->i32, args, ARRAY_SIZE(args), 0);

			/* Keep offsets in a VGPR for quick retrieval via readlane by
			 * the first wave for bounds checking, and also store in LDS
			 * for retrieval by all waves later. */
			LLVMBuildStore(builder, tmp, offsets_vgpr);

			tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					    scratch_offset_basev, "");
			tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
			LLVMBuildStore(builder, tmp, tmp2);
		}
		ac_build_endif(&ctx->ac, 5210);
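		/* Note on the block above (assumption): llvm.amdgcn.ds.ordered.add
		 * makes wave 0 of each threadgroup add its primitive counts to the
		 * streamout offsets in GDS in submission order (keyed by the ordered
		 * id), so buffer offsets stay consistent across threadgroups that
		 * execute concurrently. */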
		/* Determine the max emit per buffer. This is done via the SALU, in part
		 * because LLVM can't generate divide-by-multiply if we try to do this
		 * via VALU with one lane per buffer.
		 */
		LLVMValueRef max_emit[4] = {};
		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] == -1)
				continue;

			LLVMValueRef bufsize_dw =
				LLVMBuildLShr(builder,
					LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
					i32_2, "");

			tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
			LLVMValueRef offset_dw =
				ac_build_readlane(&ctx->ac, tmp,
						LLVMConstInt(ctx->i32, buffer, false));

			tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
			tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");

			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
			max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->i32_0, tmp, "");
		}
		/* Determine the number of emitted primitives per stream and fixup the
		 * GDS counter if necessary.
		 *
		 * This is complicated by the fact that a single stream can emit to
		 * multiple buffers (but luckily not vice versa).
		 */
		LLVMValueRef emit_vgpr = ctx->i32_0;

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
			LLVMValueRef generated =
				ac_build_readlane(&ctx->ac, tmp,
						  LLVMConstInt(ctx->i32, stream, false));

			LLVMValueRef emit = generated;
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] == stream)
					emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
			}

			emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
						       LLVMConstInt(ctx->i32, stream, false));

			/* Fixup the offset using a plain GDS atomic if we overflowed. */
			tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
			ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
			tmp = LLVMBuildLShr(builder,
					    LLVMConstInt(ctx->i32, bufmask_for_stream[stream], false),
					    ac_get_thread_id(&ctx->ac), "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			ac_build_ifcc(&ctx->ac, tmp, 5222);
			{
				tmp = LLVMBuildSub(builder, generated, emit, "");
				tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
				tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
				LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
						   LLVMAtomicOrderingMonotonic, false);
			}
			ac_build_endif(&ctx->ac, 5222);
			ac_build_endif(&ctx->ac, 5221);
		}
		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5225);
		{
			tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					   scratch_emit_basev, "");
			tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
			LLVMBuildStore(builder, emit_vgpr, tmp);
		}
		ac_build_endif(&ctx->ac, 5225);
	}
	ac_build_endif(&ctx->ac, 5200);
	/* Determine the workgroup-relative per-thread / primitive offset into
	 * the streamout buffers */
	struct ac_wg_scan primemit_scan[4] = {};

	if (isgs) {
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			primemit_scan[stream].enable_exclusive = true;
			primemit_scan[stream].op = nir_op_iadd;
			primemit_scan[stream].src = nggso->prim_enable[stream];
			primemit_scan[stream].scratch =
				ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
					LLVMConstInt(ctx->i32, 12 + 8 * stream, false));
			primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
			primemit_scan[stream].numwaves = get_tgsize(ctx);
			primemit_scan[stream].maxwaves = 8;
			ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
		}
	}
	ac_build_s_barrier(&ctx->ac);

	/* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
	LLVMValueRef wgoffset_dw[4] = {};

	{
		LLVMValueRef scratch_vgpr;

		tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
		scratch_vgpr = LLVMBuildLoad(builder, tmp, "");

		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] >= 0) {
				wgoffset_dw[buffer] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_offset_base + buffer, false));
			}
		}

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (info->num_stream_output_components[stream]) {
				nggso->emit[stream] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_emit_base + stream, false));
			}
		}
	}
	/* Write out primitive data */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		if (isgs) {
			ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
		} else {
			primemit_scan[stream].result_exclusive = tid;
		}

		tmp = LLVMBuildICmp(builder, LLVMIntULT,
				    primemit_scan[stream].result_exclusive,
				    nggso->emit[stream], "");
		tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
		ac_build_ifcc(&ctx->ac, tmp, 5240);
		{
			LLVMValueRef offset_vtx =
				LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
					     nggso->num_vertices, "");

			for (unsigned i = 0; i < max_num_vertices; ++i) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT,
						    LLVMConstInt(ctx->i32, i, false),
						    nggso->num_vertices, "");
				ac_build_ifcc(&ctx->ac, tmp, 5241);
				build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
						       stream, offset_vtx, nggso->vertices[i]);
				ac_build_endif(&ctx->ac, 5241);
				offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->i32_1, "");
			}
		}
		ac_build_endif(&ctx->ac, 5240);
	}
}
static unsigned ngg_nogs_vertex_size(struct si_shader *shader)
{
	unsigned lds_vertex_size = 0;

	/* The edgeflag is always stored in the last element that's also
	 * used for padding to reduce LDS bank conflicts. */
	if (shader->selector->so.num_outputs)
		lds_vertex_size = 4 * shader->selector->info.num_outputs + 1;
	if (shader->selector->info.writes_edgeflag)
		lds_vertex_size = MAX2(lds_vertex_size, 1);

	return lds_vertex_size;
}
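/* Example: a shader with 5 outputs that needs streamout uses 4 * 5 + 1 = 21
 * dwords per vertex; the odd stride helps avoid LDS bank conflicts. */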
/**
 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
 * for the vertex outputs.
 */
static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx,
					LLVMValueRef vtxid)
{
	/* The extra dword is used to avoid LDS bank conflicts. */
	unsigned vertex_size = ngg_nogs_vertex_size(ctx->shader);
	LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, vertex_size);
	LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
	LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
	return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
}
/**
 * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
 */
void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
			     unsigned max_outputs,
			     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_selector *sel = ctx->shader->selector;
	struct si_shader_info *info = &sel->info;
	struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp, tmp2;

	assert(!ctx->shader->is_gs_copy_shader);
	assert(info->num_outputs <= max_outputs);

	LLVMValueRef vertex_ptr = NULL;

	if (sel->so.num_outputs || sel->info.writes_edgeflag)
		vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
= 0; i
< info
->num_outputs
; i
++) {
530 outputs
[i
].semantic_name
= info
->output_semantic_name
[i
];
531 outputs
[i
].semantic_index
= info
->output_semantic_index
[i
];
533 for (unsigned j
= 0; j
< 4; j
++) {
534 outputs
[i
].vertex_stream
[j
] =
535 (info
->output_streams
[i
] >> (2 * j
)) & 3;
537 /* TODO: we may store more outputs than streamout needs,
538 * but streamout performance isn't that important.
540 if (sel
->so
.num_outputs
) {
541 tmp
= ac_build_gep0(&ctx
->ac
, vertex_ptr
,
542 LLVMConstInt(ctx
->i32
, 4 * i
+ j
, false));
543 tmp2
= LLVMBuildLoad(builder
, addrs
[4 * i
+ j
], "");
544 tmp2
= ac_to_integer(&ctx
->ac
, tmp2
);
545 LLVMBuildStore(builder
, tmp2
, tmp
);
549 /* Store the edgeflag at the end (if streamout is enabled) */
550 if (info
->output_semantic_name
[i
] == TGSI_SEMANTIC_EDGEFLAG
&&
551 sel
->info
.writes_edgeflag
) {
552 LLVMValueRef edgeflag
= LLVMBuildLoad(builder
, addrs
[4 * i
], "");
553 /* The output is a float, but the hw expects a 1-bit integer. */
554 edgeflag
= LLVMBuildFPToUI(ctx
->ac
.builder
, edgeflag
, ctx
->i32
, "");
555 edgeflag
= ac_build_umin(&ctx
->ac
, edgeflag
, ctx
->i32_1
);
557 tmp
= LLVMConstInt(ctx
->i32
, ngg_nogs_vertex_size(ctx
->shader
) - 1, 0);
558 tmp
= ac_build_gep0(&ctx
->ac
, vertex_ptr
, tmp
);
559 LLVMBuildStore(builder
, edgeflag
, tmp
);
	ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

	LLVMValueRef is_gs_thread = si_is_gs_thread(ctx);
	LLVMValueRef is_es_thread = si_is_es_thread(ctx);
	LLVMValueRef vtxindex[] = {
		si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16),
		si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16),
		si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16),
	};
	/* Determine the number of vertices per primitive. */
	unsigned num_vertices;
	LLVMValueRef num_vertices_val = ngg_get_vertices_per_prim(ctx, &num_vertices);

	/* Streamout */
	LLVMValueRef emitted_prims = NULL;

	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = num_vertices_val;
		nggso.prim_enable[0] = is_gs_thread;

		for (unsigned i = 0; i < num_vertices; ++i)
			nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);

		build_streamout(ctx, &nggso);
		emitted_prims = nggso.emit[0];
	}
[3] = {};
595 if (sel
->info
.writes_edgeflag
) {
596 /* Streamout already inserted the barrier, so don't insert it again. */
597 if (!sel
->so
.num_outputs
)
598 ac_build_s_barrier(&ctx
->ac
);
600 ac_build_ifcc(&ctx
->ac
, is_gs_thread
, 5400);
601 /* Load edge flags from ES threads and store them into VGPRs in GS threads. */
602 for (unsigned i
= 0; i
< num_vertices
; i
++) {
603 tmp
= ngg_nogs_vertex_ptr(ctx
, vtxindex
[i
]);
604 tmp2
= LLVMConstInt(ctx
->i32
, ngg_nogs_vertex_size(ctx
->shader
) - 1, 0);
605 tmp
= ac_build_gep0(&ctx
->ac
, tmp
, tmp2
);
606 tmp
= LLVMBuildLoad(builder
, tmp
, "");
607 tmp
= LLVMBuildTrunc(builder
, tmp
, ctx
->i1
, "");
609 user_edgeflags
[i
] = ac_build_alloca_undef(&ctx
->ac
, ctx
->i1
, "");
610 LLVMBuildStore(builder
, tmp
, user_edgeflags
[i
]);
612 ac_build_endif(&ctx
->ac
, 5400);
	/* Copy Primitive IDs from GS threads to the LDS address corresponding
	 * to the ES thread of the provoking vertex.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX &&
	    ctx->shader->key.mono.u.vs_export_prim_id) {
		/* Streamout and edge flags use LDS. Make it idle, so that we can reuse it. */
		if (sel->so.num_outputs || sel->info.writes_edgeflag)
			ac_build_s_barrier(&ctx->ac);

		ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
		/* Extract the PROVOKING_VTX_INDEX field. */
		LLVMValueRef provoking_vtx_in_prim =
			si_unpack_param(ctx, ctx->vs_state_bits, 4, 2);

		/* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
		LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
		LLVMValueRef provoking_vtx_index =
			LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");

		LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.gs_prim_id),
			       ac_build_gep0(&ctx->ac, ctx->esgs_ring, provoking_vtx_index));
		ac_build_endif(&ctx->ac, 5400);
	}
	ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
				      ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));
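	/* Note (assumption): the GS_ALLOC_REQ message tells the fixed-function
	 * geometry engine how many vertex and primitive slots this subgroup
	 * needs, so export space is reserved before any exports happen. */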
	/* Update query buffer */
	/* TODO: this won't catch 96-bit clear_buffer via transform feedback. */
	if (ctx->screen->use_ngg_streamout &&
	    !info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
		tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
		tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
		ac_build_ifcc(&ctx->ac, tmp, 5029); /* if (STREAMOUT_QUERY_ENABLED) */
		tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5030);
		tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
				    sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5031);
		{
			LLVMValueRef args[] = {
				ngg_get_prim_cnt(ctx),
				ngg_get_query_buf(ctx),
				LLVMConstInt(ctx->i32, 16, false), /* offset of stream[0].generated_primitives */
				ctx->i32_0, /* soffset */
				ctx->i32_0, /* cachepolicy */
			};

			if (sel->so.num_outputs) {
				args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->i32_1);
				args[2] = ac_build_writelane(&ctx->ac, args[2],
						LLVMConstInt(ctx->i32, 24, false), ctx->i32_1);
			}

			/* TODO: should this be 64-bit atomics? */
			ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
					   ctx->i32, args, 5, 0);
		}
		ac_build_endif(&ctx->ac, 5031);
		ac_build_endif(&ctx->ac, 5030);
		ac_build_endif(&ctx->ac, 5029);
	}
	/* Build the primitive export.
	 *
	 * For the first version, we will always build up all three indices
	 * independent of the primitive type. The additional garbage data
	 * shouldn't hurt.
	 *
	 * TODO: culling depends on the primitive type, so can have some
	 * interaction here.
	 */
	ac_build_ifcc(&ctx->ac, is_gs_thread, 6001);
	{
		struct ac_ngg_prim prim = {};

		if (gfx10_is_ngg_passthrough(ctx->shader)) {
			prim.passthrough = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset);
		} else {
			prim.num_vertices = num_vertices;
			prim.isnull = ctx->ac.i1false;
			memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

			for (unsigned i = 0; i < num_vertices; ++i) {
				prim.edgeflag[i] = ngg_get_initial_edgeflag(ctx, i);

				if (sel->info.writes_edgeflag) {
					tmp2 = LLVMBuildLoad(builder, user_edgeflags[i], "");
					prim.edgeflag[i] = LLVMBuildAnd(builder, prim.edgeflag[i],
									tmp2, "");
				}
			}
		}

		ac_build_export_prim(&ctx->ac, &prim);
	}
	ac_build_endif(&ctx->ac, 6001);
	/* Export per-vertex data (positions and parameters). */
	ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
	{
		unsigned i;

		/* Unconditionally (re-)load the values for proper SSA form. */
		for (i = 0; i < info->num_outputs; i++) {
			for (unsigned j = 0; j < 4; j++) {
				outputs[i].values[j] =
					LLVMBuildLoad(builder,
						      addrs[4 * i + j],
						      "");
			}
		}

		if (ctx->shader->key.mono.u.vs_export_prim_id) {
			outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
			outputs[i].semantic_index = 0;

			if (ctx->type == PIPE_SHADER_VERTEX) {
				/* Wait for GS stores to finish. */
				ac_build_s_barrier(&ctx->ac);

				tmp = ac_build_gep0(&ctx->ac, ctx->esgs_ring,
						    get_thread_id_in_tg(ctx));
				outputs[i].values[0] = LLVMBuildLoad(builder, tmp, "");
			} else {
				assert(ctx->type == PIPE_SHADER_TESS_EVAL);
				outputs[i].values[0] = si_get_primitive_id(ctx, 0);
			}

			outputs[i].values[0] = ac_to_float(&ctx->ac, outputs[i].values[0]);
			for (unsigned j = 1; j < 4; j++)
				outputs[i].values[j] = LLVMGetUndef(ctx->f32);

			memset(outputs[i].vertex_stream, 0,
			       sizeof(outputs[i].vertex_stream));
			i++;
		}

		si_llvm_export_vs(ctx, outputs, i);
	}
	ac_build_endif(&ctx->ac, 6002);
}
static LLVMValueRef
ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct si_shader_info *info = &sel->info;

	LLVMTypeRef elements[2] = {
		LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
		LLVMArrayType(ctx->ac.i8, 4),
	};
	LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
	type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
	return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}
/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive
 * for rasterization.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
static LLVMValueRef
ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

	/* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
	unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
	if (write_stride_2exp) {
		LLVMValueRef row =
			LLVMBuildLShr(builder, vertexidx,
				      LLVMConstInt(ctx->ac.i32, 5, false), "");
		LLVMValueRef swizzle =
			LLVMBuildAnd(builder, row,
				     LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
						  false), "");
		vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
	}

	return ac_build_gep0(&ctx->ac, storage, vertexidx);
}
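/* Worked example of the swizzle above (assuming gs_max_out_vertices == 12,
 * so write_stride_2exp == ffs(12) - 1 == 2): row = vertexidx >> 5 selects the
 * group of 32, and its low two bits are XORed into vertexidx, so the same
 * emit slot in different groups of 32 lands in different LDS banks. */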
static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
		       LLVMValueRef emitidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
	tmp = LLVMBuildMul(builder, tmp, gsthread, "");
	const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
	return ngg_gs_vertex_ptr(ctx, vertexidx);
}
static LLVMValueRef
ngg_gs_get_emit_output_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
			   unsigned out_idx)
{
	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_0, /* first struct entry */
		LLVMConstInt(ctx->ac.i32, out_idx, false),
	};
	return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}

static LLVMValueRef
ngg_gs_get_emit_primflag_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
			     unsigned stream)
{
	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_1, /* second struct entry */
		LLVMConstInt(ctx->ac.i32, stream, false),
	};
	return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}
void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
			      unsigned stream,
			      LLVMValueRef *addrs)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct si_shader_info *info = &sel->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	const LLVMValueRef vertexidx =
		LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 */
	const LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
			      LLVMConstInt(ctx->i32, sel->gs_max_out_vertices, false), "");

	tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
	tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
	LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

	ac_build_ifcc(&ctx->ac, can_emit, 9001);

	const LLVMValueRef vertexptr =
		ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
	unsigned out_idx = 0;
	for (unsigned i = 0; i < info->num_outputs; i++) {
		for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
			out_val = ac_to_integer(&ctx->ac, out_val);
			LLVMBuildStore(builder, out_val,
				       ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx));
		}
	}
	assert(out_idx * 4 == sel->gsvs_vertex_size);

	/* Determine and store whether this vertex completed a primitive. */
	const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

	tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
	const LLVMValueRef iscompleteprim =
		LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

	/* Since the geometry shader emits triangle strips, we need to
	 * track which primitive is odd and swap vertex indices to get
	 * the correct vertex order.
	 */
	LLVMValueRef is_odd = ctx->i1false;
	if (stream == 0 && u_vertices_per_prim(sel->gs_output_prim) == 3) {
		tmp = LLVMBuildAnd(builder, curverts, ctx->i32_1, "");
		is_odd = LLVMBuildICmp(builder, LLVMIntEQ, tmp, ctx->i32_1, "");
	}

	tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
	LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

	/* The per-vertex primitive flag encoding:
	 *   bit 0: whether this vertex finishes a primitive
	 *   bit 1: whether the primitive is odd (if we are emitting triangle strips)
	 */
	tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
	tmp = LLVMBuildOr(builder, tmp,
			  LLVMBuildShl(builder,
				       LLVMBuildZExt(builder, is_odd, ctx->ac.i8, ""),
				       ctx->ac.i8_1, ""), "");
	LLVMBuildStore(builder, tmp, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream));

	tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
	tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
	LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);

	ac_build_endif(&ctx->ac, 9001);
}
void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
{
	/* Zero out the part of LDS scratch that is used to accumulate the
	 * per-stream generated primitive count.
	 */
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->i32, 4, false), "");
	ac_build_ifcc(&ctx->ac, tmp, 5090);
	{
		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
		LLVMBuildStore(builder, ctx->i32_0, ptr);
	}
	ac_build_endif(&ctx->ac, 5090);

	ac_build_s_barrier(&ctx->ac);
}
void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct si_shader_info *info = &sel->info;
	const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
	LLVMValueRef tmp, tmp2;

	/* Zero out remaining (non-emitted) primitive flags.
	 *
	 * Note: Alternatively, we could pass the relevant gs_next_vertex to
	 *       the emit threads via LDS. This is likely worse in the expected
	 *       typical case where each GS thread emits the full set of
	 *       vertices.
	 */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

		ac_build_bgnloop(&ctx->ac, 5100);

		const LLVMValueRef vertexidx =
			LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
		tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
			LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5101);
		ac_build_break(&ctx->ac);
		ac_build_endif(&ctx->ac, 5101);

		tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
		LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

		tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
		LLVMBuildStore(builder, i8_0, ngg_gs_get_emit_primflag_ptr(ctx, tmp, stream));

		ac_build_endloop(&ctx->ac, 5100);
	}
	/* Accumulate generated primitives counts across the entire threadgroup. */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		LLVMValueRef numprims =
			LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
		numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);

		tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5105);
		{
			LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
					   ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
							 LLVMConstInt(ctx->i32, stream, false)),
					   numprims, LLVMAtomicOrderingMonotonic, false);
		}
		ac_build_endif(&ctx->ac, 5105);
	}

	ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

	ac_build_s_barrier(&ctx->ac);

	const LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);
	/* Streamout */
	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = LLVMConstInt(ctx->i32, verts_per_prim, false);

		LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream), "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
			nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
		}

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			tmp = LLVMBuildSub(builder, tid,
					   LLVMConstInt(ctx->i32, verts_per_prim - i - 1, false), "");
			tmp = ngg_gs_vertex_ptr(ctx, tmp);
			nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
		}

		build_streamout(ctx, &nggso);
	}
	/* Write shader query data. */
	if (ctx->screen->use_ngg_streamout) {
		tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
		tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
		ac_build_ifcc(&ctx->ac, tmp, 5109); /* if (STREAMOUT_QUERY_ENABLED) */
		unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
		tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
				    LLVMConstInt(ctx->i32, num_query_comps, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5110);
		{
			LLVMValueRef offset;
			tmp = tid;
			if (sel->so.num_outputs)
				tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->i32, 3, false), "");
			offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 32, false), "");
			if (sel->so.num_outputs) {
				tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->i32, 2, false), "");
				tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 8, false), "");
				offset = LLVMBuildAdd(builder, offset, tmp, "");
			}

			tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
			LLVMValueRef args[] = {
				tmp,
				ngg_get_query_buf(ctx),
				offset,
				LLVMConstInt(ctx->i32, 16, false), /* soffset */
				ctx->i32_0, /* cachepolicy */
			};
			ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
					   ctx->i32, args, 5, 0);
		}
		ac_build_endif(&ctx->ac, 5110);
		ac_build_endif(&ctx->ac, 5109);
	}
	/* Determine vertex liveness. */
	LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5120);
	{
		for (unsigned i = 0; i < verts_per_prim; ++i) {
			const LLVMValueRef primidx =
				LLVMBuildAdd(builder, tid,
					     LLVMConstInt(ctx->ac.i32, i, false), "");

			if (i > 0) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
				ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
			}

			/* Load primitive liveness */
			tmp = ngg_gs_vertex_ptr(ctx, primidx);
			tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
			const LLVMValueRef primlive =
				LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

			tmp = LLVMBuildLoad(builder, vertliveptr, "");
			tmp = LLVMBuildOr(builder, tmp, primlive, "");
			LLVMBuildStore(builder, tmp, vertliveptr);

			if (i > 0)
				ac_build_endif(&ctx->ac, 5121 + i);
		}
	}
	ac_build_endif(&ctx->ac, 5120);
	/* Inclusive scan addition across the current wave. */
	LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
	struct ac_wg_scan vertlive_scan = {};
	vertlive_scan.op = nir_op_iadd;
	vertlive_scan.enable_reduce = true;
	vertlive_scan.enable_exclusive = true;
	vertlive_scan.src = vertlive;
	vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->i32_0);
	vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
	vertlive_scan.numwaves = get_tgsize(ctx);
	vertlive_scan.maxwaves = 8;

	ac_build_wg_scan(&ctx->ac, &vertlive_scan);
	/* Skip all exports (including index exports) when possible. At least on
	 * early gfx10 revisions this is also to avoid hangs.
	 */
	LLVMValueRef have_exports =
		LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
	num_emit_threads =
		LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

	/* Allocate export space. Send this message as early as possible, to
	 * hide the latency of the SQ <-> SPI roundtrip.
	 *
	 * Note: We could consider compacting primitives for export as well.
	 *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
	 *       prim data per clock and skips null primitives at no additional
	 *       cost. So compacting primitives can only be beneficial when
	 *       there are 4 or more contiguous null primitives in the export
	 *       (in the common case of single-dword prim exports).
	 */
	ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
				      vertlive_scan.result_reduce, num_emit_threads);
	/* Setup the reverse vertex compaction permutation. We re-use stream 1
	 * of the primitive liveness flags, relying on the fact that each
	 * threadgroup can have at most 256 threads. */
	ac_build_ifcc(&ctx->ac, vertlive, 5130);
	{
		tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
		tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
		LLVMBuildStore(builder, tmp2, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1));
	}
	ac_build_endif(&ctx->ac, 5130);

	ac_build_s_barrier(&ctx->ac);
	/* Export primitive data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5140);
	{
		LLVMValueRef flags;
		struct ac_ngg_prim prim = {};
		prim.num_vertices = verts_per_prim;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		flags = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
		prim.isnull = LLVMBuildNot(builder, LLVMBuildTrunc(builder, flags, ctx->i1, ""), "");

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
				LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
			prim.edgeflag[i] = ctx->ac.i1false;
		}

		/* Geometry shaders output triangle strips, but NGG expects triangles.
		 * We need to change the vertex order for odd triangles to get correct
		 * front/back facing by swapping 2 vertex indices, but we also have to
		 * keep the provoking vertex in the same place.
		 *
		 * If the first vertex is provoking, swap index 1 and 2.
		 * If the last vertex is provoking, swap index 0 and 1.
		 */
		if (verts_per_prim == 3) {
			LLVMValueRef is_odd = LLVMBuildLShr(builder, flags, ctx->ac.i8_1, "");
			is_odd = LLVMBuildTrunc(builder, is_odd, ctx->i1, "");
			LLVMValueRef flatshade_first =
				LLVMBuildICmp(builder, LLVMIntEQ,
					      si_unpack_param(ctx, ctx->vs_state_bits, 4, 2),
					      ctx->i32_0, "");

			struct ac_ngg_prim in = prim;
			prim.index[0] = LLVMBuildSelect(builder, flatshade_first,
							in.index[0],
							LLVMBuildSelect(builder, is_odd,
									in.index[1], in.index[0], ""), "");
			prim.index[1] = LLVMBuildSelect(builder, flatshade_first,
							LLVMBuildSelect(builder, is_odd,
									in.index[2], in.index[1], ""),
							LLVMBuildSelect(builder, is_odd,
									in.index[0], in.index[1], ""), "");
			prim.index[2] = LLVMBuildSelect(builder, flatshade_first,
							LLVMBuildSelect(builder, is_odd,
									in.index[1], in.index[2], ""),
							in.index[2], "");
		}

		ac_build_export_prim(&ctx->ac, &prim);
	}
	ac_build_endif(&ctx->ac, 5140);
	/* Export position and parameter data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
	ac_build_ifcc(&ctx->ac, tmp, 5145);
	{
		struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1), "");
		tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
		const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

		unsigned out_idx = 0;
		for (unsigned i = 0; i < info->num_outputs; i++) {
			outputs[i].semantic_name = info->output_semantic_name[i];
			outputs[i].semantic_index = info->output_semantic_index[i];

			for (unsigned j = 0; j < 4; j++, out_idx++) {
				tmp = ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx);
				tmp = LLVMBuildLoad(builder, tmp, "");
				outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
				outputs[i].vertex_stream[j] =
					(info->output_streams[i] >> (2 * j)) & 3;
			}
		}

		si_llvm_export_vs(ctx, outputs, info->num_outputs);
	}
	ac_build_endif(&ctx->ac, 5145);
}
static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
				     unsigned min_verts_per_prim, bool use_adjacency)
{
	unsigned max_reuse = max_esverts - min_verts_per_prim;
	if (use_adjacency)
		max_reuse /= 2;
	*max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}
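/* Example: with max_esverts = 128 and min_verts_per_prim = 3, up to 125
 * vertices can be reused, so max_gsprims is clamped to 126 (max_reuse is
 * halved first when adjacency doubles the vertices consumed per primitive). */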
/**
 * Determine subgroup information like maximum number of vertices and prims.
 *
 * This happens before the shader is uploaded, since LDS relocations during
 * upload depend on the subgroup size.
 */
void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
{
	const struct si_shader_selector *gs_sel = shader->selector;
	const struct si_shader_selector *es_sel =
		shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
	const enum pipe_shader_type gs_type = gs_sel->type;
	const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
	const unsigned input_prim = si_get_input_prim(gs_sel);
	const bool use_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
				   input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
	const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
	const unsigned min_verts_per_prim =
		gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space.
	 *
	 * TODO: We should really take the shader's internal LDS use into
	 *       account. The linker will fail if the size is greater than
	 *       8K dwords.
	 */
	const unsigned max_lds_size = 8 * 1024 - 768;
	const unsigned target_lds_size = max_lds_size;
	unsigned esvert_lds_size = 0;
	unsigned gsprim_lds_size = 0;

	/* All these are per subgroup: */
	bool max_vert_out_per_gs_instance = false;
	unsigned max_esverts_base = 128;
	unsigned max_gsprims_base = 128; /* default prim group size clamp */

	/* Hardware has the following non-natural restrictions on the value
	 * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of the draw:
	 *  - at most 252 for any line input primitive type
	 *  - at most 251 for any quad input primitive type
	 *  - at most 251 for triangle strips with adjacency (this happens to
	 *    be the natural limit for triangle *lists* with adjacency)
	 */
	max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);
	if (gs_type == PIPE_SHADER_GEOMETRY) {
		unsigned max_out_verts_per_gsprim =
			gs_sel->gs_max_out_vertices * gs_num_invocations;

		if (max_out_verts_per_gsprim <= 256) {
			if (max_out_verts_per_gsprim) {
				max_gsprims_base = MIN2(max_gsprims_base,
							256 / max_out_verts_per_gsprim);
			}
		} else {
			/* Use special multi-cycling mode in which each GS
			 * instance gets its own subgroup. Does not work with
			 * tessellation. */
			max_vert_out_per_gs_instance = true;
			max_gsprims_base = 1;
			max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
		}

		esvert_lds_size = es_sel->esgs_itemsize / 4;
		gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
	} else {
		/* LDS size for passing data from ES to GS. */
		esvert_lds_size = ngg_nogs_vertex_size(shader);

		/* LDS size for passing data from GS to ES.
		 * GS stores Primitive IDs into LDS at the address corresponding
		 * to the ES thread of the provoking vertex. All ES threads
		 * load and export PrimitiveID for their thread.
		 */
		if (gs_sel->type == PIPE_SHADER_VERTEX &&
		    shader->key.mono.u.vs_export_prim_id)
			esvert_lds_size = MAX2(esvert_lds_size, 1);
	}
	unsigned max_gsprims = max_gsprims_base;
	unsigned max_esverts = max_esverts_base;

	if (esvert_lds_size)
		max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
	if (gsprim_lds_size)
		max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

	max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
	clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
	assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
|| gsprim_lds_size
) {
1367 /* Now that we have a rough proportionality between esverts
1368 * and gsprims based on the primitive type, scale both of them
1369 * down simultaneously based on required LDS space.
1371 * We could be smarter about this if we knew how much vertex
1374 unsigned lds_total
= max_esverts
* esvert_lds_size
+
1375 max_gsprims
* gsprim_lds_size
;
1376 if (lds_total
> target_lds_size
) {
1377 max_esverts
= max_esverts
* target_lds_size
/ lds_total
;
1378 max_gsprims
= max_gsprims
* target_lds_size
/ lds_total
;
1380 max_esverts
= MIN2(max_esverts
, max_gsprims
* max_verts_per_prim
);
1381 clamp_gsprims_to_esverts(&max_gsprims
, max_esverts
,
1382 min_verts_per_prim
, use_adjacency
);
1383 assert(max_esverts
>= max_verts_per_prim
&& max_gsprims
>= 1);
1387 /* Round up towards full wave sizes for better ALU utilization. */
1388 if (!max_vert_out_per_gs_instance
) {
1389 const unsigned wavesize
= gs_sel
->screen
->ge_wave_size
;
1390 unsigned orig_max_esverts
;
1391 unsigned orig_max_gsprims
;
1393 orig_max_esverts
= max_esverts
;
1394 orig_max_gsprims
= max_gsprims
;
1396 max_esverts
= align(max_esverts
, wavesize
);
1397 max_esverts
= MIN2(max_esverts
, max_esverts_base
);
1398 if (esvert_lds_size
)
1399 max_esverts
= MIN2(max_esverts
,
1400 (max_lds_size
- max_gsprims
* gsprim_lds_size
) /
1402 max_esverts
= MIN2(max_esverts
, max_gsprims
* max_verts_per_prim
);
1404 max_gsprims
= align(max_gsprims
, wavesize
);
1405 max_gsprims
= MIN2(max_gsprims
, max_gsprims_base
);
1406 if (gsprim_lds_size
)
1407 max_gsprims
= MIN2(max_gsprims
,
1408 (max_lds_size
- max_esverts
* esvert_lds_size
) /
1410 clamp_gsprims_to_esverts(&max_gsprims
, max_esverts
,
1411 min_verts_per_prim
, use_adjacency
);
1412 assert(max_esverts
>= max_verts_per_prim
&& max_gsprims
>= 1);
1413 } while (orig_max_esverts
!= max_esverts
|| orig_max_gsprims
!= max_gsprims
);
1416 /* Hardware restriction: minimum value of max_esverts */
1417 max_esverts
= MAX2(max_esverts
, 23 + max_verts_per_prim
);
1419 unsigned max_out_vertices
=
1420 max_vert_out_per_gs_instance
? gs_sel
->gs_max_out_vertices
:
1421 gs_type
== PIPE_SHADER_GEOMETRY
?
1422 max_gsprims
* gs_num_invocations
* gs_sel
->gs_max_out_vertices
:
1424 assert(max_out_vertices
<= 256);
1426 unsigned prim_amp_factor
= 1;
1427 if (gs_type
== PIPE_SHADER_GEOMETRY
) {
1428 /* Number of output primitives per GS input primitive after
1430 prim_amp_factor
= gs_sel
->gs_max_out_vertices
;
1433 /* The GE only checks against the maximum number of ES verts after
1434 * allocating a full GS primitive. So we need to ensure that whenever
1435 * this check passes, there is enough space for a full primitive without
1438 shader
->ngg
.hw_max_esverts
= max_esverts
- max_verts_per_prim
+ 1;
1439 shader
->ngg
.max_gsprims
= max_gsprims
;
1440 shader
->ngg
.max_out_verts
= max_out_vertices
;
1441 shader
->ngg
.prim_amp_factor
= prim_amp_factor
;
1442 shader
->ngg
.max_vert_out_per_gs_instance
= max_vert_out_per_gs_instance
;
1444 shader
->gs_info
.esgs_ring_size
= 4 * max_esverts
* esvert_lds_size
;
1445 shader
->ngg
.ngg_emit_size
= max_gsprims
* gsprim_lds_size
;
1447 assert(shader
->ngg
.hw_max_esverts
>= 24); /* HW limitation */