/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "si_shader_internal.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
}

static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->merged_wave_info, 28, 4);
}
static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp;
   tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
                      LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
   return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}
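/* The helpers below unpack fields of the gs_tg_info SGPR: the ordered
 * (streamout) ID lives in bits [0:11], the threadgroup vertex count in
 * bits [12:20], and the threadgroup primitive count in bits [22:30],
 * matching the unpack offsets used in each function.
 */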
static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->gs_tg_info, 12, 9);
}

static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->gs_tg_info, 22, 9);
}

static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->gs_tg_info, 0, 12);
}
static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
{
   LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);

   return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
                                LLVMConstInt(ctx->i32, GFX10_GS_QUERY_BUF, false));
}
static void build_streamout_vertex(struct si_shader_context *ctx,
                                   LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
                                   unsigned stream, LLVMValueRef offset_vtx,
                                   LLVMValueRef vertexptr)
{
   struct si_shader_info *info = &ctx->shader->selector->info;
   struct pipe_stream_output_info *so = &ctx->shader->selector->so;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef offset[4] = {};
   LLVMValueRef tmp;

   for (unsigned buffer = 0; buffer < 4; ++buffer) {
      if (!wg_offset_dw[buffer])
         continue;

      tmp = LLVMBuildMul(builder, offset_vtx,
                         LLVMConstInt(ctx->i32, so->stride[buffer], false), "");
      tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
      offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->i32, 2, false), "");
   }

   for (unsigned i = 0; i < so->num_outputs; ++i) {
      if (so->output[i].stream != stream)
         continue;

      unsigned reg = so->output[i].register_index;
      struct si_shader_output_values out;
      out.semantic_name = info->output_semantic_name[reg];
      out.semantic_index = info->output_semantic_index[reg];

      for (unsigned comp = 0; comp < 4; comp++) {
         tmp = ac_build_gep0(&ctx->ac, vertexptr,
                             LLVMConstInt(ctx->i32, 4 * reg + comp, false));
         out.values[comp] = LLVMBuildLoad(builder, tmp, "");
         out.vertex_stream[comp] =
            (info->output_streams[reg] >> (2 * comp)) & 3;
      }

      si_emit_streamout_output(ctx, so_buffer, offset, &so->output[i], &out);
   }
}
struct ngg_streamout {
   LLVMValueRef num_vertices;

   /* per-thread data */
   LLVMValueRef prim_enable[4]; /* i1 per stream */
   LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */

   /* Output */
   LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
};

/**
 * Build streamout logic.
 *
 * Implies a barrier.
 *
 * Writes number of emitted primitives to gs_ngg_scratch[4:8].
 *
 * Clobbers gs_ngg_scratch[8:].
 */
static void build_streamout(struct si_shader_context *ctx,
                            struct ngg_streamout *nggso)
{
   struct si_shader_info *info = &ctx->shader->selector->info;
   struct pipe_stream_output_info *so = &ctx->shader->selector->so;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
   LLVMValueRef tid = get_thread_id_in_tg(ctx);
   LLVMValueRef tmp, tmp2;
   LLVMValueRef i32_2 = LLVMConstInt(ctx->i32, 2, false);
   LLVMValueRef i32_4 = LLVMConstInt(ctx->i32, 4, false);
   LLVMValueRef i32_8 = LLVMConstInt(ctx->i32, 8, false);
   LLVMValueRef so_buffer[4] = {};
   unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
                               (nggso->vertices[2] ? 1 : 0);
   LLVMValueRef prim_stride_dw[4] = {};
   LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->i32);
   int stream_for_buffer[4] = { -1, -1, -1, -1 };
   unsigned bufmask_for_stream[4] = {};
   bool isgs = ctx->type == PIPE_SHADER_GEOMETRY;
   unsigned scratch_emit_base = isgs ? 4 : 0;
   LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->i32_0;
   unsigned scratch_offset_base = isgs ? 8 : 4;
   LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;

   ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);

   /* Determine the mapping of streamout buffers to vertex streams. */
   for (unsigned i = 0; i < so->num_outputs; ++i) {
      unsigned buf = so->output[i].output_buffer;
      unsigned stream = so->output[i].stream;
      assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
      stream_for_buffer[buf] = stream;
      bufmask_for_stream[stream] |= 1 << buf;
   }

   for (unsigned buffer = 0; buffer < 4; ++buffer) {
      if (stream_for_buffer[buffer] == -1)
         continue;

      assert(so->stride[buffer]);

      tmp = LLVMConstInt(ctx->i32, so->stride[buffer], false);
      prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
      prim_stride_dw_vgpr = ac_build_writelane(
         &ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
         LLVMConstInt(ctx->i32, buffer, false));

      so_buffer[buffer] = ac_build_load_to_sgpr(
         &ctx->ac, buf_ptr,
         LLVMConstInt(ctx->i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
   }

   tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->i32_0, "");
   ac_build_ifcc(&ctx->ac, tmp, 5200);
   {
      LLVMTypeRef gdsptr = LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GDS);
      LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->i32_0, gdsptr, "");

      /* Advance the streamout offsets in GDS. */
      LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
      LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");

      tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
      ac_build_ifcc(&ctx->ac, tmp, 5210);
      {
         if (isgs) {
            tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
            tmp = LLVMBuildLoad(builder, tmp, "");
         } else {
            tmp = ac_build_writelane(&ctx->ac, ctx->i32_0,
                                     ngg_get_prim_cnt(ctx), ctx->i32_0);
         }
         LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);

         unsigned swizzle[4];
         int unused_stream = -1;
         for (unsigned stream = 0; stream < 4; ++stream) {
            if (!info->num_stream_output_components[stream]) {
               unused_stream = stream;
               break;
            }
         }
         for (unsigned buffer = 0; buffer < 4; ++buffer) {
            if (stream_for_buffer[buffer] >= 0) {
               swizzle[buffer] = stream_for_buffer[buffer];
            } else {
               assert(unused_stream >= 0);
               swizzle[buffer] = unused_stream;
            }
         }

         tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
                                     swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
         tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");

         LLVMValueRef args[] = {
            LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
            tmp,
            ctx->i32_0, // ordering
            ctx->i32_0, // scope
            ctx->ac.i1false, // isVolatile
            LLVMConstInt(ctx->i32, 4 << 24, false), // OA index
            ctx->ac.i1true, // wave release
            ctx->ac.i1true, // wave done
         };
         tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
                                  ctx->i32, args, ARRAY_SIZE(args), 0);

         /* Keep offsets in a VGPR for quick retrieval via readlane by
          * the first wave for bounds checking, and also store in LDS
          * for retrieval by all waves later. */
         LLVMBuildStore(builder, tmp, offsets_vgpr);

         tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
                             scratch_offset_basev, "");
         tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
         LLVMBuildStore(builder, tmp, tmp2);
      }
      ac_build_endif(&ctx->ac, 5210);

      /* Determine the max emit per buffer. This is done via the SALU, in part
       * because LLVM can't generate divide-by-multiply if we try to do this
       * via VALU with one lane per buffer.
       */
      LLVMValueRef max_emit[4] = {};
      for (unsigned buffer = 0; buffer < 4; ++buffer) {
         if (stream_for_buffer[buffer] == -1)
            continue;

         LLVMValueRef bufsize_dw =
            LLVMBuildLShr(builder,
                          LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
                          i32_2, "");

         tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
         LLVMValueRef offset_dw =
            ac_build_readlane(&ctx->ac, tmp,
                              LLVMConstInt(ctx->i32, buffer, false));

         tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
         tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");

         tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
         max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->i32_0, tmp, "");
      }

      /* Determine the number of emitted primitives per stream and fixup the
       * GDS counter if necessary.
       *
       * This is complicated by the fact that a single stream can emit to
       * multiple buffers (but luckily not vice versa).
       */
      LLVMValueRef emit_vgpr = ctx->i32_0;

      for (unsigned stream = 0; stream < 4; ++stream) {
         if (!info->num_stream_output_components[stream])
            continue;

         tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
         LLVMValueRef generated =
            ac_build_readlane(&ctx->ac, tmp,
                              LLVMConstInt(ctx->i32, stream, false));

         LLVMValueRef emit = generated;
         for (unsigned buffer = 0; buffer < 4; ++buffer) {
            if (stream_for_buffer[buffer] == stream)
               emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
         }

         emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
                                        LLVMConstInt(ctx->i32, stream, false));

         /* Fixup the offset using a plain GDS atomic if we overflowed. */
         tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
         ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
         tmp = LLVMBuildLShr(builder,
                             LLVMConstInt(ctx->i32, bufmask_for_stream[stream], false),
                             ac_get_thread_id(&ctx->ac), "");
         tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
         ac_build_ifcc(&ctx->ac, tmp, 5222);
         {
            tmp = LLVMBuildSub(builder, generated, emit, "");
            tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
            tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
            LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
                               LLVMAtomicOrderingMonotonic, false);
         }
         ac_build_endif(&ctx->ac, 5222);
         ac_build_endif(&ctx->ac, 5221);
      }

      tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
      ac_build_ifcc(&ctx->ac, tmp, 5225);
      {
         tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
                            scratch_emit_basev, "");
         tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
         LLVMBuildStore(builder, emit_vgpr, tmp);
      }
      ac_build_endif(&ctx->ac, 5225);
   }
   ac_build_endif(&ctx->ac, 5200);

   /* Determine the workgroup-relative per-thread / primitive offset into
    * the streamout buffers */
   struct ac_wg_scan primemit_scan[4] = {};

   if (isgs) {
      for (unsigned stream = 0; stream < 4; ++stream) {
         if (!info->num_stream_output_components[stream])
            continue;

         primemit_scan[stream].enable_exclusive = true;
         primemit_scan[stream].op = nir_op_iadd;
         primemit_scan[stream].src = nggso->prim_enable[stream];
         primemit_scan[stream].scratch =
            ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
                          LLVMConstInt(ctx->i32, 12 + 8 * stream, false));
         primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
         primemit_scan[stream].numwaves = get_tgsize(ctx);
         primemit_scan[stream].maxwaves = 8;
         ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
      }
   }

   ac_build_s_barrier(&ctx->ac);

   /* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
   LLVMValueRef wgoffset_dw[4] = {};

   {
      LLVMValueRef scratch_vgpr;

      tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
      scratch_vgpr = LLVMBuildLoad(builder, tmp, "");

      for (unsigned buffer = 0; buffer < 4; ++buffer) {
         if (stream_for_buffer[buffer] >= 0) {
            wgoffset_dw[buffer] = ac_build_readlane(
               &ctx->ac, scratch_vgpr,
               LLVMConstInt(ctx->i32, scratch_offset_base + buffer, false));
         }
      }

      for (unsigned stream = 0; stream < 4; ++stream) {
         if (info->num_stream_output_components[stream]) {
            nggso->emit[stream] = ac_build_readlane(
               &ctx->ac, scratch_vgpr,
               LLVMConstInt(ctx->i32, scratch_emit_base + stream, false));
         }
      }
   }

   /* Write out primitive data */
   for (unsigned stream = 0; stream < 4; ++stream) {
      if (!info->num_stream_output_components[stream])
         continue;

      if (isgs) {
         ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
      } else {
         primemit_scan[stream].result_exclusive = tid;
      }

      tmp = LLVMBuildICmp(builder, LLVMIntULT,
                          primemit_scan[stream].result_exclusive,
                          nggso->emit[stream], "");
      tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
      ac_build_ifcc(&ctx->ac, tmp, 5240);
      {
         LLVMValueRef offset_vtx =
            LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
                         nggso->num_vertices, "");

         for (unsigned i = 0; i < max_num_vertices; ++i) {
            tmp = LLVMBuildICmp(builder, LLVMIntULT,
                                LLVMConstInt(ctx->i32, i, false),
                                nggso->num_vertices, "");
            ac_build_ifcc(&ctx->ac, tmp, 5241);
            build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
                                   stream, offset_vtx, nggso->vertices[i]);
            ac_build_endif(&ctx->ac, 5241);
            offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->i32_1, "");
         }
      }
      ac_build_endif(&ctx->ac, 5240);
   }
}
static unsigned ngg_nogs_vertex_size(struct si_shader *shader)
{
   unsigned lds_vertex_size = 0;

   /* The edgeflag is always stored in the last element that's also
    * used for padding to reduce LDS bank conflicts. */
   if (shader->selector->so.num_outputs)
      lds_vertex_size = 4 * shader->selector->info.num_outputs + 1;
   if (shader->selector->info.writes_edgeflag)
      lds_vertex_size = MAX2(lds_vertex_size, 1);

   return lds_vertex_size;
}
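/* Example: with streamout enabled and 5 vertex outputs, each vertex occupies
 * 4 * 5 + 1 = 21 dwords; the odd size is what provides the padding mentioned
 * above.
 */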
/**
 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
 * for the vertex outputs.
 */
static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx,
                                        LLVMValueRef vtxid)
{
   /* The extra dword is used to avoid LDS bank conflicts. */
   unsigned vertex_size = ngg_nogs_vertex_size(ctx->shader);
   LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, vertex_size);
   LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
   LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
   return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
}
/**
 * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
 */
void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
                             unsigned max_outputs,
                             LLVMValueRef *addrs)
{
   struct si_shader_context *ctx = si_shader_context_from_abi(abi);
   struct si_shader_selector *sel = ctx->shader->selector;
   struct si_shader_info *info = &sel->info;
   struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp, tmp2;

   assert(!ctx->shader->is_gs_copy_shader);
   assert(info->num_outputs <= max_outputs);

   LLVMValueRef vertex_ptr = NULL;

   if (sel->so.num_outputs || sel->info.writes_edgeflag)
      vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));

   for (unsigned i = 0; i < info->num_outputs; i++) {
      outputs[i].semantic_name = info->output_semantic_name[i];
      outputs[i].semantic_index = info->output_semantic_index[i];

      for (unsigned j = 0; j < 4; j++) {
         outputs[i].vertex_stream[j] =
            (info->output_streams[i] >> (2 * j)) & 3;

         /* TODO: we may store more outputs than streamout needs,
          * but streamout performance isn't that important.
          */
         if (sel->so.num_outputs) {
            tmp = ac_build_gep0(&ctx->ac, vertex_ptr,
                                LLVMConstInt(ctx->i32, 4 * i + j, false));
            tmp2 = LLVMBuildLoad(builder, addrs[4 * i + j], "");
            tmp2 = ac_to_integer(&ctx->ac, tmp2);
            LLVMBuildStore(builder, tmp2, tmp);
         }
      }

      /* Store the edgeflag at the end (if streamout is enabled) */
      if (info->output_semantic_name[i] == TGSI_SEMANTIC_EDGEFLAG &&
          sel->info.writes_edgeflag) {
         LLVMValueRef edgeflag = LLVMBuildLoad(builder, addrs[4 * i], "");
         /* The output is a float, but the hw expects a 1-bit integer. */
         edgeflag = LLVMBuildFPToUI(ctx->ac.builder, edgeflag, ctx->i32, "");
         edgeflag = ac_build_umin(&ctx->ac, edgeflag, ctx->i32_1);

         tmp = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
         tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
         LLVMBuildStore(builder, edgeflag, tmp);
      }
   }

   ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

   LLVMValueRef is_gs_thread = si_is_gs_thread(ctx);
   LLVMValueRef is_es_thread = si_is_es_thread(ctx);
   LLVMValueRef vtxindex[] = {
      si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16),
      si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16),
      si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16),
   };

   /* Determine the number of vertices per primitive. */
   unsigned num_vertices;
   LLVMValueRef num_vertices_val;

   if (ctx->type == PIPE_SHADER_VERTEX) {
      if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
         /* Blits always use axis-aligned rectangles with 3 vertices. */
         num_vertices = 3;
         num_vertices_val = LLVMConstInt(ctx->i32, 3, 0);
      } else {
         /* Extract OUTPRIM field. */
         tmp = si_unpack_param(ctx, ctx->vs_state_bits, 2, 2);
         num_vertices_val = LLVMBuildAdd(builder, tmp, ctx->i32_1, "");
         num_vertices = 3; /* TODO: optimize for points & lines */
      }
   } else {
      assert(ctx->type == PIPE_SHADER_TESS_EVAL);

      if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
         num_vertices = 1;
      else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
         num_vertices = 2;
      else
         num_vertices = 3;

      num_vertices_val = LLVMConstInt(ctx->i32, num_vertices, false);
   }

   LLVMValueRef emitted_prims = NULL;

   if (sel->so.num_outputs) {
      struct ngg_streamout nggso = {};

      nggso.num_vertices = num_vertices_val;
      nggso.prim_enable[0] = is_gs_thread;

      for (unsigned i = 0; i < num_vertices; ++i)
         nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);

      build_streamout(ctx, &nggso);
      emitted_prims = nggso.emit[0];
   }

   LLVMValueRef user_edgeflags[3] = {};

   if (sel->info.writes_edgeflag) {
      /* Streamout already inserted the barrier, so don't insert it again. */
      if (!sel->so.num_outputs)
         ac_build_s_barrier(&ctx->ac);

      ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
      /* Load edge flags from ES threads and store them into VGPRs in GS threads. */
      for (unsigned i = 0; i < num_vertices; i++) {
         tmp = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
         tmp2 = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
         tmp = ac_build_gep0(&ctx->ac, tmp, tmp2);
         tmp = LLVMBuildLoad(builder, tmp, "");
         tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");

         user_edgeflags[i] = ac_build_alloca_undef(&ctx->ac, ctx->i1, "");
         LLVMBuildStore(builder, tmp, user_edgeflags[i]);
      }
      ac_build_endif(&ctx->ac, 5400);
   }

   /* Copy Primitive IDs from GS threads to the LDS address corresponding
    * to the ES thread of the provoking vertex.
    */
   if (ctx->type == PIPE_SHADER_VERTEX &&
       ctx->shader->key.mono.u.vs_export_prim_id) {
      /* Streamout and edge flags use LDS. Make it idle, so that we can reuse it. */
      if (sel->so.num_outputs || sel->info.writes_edgeflag)
         ac_build_s_barrier(&ctx->ac);

      ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
      /* Extract the PROVOKING_VTX_INDEX field. */
      LLVMValueRef provoking_vtx_in_prim =
         si_unpack_param(ctx, ctx->vs_state_bits, 4, 2);

      /* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
      LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
      LLVMValueRef provoking_vtx_index =
         LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");

      LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.gs_prim_id),
                     ac_build_gep0(&ctx->ac, ctx->esgs_ring, provoking_vtx_index));
      ac_build_endif(&ctx->ac, 5400);
   }

   ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
                                 ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));

   /* Update query buffer */
   /* TODO: this won't catch 96-bit clear_buffer via transform feedback. */
   if (ctx->screen->use_ngg_streamout &&
       !info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
      tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
      tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
      ac_build_ifcc(&ctx->ac, tmp, 5029); /* if (STREAMOUT_QUERY_ENABLED) */
      tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
      ac_build_ifcc(&ctx->ac, tmp, 5030);
      tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
                          sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
      ac_build_ifcc(&ctx->ac, tmp, 5031);
      {
         LLVMValueRef args[] = {
            ngg_get_prim_cnt(ctx),
            ngg_get_query_buf(ctx),
            LLVMConstInt(ctx->i32, 16, false), /* offset of stream[0].generated_primitives */
            ctx->i32_0, /* soffset */
            ctx->i32_0, /* cachepolicy */
         };

         if (sel->so.num_outputs) {
            args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->i32_1);
            args[2] = ac_build_writelane(&ctx->ac, args[2],
                                         LLVMConstInt(ctx->i32, 24, false), ctx->i32_1);
         }

         /* TODO: should this be 64-bit atomics? */
         ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
                            ctx->i32, args, 5, 0);
      }
      ac_build_endif(&ctx->ac, 5031);
      ac_build_endif(&ctx->ac, 5030);
      ac_build_endif(&ctx->ac, 5029);
   }

   /* Build the primitive export.
    *
    * For the first version, we will always build up all three indices
    * independent of the primitive type. The additional garbage data
    * shouldn't hurt.
    *
    * TODO: culling depends on the primitive type, so can have some
    * interaction here.
    */
   ac_build_ifcc(&ctx->ac, is_gs_thread, 6001);
   {
      struct ac_ngg_prim prim = {};

      if (gfx10_is_ngg_passthrough(ctx->shader)) {
         prim.passthrough = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset);
      } else {
         prim.num_vertices = num_vertices;
         prim.isnull = ctx->ac.i1false;
         memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

         for (unsigned i = 0; i < num_vertices; ++i) {
            if (ctx->type != PIPE_SHADER_VERTEX) {
               prim.edgeflag[i] = ctx->i1false;
               continue;
            }

            tmp = LLVMBuildLShr(builder,
                                ac_get_arg(&ctx->ac, ctx->args.gs_invocation_id),
                                LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
            prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

            if (sel->info.writes_edgeflag) {
               tmp2 = LLVMBuildLoad(builder, user_edgeflags[i], "");
               prim.edgeflag[i] = LLVMBuildAnd(builder, prim.edgeflag[i],
                                               tmp2, "");
            }
         }
      }

      ac_build_export_prim(&ctx->ac, &prim);
   }
   ac_build_endif(&ctx->ac, 6001);

   /* Export per-vertex data (positions and parameters). */
   ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
   {
      unsigned i;

      /* Unconditionally (re-)load the values for proper SSA form. */
      for (i = 0; i < info->num_outputs; i++) {
         for (unsigned j = 0; j < 4; j++) {
            outputs[i].values[j] =
               LLVMBuildLoad(builder,
                             addrs[4 * i + j],
                             "");
         }
      }

      if (ctx->shader->key.mono.u.vs_export_prim_id) {
         outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
         outputs[i].semantic_index = 0;

         if (ctx->type == PIPE_SHADER_VERTEX) {
            /* Wait for GS stores to finish. */
            ac_build_s_barrier(&ctx->ac);

            tmp = ac_build_gep0(&ctx->ac, ctx->esgs_ring,
                                get_thread_id_in_tg(ctx));
            outputs[i].values[0] = LLVMBuildLoad(builder, tmp, "");
         } else {
            assert(ctx->type == PIPE_SHADER_TESS_EVAL);
            outputs[i].values[0] = si_get_primitive_id(ctx, 0);
         }

         outputs[i].values[0] = ac_to_float(&ctx->ac, outputs[i].values[0]);
         for (unsigned j = 1; j < 4; j++)
            outputs[i].values[j] = LLVMGetUndef(ctx->f32);

         memset(outputs[i].vertex_stream, 0,
                sizeof(outputs[i].vertex_stream));
         i++;
      }

      si_llvm_export_vs(ctx, outputs, i);
   }
   ac_build_endif(&ctx->ac, 6002);
}
static LLVMValueRef
ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
{
   const struct si_shader_selector *sel = ctx->shader->selector;
   const struct si_shader_info *info = &sel->info;

   LLVMTypeRef elements[2] = {
      LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
      LLVMArrayType(ctx->ac.i8, 4),
   };
   LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
   type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
   return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}
/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive
 * for rasterization.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
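/* Example of the swizzle below: with gs_max_out_vertices = 4,
 * write_stride_2exp is 2 and vertexidx is XOR'd with (vertexidx >> 5) & 3,
 * so indices 32..35 map to 33, 32, 35, 34, while indices 0..3 stay in place
 * because their row is 0.
 */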
static LLVMValueRef
ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
{
   struct si_shader_selector *sel = ctx->shader->selector;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

   /* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
   unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
   if (write_stride_2exp) {
      LLVMValueRef row =
         LLVMBuildLShr(builder, vertexidx,
                       LLVMConstInt(ctx->ac.i32, 5, false), "");
      LLVMValueRef swizzle =
         LLVMBuildAnd(builder, row,
                      LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
                                   false), "");
      vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
   }

   return ac_build_gep0(&ctx->ac, storage, vertexidx);
}
static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
                       LLVMValueRef emitidx)
{
   struct si_shader_selector *sel = ctx->shader->selector;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp;

   tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
   tmp = LLVMBuildMul(builder, tmp, gsthread, "");
   const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
   return ngg_gs_vertex_ptr(ctx, vertexidx);
}
static LLVMValueRef
ngg_gs_get_emit_output_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
                           unsigned out_idx)
{
   LLVMValueRef gep_idx[3] = {
      ctx->ac.i32_0, /* implied C-style array */
      ctx->ac.i32_0, /* first struct entry */
      LLVMConstInt(ctx->ac.i32, out_idx, false),
   };
   return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}

static LLVMValueRef
ngg_gs_get_emit_primflag_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
                             unsigned stream)
{
   LLVMValueRef gep_idx[3] = {
      ctx->ac.i32_0, /* implied C-style array */
      ctx->ac.i32_1, /* second struct entry */
      LLVMConstInt(ctx->ac.i32, stream, false),
   };
   return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}
void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
                              unsigned stream,
                              LLVMValueRef *addrs)
{
   const struct si_shader_selector *sel = ctx->shader->selector;
   const struct si_shader_info *info = &sel->info;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp;
   const LLVMValueRef vertexidx =
      LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

   /* If this thread has already emitted the declared maximum number of
    * vertices, skip the write: excessive vertex emissions are not
    * supposed to have any effect.
    */
   const LLVMValueRef can_emit =
      LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
                    LLVMConstInt(ctx->i32, sel->gs_max_out_vertices, false), "");

   tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
   tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
   LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

   ac_build_ifcc(&ctx->ac, can_emit, 9001);

   const LLVMValueRef vertexptr =
      ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
   unsigned out_idx = 0;
   for (unsigned i = 0; i < info->num_outputs; i++) {
      for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
         if (!(info->output_usagemask[i] & (1 << chan)) ||
             ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
            continue;

         LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
         out_val = ac_to_integer(&ctx->ac, out_val);
         LLVMBuildStore(builder, out_val,
                        ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx));
      }
   }
   assert(out_idx * 4 == sel->gsvs_vertex_size);

   /* Determine and store whether this vertex completed a primitive. */
   const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

   tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
   const LLVMValueRef iscompleteprim =
      LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

   /* Since the geometry shader emits triangle strips, we need to
    * track which primitive is odd and swap vertex indices to get
    * the correct vertex order.
    */
   LLVMValueRef is_odd = ctx->i1false;
   if (stream == 0 && u_vertices_per_prim(sel->gs_output_prim) == 3) {
      tmp = LLVMBuildAnd(builder, curverts, ctx->i32_1, "");
      is_odd = LLVMBuildICmp(builder, LLVMIntEQ, tmp, ctx->i32_1, "");
   }

   tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
   LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

   /* The per-vertex primitive flag encoding:
    *   bit 0: whether this vertex finishes a primitive
    *   bit 1: whether the primitive is odd (if we are emitting triangle strips)
    */
   tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
   tmp = LLVMBuildOr(builder, tmp,
                     LLVMBuildShl(builder,
                                  LLVMBuildZExt(builder, is_odd, ctx->ac.i8, ""),
                                  ctx->ac.i8_1, ""), "");
   LLVMBuildStore(builder, tmp, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream));

   tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
   tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
   LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);

   ac_build_endif(&ctx->ac, 9001);
}
void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
{
   /* Zero out the part of LDS scratch that is used to accumulate the
    * per-stream generated primitive count.
    */
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
   LLVMValueRef tid = get_thread_id_in_tg(ctx);
   LLVMValueRef tmp;

   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->i32, 4, false), "");
   ac_build_ifcc(&ctx->ac, tmp, 5090);
   {
      LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
      LLVMBuildStore(builder, ctx->i32_0, ptr);
   }
   ac_build_endif(&ctx->ac, 5090);

   ac_build_s_barrier(&ctx->ac);
}
void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
{
   const struct si_shader_selector *sel = ctx->shader->selector;
   const struct si_shader_info *info = &sel->info;
   const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
   LLVMValueRef tmp, tmp2;

   /* Zero out remaining (non-emitted) primitive flags.
    *
    * Note: Alternatively, we could pass the relevant gs_next_vertex to
    *       the emit threads via LDS. This is likely worse in the expected
    *       typical case where each GS thread emits the full set of
    *       vertices.
    */
   for (unsigned stream = 0; stream < 4; ++stream) {
      if (!info->num_stream_output_components[stream])
         continue;

      const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

      ac_build_bgnloop(&ctx->ac, 5100);

      const LLVMValueRef vertexidx =
         LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
      tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
                          LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
      ac_build_ifcc(&ctx->ac, tmp, 5101);
      ac_build_break(&ctx->ac);
      ac_build_endif(&ctx->ac, 5101);

      tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
      LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

      tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
      LLVMBuildStore(builder, i8_0, ngg_gs_get_emit_primflag_ptr(ctx, tmp, stream));

      ac_build_endloop(&ctx->ac, 5100);
   }

   /* Accumulate generated primitives counts across the entire threadgroup. */
   for (unsigned stream = 0; stream < 4; ++stream) {
      if (!info->num_stream_output_components[stream])
         continue;

      LLVMValueRef numprims =
         LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
      numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);

      tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->i32_0, "");
      ac_build_ifcc(&ctx->ac, tmp, 5105);
      {
         LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
                            ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
                                          LLVMConstInt(ctx->i32, stream, false)),
                            numprims, LLVMAtomicOrderingMonotonic, false);
      }
      ac_build_endif(&ctx->ac, 5105);
   }

   ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

   ac_build_s_barrier(&ctx->ac);

   const LLVMValueRef tid = get_thread_id_in_tg(ctx);
   LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);

   if (sel->so.num_outputs) {
      struct ngg_streamout nggso = {};

      nggso.num_vertices = LLVMConstInt(ctx->i32, verts_per_prim, false);

      LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
      for (unsigned stream = 0; stream < 4; ++stream) {
         if (!info->num_stream_output_components[stream])
            continue;

         tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream), "");
         tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
         tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
         nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
      }

      for (unsigned i = 0; i < verts_per_prim; ++i) {
         tmp = LLVMBuildSub(builder, tid,
                            LLVMConstInt(ctx->i32, verts_per_prim - i - 1, false), "");
         tmp = ngg_gs_vertex_ptr(ctx, tmp);
         nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
      }

      build_streamout(ctx, &nggso);
   }

   /* Write shader query data. */
   if (ctx->screen->use_ngg_streamout) {
      tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
      tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
      ac_build_ifcc(&ctx->ac, tmp, 5109); /* if (STREAMOUT_QUERY_ENABLED) */
      unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
      tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
                          LLVMConstInt(ctx->i32, num_query_comps, false), "");
      ac_build_ifcc(&ctx->ac, tmp, 5110);
      {
         LLVMValueRef offset;
         tmp = tid;
         if (sel->so.num_outputs)
            tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->i32, 3, false), "");
         offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 32, false), "");
         if (sel->so.num_outputs) {
            tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->i32, 2, false), "");
            tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 8, false), "");
            offset = LLVMBuildAdd(builder, offset, tmp, "");
         }

         tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
         LLVMValueRef args[] = {
            tmp,
            ngg_get_query_buf(ctx),
            offset,
            LLVMConstInt(ctx->i32, 16, false), /* soffset */
            ctx->i32_0, /* cachepolicy */
         };
         ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
                            ctx->i32, args, 5, 0);
      }
      ac_build_endif(&ctx->ac, 5110);
      ac_build_endif(&ctx->ac, 5109);
   }

   /* Determine vertex liveness. */
   LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");

   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
   ac_build_ifcc(&ctx->ac, tmp, 5120);
   {
      for (unsigned i = 0; i < verts_per_prim; ++i) {
         const LLVMValueRef primidx =
            LLVMBuildAdd(builder, tid,
                         LLVMConstInt(ctx->ac.i32, i, false), "");

         if (i > 0) {
            tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
            ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
         }

         /* Load primitive liveness */
         tmp = ngg_gs_vertex_ptr(ctx, primidx);
         tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
         const LLVMValueRef primlive =
            LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

         tmp = LLVMBuildLoad(builder, vertliveptr, "");
         tmp = LLVMBuildOr(builder, tmp, primlive, ""),
         LLVMBuildStore(builder, tmp, vertliveptr);

         if (i > 0)
            ac_build_endif(&ctx->ac, 5121 + i);
      }
   }
   ac_build_endif(&ctx->ac, 5120);

   /* Inclusive scan addition across the current wave. */
   LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
   struct ac_wg_scan vertlive_scan = {};
   vertlive_scan.op = nir_op_iadd;
   vertlive_scan.enable_reduce = true;
   vertlive_scan.enable_exclusive = true;
   vertlive_scan.src = vertlive;
   vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->i32_0);
   vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
   vertlive_scan.numwaves = get_tgsize(ctx);
   vertlive_scan.maxwaves = 8;

   ac_build_wg_scan(&ctx->ac, &vertlive_scan);

   /* Skip all exports (including index exports) when possible. At least on
    * early gfx10 revisions this is also to avoid hangs.
    */
   LLVMValueRef have_exports =
      LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
   num_emit_threads =
      LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

   /* Allocate export space. Send this message as early as possible, to
    * hide the latency of the SQ <-> SPI roundtrip.
    *
    * Note: We could consider compacting primitives for export as well.
    *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
    *       prim data per clock and skips null primitives at no additional
    *       cost. So compacting primitives can only be beneficial when
    *       there are 4 or more contiguous null primitives in the export
    *       (in the common case of single-dword prim exports).
    */
   ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
                                 vertlive_scan.result_reduce, num_emit_threads);

   /* Setup the reverse vertex compaction permutation. We re-use stream 1
    * of the primitive liveness flags, relying on the fact that each
    * threadgroup can have at most 256 threads. */
   ac_build_ifcc(&ctx->ac, vertlive, 5130);
   {
      tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
      tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
      LLVMBuildStore(builder, tmp2, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1));
   }
   ac_build_endif(&ctx->ac, 5130);

   ac_build_s_barrier(&ctx->ac);

   /* Export primitive data */
   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
   ac_build_ifcc(&ctx->ac, tmp, 5140);
   {
      LLVMValueRef flags;
      struct ac_ngg_prim prim = {};
      prim.num_vertices = verts_per_prim;

      tmp = ngg_gs_vertex_ptr(ctx, tid);
      flags = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
      prim.isnull = LLVMBuildNot(builder, LLVMBuildTrunc(builder, flags, ctx->i1, ""), "");

      for (unsigned i = 0; i < verts_per_prim; ++i) {
         prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
                                      LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
         prim.edgeflag[i] = ctx->ac.i1false;
      }

      /* Geometry shaders output triangle strips, but NGG expects triangles.
       * We need to change the vertex order for odd triangles to get correct
       * front/back facing by swapping 2 vertex indices, but we also have to
       * keep the provoking vertex in the same place.
       *
       * If the first vertex is provoking, swap index 1 and 2.
       * If the last vertex is provoking, swap index 0 and 1.
       */
      if (verts_per_prim == 3) {
         LLVMValueRef is_odd = LLVMBuildLShr(builder, flags, ctx->ac.i8_1, "");
         is_odd = LLVMBuildTrunc(builder, is_odd, ctx->i1, "");
         LLVMValueRef flatshade_first =
            LLVMBuildICmp(builder, LLVMIntEQ,
                          si_unpack_param(ctx, ctx->vs_state_bits, 4, 2),
                          ctx->i32_0, "");

         struct ac_ngg_prim in = prim;
         prim.index[0] = LLVMBuildSelect(builder, flatshade_first,
                                         in.index[0],
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[1], in.index[0], ""), "");
         prim.index[1] = LLVMBuildSelect(builder, flatshade_first,
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[2], in.index[1], ""),
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[0], in.index[1], ""), "");
         prim.index[2] = LLVMBuildSelect(builder, flatshade_first,
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[1], in.index[2], ""),
                                         in.index[2], "");
      }

      ac_build_export_prim(&ctx->ac, &prim);
   }
   ac_build_endif(&ctx->ac, 5140);

   /* Export position and parameter data */
   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
   ac_build_ifcc(&ctx->ac, tmp, 5145);
   {
      struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];

      tmp = ngg_gs_vertex_ptr(ctx, tid);
      tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1), "");
      tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
      const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

      unsigned out_idx = 0;
      for (unsigned i = 0; i < info->num_outputs; i++) {
         outputs[i].semantic_name = info->output_semantic_name[i];
         outputs[i].semantic_index = info->output_semantic_index[i];

         for (unsigned j = 0; j < 4; j++, out_idx++) {
            tmp = ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx);
            tmp = LLVMBuildLoad(builder, tmp, "");
            outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
            outputs[i].vertex_stream[j] =
               (info->output_streams[i] >> (2 * j)) & 3;
         }
      }

      si_llvm_export_vs(ctx, outputs, info->num_outputs);
   }
   ac_build_endif(&ctx->ac, 5145);
}
static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
                                     unsigned min_verts_per_prim, bool use_adjacency)
{
   unsigned max_reuse = max_esverts - min_verts_per_prim;
   if (use_adjacency)
      max_reuse /= 2;
   *max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}
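/* Example: max_esverts = 128 with triangle input (min_verts_per_prim = 3,
 * no adjacency) gives max_reuse = 125, so max_gsprims is clamped to 126.
 */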
/**
 * Determine subgroup information like maximum number of vertices and prims.
 *
 * This happens before the shader is uploaded, since LDS relocations during
 * upload depend on the subgroup size.
 */
void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
{
   const struct si_shader_selector *gs_sel = shader->selector;
   const struct si_shader_selector *es_sel =
      shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
   const enum pipe_shader_type gs_type = gs_sel->type;
   const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
   const unsigned input_prim = si_get_input_prim(gs_sel);
   const bool use_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
                              input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
   const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
   const unsigned min_verts_per_prim =
      gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;

   /* All these are in dwords: */
   /* We can't allow using the whole LDS, because GS waves compete with
    * other shader stages for LDS space.
    *
    * TODO: We should really take the shader's internal LDS use into
    *       account. The linker will fail if the size is greater than
    *       what the hardware supports.
    */
   const unsigned max_lds_size = 8 * 1024 - 768;
   const unsigned target_lds_size = max_lds_size;
   unsigned esvert_lds_size = 0;
   unsigned gsprim_lds_size = 0;

   /* All these are per subgroup: */
   bool max_vert_out_per_gs_instance = false;
   unsigned max_esverts_base = 128;
   unsigned max_gsprims_base = 128; /* default prim group size clamp */

   /* Hardware has the following non-natural restrictions on the value
    * of GE_CNTL.VERT_GRP_SIZE based on based on the primitive type of
    * the draw:
    *  - at most 252 for any line input primitive type
    *  - at most 251 for any quad input primitive type
    *  - at most 251 for triangle strips with adjacency (this happens to
    *    be the natural limit for triangle *lists* with adjacency)
    */
   max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

   if (gs_type == PIPE_SHADER_GEOMETRY) {
      unsigned max_out_verts_per_gsprim =
         gs_sel->gs_max_out_vertices * gs_num_invocations;

      if (max_out_verts_per_gsprim <= 256) {
         if (max_out_verts_per_gsprim) {
            max_gsprims_base = MIN2(max_gsprims_base,
                                    256 / max_out_verts_per_gsprim);
         }
      } else {
         /* Use special multi-cycling mode in which each GS
          * instance gets its own subgroup. Does not work with
          * tessellation. */
         max_vert_out_per_gs_instance = true;
         max_gsprims_base = 1;
         max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
      }

      esvert_lds_size = es_sel->esgs_itemsize / 4;
      gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
   } else {
      /* LDS size for passing data from ES to GS. */
      esvert_lds_size = ngg_nogs_vertex_size(shader);

      /* LDS size for passing data from GS to ES.
       * GS stores Primitive IDs into LDS at the address corresponding
       * to the ES thread of the provoking vertex. All ES threads
       * load and export PrimitiveID for their thread.
       */
      if (gs_sel->type == PIPE_SHADER_VERTEX &&
          shader->key.mono.u.vs_export_prim_id)
         esvert_lds_size = MAX2(esvert_lds_size, 1);
   }

   unsigned max_gsprims = max_gsprims_base;
   unsigned max_esverts = max_esverts_base;

   if (esvert_lds_size)
      max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
   if (gsprim_lds_size)
      max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

   max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
   clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
   assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

   if (esvert_lds_size || gsprim_lds_size) {
      /* Now that we have a rough proportionality between esverts
       * and gsprims based on the primitive type, scale both of them
       * down simultaneously based on required LDS space.
       *
       * We could be smarter about this if we knew how much vertex
       * reuse to expect.
       */
      unsigned lds_total = max_esverts * esvert_lds_size +
                           max_gsprims * gsprim_lds_size;
      if (lds_total > target_lds_size) {
         max_esverts = max_esverts * target_lds_size / lds_total;
         max_gsprims = max_gsprims * target_lds_size / lds_total;

         max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
         clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
                                  min_verts_per_prim, use_adjacency);
         assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
      }
   }

   /* Round up towards full wave sizes for better ALU utilization. */
   if (!max_vert_out_per_gs_instance) {
      const unsigned wavesize = gs_sel->screen->ge_wave_size;
      unsigned orig_max_esverts;
      unsigned orig_max_gsprims;
      do {
         orig_max_esverts = max_esverts;
         orig_max_gsprims = max_gsprims;

         max_esverts = align(max_esverts, wavesize);
         max_esverts = MIN2(max_esverts, max_esverts_base);
         if (esvert_lds_size)
            max_esverts = MIN2(max_esverts,
                               (max_lds_size - max_gsprims * gsprim_lds_size) /
                               esvert_lds_size);
         max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

         max_gsprims = align(max_gsprims, wavesize);
         max_gsprims = MIN2(max_gsprims, max_gsprims_base);
         if (gsprim_lds_size)
            max_gsprims = MIN2(max_gsprims,
                               (max_lds_size - max_esverts * esvert_lds_size) /
                               gsprim_lds_size);
         clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
                                  min_verts_per_prim, use_adjacency);
         assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
      } while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
   }

   /* Hardware restriction: minimum value of max_esverts */
   max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);

   unsigned max_out_vertices =
      max_vert_out_per_gs_instance ? gs_sel->gs_max_out_vertices :
      gs_type == PIPE_SHADER_GEOMETRY ?
      max_gsprims * gs_num_invocations * gs_sel->gs_max_out_vertices :
      max_esverts;
   assert(max_out_vertices <= 256);

   unsigned prim_amp_factor = 1;
   if (gs_type == PIPE_SHADER_GEOMETRY) {
      /* Number of output primitives per GS input primitive after
       * GS instancing. */
      prim_amp_factor = gs_sel->gs_max_out_vertices;
   }

   /* The GE only checks against the maximum number of ES verts after
    * allocating a full GS primitive. So we need to ensure that whenever
    * this check passes, there is enough space for a full primitive without
    * vertex reuse.
    */
   shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
   shader->ngg.max_gsprims = max_gsprims;
   shader->ngg.max_out_verts = max_out_vertices;
   shader->ngg.prim_amp_factor = prim_amp_factor;
   shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;

   shader->gs_info.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
   shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;

   assert(shader->ngg.hw_max_esverts >= 24); /* HW limitation */
}