/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_shader_internal.h"

#include "util/u_memory.h"
#include "util/u_prim.h"
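/* Helpers for reading the wave/threadgroup layout out of the merged wave
 * info SGPR. Following the si_unpack_param calls below, the wave id within
 * the threadgroup lives in bits [24:27] and the threadgroup size (in waves)
 * in bits [28:31].
 */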
static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->param_merged_wave_info, 24, 4);
}
static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->param_merged_wave_info, 28, 4);
}
static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
			   LLVMConstInt(ctx->ac.i32, 64, false), "");
	return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}
static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 12, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}
static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 22, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}
static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    ctx->i32_0,
			    LLVMConstInt(ctx->ac.i32, 11, false),
			    false);
}
static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
{
	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
					    ctx->param_rw_buffers);

	return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
				     LLVMConstInt(ctx->i32, GFX10_GS_QUERY_BUF, false));
}
/* Send GS Alloc Req message from the first wave of the group to SPI.
 * Message payload is:
 * - bits 0..10: vertices in group
 * - bits 12..22: primitives in group
 */
static void build_sendmsg_gs_alloc_req(struct si_shader_context *ctx,
				       LLVMValueRef vtx_cnt,
				       LLVMValueRef prim_cnt)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5020);

	tmp = LLVMBuildShl(builder, prim_cnt, LLVMConstInt(ctx->ac.i32, 12, false), "");
	tmp = LLVMBuildOr(builder, tmp, vtx_cnt, "");
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_ALLOC_REQ, tmp);

	ac_build_endif(&ctx->ac, 5020);
}
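/* For example, a group that emits 3 vertices and 1 primitive requests its
 * export space with the payload (1 << 12) | 3 = 0x1003.
 */

/* Description of one primitive to export, consumed by build_export_prim(). */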
struct ngg_prim {
	unsigned num_vertices;
	LLVMValueRef isnull;
	LLVMValueRef index[3];
	LLVMValueRef edgeflag[3];
};
static void build_export_prim(struct si_shader_context *ctx,
			      const struct ngg_prim *prim)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	struct ac_export_args args;
	LLVMValueRef tmp;

	tmp = LLVMBuildZExt(builder, prim->isnull, ctx->ac.i32, "");
	args.out[0] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 31, false), "");

	for (unsigned i = 0; i < prim->num_vertices; ++i) {
		tmp = LLVMBuildShl(builder, prim->index[i],
				   LLVMConstInt(ctx->ac.i32, 10 * i, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
		tmp = LLVMBuildZExt(builder, prim->edgeflag[i], ctx->ac.i32, "");
		tmp = LLVMBuildShl(builder, tmp,
				   LLVMConstInt(ctx->ac.i32, 10 * i + 9, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
	}

	args.out[0] = LLVMBuildBitCast(builder, args.out[0], ctx->ac.f32, "");
	args.out[1] = LLVMGetUndef(ctx->ac.f32);
	args.out[2] = LLVMGetUndef(ctx->ac.f32);
	args.out[3] = LLVMGetUndef(ctx->ac.f32);

	args.target = V_008DFC_SQ_EXP_PRIM;
	args.enabled_channels = 1;
	args.done = true;
	args.valid_mask = false;
	args.compr = false;

	ac_build_export(&ctx->ac, &args);
}
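/* Write the streamout outputs of a single vertex for the given stream:
 * the values are loaded from the per-vertex LDS area at vertexptr and
 * written at the workgroup offsets (in dwords) passed in wg_offset_dw.
 */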
static void build_streamout_vertex(struct si_shader_context *ctx,
				   LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
				   unsigned stream, LLVMValueRef offset_vtx,
				   LLVMValueRef vertexptr)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef offset[4] = {};
	LLVMValueRef tmp;

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (!wg_offset_dw[buffer])
			continue;

		tmp = LLVMBuildMul(builder, offset_vtx,
				   LLVMConstInt(ctx->i32, so->stride[buffer], false), "");
		tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
		offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->i32, 2, false), "");
	}

	for (unsigned i = 0; i < so->num_outputs; ++i) {
		if (so->output[i].stream != stream)
			continue;

		unsigned reg = so->output[i].register_index;
		struct si_shader_output_values out;
		out.semantic_name = info->output_semantic_name[reg];
		out.semantic_index = info->output_semantic_index[reg];

		for (unsigned comp = 0; comp < 4; comp++) {
			tmp = ac_build_gep0(&ctx->ac, vertexptr,
					    LLVMConstInt(ctx->i32, 4 * reg + comp, false));
			out.values[comp] = LLVMBuildLoad(builder, tmp, "");
			out.vertex_stream[comp] =
				(info->output_streams[reg] >> (2 * comp)) & 3;
		}

		si_emit_streamout_output(ctx, so_buffer, offset, &so->output[i], &out);
	}
}
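/* Inputs and outputs of build_streamout(). The caller fills num_vertices,
 * prim_enable and vertices; build_streamout() returns the per-stream emitted
 * primitive counts in emit[].
 */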
struct ngg_streamout {
	LLVMValueRef num_vertices;

	/* per-thread data */
	LLVMValueRef prim_enable[4]; /* i1 per stream */
	LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */

	LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
};
/**
 * Build streamout logic.
 *
 * Writes number of emitted primitives to gs_ngg_scratch[4:8].
 *
 * Clobbers gs_ngg_scratch[8:].
 */
static void build_streamout(struct si_shader_context *ctx,
			    struct ngg_streamout *nggso)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp, tmp2;
	LLVMValueRef i32_2 = LLVMConstInt(ctx->i32, 2, false);
	LLVMValueRef i32_4 = LLVMConstInt(ctx->i32, 4, false);
	LLVMValueRef i32_8 = LLVMConstInt(ctx->i32, 8, false);
	LLVMValueRef so_buffer[4] = {};
	unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
				    (nggso->vertices[2] ? 1 : 0);
	LLVMValueRef prim_stride_dw[4] = {};
	LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->i32);
	int stream_for_buffer[4] = { -1, -1, -1, -1 };
	unsigned bufmask_for_stream[4] = {};
	bool isgs = ctx->type == PIPE_SHADER_GEOMETRY;
	unsigned scratch_emit_base = isgs ? 4 : 0;
	LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->i32_0;
	unsigned scratch_offset_base = isgs ? 8 : 4;
	LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;

	ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);

	/* Determine the mapping of streamout buffers to vertex streams. */
	for (unsigned i = 0; i < so->num_outputs; ++i) {
		unsigned buf = so->output[i].output_buffer;
		unsigned stream = so->output[i].stream;
		assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
		stream_for_buffer[buf] = stream;
		bufmask_for_stream[stream] |= 1 << buf;
	}

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (stream_for_buffer[buffer] == -1)
			continue;

		assert(so->stride[buffer]);

		tmp = LLVMConstInt(ctx->i32, so->stride[buffer], false);
		prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
		prim_stride_dw_vgpr = ac_build_writelane(
			&ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
			LLVMConstInt(ctx->i32, buffer, false));

		so_buffer[buffer] = ac_build_load_to_sgpr(
			&ctx->ac, buf_ptr,
			LLVMConstInt(ctx->i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
	}

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5200);
	{
		LLVMTypeRef gdsptr = LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GDS);
		LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->i32_0, gdsptr, "");

		/* Advance the streamout offsets in GDS. */
		LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
		LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");

		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5210);
		{
			if (isgs) {
				tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
				tmp = LLVMBuildLoad(builder, tmp, "");
			} else {
				tmp = ac_build_writelane(&ctx->ac, ctx->i32_0,
							 ngg_get_prim_cnt(ctx), ctx->i32_0);
			}
			LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);

			unsigned swizzle[4];
			int unused_stream = -1;
			for (unsigned stream = 0; stream < 4; ++stream) {
				if (!info->num_stream_output_components[stream]) {
					unused_stream = stream;
					break;
				}
			}
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] >= 0) {
					swizzle[buffer] = stream_for_buffer[buffer];
				} else {
					assert(unused_stream >= 0);
					swizzle[buffer] = unused_stream;
				}
			}

			tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
						    swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
			tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");

			LLVMValueRef args[] = {
				LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
				tmp,
				ctx->i32_0, // ordering
				ctx->i32_0, // scope
				ctx->ac.i1false, // isVolatile
				LLVMConstInt(ctx->i32, 4 << 24, false), // OA index
				ctx->ac.i1true, // wave release
				ctx->ac.i1true, // wave done
			};
			tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
						 ctx->i32, args, ARRAY_SIZE(args), 0);

			/* Keep offsets in a VGPR for quick retrieval via readlane by
			 * the first wave for bounds checking, and also store in LDS
			 * for retrieval by all waves later. */
			LLVMBuildStore(builder, tmp, offsets_vgpr);

			tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					    scratch_offset_basev, "");
			tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
			LLVMBuildStore(builder, tmp, tmp2);
		}
		ac_build_endif(&ctx->ac, 5210);

		/* Determine the max emit per buffer. This is done via the SALU, in part
		 * because LLVM can't generate divide-by-multiply if we try to do this
		 * via VALU with one lane per buffer.
		 */
		LLVMValueRef max_emit[4] = {};
		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] == -1)
				continue;

			LLVMValueRef bufsize_dw =
				LLVMBuildLShr(builder,
					      LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
					      i32_2, "");

			tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
			LLVMValueRef offset_dw =
				ac_build_readlane(&ctx->ac, tmp,
						  LLVMConstInt(ctx->i32, buffer, false));

			tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
			tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");

			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
			max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->i32_0, tmp, "");
		}

		/* Determine the number of emitted primitives per stream and fixup the
		 * GDS counter if necessary.
		 *
		 * This is complicated by the fact that a single stream can emit to
		 * multiple buffers (but luckily not vice versa).
		 */
		LLVMValueRef emit_vgpr = ctx->i32_0;

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
			LLVMValueRef generated =
				ac_build_readlane(&ctx->ac, tmp,
						  LLVMConstInt(ctx->i32, stream, false));

			LLVMValueRef emit = generated;
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] == stream)
					emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
			}

			emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
						       LLVMConstInt(ctx->i32, stream, false));

			/* Fixup the offset using a plain GDS atomic if we overflowed. */
			tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
			ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
			tmp = LLVMBuildLShr(builder,
					    LLVMConstInt(ctx->i32, bufmask_for_stream[stream], false),
					    ac_get_thread_id(&ctx->ac), "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			ac_build_ifcc(&ctx->ac, tmp, 5222);
			{
				tmp = LLVMBuildSub(builder, generated, emit, "");
				tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
				tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
				LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
						   LLVMAtomicOrderingMonotonic, false);
			}
			ac_build_endif(&ctx->ac, 5222);
			ac_build_endif(&ctx->ac, 5221);
		}

		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5225);
		{
			tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					   scratch_emit_basev, "");
			tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
			LLVMBuildStore(builder, emit_vgpr, tmp);
		}
		ac_build_endif(&ctx->ac, 5225);
	}
	ac_build_endif(&ctx->ac, 5200);

	/* Determine the workgroup-relative per-thread / primitive offset into
	 * the streamout buffers */
	struct ac_wg_scan primemit_scan[4] = {};

	if (isgs) {
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			primemit_scan[stream].enable_exclusive = true;
			primemit_scan[stream].op = nir_op_iadd;
			primemit_scan[stream].src = nggso->prim_enable[stream];
			primemit_scan[stream].scratch =
				ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
					      LLVMConstInt(ctx->i32, 12 + 8 * stream, false));
			primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
			primemit_scan[stream].numwaves = get_tgsize(ctx);
			primemit_scan[stream].maxwaves = 8;
			ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
		}
	}

	ac_build_s_barrier(&ctx->ac);

	/* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
	LLVMValueRef wgoffset_dw[4] = {};

	{
		LLVMValueRef scratch_vgpr;

		tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
		scratch_vgpr = LLVMBuildLoad(builder, tmp, "");

		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] >= 0) {
				wgoffset_dw[buffer] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_offset_base + buffer, false));
			}
		}

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (info->num_stream_output_components[stream]) {
				nggso->emit[stream] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_emit_base + stream, false));
			}
		}
	}

	/* Write out primitive data */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		if (isgs) {
			ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
		} else {
			primemit_scan[stream].result_exclusive = tid;
		}

		tmp = LLVMBuildICmp(builder, LLVMIntULT,
				    primemit_scan[stream].result_exclusive,
				    nggso->emit[stream], "");
		tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
		ac_build_ifcc(&ctx->ac, tmp, 5240);
		{
			LLVMValueRef offset_vtx =
				LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
					     nggso->num_vertices, "");

			for (unsigned i = 0; i < max_num_vertices; ++i) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT,
						    LLVMConstInt(ctx->i32, i, false),
						    nggso->num_vertices, "");
				ac_build_ifcc(&ctx->ac, tmp, 5241);
				build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
						       stream, offset_vtx, nggso->vertices[i]);
				ac_build_endif(&ctx->ac, 5241);
				offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->i32_1, "");
			}
		}
		ac_build_endif(&ctx->ac, 5240);
	}
}
/**
 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
 * for the vertex outputs.
 */
static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx,
					LLVMValueRef vtxid)
{
	/* The extra dword is used to avoid LDS bank conflicts. */
	unsigned vertex_size = 4 * ctx->shader->selector->info.num_outputs + 1;
	LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, vertex_size);
	LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
	LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
	return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
}
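/* For example, a shader with 8 outputs gets a per-vertex stride of
 * 4 * 8 + 1 = 33 dwords; the odd stride is what avoids the LDS bank
 * conflicts mentioned above.
 */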
/**
 * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
 */
void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
			     unsigned max_outputs,
			     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_selector *sel = ctx->shader->selector;
	struct tgsi_shader_info *info = &sel->info;
	struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
	LLVMBuilderRef builder = ctx->ac.builder;
	struct lp_build_if_state if_state;
	LLVMValueRef tmp, tmp2;

	assert(!ctx->shader->is_gs_copy_shader);
	assert(info->num_outputs <= max_outputs);

	LLVMValueRef vertex_ptr = NULL;

	if (sel->so.num_outputs)
		vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));

	for (unsigned i = 0; i < info->num_outputs; i++) {
		outputs[i].semantic_name = info->output_semantic_name[i];
		outputs[i].semantic_index = info->output_semantic_index[i];

		for (unsigned j = 0; j < 4; j++) {
			outputs[i].vertex_stream[j] =
				(info->output_streams[i] >> (2 * j)) & 3;

			/* TODO: we may store more outputs than streamout needs,
			 * but streamout performance isn't that important.
			 */
			if (sel->so.num_outputs) {
				tmp = ac_build_gep0(&ctx->ac, vertex_ptr,
						    LLVMConstInt(ctx->i32, 4 * i + j, false));
				tmp2 = LLVMBuildLoad(builder, addrs[4 * i + j], "");
				tmp2 = ac_to_integer(&ctx->ac, tmp2);
				LLVMBuildStore(builder, tmp2, tmp);
			}
		}
	}

	lp_build_endif(&ctx->merged_wrap_if_state);

	LLVMValueRef prims_in_wave = si_unpack_param(ctx, ctx->param_merged_wave_info, 8, 8);
	LLVMValueRef vtx_in_wave = si_unpack_param(ctx, ctx->param_merged_wave_info, 0, 8);
	LLVMValueRef is_gs_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), prims_in_wave, "");
	LLVMValueRef is_es_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), vtx_in_wave, "");
	LLVMValueRef vtxindex[] = {
		si_unpack_param(ctx, ctx->param_gs_vtx01_offset, 0, 16),
		si_unpack_param(ctx, ctx->param_gs_vtx01_offset, 16, 16),
		si_unpack_param(ctx, ctx->param_gs_vtx23_offset, 0, 16),
	};

	/* Determine the number of vertices per primitive. */
	unsigned num_vertices;
	LLVMValueRef num_vertices_val;

	if (ctx->type == PIPE_SHADER_VERTEX) {
		if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) {
			/* Blits always use axis-aligned rectangles with 3 vertices. */
			num_vertices = 3;
			num_vertices_val = LLVMConstInt(ctx->i32, 3, 0);
		} else {
			/* Extract OUTPRIM field. */
			tmp = si_unpack_param(ctx, ctx->param_vs_state_bits, 2, 2);
			num_vertices_val = LLVMBuildAdd(builder, tmp, ctx->i32_1, "");
			num_vertices = 3; /* TODO: optimize for points & lines */
		}
	} else {
		assert(ctx->type == PIPE_SHADER_TESS_EVAL);

		if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
			num_vertices = 1;
		else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
			num_vertices = 2;
		else
			num_vertices = 3;

		num_vertices_val = LLVMConstInt(ctx->i32, num_vertices, false);
	}

	LLVMValueRef emitted_prims = NULL;

	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = num_vertices_val;
		nggso.prim_enable[0] = is_gs_thread;

		for (unsigned i = 0; i < num_vertices; ++i)
			nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);

		build_streamout(ctx, &nggso);
		emitted_prims = nggso.emit[0];
	}

	/* Copy Primitive IDs from GS threads to the LDS address corresponding
	 * to the ES thread of the provoking vertex.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX &&
	    ctx->shader->key.mono.u.vs_export_prim_id) {
		/* Streamout uses LDS. We need to wait for it before we can reuse it. */
		if (sel->so.num_outputs)
			ac_build_s_barrier(&ctx->ac);

		ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
		/* Extract the PROVOKING_VTX_INDEX field. */
		LLVMValueRef provoking_vtx_in_prim =
			si_unpack_param(ctx, ctx->param_vs_state_bits, 4, 2);

		/* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
		LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
		LLVMValueRef provoking_vtx_index =
			LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");

		LLVMBuildStore(builder, ctx->abi.gs_prim_id,
			       ac_build_gep0(&ctx->ac, ctx->esgs_ring, provoking_vtx_index));
		ac_build_endif(&ctx->ac, 5400);
	}

	/* TODO: primitive culling */

	build_sendmsg_gs_alloc_req(ctx, ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));

	/* Update query buffer */
	/* TODO: this won't catch 96-bit clear_buffer via transform feedback. */
	if (!info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) {
		tmp = si_unpack_param(ctx, ctx->param_vs_state_bits, 6, 1);
		tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
		ac_build_ifcc(&ctx->ac, tmp, 5029); /* if (STREAMOUT_QUERY_ENABLED) */
		tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5030);
		tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
				    sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5031);
		{
			LLVMValueRef args[] = {
				ngg_get_prim_cnt(ctx),
				ngg_get_query_buf(ctx),
				LLVMConstInt(ctx->i32, 16, false), /* offset of stream[0].generated_primitives */
				ctx->i32_0, /* soffset */
				ctx->i32_0, /* cachepolicy */
			};

			if (sel->so.num_outputs) {
				args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->i32_1);
				args[2] = ac_build_writelane(&ctx->ac, args[2],
							     LLVMConstInt(ctx->i32, 24, false), ctx->i32_1);
			}

			/* TODO: should this be 64-bit atomics? */
			ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
					   ctx->i32, args, 5, 0);
		}
		ac_build_endif(&ctx->ac, 5031);
		ac_build_endif(&ctx->ac, 5030);
		ac_build_endif(&ctx->ac, 5029);
	}

	/* Export primitive data to the index buffer. Format is:
	 * - bits 0..8: index 0
	 * - bit 9: edge flag 0
	 * - bits 10..18: index 1
	 * - bit 19: edge flag 1
	 * - bits 20..28: index 2
	 * - bit 29: edge flag 2
	 * - bit 31: null primitive (skip)
	 *
	 * For the first version, we will always build up all three indices
	 * independent of the primitive type. The additional garbage data
	 * shouldn't hurt.
	 *
	 * TODO: culling depends on the primitive type, so can have some
	 * interaction here.
	 */
	lp_build_if(&if_state, &ctx->gallivm, is_gs_thread);
	{
		struct ngg_prim prim = {};

		prim.num_vertices = num_vertices;
		prim.isnull = ctx->ac.i1false;
		memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

		for (unsigned i = 0; i < num_vertices; ++i) {
			tmp = LLVMBuildLShr(builder, ctx->abi.gs_invocation_id,
					    LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
			prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
		}

		build_export_prim(ctx, &prim);
	}
	lp_build_endif(&if_state);

	/* Export per-vertex data (positions and parameters). */
	lp_build_if(&if_state, &ctx->gallivm, is_es_thread);
	{
		unsigned i;

		/* Unconditionally (re-)load the values for proper SSA form. */
		for (i = 0; i < info->num_outputs; i++) {
			for (unsigned j = 0; j < 4; j++) {
				outputs[i].values[j] =
					LLVMBuildLoad(builder,
						      addrs[4 * i + j],
						      "");
			}
		}

		if (ctx->shader->key.mono.u.vs_export_prim_id) {
			outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
			outputs[i].semantic_index = 0;

			if (ctx->type == PIPE_SHADER_VERTEX) {
				/* Wait for GS stores to finish. */
				ac_build_s_barrier(&ctx->ac);

				tmp = ac_build_gep0(&ctx->ac, ctx->esgs_ring,
						    get_thread_id_in_tg(ctx));
				outputs[i].values[0] = LLVMBuildLoad(builder, tmp, "");
			} else {
				assert(ctx->type == PIPE_SHADER_TESS_EVAL);
				outputs[i].values[0] = si_get_primitive_id(ctx, 0);
			}

			outputs[i].values[0] = ac_to_float(&ctx->ac, outputs[i].values[0]);
			for (unsigned j = 1; j < 4; j++)
				outputs[i].values[j] = LLVMGetUndef(ctx->f32);

			memset(outputs[i].vertex_stream, 0,
			       sizeof(outputs[i].vertex_stream));

			i++;
		}

		si_llvm_export_vs(ctx, outputs, i);
	}
	lp_build_endif(&if_state);
}
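/* Returns the LDS storage used for GS vertex emission, typed as an unbounded
 * array of per-vertex records { [4 * num_outputs x i32] data,
 * [4 x i8] primitive flags (one byte per stream) }.
 */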
static LLVMValueRef
ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;

	LLVMTypeRef elements[2] = {
		LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
		LLVMArrayType(ctx->ac.i8, 4),
	};
	LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
	type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
	return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}
/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive
 * for rasterization.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
static LLVMValueRef
ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

	/* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
	unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
	if (write_stride_2exp) {
		LLVMValueRef row =
			LLVMBuildLShr(builder, vertexidx,
				      LLVMConstInt(ctx->ac.i32, 5, false), "");
		LLVMValueRef swizzle =
			LLVMBuildAnd(builder, row,
				     LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
						  false), "");
		vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
	}

	return ac_build_gep0(&ctx->ac, storage, vertexidx);
}
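/* Like ngg_gs_vertex_ptr, but for the emitidx'th vertex emitted by the given
 * GS thread (vertex index = gsthread * gs_max_out_vertices + emitidx).
 */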
static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
		       LLVMValueRef emitidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
	tmp = LLVMBuildMul(builder, tmp, gsthread, "");
	const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
	return ngg_gs_vertex_ptr(ctx, vertexidx);
}
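/* Called for each GS EmitVertex() on the given stream: store the vertex
 * outputs into LDS, record whether this vertex completes a primitive, and
 * bump the per-thread generated-primitive counter.
 */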
void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
			      unsigned stream,
			      LLVMValueRef *addrs)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	struct lp_build_if_state if_state;
	LLVMValueRef tmp;

	const LLVMValueRef vertexidx =
		LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 */
	const LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
			      LLVMConstInt(ctx->i32, sel->gs_max_out_vertices, false), "");

	tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
	tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
	LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

	lp_build_if(&if_state, &ctx->gallivm, can_emit);

	const LLVMValueRef vertexptr =
		ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
	unsigned out_idx = 0;
	for (unsigned i = 0; i < info->num_outputs; i++) {
		for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implied C-style array */
				ctx->ac.i32_0, /* first entry of struct */
				LLVMConstInt(ctx->ac.i32, out_idx, false),
			};
			LLVMValueRef ptr = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			LLVMBuildStore(builder, out_val, ptr);
		}
	}
	assert(out_idx * 4 == sel->gsvs_vertex_size);

	/* Determine and store whether this vertex completed a primitive. */
	const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

	tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
	const LLVMValueRef iscompleteprim =
		LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

	tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
	LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_1, /* second struct entry */
		LLVMConstInt(ctx->ac.i32, stream, false),
	};
	const LLVMValueRef primflagptr =
		LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

	tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
	LLVMBuildStore(builder, tmp, primflagptr);

	tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
	tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
	LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);

	lp_build_endif(&if_state);
}
void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
{
	/* Zero out the part of LDS scratch that is used to accumulate the
	 * per-stream generated primitive count.
	 */
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->i32, 4, false), "");
	ac_build_ifcc(&ctx->ac, tmp, 5090);
	{
		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
		LLVMBuildStore(builder, ctx->i32_0, ptr);
	}
	ac_build_endif(&ctx->ac, 5090);

	ac_build_s_barrier(&ctx->ac);
}
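/* Emitted at the end of the merged GS part: finish the primitive flag writes
 * for unemitted vertices, accumulate the generated primitive counts, run
 * streamout and shader queries, compact live vertices, and export primitive
 * and vertex data.
 */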
void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;
	const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
	LLVMValueRef tmp, tmp2;

	/* Zero out remaining (non-emitted) primitive flags.
	 *
	 * Note: Alternatively, we could pass the relevant gs_next_vertex to
	 *       the emit threads via LDS. This is likely worse in the expected
	 *       typical case where each GS thread emits the full set of
	 *       vertices.
	 */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

		ac_build_bgnloop(&ctx->ac, 5100);

		const LLVMValueRef vertexidx =
			LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
		tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
				    LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5101);
		ac_build_break(&ctx->ac);
		ac_build_endif(&ctx->ac, 5101);

		tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
		LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

		tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implied C-style array */
			ctx->ac.i32_1, /* second entry of struct */
			LLVMConstInt(ctx->ac.i32, stream, false),
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		LLVMBuildStore(builder, i8_0, tmp);

		ac_build_endloop(&ctx->ac, 5100);
	}

	/* Accumulate generated primitives counts across the entire threadgroup. */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		LLVMValueRef numprims =
			LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
		numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, 64);

		tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5105);

		LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
				   ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
						 LLVMConstInt(ctx->i32, stream, false)),
				   numprims, LLVMAtomicOrderingMonotonic, false);

		ac_build_endif(&ctx->ac, 5105);
	}

	lp_build_endif(&ctx->merged_wrap_if_state);

	ac_build_s_barrier(&ctx->ac);

	const LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);

	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = LLVMConstInt(ctx->i32, verts_per_prim, false);

		LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			LLVMValueRef gep_idx[3] = {
				ctx->i32_0, /* implicit C-style array */
				ctx->i32_1, /* second value of struct */
				LLVMConstInt(ctx->i32, stream, false),
			};
			tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
			tmp = LLVMBuildLoad(builder, tmp, "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
			nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
		}

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			tmp = LLVMBuildSub(builder, tid,
					   LLVMConstInt(ctx->i32, verts_per_prim - i - 1, false), "");
			tmp = ngg_gs_vertex_ptr(ctx, tmp);
			nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
		}

		build_streamout(ctx, &nggso);
	}

	/* Write shader query data. */
	tmp = si_unpack_param(ctx, ctx->param_vs_state_bits, 6, 1);
	tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
	ac_build_ifcc(&ctx->ac, tmp, 5109); /* if (STREAMOUT_QUERY_ENABLED) */
	unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
			    LLVMConstInt(ctx->i32, num_query_comps, false), "");
	ac_build_ifcc(&ctx->ac, tmp, 5110);
	{
		LLVMValueRef offset;
		tmp = tid;
		if (sel->so.num_outputs)
			tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->i32, 3, false), "");
		offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 32, false), "");
		if (sel->so.num_outputs) {
			tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->i32, 2, false), "");
			tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 8, false), "");
			offset = LLVMBuildAdd(builder, offset, tmp, "");
		}

		tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
		LLVMValueRef args[] = {
			tmp,
			ngg_get_query_buf(ctx),
			offset,
			LLVMConstInt(ctx->i32, 16, false), /* soffset */
			ctx->i32_0, /* cachepolicy */
		};
		ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
				   ctx->i32, args, 5, 0);
	}
	ac_build_endif(&ctx->ac, 5110);
	ac_build_endif(&ctx->ac, 5109);

	/* Determine vertex liveness. */
	LLVMValueRef vertliveptr = lp_build_alloca(&ctx->gallivm, ctx->ac.i1, "vertexlive");

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5120);
	{
		for (unsigned i = 0; i < verts_per_prim; ++i) {
			const LLVMValueRef primidx =
				LLVMBuildAdd(builder, tid,
					     LLVMConstInt(ctx->ac.i32, i, false), "");

			if (i > 0) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
				ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
			}

			/* Load primitive liveness */
			tmp = ngg_gs_vertex_ptr(ctx, primidx);
			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implicit C-style array */
				ctx->ac.i32_1, /* second value of struct */
				ctx->ac.i32_0, /* stream 0 */
			};
			tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
			tmp = LLVMBuildLoad(builder, tmp, "");
			const LLVMValueRef primlive =
				LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

			tmp = LLVMBuildLoad(builder, vertliveptr, "");
			tmp = LLVMBuildOr(builder, tmp, primlive, ""),
			LLVMBuildStore(builder, tmp, vertliveptr);

			if (i > 0)
				ac_build_endif(&ctx->ac, 5121 + i);
		}
	}
	ac_build_endif(&ctx->ac, 5120);

	/* Inclusive scan addition across the current wave. */
	LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
	struct ac_wg_scan vertlive_scan = {};
	vertlive_scan.op = nir_op_iadd;
	vertlive_scan.enable_reduce = true;
	vertlive_scan.enable_exclusive = true;
	vertlive_scan.src = vertlive;
	vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->i32_0);
	vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
	vertlive_scan.numwaves = get_tgsize(ctx);
	vertlive_scan.maxwaves = 8;

	ac_build_wg_scan(&ctx->ac, &vertlive_scan);

	/* Skip all exports (including index exports) when possible. At least on
	 * early gfx10 revisions this is also to avoid hangs.
	 */
	LLVMValueRef have_exports =
		LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
	num_emit_threads =
		LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

	/* Allocate export space. Send this message as early as possible, to
	 * hide the latency of the SQ <-> SPI roundtrip.
	 *
	 * Note: We could consider compacting primitives for export as well.
	 *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
	 *       prim data per clock and skips null primitives at no additional
	 *       cost. So compacting primitives can only be beneficial when
	 *       there are 4 or more contiguous null primitives in the export
	 *       (in the common case of single-dword prim exports).
	 */
	build_sendmsg_gs_alloc_req(ctx, vertlive_scan.result_reduce, num_emit_threads);

	/* Setup the reverse vertex compaction permutation. We re-use stream 1
	 * of the primitive liveness flags, relying on the fact that each
	 * threadgroup can have at most 256 threads. */
	ac_build_ifcc(&ctx->ac, vertlive, 5130);
	{
		tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_1, /* stream 1 */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
		LLVMBuildStore(builder, tmp2, tmp);
	}
	ac_build_endif(&ctx->ac, 5130);

	ac_build_s_barrier(&ctx->ac);

	/* Export primitive data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5140);
	{
		struct ngg_prim prim = {};
		prim.num_vertices = verts_per_prim;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_0, /* primflag */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		prim.isnull = LLVMBuildICmp(builder, LLVMIntEQ, tmp,
					    LLVMConstInt(ctx->ac.i8, 0, false), "");

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
						     LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
			prim.edgeflag[i] = ctx->ac.i1false;
		}

		build_export_prim(ctx, &prim);
	}
	ac_build_endif(&ctx->ac, 5140);

	/* Export position and parameter data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
	ac_build_ifcc(&ctx->ac, tmp, 5145);
	{
		struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_1, /* stream 1: source data index */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
		const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

		unsigned out_idx = 0;
		gep_idx[1] = ctx->ac.i32_0;
		for (unsigned i = 0; i < info->num_outputs; i++) {
			outputs[i].semantic_name = info->output_semantic_name[i];
			outputs[i].semantic_index = info->output_semantic_index[i];

			for (unsigned j = 0; j < 4; j++, out_idx++) {
				gep_idx[2] = LLVMConstInt(ctx->ac.i32, out_idx, false);
				tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
				tmp = LLVMBuildLoad(builder, tmp, "");
				outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
				outputs[i].vertex_stream[j] =
					(info->output_streams[i] >> (2 * j)) & 3;
			}
		}

		si_llvm_export_vs(ctx, outputs, info->num_outputs);
	}
	ac_build_endif(&ctx->ac, 5145);
}
static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
				     unsigned min_verts_per_prim, bool use_adjacency)
{
	unsigned max_reuse = max_esverts - min_verts_per_prim;
	if (use_adjacency)
		max_reuse /= 2;
	*max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}
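/* For example, with max_esverts = 128 and a triangle input (min_verts_per_prim
 * = 3, no adjacency), max_reuse is 125 and *max_gsprims is clamped to at
 * most 126.
 */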
/**
 * Determine subgroup information like maximum number of vertices and prims.
 *
 * This happens before the shader is uploaded, since LDS relocations during
 * upload depend on the subgroup size.
 */
void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
{
	const struct si_shader_selector *gs_sel = shader->selector;
	const struct si_shader_selector *es_sel =
		shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
	const enum pipe_shader_type gs_type = gs_sel->type;
	const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
	/* TODO: Use QUADS as the worst case because of reuse, but triangles
	 * will always have 1 additional unoccupied vector lane. We could use
	 * that lane if the worst case was TRIANGLES. */
	const unsigned input_prim = si_get_input_prim(gs_sel, PIPE_PRIM_QUADS);
	const bool use_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
				   input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
	const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
	const unsigned min_verts_per_prim =
		gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space.
	 *
	 * TODO: We should really take the shader's internal LDS use into
	 *       account. The linker will fail if the size is greater than
	 *       what it can handle.
	 */
	const unsigned max_lds_size = 8 * 1024 - 768;
	const unsigned target_lds_size = max_lds_size;
	unsigned esvert_lds_size = 0;
	unsigned gsprim_lds_size = 0;

	/* All these are per subgroup: */
	bool max_vert_out_per_gs_instance = false;
	unsigned max_esverts_base = 128;
	unsigned max_gsprims_base = 128; /* default prim group size clamp */

	/* Hardware has the following non-natural restrictions on the value
	 * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of the draw:
	 * - at most 252 for any line input primitive type
	 * - at most 251 for any quad input primitive type
	 * - at most 251 for triangle strips with adjacency (this happens to
	 *   be the natural limit for triangle *lists* with adjacency)
	 */
	max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

	if (gs_type == PIPE_SHADER_GEOMETRY) {
		unsigned max_out_verts_per_gsprim =
			gs_sel->gs_max_out_vertices * gs_num_invocations;

		if (max_out_verts_per_gsprim <= 256) {
			if (max_out_verts_per_gsprim) {
				max_gsprims_base = MIN2(max_gsprims_base,
							256 / max_out_verts_per_gsprim);
			}
		} else {
			/* Use special multi-cycling mode in which each GS
			 * instance gets its own subgroup. Does not work with
			 * tessellation. */
			max_vert_out_per_gs_instance = true;
			max_gsprims_base = 1;
			max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
		}

		esvert_lds_size = es_sel->esgs_itemsize / 4;
		gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
	} else {
		/* TODO: This needs to be adjusted once LDS use for compaction
		 * after culling is implemented. */
		if (es_sel->so.num_outputs)
			esvert_lds_size = 4 * es_sel->info.num_outputs + 1;

		/* GS stores Primitive IDs into LDS at the address corresponding
		 * to the ES thread of the provoking vertex. All ES threads
		 * load and export PrimitiveID for their thread.
		 */
		if (gs_sel->type == PIPE_SHADER_VERTEX &&
		    shader->key.mono.u.vs_export_prim_id)
			esvert_lds_size = MAX2(esvert_lds_size, 1);
	}

	unsigned max_gsprims = max_gsprims_base;
	unsigned max_esverts = max_esverts_base;

	if (esvert_lds_size)
		max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
	if (gsprim_lds_size)
		max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

	max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
	clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
	assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

	if (esvert_lds_size || gsprim_lds_size) {
		/* Now that we have a rough proportionality between esverts
		 * and gsprims based on the primitive type, scale both of them
		 * down simultaneously based on required LDS space.
		 *
		 * We could be smarter about this if we knew how much vertex
		 * reuse to expect.
		 */
		unsigned lds_total = max_esverts * esvert_lds_size +
				     max_gsprims * gsprim_lds_size;
		if (lds_total > target_lds_size) {
			max_esverts = max_esverts * target_lds_size / lds_total;
			max_gsprims = max_gsprims * target_lds_size / lds_total;

			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		}
	}

	/* Round up towards full wave sizes for better ALU utilization. */
	if (!max_vert_out_per_gs_instance) {
		const unsigned wavesize = 64;
		unsigned orig_max_esverts;
		unsigned orig_max_gsprims;
		do {
			orig_max_esverts = max_esverts;
			orig_max_gsprims = max_gsprims;

			max_esverts = align(max_esverts, wavesize);
			max_esverts = MIN2(max_esverts, max_esverts_base);
			if (esvert_lds_size)
				max_esverts = MIN2(max_esverts,
						   (max_lds_size - max_gsprims * gsprim_lds_size) /
						   esvert_lds_size);
			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

			max_gsprims = align(max_gsprims, wavesize);
			max_gsprims = MIN2(max_gsprims, max_gsprims_base);
			if (gsprim_lds_size)
				max_gsprims = MIN2(max_gsprims,
						   (max_lds_size - max_esverts * esvert_lds_size) /
						   gsprim_lds_size);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		} while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
	}

	/* Hardware restriction: minimum value of max_esverts */
	max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);

	unsigned max_out_vertices =
		max_vert_out_per_gs_instance ? gs_sel->gs_max_out_vertices :
		gs_type == PIPE_SHADER_GEOMETRY ?
		max_gsprims * gs_num_invocations * gs_sel->gs_max_out_vertices :
		max_esverts;
	assert(max_out_vertices <= 256);

	unsigned prim_amp_factor = 1;
	if (gs_type == PIPE_SHADER_GEOMETRY) {
		/* Number of output primitives per GS input primitive after
		 * GS instancing. */
		prim_amp_factor = gs_sel->gs_max_out_vertices;
	}

	/* The GE only checks against the maximum number of ES verts after
	 * allocating a full GS primitive. So we need to ensure that whenever
	 * this check passes, there is enough space for a full primitive without
	 * vertex reuse.
	 */
	shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
	shader->ngg.max_gsprims = max_gsprims;
	shader->ngg.max_out_verts = max_out_vertices;
	shader->ngg.prim_amp_factor = prim_amp_factor;
	shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;

	shader->gs_info.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
	shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;

	assert(shader->ngg.hw_max_esverts >= 24); /* HW limitation */
}