/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "si_pipe.h"
#include "si_shader_internal.h"

#include "sid.h"

#include "util/u_memory.h"
#include "util/u_prim.h"
static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->param_merged_wave_info, 24, 4);
}
static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->param_merged_wave_info, 28, 4);
}
static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
			   LLVMConstInt(ctx->ac.i32, 64, false), "");
	return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}
static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 12, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}
static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 22, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}
static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    ctx->i32_0,
			    LLVMConstInt(ctx->ac.i32, 11, false),
			    false);
}
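
/* Illustration (not from the original source): taken together, the bfe
 * offsets/widths used by the three accessors above imply this layout of
 * gs_tg_info:
 *   bits  0..10  ordered (streamout) ID
 *   bits 12..20  vertex count of the threadgroup
 *   bits 22..30  primitive count of the threadgroup
 */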
static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
{
	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
					    ctx->param_rw_buffers);

	return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
				     LLVMConstInt(ctx->i32, GFX10_GS_QUERY_BUF, false));
}
/* Send GS Alloc Req message from the first wave of the group to SPI.
 * Message payload is:
 * - bits 0..10: vertices in group
 * - bits 12..22: primitives in group
 */
static void build_sendmsg_gs_alloc_req(struct si_shader_context *ctx,
				       LLVMValueRef vtx_cnt,
				       LLVMValueRef prim_cnt)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5020);

	tmp = LLVMBuildShl(builder, prim_cnt, LLVMConstInt(ctx->ac.i32, 12, false), "");
	tmp = LLVMBuildOr(builder, tmp, vtx_cnt, "");
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_ALLOC_REQ, tmp);

	ac_build_endif(&ctx->ac, 5020);
}
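
/* Worked example (illustrative, not part of the original): with vtx_cnt = 3
 * and prim_cnt = 1, the payload built above is (1 << 12) | 3 = 0x1003, i.e.
 * 3 vertices in bits 0..10 and 1 primitive starting at bit 12.
 */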
struct ngg_prim {
	unsigned num_vertices;
	LLVMValueRef isnull;
	LLVMValueRef index[3];
	LLVMValueRef edgeflag[3];
};
static void build_export_prim(struct si_shader_context *ctx,
			      const struct ngg_prim *prim)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	struct ac_export_args args;
	LLVMValueRef tmp;

	tmp = LLVMBuildZExt(builder, prim->isnull, ctx->ac.i32, "");
	args.out[0] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 31, false), "");

	for (unsigned i = 0; i < prim->num_vertices; ++i) {
		tmp = LLVMBuildShl(builder, prim->index[i],
				   LLVMConstInt(ctx->ac.i32, 10 * i, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
		tmp = LLVMBuildZExt(builder, prim->edgeflag[i], ctx->ac.i32, "");
		tmp = LLVMBuildShl(builder, tmp,
				   LLVMConstInt(ctx->ac.i32, 10 * i + 9, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
	}

	args.out[0] = LLVMBuildBitCast(builder, args.out[0], ctx->ac.f32, "");
	args.out[1] = LLVMGetUndef(ctx->ac.f32);
	args.out[2] = LLVMGetUndef(ctx->ac.f32);
	args.out[3] = LLVMGetUndef(ctx->ac.f32);

	args.target = V_008DFC_SQ_EXP_PRIM;
	args.enabled_channels = 1;
	args.done = true;
	args.valid_mask = false;
	args.compr = false;

	ac_build_export(&ctx->ac, &args);
}
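
/* Illustration (not from the original): for a triangle with indices 5, 6, 7,
 * no edge flags and isnull = false, the loop above packs
 * args.out[0] = 5 | (6 << 10) | (7 << 20), with bit 31 left clear.
 */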
static void build_streamout_vertex(struct si_shader_context *ctx,
				   LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
				   unsigned stream, LLVMValueRef offset_vtx,
				   LLVMValueRef vertexptr)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef offset[4] = {};
	LLVMValueRef tmp;

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (!wg_offset_dw[buffer])
			continue;

		tmp = LLVMBuildMul(builder, offset_vtx,
				   LLVMConstInt(ctx->i32, so->stride[buffer], false), "");
		tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
		offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->i32, 2, false), "");
	}

	for (unsigned i = 0; i < so->num_outputs; ++i) {
		if (so->output[i].stream != stream)
			continue;

		unsigned reg = so->output[i].register_index;
		struct si_shader_output_values out;
		out.semantic_name = info->output_semantic_name[reg];
		out.semantic_index = info->output_semantic_index[reg];

		for (unsigned comp = 0; comp < 4; comp++) {
			tmp = ac_build_gep0(&ctx->ac, vertexptr,
					    LLVMConstInt(ctx->i32, 4 * reg + comp, false));
			out.values[comp] = LLVMBuildLoad(builder, tmp, "");
			out.vertex_stream[comp] =
				(info->output_streams[reg] >> (2 * comp)) & 3;
		}

		si_emit_streamout_output(ctx, so_buffer, offset,
					 &so->output[i], &out);
	}
}
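
/* Note (illustrative, not from the original): the per-buffer value computed
 * above is (wg_offset_dw[buffer] + offset_vtx * stride) << 2, i.e. a dword
 * offset converted to a byte offset for si_emit_streamout_output().
 */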
struct ngg_streamout {
	LLVMValueRef num_vertices;

	/* per-thread data */
	LLVMValueRef prim_enable[4]; /* i1 per stream */
	LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */

	LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
};
/**
 * Build streamout logic.
 *
 * Writes number of emitted primitives to gs_ngg_scratch[4:8].
 *
 * Clobbers gs_ngg_scratch[8:].
 */
static void build_streamout(struct si_shader_context *ctx,
			    struct ngg_streamout *nggso)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp, tmp2;
	LLVMValueRef i32_2 = LLVMConstInt(ctx->i32, 2, false);
	LLVMValueRef i32_4 = LLVMConstInt(ctx->i32, 4, false);
	LLVMValueRef i32_8 = LLVMConstInt(ctx->i32, 8, false);
	LLVMValueRef so_buffer[4] = {};
	unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
				    (nggso->vertices[2] ? 1 : 0);
	LLVMValueRef prim_stride_dw[4] = {};
	LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->i32);
	int stream_for_buffer[4] = { -1, -1, -1, -1 };
	unsigned bufmask_for_stream[4] = {};
	bool isgs = ctx->type == PIPE_SHADER_GEOMETRY;
	unsigned scratch_emit_base = isgs ? 4 : 0;
	LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->i32_0;
	unsigned scratch_offset_base = isgs ? 8 : 4;
	LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;

	ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);

	/* Determine the mapping of streamout buffers to vertex streams. */
	for (unsigned i = 0; i < so->num_outputs; ++i) {
		unsigned buf = so->output[i].output_buffer;
		unsigned stream = so->output[i].stream;
		assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
		stream_for_buffer[buf] = stream;
		bufmask_for_stream[stream] |= 1 << buf;
	}

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (stream_for_buffer[buffer] == -1)
			continue;

		assert(so->stride[buffer]);

		tmp = LLVMConstInt(ctx->i32, so->stride[buffer], false);
		prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
		prim_stride_dw_vgpr = ac_build_writelane(
			&ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
			LLVMConstInt(ctx->i32, buffer, false));

		so_buffer[buffer] = ac_build_load_to_sgpr(
			&ctx->ac, buf_ptr,
			LLVMConstInt(ctx->i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
	}

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5200);
	{
		LLVMTypeRef gdsptr = LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GDS);
		LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->i32_0, gdsptr, "");

		/* Advance the streamout offsets in GDS. */
		LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
		LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");

		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5210);
		{
			if (isgs) {
				tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
				tmp = LLVMBuildLoad(builder, tmp, "");
			} else {
				tmp = ac_build_writelane(&ctx->ac, ctx->i32_0,
						ngg_get_prim_cnt(ctx), ctx->i32_0);
			}
			LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);

			unsigned swizzle[4];
			int unused_stream = -1;
			for (unsigned stream = 0; stream < 4; ++stream) {
				if (!info->num_stream_output_components[stream]) {
					unused_stream = stream;
					break;
				}
			}
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] >= 0) {
					swizzle[buffer] = stream_for_buffer[buffer];
				} else {
					assert(unused_stream >= 0);
					swizzle[buffer] = unused_stream;
				}
			}

			tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
				swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
			tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");

			LLVMValueRef args[] = {
				LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
				tmp,
				ctx->i32_0, // ordering
				ctx->i32_0, // scope
				ctx->ac.i1false, // isVolatile
				LLVMConstInt(ctx->i32, 4 << 24, false), // OA index
				ctx->ac.i1true, // wave release
				ctx->ac.i1true, // wave done
			};
			tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
						 ctx->i32, args, ARRAY_SIZE(args), 0);

			/* Keep offsets in a VGPR for quick retrieval via readlane by
			 * the first wave for bounds checking, and also store in LDS
			 * for retrieval by all waves later. */
			LLVMBuildStore(builder, tmp, offsets_vgpr);

			tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					    scratch_offset_basev, "");
			tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
			LLVMBuildStore(builder, tmp, tmp2);
		}
		ac_build_endif(&ctx->ac, 5210);

		/* Determine the max emit per buffer. This is done via the SALU, in part
		 * because LLVM can't generate divide-by-multiply if we try to do this
		 * via VALU with one lane per buffer.
		 */
		LLVMValueRef max_emit[4] = {};
		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] == -1)
				continue;

			LLVMValueRef bufsize_dw =
				LLVMBuildLShr(builder,
					LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
					i32_2, "");

			tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
			LLVMValueRef offset_dw =
				ac_build_readlane(&ctx->ac, tmp,
						  LLVMConstInt(ctx->i32, buffer, false));

			tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
			tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");

			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
			max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->i32_0, tmp, "");
		}

		/* Determine the number of emitted primitives per stream and fixup the
		 * GDS counter if necessary.
		 *
		 * This is complicated by the fact that a single stream can emit to
		 * multiple buffers (but luckily not vice versa).
		 */
		LLVMValueRef emit_vgpr = ctx->i32_0;

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
			LLVMValueRef generated =
				ac_build_readlane(&ctx->ac, tmp,
						  LLVMConstInt(ctx->i32, stream, false));

			LLVMValueRef emit = generated;
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] == stream)
					emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
			}

			emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
						       LLVMConstInt(ctx->i32, stream, false));

			/* Fixup the offset using a plain GDS atomic if we overflowed. */
			tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
			ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
			tmp = LLVMBuildLShr(builder,
					    LLVMConstInt(ctx->i32, bufmask_for_stream[stream], false),
					    ac_get_thread_id(&ctx->ac), "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			ac_build_ifcc(&ctx->ac, tmp, 5222);

			tmp = LLVMBuildSub(builder, generated, emit, "");
			tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
			tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
			LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
					   LLVMAtomicOrderingMonotonic, false);

			ac_build_endif(&ctx->ac, 5222);
			ac_build_endif(&ctx->ac, 5221);
		}

		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5225);
		{
			tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					   scratch_emit_basev, "");
			tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
			LLVMBuildStore(builder, emit_vgpr, tmp);
		}
		ac_build_endif(&ctx->ac, 5225);
	}
	ac_build_endif(&ctx->ac, 5200);

	/* Determine the workgroup-relative per-thread / primitive offset into
	 * the streamout buffers */
	struct ac_wg_scan primemit_scan[4] = {};

	if (isgs) {
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			primemit_scan[stream].enable_exclusive = true;
			primemit_scan[stream].op = nir_op_iadd;
			primemit_scan[stream].src = nggso->prim_enable[stream];
			primemit_scan[stream].scratch =
				ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
					      LLVMConstInt(ctx->i32, 12 + 8 * stream, false));
			primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
			primemit_scan[stream].numwaves = get_tgsize(ctx);
			primemit_scan[stream].maxwaves = 8;
			ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
		}
	}

	ac_build_s_barrier(&ctx->ac);

	/* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
	LLVMValueRef wgoffset_dw[4] = {};

	{
		LLVMValueRef scratch_vgpr;

		tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
		scratch_vgpr = LLVMBuildLoad(builder, tmp, "");

		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] >= 0) {
				wgoffset_dw[buffer] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_offset_base + buffer, false));
			}
		}

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (info->num_stream_output_components[stream]) {
				nggso->emit[stream] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_emit_base + stream, false));
			}
		}
	}

	/* Write out primitive data */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		if (isgs) {
			ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
		} else {
			primemit_scan[stream].result_exclusive = tid;
		}

		tmp = LLVMBuildICmp(builder, LLVMIntULT,
				    primemit_scan[stream].result_exclusive,
				    nggso->emit[stream], "");
		tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
		ac_build_ifcc(&ctx->ac, tmp, 5240);
		{
			LLVMValueRef offset_vtx =
				LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
					     nggso->num_vertices, "");

			for (unsigned i = 0; i < max_num_vertices; ++i) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT,
						    LLVMConstInt(ctx->i32, i, false),
						    nggso->num_vertices, "");
				ac_build_ifcc(&ctx->ac, tmp, 5241);
				build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
						       stream, offset_vtx, nggso->vertices[i]);
				ac_build_endif(&ctx->ac, 5241);
				offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->i32_1, "");
			}
		}
		ac_build_endif(&ctx->ac, 5240);
	}
}
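
/* Summary (illustrative, not from the original source): the gs_ngg_scratch
 * layout implied by the bases above is, for the GS case, per-stream emit
 * counts in dwords [4:8] (scratch_emit_base = 4), per-buffer streamout
 * offsets in dwords [8:12] (scratch_offset_base = 8), and workgroup-scan
 * scratch starting at dword 12 + 8 * stream. For the non-GS case the emit
 * counts start at 0 and the offsets at 4.
 */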
/**
 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
 * for the vertex outputs.
 */
static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx,
					LLVMValueRef vtxid)
{
	/* The extra dword is used to avoid LDS bank conflicts. */
	unsigned vertex_size = 4 * ctx->shader->selector->info.num_outputs + 1;
	LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, vertex_size);
	LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
	LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
	return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
}
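
/* Example (illustrative, not from the original): a shader with 5 vector
 * outputs stores 4 * 5 + 1 = 21 dwords per vertex; the odd stride is what
 * avoids the LDS bank conflicts mentioned above.
 */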
/**
 * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
 */
void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
			     unsigned max_outputs,
			     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_selector *sel = ctx->shader->selector;
	struct tgsi_shader_info *info = &sel->info;
	struct si_shader_output_values *outputs = NULL;
	LLVMBuilderRef builder = ctx->ac.builder;
	struct lp_build_if_state if_state;
	LLVMValueRef tmp, tmp2;

	assert(!ctx->shader->is_gs_copy_shader);
	assert(info->num_outputs <= max_outputs);

	outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));

	LLVMValueRef vertex_ptr = NULL;

	if (sel->so.num_outputs)
		vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));

	for (unsigned i = 0; i < info->num_outputs; i++) {
		outputs[i].semantic_name = info->output_semantic_name[i];
		outputs[i].semantic_index = info->output_semantic_index[i];

		/* This is used only by streamout. */
		for (unsigned j = 0; j < 4; j++) {
			outputs[i].values[j] =
				LLVMBuildLoad(builder,
					      addrs[4 * i + j],
					      "");
			outputs[i].vertex_stream[j] =
				(info->output_streams[i] >> (2 * j)) & 3;

			if (vertex_ptr) {
				tmp = ac_build_gep0(&ctx->ac, vertex_ptr,
						    LLVMConstInt(ctx->i32, 4 * i + j, false));
				tmp2 = ac_to_integer(&ctx->ac, outputs[i].values[j]);
				LLVMBuildStore(builder, tmp2, tmp);
			}
		}
	}

	lp_build_endif(&ctx->merged_wrap_if_state);

	LLVMValueRef prims_in_wave = si_unpack_param(ctx, ctx->param_merged_wave_info, 8, 8);
	LLVMValueRef vtx_in_wave = si_unpack_param(ctx, ctx->param_merged_wave_info, 0, 8);
	LLVMValueRef is_gs_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), prims_in_wave, "");
	LLVMValueRef is_es_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), vtx_in_wave, "");
	LLVMValueRef vtxindex[] = {
		si_unpack_param(ctx, ctx->param_gs_vtx01_offset, 0, 16),
		si_unpack_param(ctx, ctx->param_gs_vtx01_offset, 16, 16),
		si_unpack_param(ctx, ctx->param_gs_vtx23_offset, 0, 16),
	};

	/* Determine the number of vertices per primitive. */
	unsigned num_vertices;
	LLVMValueRef num_vertices_val;

	if (ctx->type == PIPE_SHADER_VERTEX) {
		if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) {
			/* Blits always use axis-aligned rectangles with 3 vertices. */
			num_vertices = 3;
			num_vertices_val = LLVMConstInt(ctx->i32, 3, 0);
		} else {
			/* Extract OUTPRIM field. */
			tmp = si_unpack_param(ctx, ctx->param_vs_state_bits, 2, 2);
			num_vertices_val = LLVMBuildAdd(builder, tmp, ctx->i32_1, "");
			num_vertices = 3; /* TODO: optimize for points & lines */
		}
	} else {
		assert(ctx->type == PIPE_SHADER_TESS_EVAL);

		if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
			num_vertices = 1;
		else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
			num_vertices = 2;
		else
			num_vertices = 3;

		num_vertices_val = LLVMConstInt(ctx->i32, num_vertices, false);
	}

	LLVMValueRef emitted_prims = NULL;

	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = num_vertices_val;
		nggso.prim_enable[0] = is_gs_thread;

		for (unsigned i = 0; i < num_vertices; ++i)
			nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);

		build_streamout(ctx, &nggso);
		emitted_prims = nggso.emit[0];
	}

	/* TODO: primitive culling */

	build_sendmsg_gs_alloc_req(ctx, ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));

	/* Update query buffer */
	/* TODO: this won't catch 96-bit clear_buffer via transform feedback. */
	if (!info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) {
		tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5030);
		tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
				    sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5031);
		{
			LLVMValueRef args[] = {
				ngg_get_prim_cnt(ctx),
				ngg_get_query_buf(ctx),
				LLVMConstInt(ctx->i32, 16, false), /* offset of stream[0].generated_primitives */
				ctx->i32_0, /* soffset */
				ctx->i32_0, /* cachepolicy */
			};

			if (sel->so.num_outputs) {
				args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->i32_1);
				args[2] = ac_build_writelane(&ctx->ac, args[2],
							     LLVMConstInt(ctx->i32, 24, false), ctx->i32_1);
			}

			/* TODO: should this be 64-bit atomics? */
			ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
					   ctx->i32, args, 5, 0);
		}
		ac_build_endif(&ctx->ac, 5031);
		ac_build_endif(&ctx->ac, 5030);
	}

	/* Export primitive data to the index buffer. Format is:
	 *  - bits 0..8: index 0
	 *  - bit 9: edge flag 0
	 *  - bits 10..18: index 1
	 *  - bit 19: edge flag 1
	 *  - bits 20..28: index 2
	 *  - bit 29: edge flag 2
	 *  - bit 31: null primitive (skip)
	 *
	 * For the first version, we will always build up all three indices
	 * independent of the primitive type. The additional garbage data
	 * shouldn't hurt.
	 *
	 * TODO: culling depends on the primitive type, so can have some
	 * interaction here.
	 */
	lp_build_if(&if_state, &ctx->gallivm, is_gs_thread);
	{
		struct ngg_prim prim = {};

		prim.num_vertices = num_vertices;
		prim.isnull = ctx->ac.i1false;
		memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

		for (unsigned i = 0; i < num_vertices; ++i) {
			tmp = LLVMBuildLShr(builder, ctx->abi.gs_invocation_id,
					    LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
			prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
		}

		build_export_prim(ctx, &prim);
	}
	lp_build_endif(&if_state);

	/* Export per-vertex data (positions and parameters). */
	lp_build_if(&if_state, &ctx->gallivm, is_es_thread);
	{
		unsigned i;

		/* Unconditionally (re-)load the values for proper SSA form. */
		for (i = 0; i < info->num_outputs; i++) {
			for (unsigned j = 0; j < 4; j++) {
				outputs[i].values[j] =
					LLVMBuildLoad(builder,
						      addrs[4 * i + j],
						      "");
			}
		}

		/* TODO: Vertex shaders have to get PrimitiveID from GS VGPRs. */
		if (ctx->type == PIPE_SHADER_TESS_EVAL &&
		    ctx->shader->key.mono.u.vs_export_prim_id) {
			outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
			outputs[i].semantic_index = 0;
			outputs[i].values[0] = ac_to_float(&ctx->ac, si_get_primitive_id(ctx, 0));
			for (unsigned j = 1; j < 4; j++)
				outputs[i].values[j] = LLVMGetUndef(ctx->f32);

			memset(outputs[i].vertex_stream, 0,
			       sizeof(outputs[i].vertex_stream));

			i++;
		}

		si_llvm_export_vs(ctx, outputs, i);
	}
	lp_build_endif(&if_state);

	FREE(outputs);
}
static LLVMValueRef
ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;

	LLVMTypeRef elements[2] = {
		LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
		LLVMArrayType(ctx->ac.i8, 4),
	};
	LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
	type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
	return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}
/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive
 * for rendering.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
static LLVMValueRef
ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

	/* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
	unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
	if (write_stride_2exp) {
		LLVMValueRef row =
			LLVMBuildLShr(builder, vertexidx,
				      LLVMConstInt(ctx->ac.i32, 5, false), "");
		LLVMValueRef swizzle =
			LLVMBuildAnd(builder, row,
				     LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
						  false), "");
		vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
	}

	return ac_build_gep0(&ctx->ac, storage, vertexidx);
}
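
/* Worked example (illustrative, not from the original): with
 * gs_max_out_vertices = 4, write_stride_2exp = 2, so the swizzle above is
 * (vertexidx >> 5) & 3, i.e. bits 5..6 of the index are XORed into bits 0..1,
 * swizzling vertices in groups of 32 as described in the comment above.
 */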
static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
		       LLVMValueRef emitidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
	tmp = LLVMBuildMul(builder, tmp, gsthread, "");
	const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
	return ngg_gs_vertex_ptr(ctx, vertexidx);
}
void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
			      unsigned stream,
			      LLVMValueRef *addrs)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	struct lp_build_if_state if_state;
	LLVMValueRef tmp;

	const LLVMValueRef vertexidx =
		LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 */
	const LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
			      LLVMConstInt(ctx->i32, sel->gs_max_out_vertices, false), "");

	tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
	tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
	LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

	lp_build_if(&if_state, &ctx->gallivm, can_emit);

	const LLVMValueRef vertexptr =
		ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
	unsigned out_idx = 0;
	for (unsigned i = 0; i < info->num_outputs; i++) {
		for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implied C-style array */
				ctx->ac.i32_0, /* first entry of struct */
				LLVMConstInt(ctx->ac.i32, out_idx, false),
			};
			LLVMValueRef ptr = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			LLVMBuildStore(builder, out_val, ptr);
		}
	}
	assert(out_idx * 4 == sel->gsvs_vertex_size);

	/* Determine and store whether this vertex completed a primitive. */
	const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

	tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
	const LLVMValueRef iscompleteprim =
		LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

	tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
	LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_1, /* second struct entry */
		LLVMConstInt(ctx->ac.i32, stream, false),
	};
	const LLVMValueRef primflagptr =
		LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

	tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
	LLVMBuildStore(builder, tmp, primflagptr);

	tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
	tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
	LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);

	lp_build_endif(&if_state);
}
void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
{
	/* Zero out the part of LDS scratch that is used to accumulate the
	 * per-stream generated primitive count.
	 */
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->i32, 4, false), "");
	ac_build_ifcc(&ctx->ac, tmp, 5090);
	{
		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
		LLVMBuildStore(builder, ctx->i32_0, ptr);
	}
	ac_build_endif(&ctx->ac, 5090);

	ac_build_s_barrier(&ctx->ac);
}
void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;
	const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
	LLVMValueRef tmp, tmp2;

	/* Zero out remaining (non-emitted) primitive flags.
	 *
	 * Note: Alternatively, we could pass the relevant gs_next_vertex to
	 *       the emit threads via LDS. This is likely worse in the expected
	 *       typical case where each GS thread emits the full set of
	 *       vertices.
	 */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

		ac_build_bgnloop(&ctx->ac, 5100);

		const LLVMValueRef vertexidx =
			LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
		tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
				    LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5101);
		ac_build_break(&ctx->ac);
		ac_build_endif(&ctx->ac, 5101);

		tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
		LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

		tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implied C-style array */
			ctx->ac.i32_1, /* second entry of struct */
			LLVMConstInt(ctx->ac.i32, stream, false),
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		LLVMBuildStore(builder, i8_0, tmp);

		ac_build_endloop(&ctx->ac, 5100);
	}

	/* Accumulate generated primitives counts across the entire threadgroup. */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		LLVMValueRef numprims =
			LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
		numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, 64);

		tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5105);
		{
			LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
					   ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
							 LLVMConstInt(ctx->i32, stream, false)),
					   numprims, LLVMAtomicOrderingMonotonic, false);
		}
		ac_build_endif(&ctx->ac, 5105);
	}

	lp_build_endif(&ctx->merged_wrap_if_state);

	ac_build_s_barrier(&ctx->ac);

	const LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);

	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = LLVMConstInt(ctx->i32, verts_per_prim, false);

		LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			LLVMValueRef gep_idx[3] = {
				ctx->i32_0, /* implicit C-style array */
				ctx->i32_1, /* second value of struct */
				LLVMConstInt(ctx->i32, stream, false),
			};
			tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
			tmp = LLVMBuildLoad(builder, tmp, "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
			nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
		}

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			tmp = LLVMBuildSub(builder, tid,
					   LLVMConstInt(ctx->i32, verts_per_prim - i - 1, false), "");
			tmp = ngg_gs_vertex_ptr(ctx, tmp);
			nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
		}

		build_streamout(ctx, &nggso);
	}

	/* Write shader query data. */
	unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
			    LLVMConstInt(ctx->i32, num_query_comps, false), "");
	ac_build_ifcc(&ctx->ac, tmp, 5110);
	{
		LLVMValueRef offset;
		tmp = tid;
		if (sel->so.num_outputs)
			tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->i32, 3, false), "");
		offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 32, false), "");
		if (sel->so.num_outputs) {
			tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->i32, 2, false), "");
			tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 8, false), "");
			offset = LLVMBuildAdd(builder, offset, tmp, "");
		}

		tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
		LLVMValueRef args[] = {
			tmp,
			ngg_get_query_buf(ctx),
			offset,
			LLVMConstInt(ctx->i32, 16, false), /* soffset */
			ctx->i32_0, /* cachepolicy */
		};
		ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
				   ctx->i32, args, 5, 0);
	}
	ac_build_endif(&ctx->ac, 5110);

	/* Determine vertex liveness. */
	LLVMValueRef vertliveptr = lp_build_alloca(&ctx->gallivm, ctx->ac.i1, "vertexlive");

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5120);
	{
		for (unsigned i = 0; i < verts_per_prim; ++i) {
			const LLVMValueRef primidx =
				LLVMBuildAdd(builder, tid,
					     LLVMConstInt(ctx->ac.i32, i, false), "");

			if (i > 0) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
				ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
			}

			/* Load primitive liveness */
			tmp = ngg_gs_vertex_ptr(ctx, primidx);
			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implicit C-style array */
				ctx->ac.i32_1, /* second value of struct */
				ctx->ac.i32_0, /* stream 0 */
			};
			tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
			tmp = LLVMBuildLoad(builder, tmp, "");
			const LLVMValueRef primlive =
				LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

			tmp = LLVMBuildLoad(builder, vertliveptr, "");
			tmp = LLVMBuildOr(builder, tmp, primlive, ""),
			LLVMBuildStore(builder, tmp, vertliveptr);

			if (i > 0)
				ac_build_endif(&ctx->ac, 5121 + i);
		}
	}
	ac_build_endif(&ctx->ac, 5120);

	/* Inclusive scan addition across the current wave. */
	LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
	struct ac_wg_scan vertlive_scan = {};
	vertlive_scan.op = nir_op_iadd;
	vertlive_scan.enable_reduce = true;
	vertlive_scan.enable_exclusive = true;
	vertlive_scan.src = vertlive;
	vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->i32_0);
	vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
	vertlive_scan.numwaves = get_tgsize(ctx);
	vertlive_scan.maxwaves = 8;

	ac_build_wg_scan(&ctx->ac, &vertlive_scan);

	/* Skip all exports (including index exports) when possible. At least on
	 * early gfx10 revisions this is also to avoid hangs.
	 */
	LLVMValueRef have_exports =
		LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
	num_emit_threads =
		LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

	/* Allocate export space. Send this message as early as possible, to
	 * hide the latency of the SQ <-> SPI roundtrip.
	 *
	 * Note: We could consider compacting primitives for export as well.
	 *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
	 *       prim data per clock and skips null primitives at no additional
	 *       cost. So compacting primitives can only be beneficial when
	 *       there are 4 or more contiguous null primitives in the export
	 *       (in the common case of single-dword prim exports).
	 */
	build_sendmsg_gs_alloc_req(ctx, vertlive_scan.result_reduce, num_emit_threads);

	/* Setup the reverse vertex compaction permutation. We re-use stream 1
	 * of the primitive liveness flags, relying on the fact that each
	 * threadgroup can have at most 256 threads. */
	ac_build_ifcc(&ctx->ac, vertlive, 5130);
	{
		tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_1, /* stream 1 */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
		LLVMBuildStore(builder, tmp2, tmp);
	}
	ac_build_endif(&ctx->ac, 5130);

	ac_build_s_barrier(&ctx->ac);

	/* Export primitive data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5140);
	{
		struct ngg_prim prim = {};
		prim.num_vertices = verts_per_prim;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_0, /* primflag */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		prim.isnull = LLVMBuildICmp(builder, LLVMIntEQ, tmp,
					    LLVMConstInt(ctx->ac.i8, 0, false), "");

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
				LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
			prim.edgeflag[i] = ctx->ac.i1false;
		}

		build_export_prim(ctx, &prim);
	}
	ac_build_endif(&ctx->ac, 5140);

	/* Export position and parameter data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
	ac_build_ifcc(&ctx->ac, tmp, 5145);
	{
		struct si_shader_output_values *outputs = NULL;
		outputs = MALLOC(info->num_outputs * sizeof(outputs[0]));

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_1, /* stream 1: source data index */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
		const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

		unsigned out_idx = 0;
		gep_idx[1] = ctx->ac.i32_0;
		for (unsigned i = 0; i < info->num_outputs; i++) {
			outputs[i].semantic_name = info->output_semantic_name[i];
			outputs[i].semantic_index = info->output_semantic_index[i];

			for (unsigned j = 0; j < 4; j++, out_idx++) {
				gep_idx[2] = LLVMConstInt(ctx->ac.i32, out_idx, false);
				tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
				tmp = LLVMBuildLoad(builder, tmp, "");
				outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
				outputs[i].vertex_stream[j] =
					(info->output_streams[i] >> (2 * j)) & 3;
			}
		}

		si_llvm_export_vs(ctx, outputs, info->num_outputs);

		FREE(outputs);
	}
	ac_build_endif(&ctx->ac, 5145);
}
static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
				     unsigned min_verts_per_prim, bool use_adjacency)
{
	unsigned max_reuse = max_esverts - min_verts_per_prim;
	if (use_adjacency)
		max_reuse /= 2;
	*max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}
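
/* Example (illustrative, not from the original): with max_esverts = 254 and
 * min_verts_per_prim = 3 (no adjacency), max_reuse = 251, so max_gsprims is
 * clamped to at most 252: the first primitive needs a full set of vertices
 * and each additional primitive needs at least one new vertex.
 */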
/**
 * Determine subgroup information like maximum number of vertices and prims.
 *
 * This happens before the shader is uploaded, since LDS relocations during
 * upload depend on the subgroup size.
 */
void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
{
	const struct si_shader_selector *gs_sel = shader->selector;
	const struct si_shader_selector *es_sel =
		shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
	const enum pipe_shader_type gs_type = gs_sel->type;
	const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
	/* TODO: Specialize for known primitive type without GS. */
	const unsigned input_prim = gs_type == PIPE_SHADER_GEOMETRY ?
				    gs_sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM] :
				    PIPE_PRIM_TRIANGLES;
	const bool use_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
				   input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
	const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
	const unsigned min_verts_per_prim =
		gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space.
	 *
	 * Streamout can increase the ESGS buffer size later on, so be more
	 * conservative with streamout and use 4K dwords. This may be suboptimal.
	 *
	 * Otherwise, use the limit of 7K dwords. The reason is that we need
	 * to leave some headroom for the max_esverts increase at the end.
	 *
	 * TODO: We should really take the shader's internal LDS use into
	 *       account. The linker will fail if the size is greater than
	 *       8K dwords.
	 */
	const unsigned max_lds_size = (gs_sel->so.num_outputs ? 4 : 7) * 1024 - 128;
	const unsigned target_lds_size = max_lds_size;
	unsigned esvert_lds_size = 0;
	unsigned gsprim_lds_size = 0;

	/* All these are per subgroup: */
	bool max_vert_out_per_gs_instance = false;
	unsigned max_esverts_base = 256;
	unsigned max_gsprims_base = 128; /* default prim group size clamp */

	/* Hardware has the following non-natural restrictions on the value
	 * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of
	 * the input primitive assembler:
	 * - at most 252 for any line input primitive type
	 * - at most 251 for any quad input primitive type
	 * - at most 251 for triangle strips with adjacency (this happens to
	 *   be the natural limit for triangle *lists* with adjacency)
	 */
	max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

	if (gs_type == PIPE_SHADER_GEOMETRY) {
		unsigned max_out_verts_per_gsprim =
			gs_sel->gs_max_out_vertices * gs_num_invocations;

		if (max_out_verts_per_gsprim <= 256) {
			if (max_out_verts_per_gsprim) {
				max_gsprims_base = MIN2(max_gsprims_base,
							256 / max_out_verts_per_gsprim);
			}
		} else {
			/* Use special multi-cycling mode in which each GS
			 * instance gets its own subgroup. Does not work with
			 * tessellation. */
			max_vert_out_per_gs_instance = true;
			max_gsprims_base = 1;
			max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
		}

		esvert_lds_size = es_sel->esgs_itemsize / 4;
		gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
	} else {
		/* TODO: This needs to be adjusted once LDS use for compaction
		 * after culling is implemented. */
		if (es_sel->so.num_outputs)
			esvert_lds_size = 4 * es_sel->info.num_outputs + 1;
	}

	unsigned max_gsprims = max_gsprims_base;
	unsigned max_esverts = max_esverts_base;

	if (esvert_lds_size)
		max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
	if (gsprim_lds_size)
		max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

	max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
	clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
	assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

	if (esvert_lds_size || gsprim_lds_size) {
		/* Now that we have a rough proportionality between esverts
		 * and gsprims based on the primitive type, scale both of them
		 * down simultaneously based on required LDS space.
		 *
		 * We could be smarter about this if we knew how much vertex
		 * reuse to expect.
		 */
		unsigned lds_total = max_esverts * esvert_lds_size +
				     max_gsprims * gsprim_lds_size;
		if (lds_total > target_lds_size) {
			max_esverts = max_esverts * target_lds_size / lds_total;
			max_gsprims = max_gsprims * target_lds_size / lds_total;

			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		}
	}

	/* Round up towards full wave sizes for better ALU utilization. */
	if (!max_vert_out_per_gs_instance) {
		const unsigned wavesize = 64;
		unsigned orig_max_esverts;
		unsigned orig_max_gsprims;
		do {
			orig_max_esverts = max_esverts;
			orig_max_gsprims = max_gsprims;

			max_esverts = align(max_esverts, wavesize);
			max_esverts = MIN2(max_esverts, max_esverts_base);
			if (esvert_lds_size)
				max_esverts = MIN2(max_esverts,
						   (max_lds_size - max_gsprims * gsprim_lds_size) /
						   esvert_lds_size);
			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

			max_gsprims = align(max_gsprims, wavesize);
			max_gsprims = MIN2(max_gsprims, max_gsprims_base);
			if (gsprim_lds_size)
				max_gsprims = MIN2(max_gsprims,
						   (max_lds_size - max_esverts * esvert_lds_size) /
						   gsprim_lds_size);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		} while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
	}

	/* Hardware restriction: minimum value of max_esverts */
	max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);

	unsigned max_out_vertices =
		max_vert_out_per_gs_instance ? gs_sel->gs_max_out_vertices :
		gs_type == PIPE_SHADER_GEOMETRY ?
		max_gsprims * gs_num_invocations * gs_sel->gs_max_out_vertices :
		max_esverts;
	assert(max_out_vertices <= 256);

	unsigned prim_amp_factor = 1;
	if (gs_type == PIPE_SHADER_GEOMETRY) {
		/* Number of output primitives per GS input primitive after
		 * GS instancing. */
		prim_amp_factor = gs_sel->gs_max_out_vertices;
	}

	/* The GE only checks against the maximum number of ES verts after
	 * allocating a full GS primitive. So we need to ensure that whenever
	 * this check passes, there is enough space for a full primitive without
	 * vertex reuse.
	 */
	shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
	shader->ngg.max_gsprims = max_gsprims;
	shader->ngg.max_out_verts = max_out_vertices;
	shader->ngg.prim_amp_factor = prim_amp_factor;
	shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;

	shader->gs_info.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
	shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;

	assert(shader->ngg.hw_max_esverts >= 24); /* HW limitation */
}