/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "si_shader_internal.h"

#include "util/u_memory.h"
#include "util/u_prim.h"
static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
}
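/* Note on the bitfields: the unpack calls above and below imply that
 * merged_wave_info carries this wave's index within the threadgroup in
 * bits [24:27] and the threadgroup's wave count in bits [28:31]. */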
static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->merged_wave_info, 28, 4);
}
static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp;

   tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
                      LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
   return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}
static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->gs_tg_info, 12, 9);
}
static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->gs_tg_info, 22, 9);
}
static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->gs_tg_info, 0, 11);
}
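/* Taken together, the three unpack helpers above document the gs_tg_info
 * layout: streamout ordered ID in bits [0:10], threadgroup vertex count in
 * bits [12:20], and threadgroup primitive count in bits [22:30]. */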
static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
{
   LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);

   return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
                                LLVMConstInt(ctx->i32, GFX10_GS_QUERY_BUF, false));
}
/* Send GS Alloc Req message from the first wave of the group to SPI.
 * Message payload is:
 * - bits 0..10: vertices in group
 * - bits 12..22: primitives in group
 */
static void build_sendmsg_gs_alloc_req(struct si_shader_context *ctx,
                                       LLVMValueRef vtx_cnt,
                                       LLVMValueRef prim_cnt)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp;

   tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
   ac_build_ifcc(&ctx->ac, tmp, 5020);

   tmp = LLVMBuildShl(builder, prim_cnt, LLVMConstInt(ctx->ac.i32, 12, false), "");
   tmp = LLVMBuildOr(builder, tmp, vtx_cnt, "");
   ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_ALLOC_REQ, tmp);

   ac_build_endif(&ctx->ac, 5020);
}
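/* Worked example of the payload packing above: a group with 64 vertices and
 * 22 primitives sends (22 << 12) | 64 = 0x16040, i.e. the vertex count in
 * the low bitfield and the primitive count in the high one. */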
struct ngg_prim {
   unsigned num_vertices;
   LLVMValueRef isnull;
   LLVMValueRef index[3];
   LLVMValueRef edgeflag[3];
};
static void build_export_prim(struct si_shader_context *ctx,
                              const struct ngg_prim *prim)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   struct ac_export_args args;
   LLVMValueRef tmp;

   tmp = LLVMBuildZExt(builder, prim->isnull, ctx->ac.i32, "");
   args.out[0] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 31, false), "");

   for (unsigned i = 0; i < prim->num_vertices; ++i) {
      tmp = LLVMBuildShl(builder, prim->index[i],
                         LLVMConstInt(ctx->ac.i32, 10 * i, false), "");
      args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
      tmp = LLVMBuildZExt(builder, prim->edgeflag[i], ctx->ac.i32, "");
      tmp = LLVMBuildShl(builder, tmp,
                         LLVMConstInt(ctx->ac.i32, 10 * i + 9, false), "");
      args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
   }

   args.out[0] = LLVMBuildBitCast(builder, args.out[0], ctx->ac.f32, "");
   args.out[1] = LLVMGetUndef(ctx->ac.f32);
   args.out[2] = LLVMGetUndef(ctx->ac.f32);
   args.out[3] = LLVMGetUndef(ctx->ac.f32);

   args.target = V_008DFC_SQ_EXP_PRIM;
   args.enabled_channels = 1;
   args.done = true;
   args.valid_mask = false;
   args.compr = false;

   ac_build_export(&ctx->ac, &args);
}
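/* The primitive export dword built above ends up laid out as:
 *   bits [8:0]   index 0        bit 9   edge flag 0
 *   bits [18:10] index 1        bit 19  edge flag 1
 *   bits [28:20] index 2        bit 29  edge flag 2
 *   bit 31       null primitive
 * i.e. index i lands at bit 10 * i and its edge flag at bit 10 * i + 9. */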
static void build_streamout_vertex(struct si_shader_context *ctx,
                                   LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
                                   unsigned stream, LLVMValueRef offset_vtx,
                                   LLVMValueRef vertexptr)
{
   struct tgsi_shader_info *info = &ctx->shader->selector->info;
   struct pipe_stream_output_info *so = &ctx->shader->selector->so;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef offset[4] = {};
   LLVMValueRef tmp;

   for (unsigned buffer = 0; buffer < 4; ++buffer) {
      if (!wg_offset_dw[buffer])
         continue;

      tmp = LLVMBuildMul(builder, offset_vtx,
                         LLVMConstInt(ctx->i32, so->stride[buffer], false), "");
      tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
      offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->i32, 2, false), "");
   }

   for (unsigned i = 0; i < so->num_outputs; ++i) {
      if (so->output[i].stream != stream)
         continue;

      unsigned reg = so->output[i].register_index;
      struct si_shader_output_values out;
      out.semantic_name = info->output_semantic_name[reg];
      out.semantic_index = info->output_semantic_index[reg];

      for (unsigned comp = 0; comp < 4; comp++) {
         tmp = ac_build_gep0(&ctx->ac, vertexptr,
                             LLVMConstInt(ctx->i32, 4 * reg + comp, false));
         out.values[comp] = LLVMBuildLoad(builder, tmp, "");
         out.vertex_stream[comp] =
            (info->output_streams[reg] >> (2 * comp)) & 3;
      }

      si_emit_streamout_output(ctx, so_buffer, offset,
                               &so->output[i], &out);
   }
}
struct ngg_streamout {
   LLVMValueRef num_vertices;

   /* per-thread data */
   LLVMValueRef prim_enable[4]; /* i1 per stream */
   LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */

   /* Output */
   LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
};
/**
 * Build streamout logic.
 *
 * Implies a barrier.
 *
 * Writes number of emitted primitives to gs_ngg_scratch[4:8].
 *
 * Clobbers gs_ngg_scratch[8:].
 */
static void build_streamout(struct si_shader_context *ctx,
                            struct ngg_streamout *nggso)
{
   struct tgsi_shader_info *info = &ctx->shader->selector->info;
   struct pipe_stream_output_info *so = &ctx->shader->selector->so;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
   LLVMValueRef tid = get_thread_id_in_tg(ctx);
   LLVMValueRef tmp, tmp2;
   LLVMValueRef i32_2 = LLVMConstInt(ctx->i32, 2, false);
   LLVMValueRef i32_4 = LLVMConstInt(ctx->i32, 4, false);
   LLVMValueRef i32_8 = LLVMConstInt(ctx->i32, 8, false);
   LLVMValueRef so_buffer[4] = {};
   unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
                               (nggso->vertices[2] ? 1 : 0);
   LLVMValueRef prim_stride_dw[4] = {};
   LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->i32);
   int stream_for_buffer[4] = { -1, -1, -1, -1 };
   unsigned bufmask_for_stream[4] = {};
   bool isgs = ctx->type == PIPE_SHADER_GEOMETRY;
   unsigned scratch_emit_base = isgs ? 4 : 0;
   LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->i32_0;
   unsigned scratch_offset_base = isgs ? 8 : 4;
   LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;

   ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);

   /* Determine the mapping of streamout buffers to vertex streams. */
   for (unsigned i = 0; i < so->num_outputs; ++i) {
      unsigned buf = so->output[i].output_buffer;
      unsigned stream = so->output[i].stream;
      assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
      stream_for_buffer[buf] = stream;
      bufmask_for_stream[stream] |= 1 << buf;
   }

   for (unsigned buffer = 0; buffer < 4; ++buffer) {
      if (stream_for_buffer[buffer] == -1)
         continue;

      assert(so->stride[buffer]);

      tmp = LLVMConstInt(ctx->i32, so->stride[buffer], false);
      prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
      prim_stride_dw_vgpr = ac_build_writelane(
         &ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
         LLVMConstInt(ctx->i32, buffer, false));

      so_buffer[buffer] = ac_build_load_to_sgpr(
         &ctx->ac, buf_ptr,
         LLVMConstInt(ctx->i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
   }

   tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->i32_0, "");
   ac_build_ifcc(&ctx->ac, tmp, 5200);
   {
      LLVMTypeRef gdsptr = LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GDS);
      LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->i32_0, gdsptr, "");

      /* Advance the streamout offsets in GDS. */
      LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
      LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");

      tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
      ac_build_ifcc(&ctx->ac, tmp, 5210);
      {
         if (isgs) {
            tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
            tmp = LLVMBuildLoad(builder, tmp, "");
         } else {
            tmp = ac_build_writelane(&ctx->ac, ctx->i32_0,
                                     ngg_get_prim_cnt(ctx), ctx->i32_0);
         }
         LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);

         unsigned swizzle[4];
         int unused_stream = -1;
         for (unsigned stream = 0; stream < 4; ++stream) {
            if (!info->num_stream_output_components[stream]) {
               unused_stream = stream;
               break;
            }
         }
         for (unsigned buffer = 0; buffer < 4; ++buffer) {
            if (stream_for_buffer[buffer] >= 0) {
               swizzle[buffer] = stream_for_buffer[buffer];
            } else {
               assert(unused_stream >= 0);
               swizzle[buffer] = unused_stream;
            }
         }

         tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
                                     swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
         tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");

         LLVMValueRef args[] = {
            LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
            tmp,
            ctx->i32_0, // ordering
            ctx->i32_0, // scope
            ctx->ac.i1false, // isVolatile
            LLVMConstInt(ctx->i32, 4 << 24, false), // OA index
            ctx->ac.i1true, // wave release
            ctx->ac.i1true, // wave done
         };
         tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
                                  ctx->i32, args, ARRAY_SIZE(args), 0);
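         /* llvm.amdgcn.ds.ordered.add serializes the GDS atomic across
          * workgroups in the order given by the ordered ID above, so the
          * pre-add value returned in tmp is the total dword count written by
          * all earlier workgroups, i.e. this group's starting offset into
          * each streamout buffer. */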
         /* Keep offsets in a VGPR for quick retrieval via readlane by
          * the first wave for bounds checking, and also store in LDS
          * for retrieval by all waves later. */
         LLVMBuildStore(builder, tmp, offsets_vgpr);

         tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
                             scratch_offset_basev, "");
         tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
         LLVMBuildStore(builder, tmp, tmp2);
      }
      ac_build_endif(&ctx->ac, 5210);

      /* Determine the max emit per buffer. This is done via the SALU, in part
       * because LLVM can't generate divide-by-multiply if we try to do this
       * via VALU with one lane per buffer.
       */
      LLVMValueRef max_emit[4] = {};
      for (unsigned buffer = 0; buffer < 4; ++buffer) {
         if (stream_for_buffer[buffer] == -1)
            continue;

         LLVMValueRef bufsize_dw =
            LLVMBuildLShr(builder,
                          LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
                          i32_2, "");

         tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
         LLVMValueRef offset_dw =
            ac_build_readlane(&ctx->ac, tmp,
                              LLVMConstInt(ctx->i32, buffer, false));

         tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
         tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");

         tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
         max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->i32_0, tmp, "");
      }

      /* Determine the number of emitted primitives per stream and fixup the
       * GDS counter if necessary.
       *
       * This is complicated by the fact that a single stream can emit to
       * multiple buffers (but luckily not vice versa).
       */
      LLVMValueRef emit_vgpr = ctx->i32_0;

      for (unsigned stream = 0; stream < 4; ++stream) {
         if (!info->num_stream_output_components[stream])
            continue;

         tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
         LLVMValueRef generated =
            ac_build_readlane(&ctx->ac, tmp,
                              LLVMConstInt(ctx->i32, stream, false));

         LLVMValueRef emit = generated;
         for (unsigned buffer = 0; buffer < 4; ++buffer) {
            if (stream_for_buffer[buffer] == stream)
               emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
         }

         emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
                                        LLVMConstInt(ctx->i32, stream, false));

         /* Fixup the offset using a plain GDS atomic if we overflowed. */
         tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
         ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
         tmp = LLVMBuildLShr(builder,
                             LLVMConstInt(ctx->i32, bufmask_for_stream[stream], false),
                             ac_get_thread_id(&ctx->ac), "");
         tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
         ac_build_ifcc(&ctx->ac, tmp, 5222);
         {
            tmp = LLVMBuildSub(builder, generated, emit, "");
            tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
            tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
            LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
                               LLVMAtomicOrderingMonotonic, false);
         }
         ac_build_endif(&ctx->ac, 5222);
         ac_build_endif(&ctx->ac, 5221);
      }

      tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
      ac_build_ifcc(&ctx->ac, tmp, 5225);
      {
         tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
                            scratch_emit_basev, "");
         tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
         LLVMBuildStore(builder, emit_vgpr, tmp);
      }
      ac_build_endif(&ctx->ac, 5225);
   }
   ac_build_endif(&ctx->ac, 5200);

   /* Determine the workgroup-relative per-thread / primitive offset into
    * the streamout buffers */
   struct ac_wg_scan primemit_scan[4] = {};

   if (isgs) {
      for (unsigned stream = 0; stream < 4; ++stream) {
         if (!info->num_stream_output_components[stream])
            continue;

         primemit_scan[stream].enable_exclusive = true;
         primemit_scan[stream].op = nir_op_iadd;
         primemit_scan[stream].src = nggso->prim_enable[stream];
         primemit_scan[stream].scratch =
            ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
                          LLVMConstInt(ctx->i32, 12 + 8 * stream, false));
         primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
         primemit_scan[stream].numwaves = get_tgsize(ctx);
         primemit_scan[stream].maxwaves = 8;
         ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
      }
   }

   ac_build_s_barrier(&ctx->ac);

   /* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
   LLVMValueRef wgoffset_dw[4] = {};

   {
      LLVMValueRef scratch_vgpr;

      tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
      scratch_vgpr = LLVMBuildLoad(builder, tmp, "");

      for (unsigned buffer = 0; buffer < 4; ++buffer) {
         if (stream_for_buffer[buffer] >= 0) {
            wgoffset_dw[buffer] = ac_build_readlane(
               &ctx->ac, scratch_vgpr,
               LLVMConstInt(ctx->i32, scratch_offset_base + buffer, false));
         }
      }

      for (unsigned stream = 0; stream < 4; ++stream) {
         if (info->num_stream_output_components[stream]) {
            nggso->emit[stream] = ac_build_readlane(
               &ctx->ac, scratch_vgpr,
               LLVMConstInt(ctx->i32, scratch_emit_base + stream, false));
         }
      }
   }

   /* Write out primitive data */
   for (unsigned stream = 0; stream < 4; ++stream) {
      if (!info->num_stream_output_components[stream])
         continue;

      if (isgs) {
         ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
      } else {
         primemit_scan[stream].result_exclusive = tid;
      }

      tmp = LLVMBuildICmp(builder, LLVMIntULT,
                          primemit_scan[stream].result_exclusive,
                          nggso->emit[stream], "");
      tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
      ac_build_ifcc(&ctx->ac, tmp, 5240);
      {
         LLVMValueRef offset_vtx =
            LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
                         nggso->num_vertices, "");

         for (unsigned i = 0; i < max_num_vertices; ++i) {
            tmp = LLVMBuildICmp(builder, LLVMIntULT,
                                LLVMConstInt(ctx->i32, i, false),
                                nggso->num_vertices, "");
            ac_build_ifcc(&ctx->ac, tmp, 5241);
            build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
                                   stream, offset_vtx, nggso->vertices[i]);
            ac_build_endif(&ctx->ac, 5241);
            offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->i32_1, "");
         }
      }
      ac_build_endif(&ctx->ac, 5240);
   }
}
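/* For reference, the gs_ngg_scratch dword layout that build_streamout relies
 * on in the GS case: [0:3] generated-primitive counts per stream, [4:7]
 * emitted primitives per stream (scratch_emit_base = 4), [8:11] streamout
 * buffer offsets (scratch_offset_base = 8), and 12 + 8 * stream onwards for
 * the workgroup scan. In the non-GS case the emit and offset groups start at
 * 0 and 4 instead. */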
static unsigned ngg_nogs_vertex_size(struct si_shader *shader)
{
   unsigned lds_vertex_size = 0;

   /* The edgeflag is always stored in the last element that's also
    * used for padding to reduce LDS bank conflicts. */
   if (shader->selector->so.num_outputs)
      lds_vertex_size = 4 * shader->selector->info.num_outputs + 1;
   if (shader->selector->info.writes_edgeflag)
      lds_vertex_size = MAX2(lds_vertex_size, 1);

   return lds_vertex_size;
}
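/* Example: with streamout enabled and 5 vertex outputs, a vertex occupies
 * 4 * 5 + 1 = 21 dwords; the odd size staggers vertices across LDS banks,
 * and the extra dword doubles as the edgeflag slot. */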
/**
 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
 * for the vertex outputs.
 */
static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx,
                                        LLVMValueRef vtxid)
{
   /* The extra dword is used to avoid LDS bank conflicts. */
   unsigned vertex_size = ngg_nogs_vertex_size(ctx->shader);
   LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, vertex_size);
   LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
   LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
   return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
}
/**
 * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
 */
void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
                             unsigned max_outputs,
                             LLVMValueRef *addrs)
{
   struct si_shader_context *ctx = si_shader_context_from_abi(abi);
   struct si_shader_selector *sel = ctx->shader->selector;
   struct tgsi_shader_info *info = &sel->info;
   struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp, tmp2;

   assert(!ctx->shader->is_gs_copy_shader);
   assert(info->num_outputs <= max_outputs);

   LLVMValueRef vertex_ptr = NULL;

   if (sel->so.num_outputs || sel->info.writes_edgeflag)
      vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));

   for (unsigned i = 0; i < info->num_outputs; i++) {
      outputs[i].semantic_name = info->output_semantic_name[i];
      outputs[i].semantic_index = info->output_semantic_index[i];

      for (unsigned j = 0; j < 4; j++) {
         outputs[i].vertex_stream[j] =
            (info->output_streams[i] >> (2 * j)) & 3;

         /* TODO: we may store more outputs than streamout needs,
          * but streamout performance isn't that important.
          */
         if (sel->so.num_outputs) {
            tmp = ac_build_gep0(&ctx->ac, vertex_ptr,
                                LLVMConstInt(ctx->i32, 4 * i + j, false));
            tmp2 = LLVMBuildLoad(builder, addrs[4 * i + j], "");
            tmp2 = ac_to_integer(&ctx->ac, tmp2);
            LLVMBuildStore(builder, tmp2, tmp);
         }
      }

      /* Store the edgeflag at the end (if streamout is enabled) */
      if (info->output_semantic_name[i] == TGSI_SEMANTIC_EDGEFLAG &&
          sel->info.writes_edgeflag) {
         LLVMValueRef edgeflag = LLVMBuildLoad(builder, addrs[4 * i], "");
         /* The output is a float, but the hw expects a 1-bit integer. */
         edgeflag = LLVMBuildFPToUI(ctx->ac.builder, edgeflag, ctx->i32, "");
         edgeflag = ac_build_umin(&ctx->ac, edgeflag, ctx->i32_1);

         tmp = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
         tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
         LLVMBuildStore(builder, edgeflag, tmp);
      }
   }

   ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

   LLVMValueRef is_gs_thread = si_is_gs_thread(ctx);
   LLVMValueRef is_es_thread = si_is_es_thread(ctx);
   LLVMValueRef vtxindex[] = {
      si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16),
      si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16),
      si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16),
   };

   /* Determine the number of vertices per primitive. */
   unsigned num_vertices;
   LLVMValueRef num_vertices_val;

   if (ctx->type == PIPE_SHADER_VERTEX) {
      if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
         /* Blits always use axis-aligned rectangles with 3 vertices. */
         num_vertices = 3;
         num_vertices_val = LLVMConstInt(ctx->i32, 3, 0);
      } else {
         /* Extract OUTPRIM field. */
         tmp = si_unpack_param(ctx, ctx->vs_state_bits, 2, 2);
         num_vertices_val = LLVMBuildAdd(builder, tmp, ctx->i32_1, "");
         num_vertices = 3; /* TODO: optimize for points & lines */
      }
   } else {
      assert(ctx->type == PIPE_SHADER_TESS_EVAL);

      if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
         num_vertices = 1;
      else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
         num_vertices = 2;
      else
         num_vertices = 3;

      num_vertices_val = LLVMConstInt(ctx->i32, num_vertices, false);
   }

   /* Streamout */
   LLVMValueRef emitted_prims = NULL;

   if (sel->so.num_outputs) {
      struct ngg_streamout nggso = {};

      nggso.num_vertices = num_vertices_val;
      nggso.prim_enable[0] = is_gs_thread;

      for (unsigned i = 0; i < num_vertices; ++i)
         nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);

      build_streamout(ctx, &nggso);
      emitted_prims = nggso.emit[0];
   }

   LLVMValueRef user_edgeflags[3] = {};

   if (sel->info.writes_edgeflag) {
      /* Streamout already inserted the barrier, so don't insert it again. */
      if (!sel->so.num_outputs)
         ac_build_s_barrier(&ctx->ac);

      ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
      /* Load edge flags from ES threads and store them into VGPRs in GS threads. */
      for (unsigned i = 0; i < num_vertices; i++) {
         tmp = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
         tmp2 = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
         tmp = ac_build_gep0(&ctx->ac, tmp, tmp2);
         tmp = LLVMBuildLoad(builder, tmp, "");
         tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");

         user_edgeflags[i] = ac_build_alloca_undef(&ctx->ac, ctx->i1, "");
         LLVMBuildStore(builder, tmp, user_edgeflags[i]);
      }
      ac_build_endif(&ctx->ac, 5400);
   }

   /* Copy Primitive IDs from GS threads to the LDS address corresponding
    * to the ES thread of the provoking vertex.
    */
   if (ctx->type == PIPE_SHADER_VERTEX &&
       ctx->shader->key.mono.u.vs_export_prim_id) {
      /* Streamout and edge flags use LDS. Make it idle, so that we can reuse it. */
      if (sel->so.num_outputs || sel->info.writes_edgeflag)
         ac_build_s_barrier(&ctx->ac);

      ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
      /* Extract the PROVOKING_VTX_INDEX field. */
      LLVMValueRef provoking_vtx_in_prim =
         si_unpack_param(ctx, ctx->vs_state_bits, 4, 2);

      /* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
      LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
      LLVMValueRef provoking_vtx_index =
         LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");

      LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.gs_prim_id),
                     ac_build_gep0(&ctx->ac, ctx->esgs_ring, provoking_vtx_index));
      ac_build_endif(&ctx->ac, 5400);
   }

   /* TODO: primitive culling */

   build_sendmsg_gs_alloc_req(ctx, ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));

   /* Update query buffer */
   /* TODO: this won't catch 96-bit clear_buffer via transform feedback. */
   if (!info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
      tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
      tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
      ac_build_ifcc(&ctx->ac, tmp, 5029); /* if (STREAMOUT_QUERY_ENABLED) */
      tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
      ac_build_ifcc(&ctx->ac, tmp, 5030);
      tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
                          sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
      ac_build_ifcc(&ctx->ac, tmp, 5031);
      {
         LLVMValueRef args[] = {
            ngg_get_prim_cnt(ctx),
            ngg_get_query_buf(ctx),
            LLVMConstInt(ctx->i32, 16, false), /* offset of stream[0].generated_primitives */
            ctx->i32_0, /* soffset */
            ctx->i32_0, /* cachepolicy */
         };

         if (sel->so.num_outputs) {
            args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->i32_1);
            args[2] = ac_build_writelane(&ctx->ac, args[2],
                                         LLVMConstInt(ctx->i32, 24, false), ctx->i32_1);
         }

         /* TODO: should this be 64-bit atomics? */
         ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
                            ctx->i32, args, 5, 0);
      }
      ac_build_endif(&ctx->ac, 5031);
      ac_build_endif(&ctx->ac, 5030);
      ac_build_endif(&ctx->ac, 5029);
   }

   /* Export primitive data to the index buffer. Format is:
    * - bits 0..8: index 0
    * - bit 9: edge flag 0
    * - bits 10..18: index 1
    * - bit 19: edge flag 1
    * - bits 20..28: index 2
    * - bit 29: edge flag 2
    * - bit 31: null primitive (skip)
    *
    * For the first version, we will always build up all three indices
    * independent of the primitive type. The additional garbage data
    * shouldn't hurt.
    *
    * TODO: culling depends on the primitive type, so can have some
    * interaction here.
    */
   ac_build_ifcc(&ctx->ac, is_gs_thread, 6001);
   {
      struct ngg_prim prim = {};

      prim.num_vertices = num_vertices;
      prim.isnull = ctx->ac.i1false;
      memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

      for (unsigned i = 0; i < num_vertices; ++i) {
         if (ctx->type != PIPE_SHADER_VERTEX) {
            prim.edgeflag[i] = ctx->i1false;
            continue;
         }

         tmp = LLVMBuildLShr(builder,
                             ac_get_arg(&ctx->ac, ctx->args.gs_invocation_id),
                             LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
         prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

         if (sel->info.writes_edgeflag) {
            tmp2 = LLVMBuildLoad(builder, user_edgeflags[i], "");
            prim.edgeflag[i] = LLVMBuildAnd(builder, prim.edgeflag[i],
                                            tmp2, "");
         }
      }

      build_export_prim(ctx, &prim);
   }
   ac_build_endif(&ctx->ac, 6001);

   /* Export per-vertex data (positions and parameters). */
   ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
   {
      unsigned i;

      /* Unconditionally (re-)load the values for proper SSA form. */
      for (i = 0; i < info->num_outputs; i++) {
         for (unsigned j = 0; j < 4; j++) {
            outputs[i].values[j] =
               LLVMBuildLoad(builder, addrs[4 * i + j], "");
         }
      }

      if (ctx->shader->key.mono.u.vs_export_prim_id) {
         outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
         outputs[i].semantic_index = 0;

         if (ctx->type == PIPE_SHADER_VERTEX) {
            /* Wait for GS stores to finish. */
            ac_build_s_barrier(&ctx->ac);

            tmp = ac_build_gep0(&ctx->ac, ctx->esgs_ring,
                                get_thread_id_in_tg(ctx));
            outputs[i].values[0] = LLVMBuildLoad(builder, tmp, "");
         } else {
            assert(ctx->type == PIPE_SHADER_TESS_EVAL);
            outputs[i].values[0] = si_get_primitive_id(ctx, 0);
         }

         outputs[i].values[0] = ac_to_float(&ctx->ac, outputs[i].values[0]);
         for (unsigned j = 1; j < 4; j++)
            outputs[i].values[j] = LLVMGetUndef(ctx->f32);

         memset(outputs[i].vertex_stream, 0,
                sizeof(outputs[i].vertex_stream));

         i++;
      }

      si_llvm_export_vs(ctx, outputs, i);
   }
   ac_build_endif(&ctx->ac, 6002);
}
static LLVMValueRef
ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
{
   const struct si_shader_selector *sel = ctx->shader->selector;
   const struct tgsi_shader_info *info = &sel->info;

   LLVMTypeRef elements[2] = {
      LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
      LLVMArrayType(ctx->ac.i8, 4),
   };
   LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
   type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
   return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}
/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive
 * for rasterization.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
static LLVMValueRef
ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
{
   struct si_shader_selector *sel = ctx->shader->selector;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

   /* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
   unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
   if (write_stride_2exp) {
      LLVMValueRef row =
         LLVMBuildLShr(builder, vertexidx,
                       LLVMConstInt(ctx->ac.i32, 5, false), "");
      LLVMValueRef swizzle =
         LLVMBuildAnd(builder, row,
                      LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
                                   false), "");
      vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
   }

   return ac_build_gep0(&ctx->ac, storage, vertexidx);
}
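/* Worked example of the swizzle above: with gs_max_out_vertices = 4,
 * write_stride_2exp = 2, so row = vertexidx >> 5 and its low two bits are
 * XORed into vertexidx. Vertices 0..31 keep their index, 32..63 get bit 0
 * flipped, 64..95 get bit 1 flipped, and so on, staggering rows across
 * banks. */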
static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
                       LLVMValueRef emitidx)
{
   struct si_shader_selector *sel = ctx->shader->selector;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp;

   tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
   tmp = LLVMBuildMul(builder, tmp, gsthread, "");
   const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
   return ngg_gs_vertex_ptr(ctx, vertexidx);
}
static LLVMValueRef
ngg_gs_get_emit_output_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
                           unsigned out_idx)
{
   LLVMValueRef gep_idx[3] = {
      ctx->ac.i32_0, /* implied C-style array */
      ctx->ac.i32_0, /* first struct entry */
      LLVMConstInt(ctx->ac.i32, out_idx, false),
   };
   return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}
static LLVMValueRef
ngg_gs_get_emit_primflag_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
                             unsigned stream)
{
   LLVMValueRef gep_idx[3] = {
      ctx->ac.i32_0, /* implied C-style array */
      ctx->ac.i32_1, /* second struct entry */
      LLVMConstInt(ctx->ac.i32, stream, false),
   };
   return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}
void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
                              unsigned stream,
                              LLVMValueRef *addrs)
{
   const struct si_shader_selector *sel = ctx->shader->selector;
   const struct tgsi_shader_info *info = &sel->info;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp;
   const LLVMValueRef vertexidx =
      LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

   /* If this thread has already emitted the declared maximum number of
    * vertices, skip the write: excessive vertex emissions are not
    * supposed to have any effect.
    */
   const LLVMValueRef can_emit =
      LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
                    LLVMConstInt(ctx->i32, sel->gs_max_out_vertices, false), "");

   tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
   tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
   LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

   ac_build_ifcc(&ctx->ac, can_emit, 9001);

   const LLVMValueRef vertexptr =
      ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
   unsigned out_idx = 0;
   for (unsigned i = 0; i < info->num_outputs; i++) {
      for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
         if (!(info->output_usagemask[i] & (1 << chan)) ||
             ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
            continue;

         LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
         out_val = ac_to_integer(&ctx->ac, out_val);
         LLVMBuildStore(builder, out_val,
                        ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx));
      }
   }
   assert(out_idx * 4 == sel->gsvs_vertex_size);

   /* Determine and store whether this vertex completed a primitive. */
   const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

   tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
   const LLVMValueRef iscompleteprim =
      LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

   /* Since the geometry shader emits triangle strips, we need to
    * track which primitive is odd and swap vertex indices to get
    * the correct vertex order.
    */
   LLVMValueRef is_odd = ctx->i1false;
   if (stream == 0 && u_vertices_per_prim(sel->gs_output_prim) == 3) {
      tmp = LLVMBuildAnd(builder, curverts, ctx->i32_1, "");
      is_odd = LLVMBuildICmp(builder, LLVMIntEQ, tmp, ctx->i32_1, "");
   }

   tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
   LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

   /* The per-vertex primitive flag encoding:
    *   bit 0: whether this vertex finishes a primitive
    *   bit 1: whether the primitive is odd (if we are emitting triangle strips)
    */
   tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
   tmp = LLVMBuildOr(builder, tmp,
                     LLVMBuildShl(builder,
                                  LLVMBuildZExt(builder, is_odd, ctx->ac.i8, ""),
                                  ctx->ac.i8_1, ""), "");
   LLVMBuildStore(builder, tmp, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream));

   tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
   tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
   LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);

   ac_build_endif(&ctx->ac, 9001);
}
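/* Worked example of the primflag encoding: a thread emitting a 5-vertex
 * triangle strip on stream 0 sees curverts = 0,1,2,3,4, so iscompleteprim is
 * false for the first two vertices and the stored flags are 0, 0, 1 (even
 * triangle), 3 (odd triangle, bit 1 set), 1 (even again). */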
void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
{
   /* Zero out the part of LDS scratch that is used to accumulate the
    * per-stream generated primitive count.
    */
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
   LLVMValueRef tid = get_thread_id_in_tg(ctx);
   LLVMValueRef tmp;

   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->i32, 4, false), "");
   ac_build_ifcc(&ctx->ac, tmp, 5090);
   {
      LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
      LLVMBuildStore(builder, ctx->i32_0, ptr);
   }
   ac_build_endif(&ctx->ac, 5090);

   ac_build_s_barrier(&ctx->ac);
}
void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
{
   const struct si_shader_selector *sel = ctx->shader->selector;
   const struct tgsi_shader_info *info = &sel->info;
   const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
   LLVMValueRef tmp, tmp2;

   /* Zero out remaining (non-emitted) primitive flags.
    *
    * Note: Alternatively, we could pass the relevant gs_next_vertex to
    *       the emit threads via LDS. This is likely worse in the expected
    *       typical case where each GS thread emits the full set of
    *       vertices.
    */
   for (unsigned stream = 0; stream < 4; ++stream) {
      if (!info->num_stream_output_components[stream])
         continue;

      const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

      ac_build_bgnloop(&ctx->ac, 5100);

      const LLVMValueRef vertexidx =
         LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
      tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
                          LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
      ac_build_ifcc(&ctx->ac, tmp, 5101);
      ac_build_break(&ctx->ac);
      ac_build_endif(&ctx->ac, 5101);

      tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
      LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

      tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
      LLVMBuildStore(builder, i8_0, ngg_gs_get_emit_primflag_ptr(ctx, tmp, stream));

      ac_build_endloop(&ctx->ac, 5100);
   }

   /* Accumulate generated primitives counts across the entire threadgroup. */
   for (unsigned stream = 0; stream < 4; ++stream) {
      if (!info->num_stream_output_components[stream])
         continue;

      LLVMValueRef numprims =
         LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
      numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);

      tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->i32_0, "");
      ac_build_ifcc(&ctx->ac, tmp, 5105);
      {
         LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
                            ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
                                          LLVMConstInt(ctx->i32, stream, false)),
                            numprims, LLVMAtomicOrderingMonotonic, false);
      }
      ac_build_endif(&ctx->ac, 5105);
   }

   ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

   ac_build_s_barrier(&ctx->ac);

   const LLVMValueRef tid = get_thread_id_in_tg(ctx);
   LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);

   /* Streamout */
   if (sel->so.num_outputs) {
      struct ngg_streamout nggso = {};

      nggso.num_vertices = LLVMConstInt(ctx->i32, verts_per_prim, false);

      LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
      for (unsigned stream = 0; stream < 4; ++stream) {
         if (!info->num_stream_output_components[stream])
            continue;

         tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream), "");
         tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
         tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
         nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
      }

      for (unsigned i = 0; i < verts_per_prim; ++i) {
         tmp = LLVMBuildSub(builder, tid,
                            LLVMConstInt(ctx->i32, verts_per_prim - i - 1, false), "");
         tmp = ngg_gs_vertex_ptr(ctx, tmp);
         nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
      }

      build_streamout(ctx, &nggso);
   }

   /* Write shader query data. */
   tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
   tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
   ac_build_ifcc(&ctx->ac, tmp, 5109); /* if (STREAMOUT_QUERY_ENABLED) */
   unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
                       LLVMConstInt(ctx->i32, num_query_comps, false), "");
   ac_build_ifcc(&ctx->ac, tmp, 5110);
   {
      LLVMValueRef offset;
      tmp = tid;
      if (sel->so.num_outputs)
         tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->i32, 3, false), "");
      offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 32, false), "");
      if (sel->so.num_outputs) {
         tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->i32, 2, false), "");
         tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 8, false), "");
         offset = LLVMBuildAdd(builder, offset, tmp, "");
      }

      tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
      LLVMValueRef args[] = {
         tmp,
         ngg_get_query_buf(ctx),
         offset,
         LLVMConstInt(ctx->i32, 16, false), /* soffset */
         ctx->i32_0, /* cachepolicy */
      };
      ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
                         ctx->i32, args, 5, 0);
   }
   ac_build_endif(&ctx->ac, 5110);
   ac_build_endif(&ctx->ac, 5109);

   /* TODO: culling */

   /* Determine vertex liveness. */
   LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");

   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
   ac_build_ifcc(&ctx->ac, tmp, 5120);
   {
      for (unsigned i = 0; i < verts_per_prim; ++i) {
         const LLVMValueRef primidx =
            LLVMBuildAdd(builder, tid,
                         LLVMConstInt(ctx->ac.i32, i, false), "");

         if (i > 0) {
            tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
            ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
         }

         /* Load primitive liveness */
         tmp = ngg_gs_vertex_ptr(ctx, primidx);
         tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
         const LLVMValueRef primlive =
            LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

         tmp = LLVMBuildLoad(builder, vertliveptr, "");
         tmp = LLVMBuildOr(builder, tmp, primlive, "");
         LLVMBuildStore(builder, tmp, vertliveptr);

         if (i > 0)
            ac_build_endif(&ctx->ac, 5121 + i);
      }
   }
   ac_build_endif(&ctx->ac, 5120);

   /* Inclusive scan addition across the current wave. */
   LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
   struct ac_wg_scan vertlive_scan = {};
   vertlive_scan.op = nir_op_iadd;
   vertlive_scan.enable_reduce = true;
   vertlive_scan.enable_exclusive = true;
   vertlive_scan.src = vertlive;
   vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->i32_0);
   vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
   vertlive_scan.numwaves = get_tgsize(ctx);
   vertlive_scan.maxwaves = 8;

   ac_build_wg_scan(&ctx->ac, &vertlive_scan);

   /* Skip all exports (including index exports) when possible. At least on
    * early gfx10 revisions this is also to avoid hangs.
    */
   LLVMValueRef have_exports =
      LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
   num_emit_threads =
      LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

   /* Allocate export space. Send this message as early as possible, to
    * hide the latency of the SQ <-> SPI roundtrip.
    *
    * Note: We could consider compacting primitives for export as well.
    *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
    *       prim data per clock and skips null primitives at no additional
    *       cost. So compacting primitives can only be beneficial when
    *       there are 4 or more contiguous null primitives in the export
    *       (in the common case of single-dword prim exports).
    */
   build_sendmsg_gs_alloc_req(ctx, vertlive_scan.result_reduce, num_emit_threads);

   /* Setup the reverse vertex compaction permutation. We re-use stream 1
    * of the primitive liveness flags, relying on the fact that each
    * threadgroup can have at most 256 threads. */
   ac_build_ifcc(&ctx->ac, vertlive, 5130);
   {
      tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
      tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
      LLVMBuildStore(builder, tmp2, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1));
   }
   ac_build_endif(&ctx->ac, 5130);

   ac_build_s_barrier(&ctx->ac);

   /* Export primitive data */
   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
   ac_build_ifcc(&ctx->ac, tmp, 5140);
   {
      LLVMValueRef flags;
      struct ngg_prim prim = {};
      prim.num_vertices = verts_per_prim;

      tmp = ngg_gs_vertex_ptr(ctx, tid);
      flags = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
      prim.isnull = LLVMBuildNot(builder, LLVMBuildTrunc(builder, flags, ctx->i1, ""), "");

      for (unsigned i = 0; i < verts_per_prim; ++i) {
         prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
                                      LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
         prim.edgeflag[i] = ctx->ac.i1false;
      }

      /* Geometry shaders output triangle strips, but NGG expects triangles.
       * We need to change the vertex order for odd triangles to get correct
       * front/back facing by swapping 2 vertex indices, but we also have to
       * keep the provoking vertex in the same place.
       *
       * If the first vertex is provoking, swap index 1 and 2.
       * If the last vertex is provoking, swap index 0 and 1.
       */
      if (verts_per_prim == 3) {
         LLVMValueRef is_odd = LLVMBuildLShr(builder, flags, ctx->ac.i8_1, "");
         is_odd = LLVMBuildTrunc(builder, is_odd, ctx->i1, "");
         LLVMValueRef flatshade_first =
            LLVMBuildICmp(builder, LLVMIntEQ,
                          si_unpack_param(ctx, ctx->vs_state_bits, 4, 2),
                          ctx->i32_0, "");

         struct ngg_prim in = prim;
         prim.index[0] = LLVMBuildSelect(builder, flatshade_first,
                                         in.index[0],
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[1], in.index[0], ""), "");
         prim.index[1] = LLVMBuildSelect(builder, flatshade_first,
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[2], in.index[1], ""),
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[0], in.index[1], ""), "");
         prim.index[2] = LLVMBuildSelect(builder, flatshade_first,
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[1], in.index[2], ""),
                                         in.index[2], "");
      }

      build_export_prim(ctx, &prim);
   }
   ac_build_endif(&ctx->ac, 5140);

   /* Export position and parameter data */
   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
   ac_build_ifcc(&ctx->ac, tmp, 5145);
   {
      struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];

      tmp = ngg_gs_vertex_ptr(ctx, tid);
      tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1), "");
      tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
      const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

      unsigned out_idx = 0;
      for (unsigned i = 0; i < info->num_outputs; i++) {
         outputs[i].semantic_name = info->output_semantic_name[i];
         outputs[i].semantic_index = info->output_semantic_index[i];

         for (unsigned j = 0; j < 4; j++, out_idx++) {
            tmp = ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx);
            tmp = LLVMBuildLoad(builder, tmp, "");
            outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
            outputs[i].vertex_stream[j] =
               (info->output_streams[i] >> (2 * j)) & 3;
         }
      }

      si_llvm_export_vs(ctx, outputs, info->num_outputs);
   }
   ac_build_endif(&ctx->ac, 5145);
}
static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
                                     unsigned min_verts_per_prim, bool use_adjacency)
{
   unsigned max_reuse = max_esverts - min_verts_per_prim;
   if (use_adjacency)
      max_reuse /= 2;
   *max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}
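/* Example: max_esverts = 128 and min_verts_per_prim = 3 give max_reuse = 125
 * and clamp max_gsprims to 126; with adjacency, where only every other vertex
 * can start a primitive, max_reuse halves to 62 and the clamp becomes 63. */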
/**
 * Determine subgroup information like maximum number of vertices and prims.
 *
 * This happens before the shader is uploaded, since LDS relocations during
 * upload depend on the subgroup size.
 */
void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
{
   const struct si_shader_selector *gs_sel = shader->selector;
   const struct si_shader_selector *es_sel =
      shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
   const enum pipe_shader_type gs_type = gs_sel->type;
   const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
   const unsigned input_prim = si_get_input_prim(gs_sel);
   const bool use_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
                              input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
   const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
   const unsigned min_verts_per_prim =
      gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;

   /* All these are in dwords: */
   /* We can't allow using the whole LDS, because GS waves compete with
    * other shader stages for LDS space.
    *
    * TODO: We should really take the shader's internal LDS use into
    *       account. The linker will fail if the size is greater than
    *       what it can handle.
    */
   const unsigned max_lds_size = 8 * 1024 - 768;
   const unsigned target_lds_size = max_lds_size;
   unsigned esvert_lds_size = 0;
   unsigned gsprim_lds_size = 0;

   /* All these are per subgroup: */
   bool max_vert_out_per_gs_instance = false;
   unsigned max_esverts_base = 128;
   unsigned max_gsprims_base = 128; /* default prim group size clamp */

   /* Hardware has the following non-natural restrictions on the value
    * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of the draw:
    *  - at most 252 for any line input primitive type
    *  - at most 251 for any quad input primitive type
    *  - at most 251 for triangle strips with adjacency (this happens to
    *    be the natural limit for triangle *lists* with adjacency)
    */
   max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

   if (gs_type == PIPE_SHADER_GEOMETRY) {
      unsigned max_out_verts_per_gsprim =
         gs_sel->gs_max_out_vertices * gs_num_invocations;

      if (max_out_verts_per_gsprim <= 256) {
         if (max_out_verts_per_gsprim) {
            max_gsprims_base = MIN2(max_gsprims_base,
                                    256 / max_out_verts_per_gsprim);
         }
      } else {
         /* Use special multi-cycling mode in which each GS
          * instance gets its own subgroup. Does not work with
          * tessellation. */
         max_vert_out_per_gs_instance = true;
         max_gsprims_base = 1;
         max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
      }

      esvert_lds_size = es_sel->esgs_itemsize / 4;
      gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
   } else {
      /* LDS size for passing data from ES to GS. */
      esvert_lds_size = ngg_nogs_vertex_size(shader);

      /* LDS size for passing data from GS to ES.
       * GS stores Primitive IDs into LDS at the address corresponding
       * to the ES thread of the provoking vertex. All ES threads
       * load and export PrimitiveID for their thread.
       */
      if (gs_sel->type == PIPE_SHADER_VERTEX &&
          shader->key.mono.u.vs_export_prim_id)
         esvert_lds_size = MAX2(esvert_lds_size, 1);
   }

   unsigned max_gsprims = max_gsprims_base;
   unsigned max_esverts = max_esverts_base;

   if (esvert_lds_size)
      max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
   if (gsprim_lds_size)
      max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

   max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
   clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
   assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

   if (esvert_lds_size || gsprim_lds_size) {
      /* Now that we have a rough proportionality between esverts
       * and gsprims based on the primitive type, scale both of them
       * down simultaneously based on required LDS space.
       *
       * We could be smarter about this if we knew how much vertex
       * reuse to expect.
       */
      unsigned lds_total = max_esverts * esvert_lds_size +
                           max_gsprims * gsprim_lds_size;
      if (lds_total > target_lds_size) {
         max_esverts = max_esverts * target_lds_size / lds_total;
         max_gsprims = max_gsprims * target_lds_size / lds_total;

         max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
         clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
                                  min_verts_per_prim, use_adjacency);
         assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
      }
   }

   /* Round up towards full wave sizes for better ALU utilization. */
   if (!max_vert_out_per_gs_instance) {
      const unsigned wavesize = gs_sel->screen->ge_wave_size;
      unsigned orig_max_esverts;
      unsigned orig_max_gsprims;
      do {
         orig_max_esverts = max_esverts;
         orig_max_gsprims = max_gsprims;

         max_esverts = align(max_esverts, wavesize);
         max_esverts = MIN2(max_esverts, max_esverts_base);
         if (esvert_lds_size)
            max_esverts = MIN2(max_esverts,
                               (max_lds_size - max_gsprims * gsprim_lds_size) /
                               esvert_lds_size);
         max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

         max_gsprims = align(max_gsprims, wavesize);
         max_gsprims = MIN2(max_gsprims, max_gsprims_base);
         if (gsprim_lds_size)
            max_gsprims = MIN2(max_gsprims,
                               (max_lds_size - max_esverts * esvert_lds_size) /
                               gsprim_lds_size);
         clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
                                  min_verts_per_prim, use_adjacency);
         assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
      } while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
   }

   /* Hardware restriction: minimum value of max_esverts */
   max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);

   unsigned max_out_vertices =
      max_vert_out_per_gs_instance ? gs_sel->gs_max_out_vertices :
      gs_type == PIPE_SHADER_GEOMETRY ?
      max_gsprims * gs_num_invocations * gs_sel->gs_max_out_vertices :
      max_esverts;
   assert(max_out_vertices <= 256);

   unsigned prim_amp_factor = 1;
   if (gs_type == PIPE_SHADER_GEOMETRY) {
      /* Number of output primitives per GS input primitive after
       * GS instancing. */
      prim_amp_factor = gs_sel->gs_max_out_vertices;
   }

   /* The GE only checks against the maximum number of ES verts after
    * allocating a full GS primitive. So we need to ensure that whenever
    * this check passes, there is enough space for a full primitive without
    * vertex reuse.
    */
   shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
   shader->ngg.max_gsprims = max_gsprims;
   shader->ngg.max_out_verts = max_out_vertices;
   shader->ngg.prim_amp_factor = prim_amp_factor;
   shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;

   shader->gs_info.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
   shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;

   assert(shader->ngg.hw_max_esverts >= 24); /* HW limitation */
}
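/* Worked example of the LDS budget above: max_lds_size = 8 * 1024 - 768 =
 * 7424 dwords. A non-GS shader with streamout and 8 outputs has
 * esvert_lds_size = 4 * 8 + 1 = 33, so max_esverts = MIN2(128, 7424 / 33) =
 * 128, and the wave-size rounding loop leaves it there. */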