/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_shader_internal.h"

#include "sid.h"

#include "util/u_memory.h"
#include "util/u_prim.h"

static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
}

static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->merged_wave_info, 28, 4);
}
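
/* As implied by the unpack calls above, merged_wave_info packs (at least)
 * the wave's index within the threadgroup in bits [24:27] and the number
 * of waves in the threadgroup in bits [28:31].
 */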

static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp;
   tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
                      LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
   return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}

static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->gs_tg_info, 12, 9);
}

static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->gs_tg_info, 22, 9);
}

static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->gs_tg_info, 0, 12);
}
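
/* Per the unpack calls above, gs_tg_info packs the ordered wave ID in bits
 * [0:11], the threadgroup vertex count in bits [12:20] and the threadgroup
 * primitive count in bits [22:30].
 */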

static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
{
   LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);

   return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
                                LLVMConstInt(ctx->i32, GFX10_GS_QUERY_BUF, false));
}

static LLVMValueRef ngg_get_initial_edgeflag(struct si_shader_context *ctx, unsigned index)
{
   if (ctx->type == PIPE_SHADER_VERTEX) {
      LLVMValueRef tmp;
      tmp = LLVMBuildLShr(ctx->ac.builder,
                          ac_get_arg(&ctx->ac, ctx->args.gs_invocation_id),
                          LLVMConstInt(ctx->ac.i32, 8 + index, false), "");
      return LLVMBuildTrunc(ctx->ac.builder, tmp, ctx->ac.i1, "");
   }
   return ctx->i1false;
}

/**
 * Return an upper bound on the number of vertices per primitive as a
 * compile-time constant in \p num_vertices, and return the precise
 * runtime value as an LLVMValueRef.
 */
static LLVMValueRef ngg_get_vertices_per_prim(struct si_shader_context *ctx,
                                              unsigned *num_vertices)
{
   const struct si_shader_info *info = &ctx->shader->selector->info;

   if (ctx->type == PIPE_SHADER_VERTEX) {
      if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
         /* Blits always use axis-aligned rectangles with 3 vertices. */
         *num_vertices = 3;
         return LLVMConstInt(ctx->i32, 3, 0);
      } else {
         /* We always build up all three indices for the prim export
          * independent of the primitive type. The additional garbage
          * data shouldn't hurt. This number doesn't matter with
          * NGG passthrough.
          */
         *num_vertices = 3;

         /* Extract OUTPRIM field. */
         LLVMValueRef num = si_unpack_param(ctx, ctx->vs_state_bits, 2, 2);
         return LLVMBuildAdd(ctx->ac.builder, num, ctx->i32_1, "");
      }
   } else {
      assert(ctx->type == PIPE_SHADER_TESS_EVAL);

      if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
         *num_vertices = 1;
      else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
         *num_vertices = 2;
      else
         *num_vertices = 3;

      return LLVMConstInt(ctx->i32, *num_vertices, false);
   }
}
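
/* Worked example for the VS path above: the 2-bit OUTPRIM field in
 * vs_state_bits encodes vertices-per-primitive minus one, so after the
 * +1 a field value of 0 yields 1 (points), 1 yields 2 (lines) and
 * 2 yields 3 (triangles).
 */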

bool gfx10_ngg_export_prim_early(struct si_shader *shader)
{
   struct si_shader_selector *sel = shader->selector;

   assert(shader->key.as_ngg && !shader->key.as_es);

   return sel->type != PIPE_SHADER_GEOMETRY &&
          !sel->info.writes_edgeflag;
}

void gfx10_ngg_build_sendmsg_gs_alloc_req(struct si_shader_context *ctx)
{
   ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
                                 ngg_get_vtx_cnt(ctx),
                                 ngg_get_prim_cnt(ctx));
}

void gfx10_ngg_build_export_prim(struct si_shader_context *ctx,
                                 LLVMValueRef user_edgeflags[3])
{
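   /* In NGG passthrough mode the hardware provides the packed primitive
    * export word directly in gs_vtx01_offset, so it can be exported as-is
    * instead of being assembled from individual vertex indices.
    */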
   if (gfx10_is_ngg_passthrough(ctx->shader)) {
      ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 6001);
      {
         struct ac_ngg_prim prim = {};

         prim.passthrough = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset);
         ac_build_export_prim(&ctx->ac, &prim);
      }
      ac_build_endif(&ctx->ac, 6001);
      return;
   }

   ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 6001);
   {
      struct ac_ngg_prim prim = {};

      ngg_get_vertices_per_prim(ctx, &prim.num_vertices);

      prim.isnull = ctx->ac.i1false;
      prim.index[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16);
      prim.index[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16);
      prim.index[2] = si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16);

      for (unsigned i = 0; i < prim.num_vertices; ++i) {
         prim.edgeflag[i] = ngg_get_initial_edgeflag(ctx, i);

         if (ctx->shader->selector->info.writes_edgeflag) {
            LLVMValueRef edge;

            edge = LLVMBuildLoad(ctx->ac.builder, user_edgeflags[i], "");
            edge = LLVMBuildAnd(ctx->ac.builder, prim.edgeflag[i], edge, "");
            prim.edgeflag[i] = edge;
         }
      }

      ac_build_export_prim(&ctx->ac, &prim);
   }
   ac_build_endif(&ctx->ac, 6001);
}

static void build_streamout_vertex(struct si_shader_context *ctx,
                                   LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
                                   unsigned stream, LLVMValueRef offset_vtx,
                                   LLVMValueRef vertexptr)
{
   struct si_shader_info *info = &ctx->shader->selector->info;
   struct pipe_stream_output_info *so = &ctx->shader->selector->so;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef offset[4] = {};
   LLVMValueRef tmp;

   for (unsigned buffer = 0; buffer < 4; ++buffer) {
      if (!wg_offset_dw[buffer])
         continue;

      tmp = LLVMBuildMul(builder, offset_vtx,
                         LLVMConstInt(ctx->i32, so->stride[buffer], false), "");
      tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
      offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->i32, 2, false), "");
   }

   for (unsigned i = 0; i < so->num_outputs; ++i) {
      if (so->output[i].stream != stream)
         continue;

      unsigned reg = so->output[i].register_index;
      struct si_shader_output_values out;
      out.semantic_name = info->output_semantic_name[reg];
      out.semantic_index = info->output_semantic_index[reg];

      for (unsigned comp = 0; comp < 4; comp++) {
         tmp = ac_build_gep0(&ctx->ac, vertexptr,
                             LLVMConstInt(ctx->i32, 4 * reg + comp, false));
         out.values[comp] = LLVMBuildLoad(builder, tmp, "");
         out.vertex_stream[comp] =
            (info->output_streams[reg] >> (2 * comp)) & 3;
      }

      si_emit_streamout_output(ctx, so_buffer, offset, &so->output[i], &out);
   }
}
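
/* Note on units in build_streamout_vertex: wg_offset_dw and so->stride are
 * in dwords, so the per-vertex byte offset is
 *    (wg_offset_dw + offset_vtx * stride_dw) << 2,
 * e.g. a workgroup offset of 16 dwords, vertex 2 and a stride of 4 dwords
 * gives (16 + 2 * 4) * 4 = 96 bytes into the streamout buffer.
 */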

struct ngg_streamout {
   LLVMValueRef num_vertices;

   /* per-thread data */
   LLVMValueRef prim_enable[4]; /* i1 per stream */
   LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */

   /* Output */
   LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
};

/**
 * Build streamout logic.
 *
 * Implies a barrier.
 *
 * Writes number of emitted primitives to gs_ngg_scratch[4:8].
 *
 * Clobbers gs_ngg_scratch[8:].
 */
static void build_streamout(struct si_shader_context *ctx,
                            struct ngg_streamout *nggso)
{
   struct si_shader_info *info = &ctx->shader->selector->info;
   struct pipe_stream_output_info *so = &ctx->shader->selector->so;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
   LLVMValueRef tid = get_thread_id_in_tg(ctx);
   LLVMValueRef tmp, tmp2;
   LLVMValueRef i32_2 = LLVMConstInt(ctx->i32, 2, false);
   LLVMValueRef i32_4 = LLVMConstInt(ctx->i32, 4, false);
   LLVMValueRef i32_8 = LLVMConstInt(ctx->i32, 8, false);
   LLVMValueRef so_buffer[4] = {};
   unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
                               (nggso->vertices[2] ? 1 : 0);
   LLVMValueRef prim_stride_dw[4] = {};
   LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->i32);
   int stream_for_buffer[4] = { -1, -1, -1, -1 };
   unsigned bufmask_for_stream[4] = {};
   bool isgs = ctx->type == PIPE_SHADER_GEOMETRY;
   unsigned scratch_emit_base = isgs ? 4 : 0;
   LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->i32_0;
   unsigned scratch_offset_base = isgs ? 8 : 4;
   LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;
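
   /* Resulting gs_ngg_scratch layout, in dwords: for the GS path, [0:3]
    * hold the per-stream generated-primitive counts, [4:7] the per-stream
    * emit counts and [8:11] the per-buffer offsets, with the workgroup
    * scans using dword 12 and up; for the VS/TES path the emit counts
    * start at 0 and the offsets at 4.
    */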

   ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);

   /* Determine the mapping of streamout buffers to vertex streams. */
   for (unsigned i = 0; i < so->num_outputs; ++i) {
      unsigned buf = so->output[i].output_buffer;
      unsigned stream = so->output[i].stream;
      assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
      stream_for_buffer[buf] = stream;
      bufmask_for_stream[stream] |= 1 << buf;
   }

   for (unsigned buffer = 0; buffer < 4; ++buffer) {
      if (stream_for_buffer[buffer] == -1)
         continue;

      assert(so->stride[buffer]);

      tmp = LLVMConstInt(ctx->i32, so->stride[buffer], false);
      prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
      prim_stride_dw_vgpr = ac_build_writelane(
         &ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
         LLVMConstInt(ctx->i32, buffer, false));

      so_buffer[buffer] = ac_build_load_to_sgpr(
         &ctx->ac, buf_ptr,
         LLVMConstInt(ctx->i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
   }

   tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->i32_0, "");
   ac_build_ifcc(&ctx->ac, tmp, 5200);
   {
      LLVMTypeRef gdsptr = LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GDS);
      LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->i32_0, gdsptr, "");

      /* Advance the streamout offsets in GDS. */
      LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
      LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");

      tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
      ac_build_ifcc(&ctx->ac, tmp, 5210);
      {
         if (isgs) {
            tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
            tmp = LLVMBuildLoad(builder, tmp, "");
         } else {
            tmp = ac_build_writelane(&ctx->ac, ctx->i32_0,
                                     ngg_get_prim_cnt(ctx), ctx->i32_0);
         }
         LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);

         unsigned swizzle[4];
         int unused_stream = -1;
         for (unsigned stream = 0; stream < 4; ++stream) {
            if (!info->num_stream_output_components[stream]) {
               unused_stream = stream;
               break;
            }
         }
         for (unsigned buffer = 0; buffer < 4; ++buffer) {
            if (stream_for_buffer[buffer] >= 0) {
               swizzle[buffer] = stream_for_buffer[buffer];
            } else {
               assert(unused_stream >= 0);
               swizzle[buffer] = unused_stream;
            }
         }

         tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
                                     swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
         tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");

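         /* llvm.amdgcn.ds.ordered.add adds this workgroup's dword count to
          * the per-buffer streamout offsets in GDS in launch order (keyed
          * by the ordered ID) and returns the pre-add value, i.e. where
          * this workgroup's data starts in each buffer.
          */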
         LLVMValueRef args[] = {
            LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
            tmp,
            ctx->i32_0, // ordering
            ctx->i32_0, // scope
            ctx->ac.i1false, // isVolatile
            LLVMConstInt(ctx->i32, 4 << 24, false), // OA index
            ctx->ac.i1true, // wave release
            ctx->ac.i1true, // wave done
         };
         tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
                                  ctx->i32, args, ARRAY_SIZE(args), 0);

         /* Keep offsets in a VGPR for quick retrieval via readlane by
          * the first wave for bounds checking, and also store in LDS
          * for retrieval by all waves later. */
         LLVMBuildStore(builder, tmp, offsets_vgpr);

         tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
                             scratch_offset_basev, "");
         tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
         LLVMBuildStore(builder, tmp, tmp2);
      }
      ac_build_endif(&ctx->ac, 5210);

      /* Determine the max emit per buffer. This is done via the SALU, in part
       * because LLVM can't generate divide-by-multiply if we try to do this
       * via VALU with one lane per buffer.
       */
      LLVMValueRef max_emit[4] = {};
      for (unsigned buffer = 0; buffer < 4; ++buffer) {
         if (stream_for_buffer[buffer] == -1)
            continue;

         LLVMValueRef bufsize_dw =
            LLVMBuildLShr(builder,
                          LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
                          i32_2, "");

         tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
         LLVMValueRef offset_dw =
            ac_build_readlane(&ctx->ac, tmp,
                              LLVMConstInt(ctx->i32, buffer, false));

         tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
         tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");

         tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
         max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->i32_0, tmp, "");
      }

      /* Determine the number of emitted primitives per stream and fixup the
       * GDS counter if necessary.
       *
       * This is complicated by the fact that a single stream can emit to
       * multiple buffers (but luckily not vice versa).
       */
      LLVMValueRef emit_vgpr = ctx->i32_0;

      for (unsigned stream = 0; stream < 4; ++stream) {
         if (!info->num_stream_output_components[stream])
            continue;

         tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
         LLVMValueRef generated =
            ac_build_readlane(&ctx->ac, tmp,
                              LLVMConstInt(ctx->i32, stream, false));

         LLVMValueRef emit = generated;
         for (unsigned buffer = 0; buffer < 4; ++buffer) {
            if (stream_for_buffer[buffer] == stream)
               emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
         }

         emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
                                        LLVMConstInt(ctx->i32, stream, false));

         /* Fixup the offset using a plain GDS atomic if we overflowed. */
         tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
         ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
         tmp = LLVMBuildLShr(builder,
                             LLVMConstInt(ctx->i32, bufmask_for_stream[stream], false),
                             ac_get_thread_id(&ctx->ac), "");
         tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
         ac_build_ifcc(&ctx->ac, tmp, 5222);
         {
            tmp = LLVMBuildSub(builder, generated, emit, "");
            tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
            tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
            LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
                               LLVMAtomicOrderingMonotonic, false);
         }
         ac_build_endif(&ctx->ac, 5222);
         ac_build_endif(&ctx->ac, 5221);
      }

      tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
      ac_build_ifcc(&ctx->ac, tmp, 5225);
      {
         tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
                            scratch_emit_basev, "");
         tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
         LLVMBuildStore(builder, emit_vgpr, tmp);
      }
      ac_build_endif(&ctx->ac, 5225);
   }
   ac_build_endif(&ctx->ac, 5200);

   /* Determine the workgroup-relative per-thread / primitive offset into
    * the streamout buffers */
   struct ac_wg_scan primemit_scan[4] = {};

   if (isgs) {
      for (unsigned stream = 0; stream < 4; ++stream) {
         if (!info->num_stream_output_components[stream])
            continue;

         primemit_scan[stream].enable_exclusive = true;
         primemit_scan[stream].op = nir_op_iadd;
         primemit_scan[stream].src = nggso->prim_enable[stream];
         primemit_scan[stream].scratch =
            ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
                          LLVMConstInt(ctx->i32, 12 + 8 * stream, false));
         primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
         primemit_scan[stream].numwaves = get_tgsize(ctx);
         primemit_scan[stream].maxwaves = 8;
         ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
      }
   }

   ac_build_s_barrier(&ctx->ac);

   /* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
   LLVMValueRef wgoffset_dw[4] = {};

   {
      LLVMValueRef scratch_vgpr;

      tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
      scratch_vgpr = LLVMBuildLoad(builder, tmp, "");

      for (unsigned buffer = 0; buffer < 4; ++buffer) {
         if (stream_for_buffer[buffer] >= 0) {
            wgoffset_dw[buffer] = ac_build_readlane(
               &ctx->ac, scratch_vgpr,
               LLVMConstInt(ctx->i32, scratch_offset_base + buffer, false));
         }
      }

      for (unsigned stream = 0; stream < 4; ++stream) {
         if (info->num_stream_output_components[stream]) {
            nggso->emit[stream] = ac_build_readlane(
               &ctx->ac, scratch_vgpr,
               LLVMConstInt(ctx->i32, scratch_emit_base + stream, false));
         }
      }
   }

   /* Write out primitive data */
   for (unsigned stream = 0; stream < 4; ++stream) {
      if (!info->num_stream_output_components[stream])
         continue;

      if (isgs) {
         ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
      } else {
         primemit_scan[stream].result_exclusive = tid;
      }

      tmp = LLVMBuildICmp(builder, LLVMIntULT,
                          primemit_scan[stream].result_exclusive,
                          nggso->emit[stream], "");
      tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
      ac_build_ifcc(&ctx->ac, tmp, 5240);
      {
         LLVMValueRef offset_vtx =
            LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
                         nggso->num_vertices, "");

         for (unsigned i = 0; i < max_num_vertices; ++i) {
            tmp = LLVMBuildICmp(builder, LLVMIntULT,
                                LLVMConstInt(ctx->i32, i, false),
                                nggso->num_vertices, "");
            ac_build_ifcc(&ctx->ac, tmp, 5241);
            build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
                                   stream, offset_vtx, nggso->vertices[i]);
            ac_build_endif(&ctx->ac, 5241);
            offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->i32_1, "");
         }
      }
      ac_build_endif(&ctx->ac, 5240);
   }
}

static unsigned ngg_nogs_vertex_size(struct si_shader *shader)
{
   unsigned lds_vertex_size = 0;

   /* The edge flag is always stored in the last element, which also
    * serves as padding to reduce LDS bank conflicts. */
   if (shader->selector->so.num_outputs)
      lds_vertex_size = 4 * shader->selector->info.num_outputs + 1;
   if (shader->selector->info.writes_edgeflag)
      lds_vertex_size = MAX2(lds_vertex_size, 1);

   /* LDS size for passing data from GS to ES.
    * GS stores Primitive IDs into LDS at the address corresponding
    * to the ES thread of the provoking vertex. All ES threads
    * load and export PrimitiveID for their thread.
    */
   if (shader->selector->type == PIPE_SHADER_VERTEX &&
       shader->key.mono.u.vs_export_prim_id)
      lds_vertex_size = MAX2(lds_vertex_size, 1);

   return lds_vertex_size;
}
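
/* Example: with streamout enabled and 8 shader outputs, each vertex
 * occupies 4 * 8 + 1 = 33 dwords in LDS; the odd size is what avoids
 * bank conflicts (see ngg_nogs_vertex_ptr below).
 */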

/**
 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
 * for the vertex outputs.
 */
static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx,
                                        LLVMValueRef vtxid)
{
   /* The extra dword is used to avoid LDS bank conflicts. */
   unsigned vertex_size = ngg_nogs_vertex_size(ctx->shader);
   LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, vertex_size);
   LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
   LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
   return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
}

/**
 * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
 */
void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
                             unsigned max_outputs,
                             LLVMValueRef *addrs)
{
   struct si_shader_context *ctx = si_shader_context_from_abi(abi);
   struct si_shader_selector *sel = ctx->shader->selector;
   struct si_shader_info *info = &sel->info;
   struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp, tmp2;

   assert(!ctx->shader->is_gs_copy_shader);
   assert(info->num_outputs <= max_outputs);

   LLVMValueRef vertex_ptr = NULL;

   if (sel->so.num_outputs || sel->info.writes_edgeflag)
      vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));

   for (unsigned i = 0; i < info->num_outputs; i++) {
      outputs[i].semantic_name = info->output_semantic_name[i];
      outputs[i].semantic_index = info->output_semantic_index[i];

      for (unsigned j = 0; j < 4; j++) {
         outputs[i].vertex_stream[j] =
            (info->output_streams[i] >> (2 * j)) & 3;

         /* TODO: we may store more outputs than streamout needs,
          * but streamout performance isn't that important.
          */
         if (sel->so.num_outputs) {
            tmp = ac_build_gep0(&ctx->ac, vertex_ptr,
                                LLVMConstInt(ctx->i32, 4 * i + j, false));
            tmp2 = LLVMBuildLoad(builder, addrs[4 * i + j], "");
            tmp2 = ac_to_integer(&ctx->ac, tmp2);
            LLVMBuildStore(builder, tmp2, tmp);
         }
      }

      /* Store the edgeflag at the end (if streamout is enabled) */
      if (info->output_semantic_name[i] == TGSI_SEMANTIC_EDGEFLAG &&
          sel->info.writes_edgeflag) {
         LLVMValueRef edgeflag = LLVMBuildLoad(builder, addrs[4 * i], "");
         /* The output is a float, but the hw expects a 1-bit integer. */
         edgeflag = LLVMBuildFPToUI(ctx->ac.builder, edgeflag, ctx->i32, "");
         edgeflag = ac_build_umin(&ctx->ac, edgeflag, ctx->i32_1);

         tmp = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
         tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
         LLVMBuildStore(builder, edgeflag, tmp);
      }
   }

   bool unterminated_es_if_block =
      gfx10_is_ngg_passthrough(ctx->shader) &&
      !ctx->screen->use_ngg_streamout && /* no query buffer */
      (ctx->type != PIPE_SHADER_VERTEX ||
       !ctx->shader->key.mono.u.vs_export_prim_id);

   if (!unterminated_es_if_block)
      ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

   LLVMValueRef is_gs_thread = si_is_gs_thread(ctx);
   LLVMValueRef is_es_thread = si_is_es_thread(ctx);
   LLVMValueRef vtxindex[] = {
      si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16),
      si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16),
      si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16),
   };

   /* Determine the number of vertices per primitive. */
   unsigned num_vertices;
   LLVMValueRef num_vertices_val = ngg_get_vertices_per_prim(ctx, &num_vertices);

   /* Streamout */
   LLVMValueRef emitted_prims = NULL;

   if (sel->so.num_outputs) {
      assert(!unterminated_es_if_block);

      struct ngg_streamout nggso = {};
      nggso.num_vertices = num_vertices_val;
      nggso.prim_enable[0] = is_gs_thread;

      for (unsigned i = 0; i < num_vertices; ++i)
         nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);

      build_streamout(ctx, &nggso);
      emitted_prims = nggso.emit[0];
   }

   LLVMValueRef user_edgeflags[3] = {};

   if (sel->info.writes_edgeflag) {
      assert(!unterminated_es_if_block);

      /* Streamout already inserted the barrier, so don't insert it again. */
      if (!sel->so.num_outputs)
         ac_build_s_barrier(&ctx->ac);

      ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
      /* Load edge flags from ES threads and store them into VGPRs in GS threads. */
      for (unsigned i = 0; i < num_vertices; i++) {
         tmp = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
         tmp2 = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
         tmp = ac_build_gep0(&ctx->ac, tmp, tmp2);
         tmp = LLVMBuildLoad(builder, tmp, "");
         tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");

         user_edgeflags[i] = ac_build_alloca_undef(&ctx->ac, ctx->i1, "");
         LLVMBuildStore(builder, tmp, user_edgeflags[i]);
      }
      ac_build_endif(&ctx->ac, 5400);
   }

   /* Copy Primitive IDs from GS threads to the LDS address corresponding
    * to the ES thread of the provoking vertex.
    */
   if (ctx->type == PIPE_SHADER_VERTEX &&
       ctx->shader->key.mono.u.vs_export_prim_id) {
      assert(!unterminated_es_if_block);

      /* Streamout and edge flags use LDS. Make it idle, so that we can reuse it. */
      if (sel->so.num_outputs || sel->info.writes_edgeflag)
         ac_build_s_barrier(&ctx->ac);

      ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
      /* Extract the PROVOKING_VTX_INDEX field. */
      LLVMValueRef provoking_vtx_in_prim =
         si_unpack_param(ctx, ctx->vs_state_bits, 4, 2);

      /* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
      LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
      LLVMValueRef provoking_vtx_index =
         LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");
      LLVMValueRef vertex_ptr = ngg_nogs_vertex_ptr(ctx, provoking_vtx_index);

      LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.gs_prim_id),
                     ac_build_gep0(&ctx->ac, vertex_ptr, ctx->i32_0));
      ac_build_endif(&ctx->ac, 5400);
   }

   /* Update query buffer */
   if (ctx->screen->use_ngg_streamout &&
       !info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
      assert(!unterminated_es_if_block);

      tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
      tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
      ac_build_ifcc(&ctx->ac, tmp, 5029); /* if (STREAMOUT_QUERY_ENABLED) */
      tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
      ac_build_ifcc(&ctx->ac, tmp, 5030);
      tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
                          sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
      ac_build_ifcc(&ctx->ac, tmp, 5031);
      {
         LLVMValueRef args[] = {
            ngg_get_prim_cnt(ctx),
            ngg_get_query_buf(ctx),
            LLVMConstInt(ctx->i32, 16, false), /* offset of stream[0].generated_primitives */
            ctx->i32_0, /* soffset */
            ctx->i32_0, /* cachepolicy */
         };

         if (sel->so.num_outputs) {
            args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->i32_1);
            args[2] = ac_build_writelane(&ctx->ac, args[2],
                                         LLVMConstInt(ctx->i32, 24, false), ctx->i32_1);
         }

         /* TODO: should this be 64-bit atomics? */
         ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
                            ctx->i32, args, 5, 0);
      }
      ac_build_endif(&ctx->ac, 5031);
      ac_build_endif(&ctx->ac, 5030);
      ac_build_endif(&ctx->ac, 5029);
   }

   /* Build the primitive export. */
   if (!gfx10_ngg_export_prim_early(ctx->shader)) {
      assert(!unterminated_es_if_block);
      gfx10_ngg_build_export_prim(ctx, user_edgeflags);
   }

   /* Export per-vertex data (positions and parameters). */
   if (!unterminated_es_if_block)
      ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
   {
      unsigned i;

      /* Unconditionally (re-)load the values for proper SSA form. */
      for (i = 0; i < info->num_outputs; i++) {
         for (unsigned j = 0; j < 4; j++) {
            outputs[i].values[j] =
               LLVMBuildLoad(builder,
                             addrs[4 * i + j],
                             "");
         }
      }

      if (ctx->shader->key.mono.u.vs_export_prim_id) {
         outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
         outputs[i].semantic_index = 0;

         if (ctx->type == PIPE_SHADER_VERTEX) {
            /* Wait for GS stores to finish. */
            ac_build_s_barrier(&ctx->ac);

            tmp = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
            tmp = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
            outputs[i].values[0] = LLVMBuildLoad(builder, tmp, "");
         } else {
            assert(ctx->type == PIPE_SHADER_TESS_EVAL);
            outputs[i].values[0] = si_get_primitive_id(ctx, 0);
         }

         outputs[i].values[0] = ac_to_float(&ctx->ac, outputs[i].values[0]);
         for (unsigned j = 1; j < 4; j++)
            outputs[i].values[j] = LLVMGetUndef(ctx->f32);

         memset(outputs[i].vertex_stream, 0,
                sizeof(outputs[i].vertex_stream));
         i++;
      }

      si_llvm_export_vs(ctx, outputs, i);
   }
   ac_build_endif(&ctx->ac, 6002);
}

static LLVMValueRef
ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
{
   const struct si_shader_selector *sel = ctx->shader->selector;
   const struct si_shader_info *info = &sel->info;

   LLVMTypeRef elements[2] = {
      LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
      LLVMArrayType(ctx->ac.i8, 4),
   };
   LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
   type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
   return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}

/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive
 * for rasterization.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
static LLVMValueRef
ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
{
   struct si_shader_selector *sel = ctx->shader->selector;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

   /* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
   unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
   if (write_stride_2exp) {
      LLVMValueRef row =
         LLVMBuildLShr(builder, vertexidx,
                       LLVMConstInt(ctx->ac.i32, 5, false), "");
      LLVMValueRef swizzle =
         LLVMBuildAnd(builder, row,
                      LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
                                   false), "");
      vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
   }
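
   /* Example: with gs_max_out_vertices == 4, write_stride_2exp == 2, so
    * row 0 (vertices 0..31) is stored unswizzled, row 1 XORs the index
    * with 1, row 2 with 2, row 3 with 3, and the pattern repeats every
    * four rows, rotating vertices across LDS banks from row to row.
    */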

   return ac_build_gep0(&ctx->ac, storage, vertexidx);
}

static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
                       LLVMValueRef emitidx)
{
   struct si_shader_selector *sel = ctx->shader->selector;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp;

   tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
   tmp = LLVMBuildMul(builder, tmp, gsthread, "");
   const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
   return ngg_gs_vertex_ptr(ctx, vertexidx);
}

static LLVMValueRef
ngg_gs_get_emit_output_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
                           unsigned out_idx)
{
   LLVMValueRef gep_idx[3] = {
      ctx->ac.i32_0, /* implied C-style array */
      ctx->ac.i32_0, /* first struct entry */
      LLVMConstInt(ctx->ac.i32, out_idx, false),
   };
   return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}

static LLVMValueRef
ngg_gs_get_emit_primflag_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
                             unsigned stream)
{
   LLVMValueRef gep_idx[3] = {
      ctx->ac.i32_0, /* implied C-style array */
      ctx->ac.i32_1, /* second struct entry */
      LLVMConstInt(ctx->ac.i32, stream, false),
   };
   return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}

void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
                              unsigned stream,
                              LLVMValueRef *addrs)
{
   const struct si_shader_selector *sel = ctx->shader->selector;
   const struct si_shader_info *info = &sel->info;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp;
   const LLVMValueRef vertexidx =
      LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

   /* If this thread has already emitted the declared maximum number of
    * vertices, skip the write: excessive vertex emissions are not
    * supposed to have any effect.
    */
   const LLVMValueRef can_emit =
      LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
                    LLVMConstInt(ctx->i32, sel->gs_max_out_vertices, false), "");

   tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
   tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
   LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

   ac_build_ifcc(&ctx->ac, can_emit, 9001);

   const LLVMValueRef vertexptr =
      ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
   unsigned out_idx = 0;
   for (unsigned i = 0; i < info->num_outputs; i++) {
      for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
         if (!(info->output_usagemask[i] & (1 << chan)) ||
             ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
            continue;

         LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
         out_val = ac_to_integer(&ctx->ac, out_val);
         LLVMBuildStore(builder, out_val,
                        ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx));
      }
   }
   assert(out_idx * 4 == sel->gsvs_vertex_size);

   /* Determine and store whether this vertex completed a primitive. */
   const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

   tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
   const LLVMValueRef iscompleteprim =
      LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

   /* Since the geometry shader emits triangle strips, we need to
    * track which primitive is odd and swap vertex indices to get
    * the correct vertex order.
    */
   LLVMValueRef is_odd = ctx->i1false;
   if (stream == 0 && u_vertices_per_prim(sel->gs_output_prim) == 3) {
      tmp = LLVMBuildAnd(builder, curverts, ctx->i32_1, "");
      is_odd = LLVMBuildICmp(builder, LLVMIntEQ, tmp, ctx->i32_1, "");
   }

   tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
   LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

   /* The per-vertex primitive flag encoding:
    * bit 0: whether this vertex finishes a primitive
    * bit 1: whether the primitive is odd (if we are emitting triangle strips)
    */
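   /* Example for a triangle strip: the third emitted vertex (curverts == 2)
    * completes the first, even triangle and gets flag 0x1; the fourth
    * (curverts == 3) completes an odd triangle and gets flag 0x3.
    */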
   tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
   tmp = LLVMBuildOr(builder, tmp,
                     LLVMBuildShl(builder,
                                  LLVMBuildZExt(builder, is_odd, ctx->ac.i8, ""),
                                  ctx->ac.i8_1, ""), "");
   LLVMBuildStore(builder, tmp, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream));

   tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
   tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
   LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);

   ac_build_endif(&ctx->ac, 9001);
}

void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
{
   /* Zero out the part of LDS scratch that is used to accumulate the
    * per-stream generated primitive count.
    */
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
   LLVMValueRef tid = get_thread_id_in_tg(ctx);
   LLVMValueRef tmp;

   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->i32, 4, false), "");
   ac_build_ifcc(&ctx->ac, tmp, 5090);
   {
      LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
      LLVMBuildStore(builder, ctx->i32_0, ptr);
   }
   ac_build_endif(&ctx->ac, 5090);

   ac_build_s_barrier(&ctx->ac);
}

void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
{
   const struct si_shader_selector *sel = ctx->shader->selector;
   const struct si_shader_info *info = &sel->info;
   const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
   LLVMValueRef tmp, tmp2;

   /* Zero out remaining (non-emitted) primitive flags.
    *
    * Note: Alternatively, we could pass the relevant gs_next_vertex to
    *       the emit threads via LDS. This is likely worse in the expected
    *       typical case where each GS thread emits the full set of
    *       vertices.
    */
   for (unsigned stream = 0; stream < 4; ++stream) {
      if (!info->num_stream_output_components[stream])
         continue;

      const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

      ac_build_bgnloop(&ctx->ac, 5100);

      const LLVMValueRef vertexidx =
         LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
      tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
                          LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
      ac_build_ifcc(&ctx->ac, tmp, 5101);
      ac_build_break(&ctx->ac);
      ac_build_endif(&ctx->ac, 5101);

      tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
      LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

      tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
      LLVMBuildStore(builder, i8_0, ngg_gs_get_emit_primflag_ptr(ctx, tmp, stream));

      ac_build_endloop(&ctx->ac, 5100);
   }

   /* Accumulate generated primitives counts across the entire threadgroup. */
   for (unsigned stream = 0; stream < 4; ++stream) {
      if (!info->num_stream_output_components[stream])
         continue;

      LLVMValueRef numprims =
         LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
      numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);

      tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->i32_0, "");
      ac_build_ifcc(&ctx->ac, tmp, 5105);
      {
         LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
                            ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
                                          LLVMConstInt(ctx->i32, stream, false)),
                            numprims, LLVMAtomicOrderingMonotonic, false);
      }
      ac_build_endif(&ctx->ac, 5105);
   }

   ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

   ac_build_s_barrier(&ctx->ac);

   const LLVMValueRef tid = get_thread_id_in_tg(ctx);
   LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);

   /* Streamout */
   if (sel->so.num_outputs) {
      struct ngg_streamout nggso = {};

      nggso.num_vertices = LLVMConstInt(ctx->i32, verts_per_prim, false);

      LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
      for (unsigned stream = 0; stream < 4; ++stream) {
         if (!info->num_stream_output_components[stream])
            continue;

         tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream), "");
         tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
         tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
         nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
      }

      for (unsigned i = 0; i < verts_per_prim; ++i) {
         tmp = LLVMBuildSub(builder, tid,
                            LLVMConstInt(ctx->i32, verts_per_prim - i - 1, false), "");
         tmp = ngg_gs_vertex_ptr(ctx, tmp);
         nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
      }

      build_streamout(ctx, &nggso);
   }

   /* Write shader query data. */
   if (ctx->screen->use_ngg_streamout) {
      tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
      tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
      ac_build_ifcc(&ctx->ac, tmp, 5109); /* if (STREAMOUT_QUERY_ENABLED) */
      unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
      tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
                          LLVMConstInt(ctx->i32, num_query_comps, false), "");
      ac_build_ifcc(&ctx->ac, tmp, 5110);
      {
         LLVMValueRef offset;
         tmp = tid;
         if (sel->so.num_outputs)
            tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->i32, 3, false), "");
         offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 32, false), "");
         if (sel->so.num_outputs) {
            tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->i32, 2, false), "");
            tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 8, false), "");
            offset = LLVMBuildAdd(builder, offset, tmp, "");
         }

         tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
         LLVMValueRef args[] = {
            tmp,
            ngg_get_query_buf(ctx),
            offset,
            LLVMConstInt(ctx->i32, 16, false), /* soffset */
            ctx->i32_0, /* cachepolicy */
         };
         ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
                            ctx->i32, args, 5, 0);
      }
      ac_build_endif(&ctx->ac, 5110);
      ac_build_endif(&ctx->ac, 5109);
   }

   /* Determine vertex liveness. */
   LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");

   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
   ac_build_ifcc(&ctx->ac, tmp, 5120);
   {
      for (unsigned i = 0; i < verts_per_prim; ++i) {
         const LLVMValueRef primidx =
            LLVMBuildAdd(builder, tid,
                         LLVMConstInt(ctx->ac.i32, i, false), "");

         if (i > 0) {
            tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
            ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
         }

         /* Load primitive liveness */
         tmp = ngg_gs_vertex_ptr(ctx, primidx);
         tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
         const LLVMValueRef primlive =
            LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

         tmp = LLVMBuildLoad(builder, vertliveptr, "");
         tmp = LLVMBuildOr(builder, tmp, primlive, "");
         LLVMBuildStore(builder, tmp, vertliveptr);

         if (i > 0)
            ac_build_endif(&ctx->ac, 5121 + i);
      }
   }
   ac_build_endif(&ctx->ac, 5120);

   /* Inclusive scan addition across the current wave. */
   LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
   struct ac_wg_scan vertlive_scan = {};
   vertlive_scan.op = nir_op_iadd;
   vertlive_scan.enable_reduce = true;
   vertlive_scan.enable_exclusive = true;
   vertlive_scan.src = vertlive;
   vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->i32_0);
   vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
   vertlive_scan.numwaves = get_tgsize(ctx);
   vertlive_scan.maxwaves = 8;

   ac_build_wg_scan(&ctx->ac, &vertlive_scan);

   /* Skip all exports (including index exports) when possible. At least on
    * early gfx10 revisions this is also to avoid hangs.
    */
   LLVMValueRef have_exports =
      LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
   num_emit_threads =
      LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

   /* Allocate export space. Send this message as early as possible, to
    * hide the latency of the SQ <-> SPI roundtrip.
    *
    * Note: We could consider compacting primitives for export as well.
    *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
    *       prim data per clock and skips null primitives at no additional
    *       cost. So compacting primitives can only be beneficial when
    *       there are 4 or more contiguous null primitives in the export
    *       (in the common case of single-dword prim exports).
    */
   ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
                                 vertlive_scan.result_reduce, num_emit_threads);

   /* Setup the reverse vertex compaction permutation. We re-use stream 1
    * of the primitive liveness flags, relying on the fact that each
    * threadgroup can have at most 256 threads. */
   ac_build_ifcc(&ctx->ac, vertlive, 5130);
   {
      tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
      tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
      LLVMBuildStore(builder, tmp2, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1));
   }
   ac_build_endif(&ctx->ac, 5130);

   ac_build_s_barrier(&ctx->ac);

   /* Export primitive data */
   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
   ac_build_ifcc(&ctx->ac, tmp, 5140);
   {
      LLVMValueRef flags;
      struct ac_ngg_prim prim = {};
      prim.num_vertices = verts_per_prim;

      tmp = ngg_gs_vertex_ptr(ctx, tid);
      flags = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
      prim.isnull = LLVMBuildNot(builder, LLVMBuildTrunc(builder, flags, ctx->i1, ""), "");

      for (unsigned i = 0; i < verts_per_prim; ++i) {
         prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
                                      LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
         prim.edgeflag[i] = ctx->ac.i1false;
      }

      /* Geometry shaders output triangle strips, but NGG expects triangles.
       * We need to change the vertex order for odd triangles to get correct
       * front/back facing by swapping 2 vertex indices, but we also have to
       * keep the provoking vertex in the same place.
       *
       * If the first vertex is provoking, swap index 1 and 2.
       * If the last vertex is provoking, swap index 0 and 1.
       */
      if (verts_per_prim == 3) {
         LLVMValueRef is_odd = LLVMBuildLShr(builder, flags, ctx->ac.i8_1, "");
         is_odd = LLVMBuildTrunc(builder, is_odd, ctx->i1, "");
         LLVMValueRef flatshade_first =
            LLVMBuildICmp(builder, LLVMIntEQ,
                          si_unpack_param(ctx, ctx->vs_state_bits, 4, 2),
                          ctx->i32_0, "");

         struct ac_ngg_prim in = prim;
         prim.index[0] = LLVMBuildSelect(builder, flatshade_first,
                                         in.index[0],
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[1], in.index[0], ""), "");
         prim.index[1] = LLVMBuildSelect(builder, flatshade_first,
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[2], in.index[1], ""),
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[0], in.index[1], ""), "");
         prim.index[2] = LLVMBuildSelect(builder, flatshade_first,
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[1], in.index[2], ""),
                                         in.index[2], "");
      }

      ac_build_export_prim(&ctx->ac, &prim);
   }
   ac_build_endif(&ctx->ac, 5140);

   /* Export position and parameter data */
   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
   ac_build_ifcc(&ctx->ac, tmp, 5145);
   {
      struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];

      tmp = ngg_gs_vertex_ptr(ctx, tid);
      tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1), "");
      tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
      const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

      unsigned out_idx = 0;
      for (unsigned i = 0; i < info->num_outputs; i++) {
         outputs[i].semantic_name = info->output_semantic_name[i];
         outputs[i].semantic_index = info->output_semantic_index[i];

         for (unsigned j = 0; j < 4; j++, out_idx++) {
            tmp = ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx);
            tmp = LLVMBuildLoad(builder, tmp, "");
            outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
            outputs[i].vertex_stream[j] =
               (info->output_streams[i] >> (2 * j)) & 3;
         }
      }

      si_llvm_export_vs(ctx, outputs, info->num_outputs);
   }
   ac_build_endif(&ctx->ac, 5145);
}

static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
                                     unsigned min_verts_per_prim, bool use_adjacency)
{
   unsigned max_reuse = max_esverts - min_verts_per_prim;
   if (use_adjacency)
      max_reuse /= 2;
   *max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}
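
/* Example: with 128 ES vertices, triangles (min_verts_per_prim == 3) and no
 * adjacency, max_reuse is 125, so at most 126 GS prims fit: the first prim
 * consumes 3 vertices and every later one needs at least 1 new vertex.
 */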

/**
 * Determine subgroup information like maximum number of vertices and prims.
 *
 * This happens before the shader is uploaded, since LDS relocations during
 * upload depend on the subgroup size.
 */
void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
{
   const struct si_shader_selector *gs_sel = shader->selector;
   const struct si_shader_selector *es_sel =
      shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
   const enum pipe_shader_type gs_type = gs_sel->type;
   const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
   const unsigned input_prim = si_get_input_prim(gs_sel);
   const bool use_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
                              input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
   const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
   const unsigned min_verts_per_prim =
      gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;

   /* All these are in dwords: */
   /* We can't allow using the whole LDS, because GS waves compete with
    * other shader stages for LDS space.
    *
    * TODO: We should really take the shader's internal LDS use into
    *       account. The linker will fail if the size is greater than
    *       8K dwords.
    */
   const unsigned max_lds_size = 8 * 1024 - 768;
   const unsigned target_lds_size = max_lds_size;
   unsigned esvert_lds_size = 0;
   unsigned gsprim_lds_size = 0;

   /* All these are per subgroup: */
   bool max_vert_out_per_gs_instance = false;
   unsigned max_esverts_base = 128;
   unsigned max_gsprims_base = 128; /* default prim group size clamp */

   /* Hardware has the following non-natural restrictions on the value
    * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of the draw:
    * - at most 252 for any line input primitive type
    * - at most 251 for any quad input primitive type
    * - at most 251 for triangle strips with adjacency (this happens to
    *   be the natural limit for triangle *lists* with adjacency)
    */
   max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

   if (gs_type == PIPE_SHADER_GEOMETRY) {
      unsigned max_out_verts_per_gsprim =
         gs_sel->gs_max_out_vertices * gs_num_invocations;

      if (max_out_verts_per_gsprim <= 256) {
         if (max_out_verts_per_gsprim) {
            max_gsprims_base = MIN2(max_gsprims_base,
                                    256 / max_out_verts_per_gsprim);
         }
      } else {
         /* Use special multi-cycling mode in which each GS
          * instance gets its own subgroup. Does not work with
          * tessellation. */
         max_vert_out_per_gs_instance = true;
         max_gsprims_base = 1;
         max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
      }

      esvert_lds_size = es_sel->esgs_itemsize / 4;
      gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
   } else {
      /* VS and TES. */
      /* LDS size for passing data from ES to GS. */
      esvert_lds_size = ngg_nogs_vertex_size(shader);
   }

   unsigned max_gsprims = max_gsprims_base;
   unsigned max_esverts = max_esverts_base;

   if (esvert_lds_size)
      max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
   if (gsprim_lds_size)
      max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

   max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
   clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
   assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

   if (esvert_lds_size || gsprim_lds_size) {
      /* Now that we have a rough proportionality between esverts
       * and gsprims based on the primitive type, scale both of them
       * down simultaneously based on required LDS space.
       *
       * We could be smarter about this if we knew how much vertex
       * reuse to expect.
       */
      unsigned lds_total = max_esverts * esvert_lds_size +
                           max_gsprims * gsprim_lds_size;
      if (lds_total > target_lds_size) {
         max_esverts = max_esverts * target_lds_size / lds_total;
         max_gsprims = max_gsprims * target_lds_size / lds_total;

         max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
         clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
                                  min_verts_per_prim, use_adjacency);
         assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
      }
   }

   /* Round up towards full wave sizes for better ALU utilization. */
   if (!max_vert_out_per_gs_instance) {
      const unsigned wavesize = gs_sel->screen->ge_wave_size;
      unsigned orig_max_esverts;
      unsigned orig_max_gsprims;
      do {
         orig_max_esverts = max_esverts;
         orig_max_gsprims = max_gsprims;

         max_esverts = align(max_esverts, wavesize);
         max_esverts = MIN2(max_esverts, max_esverts_base);
         if (esvert_lds_size)
            max_esverts = MIN2(max_esverts,
                               (max_lds_size - max_gsprims * gsprim_lds_size) /
                               esvert_lds_size);
         max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

         max_gsprims = align(max_gsprims, wavesize);
         max_gsprims = MIN2(max_gsprims, max_gsprims_base);
         if (gsprim_lds_size)
            max_gsprims = MIN2(max_gsprims,
                               (max_lds_size - max_esverts * esvert_lds_size) /
                               gsprim_lds_size);
         clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
                                  min_verts_per_prim, use_adjacency);
         assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
      } while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
   }

   /* Hardware restriction: minimum value of max_esverts */
   max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);
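
   /* For triangles this floor is 26 ES vertices; together with the
    * hw_max_esverts adjustment below it keeps the final value at or above
    * the hardware minimum of 24 asserted at the end of this function.
    */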

   unsigned max_out_vertices =
      max_vert_out_per_gs_instance ? gs_sel->gs_max_out_vertices :
      gs_type == PIPE_SHADER_GEOMETRY ?
      max_gsprims * gs_num_invocations * gs_sel->gs_max_out_vertices :
      max_esverts;
   assert(max_out_vertices <= 256);

   unsigned prim_amp_factor = 1;
   if (gs_type == PIPE_SHADER_GEOMETRY) {
      /* Number of output primitives per GS input primitive after
       * GS instancing. */
      prim_amp_factor = gs_sel->gs_max_out_vertices;
   }

   /* The GE only checks against the maximum number of ES verts after
    * allocating a full GS primitive. So we need to ensure that whenever
    * this check passes, there is enough space for a full primitive without
    * vertex reuse.
    */
   shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
   shader->ngg.max_gsprims = max_gsprims;
   shader->ngg.max_out_verts = max_out_vertices;
   shader->ngg.prim_amp_factor = prim_amp_factor;
   shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;

   shader->gs_info.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
   shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;

   assert(shader->ngg.hw_max_esverts >= 24); /* HW limitation */
}