radeonsi/gfx10: fix the vertex order for triangle strips emitted by a GS
[mesa.git] / src / gallium / drivers / radeonsi / gfx10_shader_ngg.c
1 /*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "si_pipe.h"
25 #include "si_shader_internal.h"
26
27 #include "sid.h"
28
29 #include "util/u_memory.h"
30 #include "util/u_prim.h"
31
32 static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
33 {
34 return si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
35 }
36
37 static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
38 {
39 return si_unpack_param(ctx, ctx->merged_wave_info, 28, 4);
40 }
41
42 static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
43 {
44 LLVMBuilderRef builder = ctx->ac.builder;
45 LLVMValueRef tmp;
46 tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
47 LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
48 return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
49 }
50
51 static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
52 {
53 return si_unpack_param(ctx, ctx->gs_tg_info, 12, 9);
54 }
55
56 static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
57 {
58 return si_unpack_param(ctx, ctx->gs_tg_info, 22, 9);
59 }
60
61 static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
62 {
63 return si_unpack_param(ctx, ctx->gs_tg_info, 0, 11);
64 }
65
66 static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
67 {
68 LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
69
70 return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
71 LLVMConstInt(ctx->i32, GFX10_GS_QUERY_BUF, false));
72 }
73
74 /* Send GS Alloc Req message from the first wave of the group to SPI.
75 * Message payload is:
76 * - bits 0..10: vertices in group
77 * - bits 12..22: primitives in group
78 */
79 static void build_sendmsg_gs_alloc_req(struct si_shader_context *ctx,
80 LLVMValueRef vtx_cnt,
81 LLVMValueRef prim_cnt)
82 {
83 LLVMBuilderRef builder = ctx->ac.builder;
84 LLVMValueRef tmp;
85
86 tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
87 ac_build_ifcc(&ctx->ac, tmp, 5020);
88
89 tmp = LLVMBuildShl(builder, prim_cnt, LLVMConstInt(ctx->ac.i32, 12, false),"");
90 tmp = LLVMBuildOr(builder, tmp, vtx_cnt, "");
91 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_ALLOC_REQ, tmp);
92
93 ac_build_endif(&ctx->ac, 5020);
94 }
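
/* Worked example (a sketch, not from a real trace): a group with 96
 * vertices and 32 primitives sends (32 << 12) | 96 = 0x20060; bit 11
 * and bits 23..31 stay zero.
 */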
95
96 struct ngg_prim {
97 unsigned num_vertices;
98 LLVMValueRef isnull;
99 LLVMValueRef index[3];
100 LLVMValueRef edgeflag[3];
101 };
102
103 static void build_export_prim(struct si_shader_context *ctx,
104 const struct ngg_prim *prim)
105 {
106 LLVMBuilderRef builder = ctx->ac.builder;
107 struct ac_export_args args;
108 LLVMValueRef tmp;
109
110 tmp = LLVMBuildZExt(builder, prim->isnull, ctx->ac.i32, "");
111 args.out[0] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 31, false), "");
112
113 for (unsigned i = 0; i < prim->num_vertices; ++i) {
114 tmp = LLVMBuildShl(builder, prim->index[i],
115 LLVMConstInt(ctx->ac.i32, 10 * i, false), "");
116 args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
117 tmp = LLVMBuildZExt(builder, prim->edgeflag[i], ctx->ac.i32, "");
118 tmp = LLVMBuildShl(builder, tmp,
119 LLVMConstInt(ctx->ac.i32, 10 * i + 9, false), "");
120 args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
121 }
122
123 args.out[0] = LLVMBuildBitCast(builder, args.out[0], ctx->ac.f32, "");
124 args.out[1] = LLVMGetUndef(ctx->ac.f32);
125 args.out[2] = LLVMGetUndef(ctx->ac.f32);
126 args.out[3] = LLVMGetUndef(ctx->ac.f32);
127
128 args.target = V_008DFC_SQ_EXP_PRIM;
129 args.enabled_channels = 1;
130 args.done = true;
131 args.valid_mask = false;
132 args.compr = false;
133
134 ac_build_export(&ctx->ac, &args);
135 }
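
/* Encoding example (illustrative values): a non-null triangle with
 * indices 3, 4, 5 and all edge flags clear packs to
 * 3 | (4 << 10) | (5 << 20) = 0x501003, while a null primitive only
 * sets bit 31. See the full bit layout in the comment in
 * gfx10_emit_ngg_epilogue below.
 */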
136
137 static void build_streamout_vertex(struct si_shader_context *ctx,
138 LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
139 unsigned stream, LLVMValueRef offset_vtx,
140 LLVMValueRef vertexptr)
141 {
142 struct tgsi_shader_info *info = &ctx->shader->selector->info;
143 struct pipe_stream_output_info *so = &ctx->shader->selector->so;
144 LLVMBuilderRef builder = ctx->ac.builder;
145 LLVMValueRef offset[4] = {};
146 LLVMValueRef tmp;
147
148 for (unsigned buffer = 0; buffer < 4; ++buffer) {
149 if (!wg_offset_dw[buffer])
150 continue;
151
152 tmp = LLVMBuildMul(builder, offset_vtx,
153 LLVMConstInt(ctx->i32, so->stride[buffer], false), "");
154 tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
155 offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->i32, 2, false), "");
156 }
157
158 for (unsigned i = 0; i < so->num_outputs; ++i) {
159 if (so->output[i].stream != stream)
160 continue;
161
162 unsigned reg = so->output[i].register_index;
163 struct si_shader_output_values out;
164 out.semantic_name = info->output_semantic_name[reg];
165 out.semantic_index = info->output_semantic_index[reg];
166
167 for (unsigned comp = 0; comp < 4; comp++) {
168 tmp = ac_build_gep0(&ctx->ac, vertexptr,
169 LLVMConstInt(ctx->i32, 4 * reg + comp, false));
170 out.values[comp] = LLVMBuildLoad(builder, tmp, "");
171 out.vertex_stream[comp] =
172 (info->output_streams[reg] >> (2 * comp)) & 3;
173 }
174
175 si_emit_streamout_output(ctx, so_buffer, offset, &so->output[i], &out);
176 }
177 }
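
/* Offset math illustrated (made-up numbers): with a stride of 4 dwords,
 * a workgroup base of 100 dwords and offset_vtx = 2, the loop above
 * computes (100 + 2 * 4) << 2 = 432; the shift by 2 converts the dword
 * offset into the byte offset used for the buffer stores.
 */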
178
179 struct ngg_streamout {
180 LLVMValueRef num_vertices;
181
182 /* per-thread data */
183 LLVMValueRef prim_enable[4]; /* i1 per stream */
184 LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */
185
186 /* Output */
187 LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
188 };
189
190 /**
191 * Build streamout logic.
192 *
193 * Implies a barrier.
194 *
195 * Writes number of emitted primitives to gs_ngg_scratch[4:8].
196 *
197 * Clobbers gs_ngg_scratch[8:].
198 */
199 static void build_streamout(struct si_shader_context *ctx,
200 struct ngg_streamout *nggso)
201 {
202 struct tgsi_shader_info *info = &ctx->shader->selector->info;
203 struct pipe_stream_output_info *so = &ctx->shader->selector->so;
204 LLVMBuilderRef builder = ctx->ac.builder;
205 LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
206 LLVMValueRef tid = get_thread_id_in_tg(ctx);
207 LLVMValueRef tmp, tmp2;
208 LLVMValueRef i32_2 = LLVMConstInt(ctx->i32, 2, false);
209 LLVMValueRef i32_4 = LLVMConstInt(ctx->i32, 4, false);
210 LLVMValueRef i32_8 = LLVMConstInt(ctx->i32, 8, false);
211 LLVMValueRef so_buffer[4] = {};
212 unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
213 (nggso->vertices[2] ? 1 : 0);
214 LLVMValueRef prim_stride_dw[4] = {};
215 LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->i32);
216 int stream_for_buffer[4] = { -1, -1, -1, -1 };
217 unsigned bufmask_for_stream[4] = {};
218 bool isgs = ctx->type == PIPE_SHADER_GEOMETRY;
219 unsigned scratch_emit_base = isgs ? 4 : 0;
220 LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->i32_0;
221 unsigned scratch_offset_base = isgs ? 8 : 4;
222 LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;
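
/* Implied gs_ngg_scratch layout: for a GS, dwords [0:4) hold the
 * per-stream generated-primitive counts (zeroed in the emit prologue,
 * accumulated in the emit epilogue), dwords [4:8) receive the
 * per-stream emit counts, dwords [8:12) the per-buffer streamout
 * offsets, and dwords [12:) serve as scratch for the per-stream
 * primitive-emit scans. For VS/TES the emit and offset ranges shift
 * down by 4, since the generated count comes directly from
 * ngg_get_prim_cnt().
 */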
223
224 ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);
225
226 /* Determine the mapping of streamout buffers to vertex streams. */
227 for (unsigned i = 0; i < so->num_outputs; ++i) {
228 unsigned buf = so->output[i].output_buffer;
229 unsigned stream = so->output[i].stream;
230 assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
231 stream_for_buffer[buf] = stream;
232 bufmask_for_stream[stream] |= 1 << buf;
233 }
234
235 for (unsigned buffer = 0; buffer < 4; ++buffer) {
236 if (stream_for_buffer[buffer] == -1)
237 continue;
238
239 assert(so->stride[buffer]);
240
241 tmp = LLVMConstInt(ctx->i32, so->stride[buffer], false);
242 prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
243 prim_stride_dw_vgpr = ac_build_writelane(
244 &ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
245 LLVMConstInt(ctx->i32, buffer, false));
246
247 so_buffer[buffer] = ac_build_load_to_sgpr(
248 &ctx->ac, buf_ptr,
249 LLVMConstInt(ctx->i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
250 }
251
252 tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->i32_0, "");
253 ac_build_ifcc(&ctx->ac, tmp, 5200);
254 {
255 LLVMTypeRef gdsptr = LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GDS);
256 LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->i32_0, gdsptr, "");
257
258 /* Advance the streamout offsets in GDS. */
259 LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
260 LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
261
262 tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
263 ac_build_ifcc(&ctx->ac, tmp, 5210);
264 {
265 if (isgs) {
266 tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
267 tmp = LLVMBuildLoad(builder, tmp, "");
268 } else {
269 tmp = ac_build_writelane(&ctx->ac, ctx->i32_0,
270 ngg_get_prim_cnt(ctx), ctx->i32_0);
271 }
272 LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);
273
274 unsigned swizzle[4];
275 int unused_stream = -1;
276 for (unsigned stream = 0; stream < 4; ++stream) {
277 if (!info->num_stream_output_components[stream]) {
278 unused_stream = stream;
279 break;
280 }
281 }
282 for (unsigned buffer = 0; buffer < 4; ++buffer) {
283 if (stream_for_buffer[buffer] >= 0) {
284 swizzle[buffer] = stream_for_buffer[buffer];
285 } else {
286 assert(unused_stream >= 0);
287 swizzle[buffer] = unused_stream;
288 }
289 }
290
291 tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
292 swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
293 tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
294
295 LLVMValueRef args[] = {
296 LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
297 tmp,
298 ctx->i32_0, // ordering
299 ctx->i32_0, // scope
300 ctx->ac.i1false, // isVolatile
301 LLVMConstInt(ctx->i32, 4 << 24, false), // OA index
302 ctx->ac.i1true, // wave release
303 ctx->ac.i1true, // wave done
304 };
305 tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
306 ctx->i32, args, ARRAY_SIZE(args), 0);
307
308 /* Keep offsets in a VGPR for quick retrieval via readlane by
309 * the first wave for bounds checking, and also store in LDS
310 * for retrieval by all waves later. */
311 LLVMBuildStore(builder, tmp, offsets_vgpr);
312
313 tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
314 scratch_offset_basev, "");
315 tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
316 LLVMBuildStore(builder, tmp, tmp2);
317 }
318 ac_build_endif(&ctx->ac, 5210);
319
320 /* Determine the max emit per buffer. This is done via the SALU, in part
321 * because LLVM can't generate divide-by-multiply if we try to do this
322 * via VALU with one lane per buffer.
323 */
324 LLVMValueRef max_emit[4] = {};
325 for (unsigned buffer = 0; buffer < 4; ++buffer) {
326 if (stream_for_buffer[buffer] == -1)
327 continue;
328
329 LLVMValueRef bufsize_dw =
330 LLVMBuildLShr(builder,
331 LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
332 i32_2, "");
333
334 tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
335 LLVMValueRef offset_dw =
336 ac_build_readlane(&ctx->ac, tmp,
337 LLVMConstInt(ctx->i32, buffer, false));
338
339 tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
340 tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");
341
342 tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
343 max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->i32_0, tmp, "");
344 }
345
346 /* Determine the number of emitted primitives per stream and fixup the
347 * GDS counter if necessary.
348 *
349 * This is complicated by the fact that a single stream can emit to
350 * multiple buffers (but luckily not vice versa).
351 */
352 LLVMValueRef emit_vgpr = ctx->i32_0;
353
354 for (unsigned stream = 0; stream < 4; ++stream) {
355 if (!info->num_stream_output_components[stream])
356 continue;
357
358 tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
359 LLVMValueRef generated =
360 ac_build_readlane(&ctx->ac, tmp,
361 LLVMConstInt(ctx->i32, stream, false));
362
363 LLVMValueRef emit = generated;
364 for (unsigned buffer = 0; buffer < 4; ++buffer) {
365 if (stream_for_buffer[buffer] == stream)
366 emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
367 }
368
369 emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
370 LLVMConstInt(ctx->i32, stream, false));
371
372 /* Fixup the offset using a plain GDS atomic if we overflowed. */
373 tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
374 ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
375 tmp = LLVMBuildLShr(builder,
376 LLVMConstInt(ctx->i32, bufmask_for_stream[stream], false),
377 ac_get_thread_id(&ctx->ac), "");
378 tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
379 ac_build_ifcc(&ctx->ac, tmp, 5222);
380 {
381 tmp = LLVMBuildSub(builder, generated, emit, "");
382 tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
383 tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
384 LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
385 LLVMAtomicOrderingMonotonic, false);
386 }
387 ac_build_endif(&ctx->ac, 5222);
388 ac_build_endif(&ctx->ac, 5221);
389 }
390
391 tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
392 ac_build_ifcc(&ctx->ac, tmp, 5225);
393 {
394 tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
395 scratch_emit_basev, "");
396 tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
397 LLVMBuildStore(builder, emit_vgpr, tmp);
398 }
399 ac_build_endif(&ctx->ac, 5225);
400 }
401 ac_build_endif(&ctx->ac, 5200);
402
403 /* Determine the workgroup-relative per-thread / primitive offset into
404 * the streamout buffers */
405 struct ac_wg_scan primemit_scan[4] = {};
406
407 if (isgs) {
408 for (unsigned stream = 0; stream < 4; ++stream) {
409 if (!info->num_stream_output_components[stream])
410 continue;
411
412 primemit_scan[stream].enable_exclusive = true;
413 primemit_scan[stream].op = nir_op_iadd;
414 primemit_scan[stream].src = nggso->prim_enable[stream];
415 primemit_scan[stream].scratch =
416 ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
417 LLVMConstInt(ctx->i32, 12 + 8 * stream, false));
418 primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
419 primemit_scan[stream].numwaves = get_tgsize(ctx);
420 primemit_scan[stream].maxwaves = 8;
421 ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
422 }
423 }
424
425 ac_build_s_barrier(&ctx->ac);
426
427 /* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
428 LLVMValueRef wgoffset_dw[4] = {};
429
430 {
431 LLVMValueRef scratch_vgpr;
432
433 tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
434 scratch_vgpr = LLVMBuildLoad(builder, tmp, "");
435
436 for (unsigned buffer = 0; buffer < 4; ++buffer) {
437 if (stream_for_buffer[buffer] >= 0) {
438 wgoffset_dw[buffer] = ac_build_readlane(
439 &ctx->ac, scratch_vgpr,
440 LLVMConstInt(ctx->i32, scratch_offset_base + buffer, false));
441 }
442 }
443
444 for (unsigned stream = 0; stream < 4; ++stream) {
445 if (info->num_stream_output_components[stream]) {
446 nggso->emit[stream] = ac_build_readlane(
447 &ctx->ac, scratch_vgpr,
448 LLVMConstInt(ctx->i32, scratch_emit_base + stream, false));
449 }
450 }
451 }
452
453 /* Write out primitive data */
454 for (unsigned stream = 0; stream < 4; ++stream) {
455 if (!info->num_stream_output_components[stream])
456 continue;
457
458 if (isgs) {
459 ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
460 } else {
461 primemit_scan[stream].result_exclusive = tid;
462 }
463
464 tmp = LLVMBuildICmp(builder, LLVMIntULT,
465 primemit_scan[stream].result_exclusive,
466 nggso->emit[stream], "");
467 tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
468 ac_build_ifcc(&ctx->ac, tmp, 5240);
469 {
470 LLVMValueRef offset_vtx =
471 LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
472 nggso->num_vertices, "");
473
474 for (unsigned i = 0; i < max_num_vertices; ++i) {
475 tmp = LLVMBuildICmp(builder, LLVMIntULT,
476 LLVMConstInt(ctx->i32, i, false),
477 nggso->num_vertices, "");
478 ac_build_ifcc(&ctx->ac, tmp, 5241);
479 build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
480 stream, offset_vtx, nggso->vertices[i]);
481 ac_build_endif(&ctx->ac, 5241);
482 offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->i32_1, "");
483 }
484 }
485 ac_build_endif(&ctx->ac, 5240);
486 }
487 }
488
489 static unsigned ngg_nogs_vertex_size(struct si_shader *shader)
490 {
491 unsigned lds_vertex_size = 0;
492
493 /* The edgeflag is always stored in the last element that's also
494 * used for padding to reduce LDS bank conflicts. */
495 if (shader->selector->so.num_outputs)
496 lds_vertex_size = 4 * shader->selector->info.num_outputs + 1;
497 if (shader->selector->info.writes_edgeflag)
498 lds_vertex_size = MAX2(lds_vertex_size, 1);
499
500 return lds_vertex_size;
501 }
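
/* Sizing example (hypothetical shader): with streamout and 5 outputs,
 * lds_vertex_size = 4 * 5 + 1 = 21 dwords; the stride is always odd,
 * which is what spreads consecutive vertices across LDS banks.
 */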
502
503 /**
504 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
505 * for the vertex outputs.
506 */
507 static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx,
508 LLVMValueRef vtxid)
509 {
510 /* The extra dword is used to avoid LDS bank conflicts. */
511 unsigned vertex_size = ngg_nogs_vertex_size(ctx->shader);
512 LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, vertex_size);
513 LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
514 LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
515 return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
516 }
517
518 /**
519 * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
520 */
521 void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
522 unsigned max_outputs,
523 LLVMValueRef *addrs)
524 {
525 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
526 struct si_shader_selector *sel = ctx->shader->selector;
527 struct tgsi_shader_info *info = &sel->info;
528 struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
529 LLVMBuilderRef builder = ctx->ac.builder;
530 LLVMValueRef tmp, tmp2;
531
532 assert(!ctx->shader->is_gs_copy_shader);
533 assert(info->num_outputs <= max_outputs);
534
535 LLVMValueRef vertex_ptr = NULL;
536
537 if (sel->so.num_outputs || sel->info.writes_edgeflag)
538 vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
539
540 for (unsigned i = 0; i < info->num_outputs; i++) {
541 outputs[i].semantic_name = info->output_semantic_name[i];
542 outputs[i].semantic_index = info->output_semantic_index[i];
543
544 for (unsigned j = 0; j < 4; j++) {
545 outputs[i].vertex_stream[j] =
546 (info->output_streams[i] >> (2 * j)) & 3;
547
548 /* TODO: we may store more outputs than streamout needs,
549 * but streamout performance isn't that important.
550 */
551 if (sel->so.num_outputs) {
552 tmp = ac_build_gep0(&ctx->ac, vertex_ptr,
553 LLVMConstInt(ctx->i32, 4 * i + j, false));
554 tmp2 = LLVMBuildLoad(builder, addrs[4 * i + j], "");
555 tmp2 = ac_to_integer(&ctx->ac, tmp2);
556 LLVMBuildStore(builder, tmp2, tmp);
557 }
558 }
559
560 /* Store the edgeflag at the end (if streamout is enabled) */
561 if (info->output_semantic_name[i] == TGSI_SEMANTIC_EDGEFLAG &&
562 sel->info.writes_edgeflag) {
563 LLVMValueRef edgeflag = LLVMBuildLoad(builder, addrs[4 * i], "");
564 /* The output is a float, but the hw expects a 1-bit integer. */
565 edgeflag = LLVMBuildFPToUI(ctx->ac.builder, edgeflag, ctx->i32, "");
566 edgeflag = ac_build_umin(&ctx->ac, edgeflag, ctx->i32_1);
567
568 tmp = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
569 tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
570 LLVMBuildStore(builder, edgeflag, tmp);
571 }
572 }
573
574 ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
575
576 LLVMValueRef prims_in_wave = si_unpack_param(ctx, ctx->merged_wave_info, 8, 8);
577 LLVMValueRef vtx_in_wave = si_unpack_param(ctx, ctx->merged_wave_info, 0, 8);
578 LLVMValueRef is_gs_thread = LLVMBuildICmp(builder, LLVMIntULT,
579 ac_get_thread_id(&ctx->ac), prims_in_wave, "");
580 LLVMValueRef is_es_thread = LLVMBuildICmp(builder, LLVMIntULT,
581 ac_get_thread_id(&ctx->ac), vtx_in_wave, "");
582 LLVMValueRef vtxindex[] = {
583 si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16),
584 si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16),
585 si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16),
586 };
587
588 /* Determine the number of vertices per primitive. */
589 unsigned num_vertices;
590 LLVMValueRef num_vertices_val;
591
592 if (ctx->type == PIPE_SHADER_VERTEX) {
593 if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
594 /* Blits always use axis-aligned rectangles with 3 vertices. */
595 num_vertices = 3;
596 num_vertices_val = LLVMConstInt(ctx->i32, 3, 0);
597 } else {
598 /* Extract OUTPRIM field. */
599 tmp = si_unpack_param(ctx, ctx->vs_state_bits, 2, 2);
600 num_vertices_val = LLVMBuildAdd(builder, tmp, ctx->i32_1, "");
601 num_vertices = 3; /* TODO: optimize for points & lines */
602 }
603 } else {
604 assert(ctx->type == PIPE_SHADER_TESS_EVAL);
605
606 if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
607 num_vertices = 1;
608 else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
609 num_vertices = 2;
610 else
611 num_vertices = 3;
612
613 num_vertices_val = LLVMConstInt(ctx->i32, num_vertices, false);
614 }
615
616 /* Streamout */
617 LLVMValueRef emitted_prims = NULL;
618
619 if (sel->so.num_outputs) {
620 struct ngg_streamout nggso = {};
621
622 nggso.num_vertices = num_vertices_val;
623 nggso.prim_enable[0] = is_gs_thread;
624
625 for (unsigned i = 0; i < num_vertices; ++i)
626 nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
627
628 build_streamout(ctx, &nggso);
629 emitted_prims = nggso.emit[0];
630 }
631
632 LLVMValueRef user_edgeflags[3] = {};
633
634 if (sel->info.writes_edgeflag) {
635 /* Streamout already inserted the barrier, so don't insert it again. */
636 if (!sel->so.num_outputs)
637 ac_build_s_barrier(&ctx->ac);
638
639 ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
640 /* Load edge flags from ES threads and store them into VGPRs in GS threads. */
641 for (unsigned i = 0; i < num_vertices; i++) {
642 tmp = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
643 tmp2 = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
644 tmp = ac_build_gep0(&ctx->ac, tmp, tmp2);
645 tmp = LLVMBuildLoad(builder, tmp, "");
646 tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
647
648 user_edgeflags[i] = ac_build_alloca_undef(&ctx->ac, ctx->i1, "");
649 LLVMBuildStore(builder, tmp, user_edgeflags[i]);
650 }
651 ac_build_endif(&ctx->ac, 5400);
652 }
653
654 /* Copy Primitive IDs from GS threads to the LDS address corresponding
655 * to the ES thread of the provoking vertex.
656 */
657 if (ctx->type == PIPE_SHADER_VERTEX &&
658 ctx->shader->key.mono.u.vs_export_prim_id) {
659 /* Streamout and edge flags use LDS. Make it idle, so that we can reuse it. */
660 if (sel->so.num_outputs || sel->info.writes_edgeflag)
661 ac_build_s_barrier(&ctx->ac);
662
663 ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
664 /* Extract the PROVOKING_VTX_INDEX field. */
665 LLVMValueRef provoking_vtx_in_prim =
666 si_unpack_param(ctx, ctx->vs_state_bits, 4, 2);
667
668 /* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
669 LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
670 LLVMValueRef provoking_vtx_index =
671 LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");
672
673 LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.gs_prim_id),
674 ac_build_gep0(&ctx->ac, ctx->esgs_ring, provoking_vtx_index));
675 ac_build_endif(&ctx->ac, 5400);
676 }
677
678 build_sendmsg_gs_alloc_req(ctx, ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));
679
680 /* Update query buffer */
681 /* TODO: this won't catch 96-bit clear_buffer via transform feedback. */
682 if (!info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
683 tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
684 tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
685 ac_build_ifcc(&ctx->ac, tmp, 5029); /* if (STREAMOUT_QUERY_ENABLED) */
686 tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
687 ac_build_ifcc(&ctx->ac, tmp, 5030);
688 tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
689 sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
690 ac_build_ifcc(&ctx->ac, tmp, 5031);
691 {
692 LLVMValueRef args[] = {
693 ngg_get_prim_cnt(ctx),
694 ngg_get_query_buf(ctx),
695 LLVMConstInt(ctx->i32, 16, false), /* offset of stream[0].generated_primitives */
696 ctx->i32_0, /* soffset */
697 ctx->i32_0, /* cachepolicy */
698 };
699
700 if (sel->so.num_outputs) {
701 args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->i32_1);
702 args[2] = ac_build_writelane(&ctx->ac, args[2],
703 LLVMConstInt(ctx->i32, 24, false), ctx->i32_1);
704 }
705
706 /* TODO: should this be 64-bit atomics? */
707 ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
708 ctx->i32, args, 5, 0);
709 }
710 ac_build_endif(&ctx->ac, 5031);
711 ac_build_endif(&ctx->ac, 5030);
712 ac_build_endif(&ctx->ac, 5029);
713 }
714
715 /* Export primitive data to the index buffer. Format is:
716 * - bits 0..8: index 0
717 * - bit 9: edge flag 0
718 * - bits 10..18: index 1
719 * - bit 19: edge flag 1
720 * - bits 20..28: index 2
721 * - bit 29: edge flag 2
722 * - bit 31: null primitive (skip)
723 *
724 * For the first version, we will always build up all three indices
725 * independent of the primitive type. The additional garbage data
726 * shouldn't hurt.
727 *
728 * TODO: culling depends on the primitive type, so can have some
729 * interaction here.
730 */
731 ac_build_ifcc(&ctx->ac, is_gs_thread, 6001);
732 {
733 struct ngg_prim prim = {};
734
735 prim.num_vertices = num_vertices;
736 prim.isnull = ctx->ac.i1false;
737 memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);
738
739 for (unsigned i = 0; i < num_vertices; ++i) {
740 if (ctx->type != PIPE_SHADER_VERTEX) {
741 prim.edgeflag[i] = ctx->i1false;
742 continue;
743 }
744
745 tmp = LLVMBuildLShr(builder,
746 ac_get_arg(&ctx->ac, ctx->args.gs_invocation_id),
747 LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
748 prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
749
750 if (sel->info.writes_edgeflag) {
751 tmp2 = LLVMBuildLoad(builder, user_edgeflags[i], "");
752 prim.edgeflag[i] = LLVMBuildAnd(builder, prim.edgeflag[i],
753 tmp2, "");
754 }
755 }
756
757 build_export_prim(ctx, &prim);
758 }
759 ac_build_endif(&ctx->ac, 6001);
760
761 /* Export per-vertex data (positions and parameters). */
762 ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
763 {
764 unsigned i;
765
766 /* Unconditionally (re-)load the values for proper SSA form. */
767 for (i = 0; i < info->num_outputs; i++) {
768 for (unsigned j = 0; j < 4; j++) {
769 outputs[i].values[j] =
770 LLVMBuildLoad(builder,
771 addrs[4 * i + j],
772 "");
773 }
774 }
775
776 if (ctx->shader->key.mono.u.vs_export_prim_id) {
777 outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
778 outputs[i].semantic_index = 0;
779
780 if (ctx->type == PIPE_SHADER_VERTEX) {
781 /* Wait for GS stores to finish. */
782 ac_build_s_barrier(&ctx->ac);
783
784 tmp = ac_build_gep0(&ctx->ac, ctx->esgs_ring,
785 get_thread_id_in_tg(ctx));
786 outputs[i].values[0] = LLVMBuildLoad(builder, tmp, "");
787 } else {
788 assert(ctx->type == PIPE_SHADER_TESS_EVAL);
789 outputs[i].values[0] = si_get_primitive_id(ctx, 0);
790 }
791
792 outputs[i].values[0] = ac_to_float(&ctx->ac, outputs[i].values[0]);
793 for (unsigned j = 1; j < 4; j++)
794 outputs[i].values[j] = LLVMGetUndef(ctx->f32);
795
796 memset(outputs[i].vertex_stream, 0,
797 sizeof(outputs[i].vertex_stream));
798 i++;
799 }
800
801 si_llvm_export_vs(ctx, outputs, i);
802 }
803 ac_build_endif(&ctx->ac, 6002);
804 }
805
806 static LLVMValueRef
807 ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
808 {
809 const struct si_shader_selector *sel = ctx->shader->selector;
810 const struct tgsi_shader_info *info = &sel->info;
811
812 LLVMTypeRef elements[2] = {
813 LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
814 LLVMArrayType(ctx->ac.i8, 4),
815 };
816 LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
817 type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
818 return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
819 }
820
821 /**
822 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
823 * is in emit order; that is:
824 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
825 * - during vertex emit, i.e. while the API GS shader invocation is running,
826 * N = threadidx * gs_max_out_vertices + emitidx
827 *
828 * Goals of the LDS memory layout:
829 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
830 * in uniform control flow
831 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
832 * culling
833 * 3. Agnostic to the number of waves (since we don't know it before compiling)
834 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
835 * 5. Avoid wasting memory.
836 *
837 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
838 * layout, elimination of bank conflicts requires that each vertex occupy an
839 * odd number of dwords. We use the additional dword to store the output stream
840 * index as well as a flag to indicate whether this vertex ends a primitive
841 * for rasterization.
842 *
843 * Swizzling is required to satisfy points 1 and 2 simultaneously.
844 *
845 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
846 * Indices are swizzled in groups of 32, which ensures point 1 without
847 * disturbing point 2.
848 *
849 * \return an LDS pointer to type {[N x i32], [4 x i8]}
850 */
851 static LLVMValueRef
852 ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
853 {
854 struct si_shader_selector *sel = ctx->shader->selector;
855 LLVMBuilderRef builder = ctx->ac.builder;
856 LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);
857
858 /* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
859 unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
860 if (write_stride_2exp) {
861 LLVMValueRef row =
862 LLVMBuildLShr(builder, vertexidx,
863 LLVMConstInt(ctx->ac.i32, 5, false), "");
864 LLVMValueRef swizzle =
865 LLVMBuildAnd(builder, row,
866 LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
867 false), "");
868 vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
869 }
870
871 return ac_build_gep0(&ctx->ac, storage, vertexidx);
872 }
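
/* Swizzle example (a sketch): gs_max_out_vertices = 4 gives
 * write_stride_2exp = 2, so the two low bits of row = vertexidx >> 5
 * are XORed in. E.g. vertexidx = 37 lies in row 1, hence swizzle = 1
 * and the vertex is stored at slot 37 ^ 1 = 36.
 */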
873
874 static LLVMValueRef
875 ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
876 LLVMValueRef emitidx)
877 {
878 struct si_shader_selector *sel = ctx->shader->selector;
879 LLVMBuilderRef builder = ctx->ac.builder;
880 LLVMValueRef tmp;
881
882 tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
883 tmp = LLVMBuildMul(builder, tmp, gsthread, "");
884 const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
885 return ngg_gs_vertex_ptr(ctx, vertexidx);
886 }
887
888 static LLVMValueRef
889 ngg_gs_get_emit_output_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
890 unsigned out_idx)
891 {
892 LLVMValueRef gep_idx[3] = {
893 ctx->ac.i32_0, /* implied C-style array */
894 ctx->ac.i32_0, /* first struct entry */
895 LLVMConstInt(ctx->ac.i32, out_idx, false),
896 };
897 return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
898 }
899
900 static LLVMValueRef
901 ngg_gs_get_emit_primflag_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
902 unsigned stream)
903 {
904 LLVMValueRef gep_idx[3] = {
905 ctx->ac.i32_0, /* implied C-style array */
906 ctx->ac.i32_1, /* second struct entry */
907 LLVMConstInt(ctx->ac.i32, stream, false),
908 };
909 return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
910 }
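
/* Both helpers above address the {[4*num_outputs x i32], [4 x i8]}
 * struct returned by ngg_gs_vertex_ptr: the first GEP index steps
 * through the pointer, the second selects the struct member and the
 * third the element within it.
 */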
911
912 void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
913 unsigned stream,
914 LLVMValueRef *addrs)
915 {
916 const struct si_shader_selector *sel = ctx->shader->selector;
917 const struct tgsi_shader_info *info = &sel->info;
918 LLVMBuilderRef builder = ctx->ac.builder;
919 LLVMValueRef tmp;
920 const LLVMValueRef vertexidx =
921 LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
922
923 /* If this thread has already emitted the declared maximum number of
924 * vertices, skip the write: excessive vertex emissions are not
925 * supposed to have any effect.
926 */
927 const LLVMValueRef can_emit =
928 LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
929 LLVMConstInt(ctx->i32, sel->gs_max_out_vertices, false), "");
930
931 tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
932 tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
933 LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);
934
935 ac_build_ifcc(&ctx->ac, can_emit, 9001);
936
937 const LLVMValueRef vertexptr =
938 ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
939 unsigned out_idx = 0;
940 for (unsigned i = 0; i < info->num_outputs; i++) {
941 for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
942 if (!(info->output_usagemask[i] & (1 << chan)) ||
943 ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
944 continue;
945
946 LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
947 out_val = ac_to_integer(&ctx->ac, out_val);
948 LLVMBuildStore(builder, out_val,
949 ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx));
950 }
951 }
952 assert(out_idx * 4 == sel->gsvs_vertex_size);
953
954 /* Determine and store whether this vertex completed a primitive. */
955 const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");
956
957 tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
958 const LLVMValueRef iscompleteprim =
959 LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");
960
961 /* Since the geometry shader emits triangle strips, we need to
962 * track which primitive is odd and swap vertex indices to get
963 * the correct vertex order.
964 */
965 LLVMValueRef is_odd = ctx->i1false;
966 if (stream == 0 && u_vertices_per_prim(sel->gs_output_prim) == 3) {
967 tmp = LLVMBuildAnd(builder, curverts, ctx->i32_1, "");
968 is_odd = LLVMBuildICmp(builder, LLVMIntEQ, tmp, ctx->i32_1, "");
969 }
970
971 tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
972 LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);
973
974 /* The per-vertex primitive flag encoding:
975 * bit 0: whether this vertex finishes a primitive
976 * bit 1: whether the primitive is odd (if we are emitting triangle strips)
977 */
978 tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
979 tmp = LLVMBuildOr(builder, tmp,
980 LLVMBuildShl(builder,
981 LLVMBuildZExt(builder, is_odd, ctx->ac.i8, ""),
982 ctx->ac.i8_1, ""), "");
983 LLVMBuildStore(builder, tmp, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream));
984
985 tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
986 tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
987 LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);
988
989 ac_build_endif(&ctx->ac, 9001);
990 }
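
/* Flag encoding illustrated for stream 0 of a triangle-strip GS
 * (a sketch): the third emitted vertex sees curverts = 2 and completes
 * even primitive 0, storing primflag = 0x1; the fourth sees
 * curverts = 3 (3 & 1 = 1) and completes odd primitive 1, storing
 * primflag = 0x3.
 */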
991
992 void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
993 {
994 /* Zero out the part of LDS scratch that is used to accumulate the
995 * per-stream generated primitive count.
996 */
997 LLVMBuilderRef builder = ctx->ac.builder;
998 LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
999 LLVMValueRef tid = get_thread_id_in_tg(ctx);
1000 LLVMValueRef tmp;
1001
1002 tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->i32, 4, false), "");
1003 ac_build_ifcc(&ctx->ac, tmp, 5090);
1004 {
1005 LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
1006 LLVMBuildStore(builder, ctx->i32_0, ptr);
1007 }
1008 ac_build_endif(&ctx->ac, 5090);
1009
1010 ac_build_s_barrier(&ctx->ac);
1011 }
1012
1013 void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
1014 {
1015 const struct si_shader_selector *sel = ctx->shader->selector;
1016 const struct tgsi_shader_info *info = &sel->info;
1017 const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
1018 LLVMBuilderRef builder = ctx->ac.builder;
1019 LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
1020 LLVMValueRef tmp, tmp2;
1021
1022 /* Zero out remaining (non-emitted) primitive flags.
1023 *
1024 * Note: Alternatively, we could pass the relevant gs_next_vertex to
1025 * the emit threads via LDS. This is likely worse in the expected
1026 * typical case where each GS thread emits the full set of
1027 * vertices.
1028 */
1029 for (unsigned stream = 0; stream < 4; ++stream) {
1030 if (!info->num_stream_output_components[stream])
1031 continue;
1032
1033 const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);
1034
1035 ac_build_bgnloop(&ctx->ac, 5100);
1036
1037 const LLVMValueRef vertexidx =
1038 LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
1039 tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
1040 LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
1041 ac_build_ifcc(&ctx->ac, tmp, 5101);
1042 ac_build_break(&ctx->ac);
1043 ac_build_endif(&ctx->ac, 5101);
1044
1045 tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
1046 LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);
1047
1048 tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
1049 LLVMBuildStore(builder, i8_0, ngg_gs_get_emit_primflag_ptr(ctx, tmp, stream));
1050
1051 ac_build_endloop(&ctx->ac, 5100);
1052 }
1053
1054 /* Accumulate generated primitives counts across the entire threadgroup. */
1055 for (unsigned stream = 0; stream < 4; ++stream) {
1056 if (!info->num_stream_output_components[stream])
1057 continue;
1058
1059 LLVMValueRef numprims =
1060 LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
1061 numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);
1062
1063 tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->i32_0, "");
1064 ac_build_ifcc(&ctx->ac, tmp, 5105);
1065 {
1066 LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
1067 ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
1068 LLVMConstInt(ctx->i32, stream, false)),
1069 numprims, LLVMAtomicOrderingMonotonic, false);
1070 }
1071 ac_build_endif(&ctx->ac, 5105);
1072 }
1073
1074 ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
1075
1076 ac_build_s_barrier(&ctx->ac);
1077
1078 const LLVMValueRef tid = get_thread_id_in_tg(ctx);
1079 LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);
1080
1081 /* Streamout */
1082 if (sel->so.num_outputs) {
1083 struct ngg_streamout nggso = {};
1084
1085 nggso.num_vertices = LLVMConstInt(ctx->i32, verts_per_prim, false);
1086
1087 LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
1088 for (unsigned stream = 0; stream < 4; ++stream) {
1089 if (!info->num_stream_output_components[stream])
1090 continue;
1091
1092 tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream), "");
1093 tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
1094 tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
1095 nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
1096 }
1097
1098 for (unsigned i = 0; i < verts_per_prim; ++i) {
1099 tmp = LLVMBuildSub(builder, tid,
1100 LLVMConstInt(ctx->i32, verts_per_prim - i - 1, false), "");
1101 tmp = ngg_gs_vertex_ptr(ctx, tmp);
1102 nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
1103 }
1104
1105 build_streamout(ctx, &nggso);
1106 }
1107
1108 /* Write shader query data. */
1109 tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
1110 tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
1111 ac_build_ifcc(&ctx->ac, tmp, 5109); /* if (STREAMOUT_QUERY_ENABLED) */
1112 unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
1113 tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
1114 LLVMConstInt(ctx->i32, num_query_comps, false), "");
1115 ac_build_ifcc(&ctx->ac, tmp, 5110);
1116 {
1117 LLVMValueRef offset;
1118 tmp = tid;
1119 if (sel->so.num_outputs)
1120 tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->i32, 3, false), "");
1121 offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 32, false), "");
1122 if (sel->so.num_outputs) {
1123 tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->i32, 2, false), "");
1124 tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 8, false), "");
1125 offset = LLVMBuildAdd(builder, offset, tmp, "");
1126 }
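
/* Addressing illustrated (a sketch): with streamout, tid = 6 selects
 * stream 6 & 3 = 2 and counter kind 6 >> 2 = 1 (emitted rather than
 * generated), so offset = 2 * 32 + 1 * 8 = 72; the 16-byte soffset
 * below places the atomic at byte 88 of the query buffer.
 */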
1127
1128 tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
1129 LLVMValueRef args[] = {
1130 tmp,
1131 ngg_get_query_buf(ctx),
1132 offset,
1133 LLVMConstInt(ctx->i32, 16, false), /* soffset */
1134 ctx->i32_0, /* cachepolicy */
1135 };
1136 ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
1137 ctx->i32, args, 5, 0);
1138 }
1139 ac_build_endif(&ctx->ac, 5110);
1140 ac_build_endif(&ctx->ac, 5109);
1141
1142 /* TODO: culling */
1143
1144 /* Determine vertex liveness. */
1145 LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");
1146
1147 tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
1148 ac_build_ifcc(&ctx->ac, tmp, 5120);
1149 {
1150 for (unsigned i = 0; i < verts_per_prim; ++i) {
1151 const LLVMValueRef primidx =
1152 LLVMBuildAdd(builder, tid,
1153 LLVMConstInt(ctx->ac.i32, i, false), "");
1154
1155 if (i > 0) {
1156 tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
1157 ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
1158 }
1159
1160 /* Load primitive liveness */
1161 tmp = ngg_gs_vertex_ptr(ctx, primidx);
1162 tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
1163 const LLVMValueRef primlive =
1164 LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
1165
1166 tmp = LLVMBuildLoad(builder, vertliveptr, "");
1167 tmp = LLVMBuildOr(builder, tmp, primlive, "");
1168 LLVMBuildStore(builder, tmp, vertliveptr);
1169
1170 if (i > 0)
1171 ac_build_endif(&ctx->ac, 5121 + i);
1172 }
1173 }
1174 ac_build_endif(&ctx->ac, 5120);
1175
1176 /* Inclusive scan addition across the current wave. */
1177 LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
1178 struct ac_wg_scan vertlive_scan = {};
1179 vertlive_scan.op = nir_op_iadd;
1180 vertlive_scan.enable_reduce = true;
1181 vertlive_scan.enable_exclusive = true;
1182 vertlive_scan.src = vertlive;
1183 vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->i32_0);
1184 vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
1185 vertlive_scan.numwaves = get_tgsize(ctx);
1186 vertlive_scan.maxwaves = 8;
1187
1188 ac_build_wg_scan(&ctx->ac, &vertlive_scan);
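
/* After the scan: result_exclusive is the compacted slot that a live
 * vertex will occupy, and result_reduce is the total number of live
 * vertices in the threadgroup, i.e. the vertex count requested from
 * the SPI below.
 */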
1189
1190 /* Skip all exports (including index exports) when possible. At least on
1191 * early gfx10 revisions this is also to avoid hangs.
1192 */
1193 LLVMValueRef have_exports =
1194 LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
1195 num_emit_threads =
1196 LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");
1197
1198 /* Allocate export space. Send this message as early as possible, to
1199 * hide the latency of the SQ <-> SPI roundtrip.
1200 *
1201 * Note: We could consider compacting primitives for export as well.
1202 * PA processes 1 non-null prim / clock, but it fetches 4 DW of
1203 * prim data per clock and skips null primitives at no additional
1204 * cost. So compacting primitives can only be beneficial when
1205 * there are 4 or more contiguous null primitives in the export
1206 * (in the common case of single-dword prim exports).
1207 */
1208 build_sendmsg_gs_alloc_req(ctx, vertlive_scan.result_reduce, num_emit_threads);
1209
1210 /* Setup the reverse vertex compaction permutation. We re-use stream 1
1211 * of the primitive liveness flags, relying on the fact that each
1212 * threadgroup can have at most 256 threads. */
1213 ac_build_ifcc(&ctx->ac, vertlive, 5130);
1214 {
1215 tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
1216 tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
1217 LLVMBuildStore(builder, tmp2, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1));
1218 }
1219 ac_build_endif(&ctx->ac, 5130);
1220
1221 ac_build_s_barrier(&ctx->ac);
1222
1223 /* Export primitive data */
1224 tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
1225 ac_build_ifcc(&ctx->ac, tmp, 5140);
1226 {
1227 LLVMValueRef flags;
1228 struct ngg_prim prim = {};
1229 prim.num_vertices = verts_per_prim;
1230
1231 tmp = ngg_gs_vertex_ptr(ctx, tid);
1232 flags = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
1233 prim.isnull = LLVMBuildNot(builder, LLVMBuildTrunc(builder, flags, ctx->i1, ""), "");
1234
1235 for (unsigned i = 0; i < verts_per_prim; ++i) {
1236 prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
1237 LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
1238 prim.edgeflag[i] = ctx->ac.i1false;
1239 }
1240
1241 /* Geometry shaders output triangle strips, but NGG expects triangles.
1242 * We need to change the vertex order for odd triangles to get correct
1243 * front/back facing by swapping 2 vertex indices, but we also have to
1244 * keep the provoking vertex in the same place.
1245 *
1246 * If the first vertex is provoking, swap index 1 and 2.
1247 * If the last vertex is provoking, swap index 0 and 1.
1248 */
1249 if (verts_per_prim == 3) {
1250 LLVMValueRef is_odd = LLVMBuildLShr(builder, flags, ctx->ac.i8_1, "");
1251 is_odd = LLVMBuildTrunc(builder, is_odd, ctx->i1, "");
1252 LLVMValueRef flatshade_first =
1253 LLVMBuildICmp(builder, LLVMIntEQ,
1254 si_unpack_param(ctx, ctx->vs_state_bits, 4, 2),
1255 ctx->i32_0, "");
1256
1257 struct ngg_prim in = prim;
1258 prim.index[0] = LLVMBuildSelect(builder, flatshade_first,
1259 in.index[0],
1260 LLVMBuildSelect(builder, is_odd,
1261 in.index[1], in.index[0], ""), "");
1262 prim.index[1] = LLVMBuildSelect(builder, flatshade_first,
1263 LLVMBuildSelect(builder, is_odd,
1264 in.index[2], in.index[1], ""),
1265 LLVMBuildSelect(builder, is_odd,
1266 in.index[0], in.index[1], ""), "");
1267 prim.index[2] = LLVMBuildSelect(builder, flatshade_first,
1268 LLVMBuildSelect(builder, is_odd,
1269 in.index[1], in.index[2], ""),
1270 in.index[2], "");
1271 }
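
/* Resulting permutations (summary): even triangles keep (v0, v1, v2)
 * in both modes; an odd triangle becomes (v0, v2, v1) when the first
 * vertex is provoking and (v1, v0, v2) when the last vertex is
 * provoking.
 */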
1272
1273 build_export_prim(ctx, &prim);
1274 }
1275 ac_build_endif(&ctx->ac, 5140);
1276
1277 /* Export position and parameter data */
1278 tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
1279 ac_build_ifcc(&ctx->ac, tmp, 5145);
1280 {
1281 struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
1282
1283 tmp = ngg_gs_vertex_ptr(ctx, tid);
1284 tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1), "");
1285 tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
1286 const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);
1287
1288 unsigned out_idx = 0;
1289 for (unsigned i = 0; i < info->num_outputs; i++) {
1290 outputs[i].semantic_name = info->output_semantic_name[i];
1291 outputs[i].semantic_index = info->output_semantic_index[i];
1292
1293 for (unsigned j = 0; j < 4; j++, out_idx++) {
1294 tmp = ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx);
1295 tmp = LLVMBuildLoad(builder, tmp, "");
1296 outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
1297 outputs[i].vertex_stream[j] =
1298 (info->output_streams[i] >> (2 * j)) & 3;
1299 }
1300 }
1301
1302 si_llvm_export_vs(ctx, outputs, info->num_outputs);
1303 }
1304 ac_build_endif(&ctx->ac, 5145);
1305 }
1306
1307 static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
1308 unsigned min_verts_per_prim, bool use_adjacency)
1309 {
1310 unsigned max_reuse = max_esverts - min_verts_per_prim;
1311 if (use_adjacency)
1312 max_reuse /= 2;
1313 *max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
1314 }
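
/* Worked example (a sketch): with max_esverts = 128, a triangle list
 * (min_verts_per_prim = 3) gives max_reuse = 125 and a clamp of 126;
 * triangles with adjacency (min_verts_per_prim = 6) give
 * max_reuse = (128 - 6) / 2 = 61 and a clamp of 62.
 */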
1315
1316 /**
1317 * Determine subgroup information like maximum number of vertices and prims.
1318 *
1319 * This happens before the shader is uploaded, since LDS relocations during
1320 * upload depend on the subgroup size.
1321 */
1322 void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
1323 {
1324 const struct si_shader_selector *gs_sel = shader->selector;
1325 const struct si_shader_selector *es_sel =
1326 shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
1327 const enum pipe_shader_type gs_type = gs_sel->type;
1328 const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
1329 const unsigned input_prim = si_get_input_prim(gs_sel);
1330 const bool use_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
1331 input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
1332 const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
1333 const unsigned min_verts_per_prim =
1334 gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;
1335
1336 /* All these are in dwords: */
1337 /* We can't allow using the whole LDS, because GS waves compete with
1338 * other shader stages for LDS space.
1339 *
1340 * TODO: We should really take the shader's internal LDS use into
1341 * account. The linker will fail if the size is greater than
1342 * 8K dwords.
1343 */
1344 const unsigned max_lds_size = 8 * 1024 - 768;
1345 const unsigned target_lds_size = max_lds_size;
1346 unsigned esvert_lds_size = 0;
1347 unsigned gsprim_lds_size = 0;
1348
1349 /* All these are per subgroup: */
1350 bool max_vert_out_per_gs_instance = false;
1351 unsigned max_esverts_base = 128;
1352 unsigned max_gsprims_base = 128; /* default prim group size clamp */
1353
1354 /* Hardware has the following non-natural restrictions on the value
1355 * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of
1356 * the draw:
1357 * - at most 252 for any line input primitive type
1358 * - at most 251 for any quad input primitive type
1359 * - at most 251 for triangle strips with adjacency (this happens to
1360 * be the natural limit for triangle *lists* with adjacency)
1361 */
1362 max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);
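
/* E.g. lines give max_verts_per_prim = 2 and a cap of 252, triangles
 * give 3 and a cap of 253; both exceed the 128 default, so the clamp
 * only bites for larger bases.
 */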
1363
1364 if (gs_type == PIPE_SHADER_GEOMETRY) {
1365 unsigned max_out_verts_per_gsprim =
1366 gs_sel->gs_max_out_vertices * gs_num_invocations;
1367
1368 if (max_out_verts_per_gsprim <= 256) {
1369 if (max_out_verts_per_gsprim) {
1370 max_gsprims_base = MIN2(max_gsprims_base,
1371 256 / max_out_verts_per_gsprim);
1372 }
1373 } else {
1374 /* Use special multi-cycling mode in which each GS
1375 * instance gets its own subgroup. Does not work with
1376 * tessellation. */
1377 max_vert_out_per_gs_instance = true;
1378 max_gsprims_base = 1;
1379 max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
1380 }
1381
1382 esvert_lds_size = es_sel->esgs_itemsize / 4;
1383 gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
1384 } else {
1385 /* VS and TES. */
1386 /* LDS size for passing data from ES to GS. */
1387 esvert_lds_size = ngg_nogs_vertex_size(shader);
1388
1389 /* LDS size for passing data from GS to ES.
1390 * GS stores Primitive IDs into LDS at the address corresponding
1391 * to the ES thread of the provoking vertex. All ES threads
1392 * load and export PrimitiveID for their thread.
1393 */
1394 if (gs_sel->type == PIPE_SHADER_VERTEX &&
1395 shader->key.mono.u.vs_export_prim_id)
1396 esvert_lds_size = MAX2(esvert_lds_size, 1);
1397 }
1398
1399 unsigned max_gsprims = max_gsprims_base;
1400 unsigned max_esverts = max_esverts_base;
1401
1402 if (esvert_lds_size)
1403 max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
1404 if (gsprim_lds_size)
1405 max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);
1406
1407 max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
1408 clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
1409 assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
1410
1411 if (esvert_lds_size || gsprim_lds_size) {
1412 /* Now that we have a rough proportionality between esverts
1413 * and gsprims based on the primitive type, scale both of them
1414 * down simultaneously based on required LDS space.
1415 *
1416 * We could be smarter about this if we knew how much vertex
1417 * reuse to expect.
1418 */
1419 unsigned lds_total = max_esverts * esvert_lds_size +
1420 max_gsprims * gsprim_lds_size;
1421 if (lds_total > target_lds_size) {
1422 max_esverts = max_esverts * target_lds_size / lds_total;
1423 max_gsprims = max_gsprims * target_lds_size / lds_total;
1424
1425 max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
1426 clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
1427 min_verts_per_prim, use_adjacency);
1428 assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
1429 }
1430 }
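
/* Scaling illustrated (made-up numbers): with esvert_lds_size = 16,
 * gsprim_lds_size = 68, max_esverts = 128 and max_gsprims = 126,
 * lds_total = 128 * 16 + 126 * 68 = 10616 > 7424, so both limits scale
 * by 7424 / 10616 (to 89 and 88) before being re-clamped.
 */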
1431
1432 /* Round up towards full wave sizes for better ALU utilization. */
1433 if (!max_vert_out_per_gs_instance) {
1434 const unsigned wavesize = gs_sel->screen->ge_wave_size;
1435 unsigned orig_max_esverts;
1436 unsigned orig_max_gsprims;
1437 do {
1438 orig_max_esverts = max_esverts;
1439 orig_max_gsprims = max_gsprims;
1440
1441 max_esverts = align(max_esverts, wavesize);
1442 max_esverts = MIN2(max_esverts, max_esverts_base);
1443 if (esvert_lds_size)
1444 max_esverts = MIN2(max_esverts,
1445 (max_lds_size - max_gsprims * gsprim_lds_size) /
1446 esvert_lds_size);
1447 max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
1448
1449 max_gsprims = align(max_gsprims, wavesize);
1450 max_gsprims = MIN2(max_gsprims, max_gsprims_base);
1451 if (gsprim_lds_size)
1452 max_gsprims = MIN2(max_gsprims,
1453 (max_lds_size - max_esverts * esvert_lds_size) /
1454 gsprim_lds_size);
1455 clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
1456 min_verts_per_prim, use_adjacency);
1457 assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
1458 } while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
1459 }
1460
1461 /* Hardware restriction: minimum value of max_esverts */
1462 max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);
1463
1464 unsigned max_out_vertices =
1465 max_vert_out_per_gs_instance ? gs_sel->gs_max_out_vertices :
1466 gs_type == PIPE_SHADER_GEOMETRY ?
1467 max_gsprims * gs_num_invocations * gs_sel->gs_max_out_vertices :
1468 max_esverts;
1469 assert(max_out_vertices <= 256);
1470
1471 unsigned prim_amp_factor = 1;
1472 if (gs_type == PIPE_SHADER_GEOMETRY) {
1473 /* Number of output primitives per GS input primitive after
1474 * GS instancing. */
1475 prim_amp_factor = gs_sel->gs_max_out_vertices;
1476 }
1477
1478 /* The GE only checks against the maximum number of ES verts after
1479 * allocating a full GS primitive. So we need to ensure that whenever
1480 * this check passes, there is enough space for a full primitive without
1481 * vertex reuse.
1482 */
1483 shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
1484 shader->ngg.max_gsprims = max_gsprims;
1485 shader->ngg.max_out_verts = max_out_vertices;
1486 shader->ngg.prim_amp_factor = prim_amp_factor;
1487 shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;
1488
1489 shader->gs_info.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
1490 shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;
1491
1492 assert(shader->ngg.hw_max_esverts >= 24); /* HW limitation */
1493 }