/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_shader_internal.h"

#include "sid.h"

#include "util/u_memory.h"
#include "util/u_prim.h"

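/* The merged_wave_info SGPR packs per-threadgroup wave information; per the
 * unpack offsets below, the wave id within the threadgroup lives in bits
 * [24:28) and the number of waves in bits [28:32).
 */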
static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
}

static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->merged_wave_info, 28, 4);
}

static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
			   LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
	return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}

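/* The gs_tg_info SGPR packs threadgroup-level counts; per the unpack offsets
 * below, the streamout ordered id lives in bits [0:12), the vertex count in
 * bits [12:21) and the primitive count in bits [22:31).
 */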
static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->gs_tg_info, 12, 9);
}

static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->gs_tg_info, 22, 9);
}

static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->gs_tg_info, 0, 12);
}

static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
{
	LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);

	return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
				     LLVMConstInt(ctx->i32, GFX10_GS_QUERY_BUF, false));
}

struct ngg_prim {
	unsigned num_vertices;
	LLVMValueRef isnull;
	LLVMValueRef index[3];
	LLVMValueRef edgeflag[3];
	LLVMValueRef passthrough;
};

static void build_export_prim(struct si_shader_context *ctx,
			      const struct ngg_prim *prim)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	struct ac_export_args args;
	LLVMValueRef tmp;

	if (prim->passthrough) {
		args.out[0] = prim->passthrough;
	} else {
		tmp = LLVMBuildZExt(builder, prim->isnull, ctx->ac.i32, "");
		args.out[0] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 31, false), "");

		for (unsigned i = 0; i < prim->num_vertices; ++i) {
			tmp = LLVMBuildShl(builder, prim->index[i],
					   LLVMConstInt(ctx->ac.i32, 10 * i, false), "");
			args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
			tmp = LLVMBuildZExt(builder, prim->edgeflag[i], ctx->ac.i32, "");
			tmp = LLVMBuildShl(builder, tmp,
					   LLVMConstInt(ctx->ac.i32, 10 * i + 9, false), "");
			args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
		}
	}

	args.out[0] = LLVMBuildBitCast(builder, args.out[0], ctx->ac.f32, "");
	args.out[1] = LLVMGetUndef(ctx->ac.f32);
	args.out[2] = LLVMGetUndef(ctx->ac.f32);
	args.out[3] = LLVMGetUndef(ctx->ac.f32);

	args.target = V_008DFC_SQ_EXP_PRIM;
	args.enabled_channels = 1;
	args.done = true;
	args.valid_mask = false;
	args.compr = false;

	ac_build_export(&ctx->ac, &args);
}

static void build_streamout_vertex(struct si_shader_context *ctx,
				   LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
				   unsigned stream, LLVMValueRef offset_vtx,
				   LLVMValueRef vertexptr)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef offset[4] = {};
	LLVMValueRef tmp;

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (!wg_offset_dw[buffer])
			continue;

		tmp = LLVMBuildMul(builder, offset_vtx,
				   LLVMConstInt(ctx->i32, so->stride[buffer], false), "");
		tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
		offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->i32, 2, false), "");
	}

	for (unsigned i = 0; i < so->num_outputs; ++i) {
		if (so->output[i].stream != stream)
			continue;

		unsigned reg = so->output[i].register_index;
		struct si_shader_output_values out;
		out.semantic_name = info->output_semantic_name[reg];
		out.semantic_index = info->output_semantic_index[reg];

		for (unsigned comp = 0; comp < 4; comp++) {
			tmp = ac_build_gep0(&ctx->ac, vertexptr,
					    LLVMConstInt(ctx->i32, 4 * reg + comp, false));
			out.values[comp] = LLVMBuildLoad(builder, tmp, "");
			out.vertex_stream[comp] =
				(info->output_streams[reg] >> (2 * comp)) & 3;
		}

		si_emit_streamout_output(ctx, so_buffer, offset, &so->output[i], &out);
	}
}

struct ngg_streamout {
	LLVMValueRef num_vertices;

	/* per-thread data */
	LLVMValueRef prim_enable[4]; /* i1 per stream */
	LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */

	/* Output */
	LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
};

/**
 * Build streamout logic.
 *
 * Implies a barrier.
 *
 * Writes number of emitted primitives to gs_ngg_scratch[4:8].
 *
 * Clobbers gs_ngg_scratch[8:].
 */
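/* A sketch of the gs_ngg_scratch dword layout as used below, derived from
 * the offsets in this function (GS case): [0:4) per-stream generated
 * primitive counts, [4:8) per-stream emit counts, [8:12) per-buffer
 * streamout offsets, and [12:44) workgroup-scan scratch at 12 + 8 * stream.
 * In the non-GS case the emit counts live in [0:4) and the offsets in [4:8).
 */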
static void build_streamout(struct si_shader_context *ctx,
			    struct ngg_streamout *nggso)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp, tmp2;
	LLVMValueRef i32_2 = LLVMConstInt(ctx->i32, 2, false);
	LLVMValueRef i32_4 = LLVMConstInt(ctx->i32, 4, false);
	LLVMValueRef i32_8 = LLVMConstInt(ctx->i32, 8, false);
	LLVMValueRef so_buffer[4] = {};
	unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
				    (nggso->vertices[2] ? 1 : 0);
	LLVMValueRef prim_stride_dw[4] = {};
	LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->i32);
	int stream_for_buffer[4] = { -1, -1, -1, -1 };
	unsigned bufmask_for_stream[4] = {};
	bool isgs = ctx->type == PIPE_SHADER_GEOMETRY;
	unsigned scratch_emit_base = isgs ? 4 : 0;
	LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->i32_0;
	unsigned scratch_offset_base = isgs ? 8 : 4;
	LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;

	ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);

	/* Determine the mapping of streamout buffers to vertex streams. */
	for (unsigned i = 0; i < so->num_outputs; ++i) {
		unsigned buf = so->output[i].output_buffer;
		unsigned stream = so->output[i].stream;
		assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
		stream_for_buffer[buf] = stream;
		bufmask_for_stream[stream] |= 1 << buf;
	}

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (stream_for_buffer[buffer] == -1)
			continue;

		assert(so->stride[buffer]);

		tmp = LLVMConstInt(ctx->i32, so->stride[buffer], false);
		prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
		prim_stride_dw_vgpr = ac_build_writelane(
			&ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
			LLVMConstInt(ctx->i32, buffer, false));

		so_buffer[buffer] = ac_build_load_to_sgpr(
			&ctx->ac, buf_ptr,
			LLVMConstInt(ctx->i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
	}

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5200);
	{
		LLVMTypeRef gdsptr = LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GDS);
		LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->i32_0, gdsptr, "");

		/* Advance the streamout offsets in GDS. */
		LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
		LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");

		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5210);
		{
			if (isgs) {
				tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
				tmp = LLVMBuildLoad(builder, tmp, "");
			} else {
				tmp = ac_build_writelane(&ctx->ac, ctx->i32_0,
						ngg_get_prim_cnt(ctx), ctx->i32_0);
			}
			LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);

			unsigned swizzle[4];
			int unused_stream = -1;
			for (unsigned stream = 0; stream < 4; ++stream) {
				if (!info->num_stream_output_components[stream]) {
					unused_stream = stream;
					break;
				}
			}
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] >= 0) {
					swizzle[buffer] = stream_for_buffer[buffer];
				} else {
					assert(unused_stream >= 0);
					swizzle[buffer] = unused_stream;
				}
			}

			tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
				swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
			tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");

			LLVMValueRef args[] = {
				LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
				tmp,
				ctx->i32_0, // ordering
				ctx->i32_0, // scope
				ctx->ac.i1false, // isVolatile
				LLVMConstInt(ctx->i32, 4 << 24, false), // OA index
				ctx->ac.i1true, // wave release
				ctx->ac.i1true, // wave done
			};
			tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
						 ctx->i32, args, ARRAY_SIZE(args), 0);

			/* Keep offsets in a VGPR for quick retrieval via readlane by
			 * the first wave for bounds checking, and also store in LDS
			 * for retrieval by all waves later. */
			LLVMBuildStore(builder, tmp, offsets_vgpr);

			tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					    scratch_offset_basev, "");
			tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
			LLVMBuildStore(builder, tmp, tmp2);
		}
		ac_build_endif(&ctx->ac, 5210);

		/* Determine the max emit per buffer. This is done via the SALU, in part
		 * because LLVM can't generate divide-by-multiply if we try to do this
		 * via VALU with one lane per buffer.
		 */
		LLVMValueRef max_emit[4] = {};
		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] == -1)
				continue;

			LLVMValueRef bufsize_dw =
				LLVMBuildLShr(builder,
					LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
					i32_2, "");

			tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
			LLVMValueRef offset_dw =
				ac_build_readlane(&ctx->ac, tmp,
						LLVMConstInt(ctx->i32, buffer, false));

			tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
			tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");

			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
			max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->i32_0, tmp, "");
		}

		/* Determine the number of emitted primitives per stream and fixup the
		 * GDS counter if necessary.
		 *
		 * This is complicated by the fact that a single stream can emit to
		 * multiple buffers (but luckily not vice versa).
		 */
		LLVMValueRef emit_vgpr = ctx->i32_0;

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
			LLVMValueRef generated =
				ac_build_readlane(&ctx->ac, tmp,
						  LLVMConstInt(ctx->i32, stream, false));

			LLVMValueRef emit = generated;
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] == stream)
					emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
			}

			emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
						       LLVMConstInt(ctx->i32, stream, false));

			/* Fixup the offset using a plain GDS atomic if we overflowed. */
			tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
			ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
			tmp = LLVMBuildLShr(builder,
					    LLVMConstInt(ctx->i32, bufmask_for_stream[stream], false),
					    ac_get_thread_id(&ctx->ac), "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			ac_build_ifcc(&ctx->ac, tmp, 5222);
			{
				tmp = LLVMBuildSub(builder, generated, emit, "");
				tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
				tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
				LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
						   LLVMAtomicOrderingMonotonic, false);
			}
			ac_build_endif(&ctx->ac, 5222);
			ac_build_endif(&ctx->ac, 5221);
		}

		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5225);
		{
			tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					   scratch_emit_basev, "");
			tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
			LLVMBuildStore(builder, emit_vgpr, tmp);
		}
		ac_build_endif(&ctx->ac, 5225);
	}
	ac_build_endif(&ctx->ac, 5200);

	/* Determine the workgroup-relative per-thread / primitive offset into
	 * the streamout buffers */
	struct ac_wg_scan primemit_scan[4] = {};

	if (isgs) {
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			primemit_scan[stream].enable_exclusive = true;
			primemit_scan[stream].op = nir_op_iadd;
			primemit_scan[stream].src = nggso->prim_enable[stream];
			primemit_scan[stream].scratch =
				ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
					LLVMConstInt(ctx->i32, 12 + 8 * stream, false));
			primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
			primemit_scan[stream].numwaves = get_tgsize(ctx);
			primemit_scan[stream].maxwaves = 8;
			ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
		}
	}

	ac_build_s_barrier(&ctx->ac);

	/* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
	LLVMValueRef wgoffset_dw[4] = {};

	{
		LLVMValueRef scratch_vgpr;

		tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
		scratch_vgpr = LLVMBuildLoad(builder, tmp, "");

		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] >= 0) {
				wgoffset_dw[buffer] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_offset_base + buffer, false));
			}
		}

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (info->num_stream_output_components[stream]) {
				nggso->emit[stream] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_emit_base + stream, false));
			}
		}
	}

	/* Write out primitive data */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		if (isgs) {
			ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
		} else {
			primemit_scan[stream].result_exclusive = tid;
		}

		tmp = LLVMBuildICmp(builder, LLVMIntULT,
				    primemit_scan[stream].result_exclusive,
				    nggso->emit[stream], "");
		tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
		ac_build_ifcc(&ctx->ac, tmp, 5240);
		{
			LLVMValueRef offset_vtx =
				LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
					     nggso->num_vertices, "");

			for (unsigned i = 0; i < max_num_vertices; ++i) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT,
						    LLVMConstInt(ctx->i32, i, false),
						    nggso->num_vertices, "");
				ac_build_ifcc(&ctx->ac, tmp, 5241);
				build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
						       stream, offset_vtx, nggso->vertices[i]);
				ac_build_endif(&ctx->ac, 5241);
				offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->i32_1, "");
			}
		}
		ac_build_endif(&ctx->ac, 5240);
	}
}

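/**
 * Per-vertex LDS footprint in dwords for the non-GS NGG path. With streamout
 * this is 4 dwords per output plus one trailing dword (shared by padding and
 * the edgeflag); e.g. 8 outputs take 4 * 8 + 1 = 33 dwords per vertex. With
 * only an edgeflag and no streamout, a single dword suffices.
 */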
static unsigned ngg_nogs_vertex_size(struct si_shader *shader)
{
	unsigned lds_vertex_size = 0;

	/* The edgeflag is always stored in the last element that's also
	 * used for padding to reduce LDS bank conflicts. */
	if (shader->selector->so.num_outputs)
		lds_vertex_size = 4 * shader->selector->info.num_outputs + 1;
	if (shader->selector->info.writes_edgeflag)
		lds_vertex_size = MAX2(lds_vertex_size, 1);

	return lds_vertex_size;
}

/**
 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
 * for the vertex outputs.
 */
static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx,
					LLVMValueRef vtxid)
{
	/* The extra dword is used to avoid LDS bank conflicts. */
	unsigned vertex_size = ngg_nogs_vertex_size(ctx->shader);
	LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, vertex_size);
	LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
	LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
	return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
}

/**
 * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
 */
void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
			     unsigned max_outputs,
			     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_selector *sel = ctx->shader->selector;
	struct tgsi_shader_info *info = &sel->info;
	struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp, tmp2;

	assert(!ctx->shader->is_gs_copy_shader);
	assert(info->num_outputs <= max_outputs);

	LLVMValueRef vertex_ptr = NULL;

	if (sel->so.num_outputs || sel->info.writes_edgeflag)
		vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));

	for (unsigned i = 0; i < info->num_outputs; i++) {
		outputs[i].semantic_name = info->output_semantic_name[i];
		outputs[i].semantic_index = info->output_semantic_index[i];

		for (unsigned j = 0; j < 4; j++) {
			outputs[i].vertex_stream[j] =
				(info->output_streams[i] >> (2 * j)) & 3;

			/* TODO: we may store more outputs than streamout needs,
			 * but streamout performance isn't that important.
			 */
			if (sel->so.num_outputs) {
				tmp = ac_build_gep0(&ctx->ac, vertex_ptr,
						    LLVMConstInt(ctx->i32, 4 * i + j, false));
				tmp2 = LLVMBuildLoad(builder, addrs[4 * i + j], "");
				tmp2 = ac_to_integer(&ctx->ac, tmp2);
				LLVMBuildStore(builder, tmp2, tmp);
			}
		}

		/* Store the edgeflag at the end (if streamout is enabled) */
		if (info->output_semantic_name[i] == TGSI_SEMANTIC_EDGEFLAG &&
		    sel->info.writes_edgeflag) {
			LLVMValueRef edgeflag = LLVMBuildLoad(builder, addrs[4 * i], "");
			/* The output is a float, but the hw expects a 1-bit integer. */
			edgeflag = LLVMBuildFPToUI(ctx->ac.builder, edgeflag, ctx->i32, "");
			edgeflag = ac_build_umin(&ctx->ac, edgeflag, ctx->i32_1);

			tmp = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
			tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
			LLVMBuildStore(builder, edgeflag, tmp);
		}
	}

	ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

	LLVMValueRef is_gs_thread = si_is_gs_thread(ctx);
	LLVMValueRef is_es_thread = si_is_es_thread(ctx);
	LLVMValueRef vtxindex[] = {
		si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16),
		si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16),
		si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16),
	};

	/* Determine the number of vertices per primitive. */
	unsigned num_vertices;
	LLVMValueRef num_vertices_val;

	if (ctx->type == PIPE_SHADER_VERTEX) {
		if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
			/* Blits always use axis-aligned rectangles with 3 vertices. */
			num_vertices = 3;
			num_vertices_val = LLVMConstInt(ctx->i32, 3, 0);
		} else {
			/* Extract OUTPRIM field. */
			tmp = si_unpack_param(ctx, ctx->vs_state_bits, 2, 2);
			num_vertices_val = LLVMBuildAdd(builder, tmp, ctx->i32_1, "");
			num_vertices = 3; /* TODO: optimize for points & lines */
		}
	} else {
		assert(ctx->type == PIPE_SHADER_TESS_EVAL);

		if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
			num_vertices = 1;
		else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
			num_vertices = 2;
		else
			num_vertices = 3;

		num_vertices_val = LLVMConstInt(ctx->i32, num_vertices, false);
	}

	/* Streamout */
	LLVMValueRef emitted_prims = NULL;

	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = num_vertices_val;
		nggso.prim_enable[0] = is_gs_thread;

		for (unsigned i = 0; i < num_vertices; ++i)
			nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);

		build_streamout(ctx, &nggso);
		emitted_prims = nggso.emit[0];
	}

	LLVMValueRef user_edgeflags[3] = {};

	if (sel->info.writes_edgeflag) {
		/* Streamout already inserted the barrier, so don't insert it again. */
		if (!sel->so.num_outputs)
			ac_build_s_barrier(&ctx->ac);

		ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
		/* Load edge flags from ES threads and store them into VGPRs in GS threads. */
		for (unsigned i = 0; i < num_vertices; i++) {
			tmp = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
			tmp2 = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
			tmp = ac_build_gep0(&ctx->ac, tmp, tmp2);
			tmp = LLVMBuildLoad(builder, tmp, "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");

			user_edgeflags[i] = ac_build_alloca_undef(&ctx->ac, ctx->i1, "");
			LLVMBuildStore(builder, tmp, user_edgeflags[i]);
		}
		ac_build_endif(&ctx->ac, 5400);
	}

	/* Copy Primitive IDs from GS threads to the LDS address corresponding
	 * to the ES thread of the provoking vertex.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX &&
	    ctx->shader->key.mono.u.vs_export_prim_id) {
		/* Streamout and edge flags use LDS. Make it idle, so that we can reuse it. */
		if (sel->so.num_outputs || sel->info.writes_edgeflag)
			ac_build_s_barrier(&ctx->ac);

		ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
		/* Extract the PROVOKING_VTX_INDEX field. */
		LLVMValueRef provoking_vtx_in_prim =
			si_unpack_param(ctx, ctx->vs_state_bits, 4, 2);

		/* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
		LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
		LLVMValueRef provoking_vtx_index =
			LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");

		LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.gs_prim_id),
			       ac_build_gep0(&ctx->ac, ctx->esgs_ring, provoking_vtx_index));
		ac_build_endif(&ctx->ac, 5400);
	}

	ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
				      ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));

	/* Update query buffer */
	/* TODO: this won't catch 96-bit clear_buffer via transform feedback. */
	if (ctx->screen->use_ngg_streamout &&
	    !info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
		tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
		tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
		ac_build_ifcc(&ctx->ac, tmp, 5029); /* if (STREAMOUT_QUERY_ENABLED) */
		tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5030);
		tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
				    sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5031);
		{
			LLVMValueRef args[] = {
				ngg_get_prim_cnt(ctx),
				ngg_get_query_buf(ctx),
				LLVMConstInt(ctx->i32, 16, false), /* offset of stream[0].generated_primitives */
				ctx->i32_0, /* soffset */
				ctx->i32_0, /* cachepolicy */
			};

			if (sel->so.num_outputs) {
				args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->i32_1);
				args[2] = ac_build_writelane(&ctx->ac, args[2],
						LLVMConstInt(ctx->i32, 24, false), ctx->i32_1);
			}

			/* TODO: should this be 64-bit atomics? */
			ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
					   ctx->i32, args, 5, 0);
		}
		ac_build_endif(&ctx->ac, 5031);
		ac_build_endif(&ctx->ac, 5030);
		ac_build_endif(&ctx->ac, 5029);
	}

	/* Export primitive data to the index buffer. Format is:
	 * - bits 0..8: index 0
	 * - bit 9: edge flag 0
	 * - bits 10..18: index 1
	 * - bit 19: edge flag 1
	 * - bits 20..28: index 2
	 * - bit 29: edge flag 2
	 * - bit 31: null primitive (skip)
	 *
	 * For the first version, we will always build up all three indices
	 * independent of the primitive type. The additional garbage data
	 * shouldn't hurt.
	 *
	 * TODO: culling depends on the primitive type, so can have some
	 * interaction here.
	 */
	ac_build_ifcc(&ctx->ac, is_gs_thread, 6001);
	{
		struct ngg_prim prim = {};

		if (gfx10_is_ngg_passthrough(ctx->shader)) {
			prim.passthrough = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset);
		} else {
			prim.num_vertices = num_vertices;
			prim.isnull = ctx->ac.i1false;
			memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

			for (unsigned i = 0; i < num_vertices; ++i) {
				if (ctx->type != PIPE_SHADER_VERTEX) {
					prim.edgeflag[i] = ctx->i1false;
					continue;
				}

				tmp = LLVMBuildLShr(builder,
						    ac_get_arg(&ctx->ac, ctx->args.gs_invocation_id),
						    LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
				prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

				if (sel->info.writes_edgeflag) {
					tmp2 = LLVMBuildLoad(builder, user_edgeflags[i], "");
					prim.edgeflag[i] = LLVMBuildAnd(builder, prim.edgeflag[i],
									tmp2, "");
				}
			}
		}

		build_export_prim(ctx, &prim);
	}
	ac_build_endif(&ctx->ac, 6001);

	/* Export per-vertex data (positions and parameters). */
	ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
	{
		unsigned i;

		/* Unconditionally (re-)load the values for proper SSA form. */
		for (i = 0; i < info->num_outputs; i++) {
			for (unsigned j = 0; j < 4; j++) {
				outputs[i].values[j] =
					LLVMBuildLoad(builder,
						      addrs[4 * i + j],
						      "");
			}
		}

		if (ctx->shader->key.mono.u.vs_export_prim_id) {
			outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
			outputs[i].semantic_index = 0;

			if (ctx->type == PIPE_SHADER_VERTEX) {
				/* Wait for GS stores to finish. */
				ac_build_s_barrier(&ctx->ac);

				tmp = ac_build_gep0(&ctx->ac, ctx->esgs_ring,
						    get_thread_id_in_tg(ctx));
				outputs[i].values[0] = LLVMBuildLoad(builder, tmp, "");
			} else {
				assert(ctx->type == PIPE_SHADER_TESS_EVAL);
				outputs[i].values[0] = si_get_primitive_id(ctx, 0);
			}

			outputs[i].values[0] = ac_to_float(&ctx->ac, outputs[i].values[0]);
			for (unsigned j = 1; j < 4; j++)
				outputs[i].values[j] = LLVMGetUndef(ctx->f32);

			memset(outputs[i].vertex_stream, 0,
			       sizeof(outputs[i].vertex_stream));
			i++;
		}

		si_llvm_export_vs(ctx, outputs, i);
	}
	ac_build_endif(&ctx->ac, 6002);
}

static LLVMValueRef
ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;

	LLVMTypeRef elements[2] = {
		LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
		LLVMArrayType(ctx->ac.i8, 4),
	};
	LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
	type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
	return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}

/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive
 * for rasterization.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
static LLVMValueRef
ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

	/* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
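	/* Worked example (values assumed for illustration): gs_max_out_vertices = 6
	 * gives write_stride_2exp = 1, so every other group of 32 vertex indices
	 * has its low index bit flipped, which staggers writes across LDS banks. */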
	unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
	if (write_stride_2exp) {
		LLVMValueRef row =
			LLVMBuildLShr(builder, vertexidx,
				      LLVMConstInt(ctx->ac.i32, 5, false), "");
		LLVMValueRef swizzle =
			LLVMBuildAnd(builder, row,
				     LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
						  false), "");
		vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
	}

	return ac_build_gep0(&ctx->ac, storage, vertexidx);
}

static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
		       LLVMValueRef emitidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
	tmp = LLVMBuildMul(builder, tmp, gsthread, "");
	const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
	return ngg_gs_vertex_ptr(ctx, vertexidx);
}

static LLVMValueRef
ngg_gs_get_emit_output_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
			   unsigned out_idx)
{
	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_0, /* first struct entry */
		LLVMConstInt(ctx->ac.i32, out_idx, false),
	};
	return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}

static LLVMValueRef
ngg_gs_get_emit_primflag_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
			     unsigned stream)
{
	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_1, /* second struct entry */
		LLVMConstInt(ctx->ac.i32, stream, false),
	};
	return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}

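/**
 * Store one emitted GS vertex: skip the write once the declared maximum
 * vertex count is reached, copy the enabled output components for this
 * stream into the vertex's LDS slot, and update the primitive flags and
 * the per-stream generated-primitive counter.
 */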
void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
			      unsigned stream,
			      LLVMValueRef *addrs)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	const LLVMValueRef vertexidx =
		LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 */
	const LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
			      LLVMConstInt(ctx->i32, sel->gs_max_out_vertices, false), "");

	tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
	tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
	LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

	ac_build_ifcc(&ctx->ac, can_emit, 9001);

	const LLVMValueRef vertexptr =
		ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
	unsigned out_idx = 0;
	for (unsigned i = 0; i < info->num_outputs; i++) {
		for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
			out_val = ac_to_integer(&ctx->ac, out_val);
			LLVMBuildStore(builder, out_val,
				       ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx));
		}
	}
	assert(out_idx * 4 == sel->gsvs_vertex_size);

	/* Determine and store whether this vertex completed a primitive. */
	const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

	tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
	const LLVMValueRef iscompleteprim =
		LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

	/* Since the geometry shader emits triangle strips, we need to
	 * track which primitive is odd and swap vertex indices to get
	 * the correct vertex order.
	 */
	LLVMValueRef is_odd = ctx->i1false;
	if (stream == 0 && u_vertices_per_prim(sel->gs_output_prim) == 3) {
		tmp = LLVMBuildAnd(builder, curverts, ctx->i32_1, "");
		is_odd = LLVMBuildICmp(builder, LLVMIntEQ, tmp, ctx->i32_1, "");
	}

	tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
	LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

	/* The per-vertex primitive flag encoding:
	 * bit 0: whether this vertex finishes a primitive
	 * bit 1: whether the primitive is odd (if we are emitting triangle strips)
	 */
	tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
	tmp = LLVMBuildOr(builder, tmp,
			  LLVMBuildShl(builder,
				       LLVMBuildZExt(builder, is_odd, ctx->ac.i8, ""),
				       ctx->ac.i8_1, ""), "");
	LLVMBuildStore(builder, tmp, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream));

	tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
	tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
	LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);

	ac_build_endif(&ctx->ac, 9001);
}

void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
{
	/* Zero out the part of LDS scratch that is used to accumulate the
	 * per-stream generated primitive count.
	 */
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->i32, 4, false), "");
	ac_build_ifcc(&ctx->ac, tmp, 5090);
	{
		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
		LLVMBuildStore(builder, ctx->i32_0, ptr);
	}
	ac_build_endif(&ctx->ac, 5090);

	ac_build_s_barrier(&ctx->ac);
}

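/**
 * GS threadgroup epilogue: zero out the primitive flags of non-emitted
 * vertices, accumulate the per-stream generated-primitive counts, run
 * streamout and shader queries where enabled, compact the live vertices,
 * and finally export primitive data followed by position/parameter data.
 */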
void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;
	const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
	LLVMValueRef tmp, tmp2;

	/* Zero out remaining (non-emitted) primitive flags.
	 *
	 * Note: Alternatively, we could pass the relevant gs_next_vertex to
	 *       the emit threads via LDS. This is likely worse in the expected
	 *       typical case where each GS thread emits the full set of
	 *       vertices.
	 */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

		ac_build_bgnloop(&ctx->ac, 5100);

		const LLVMValueRef vertexidx =
			LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
		tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
			LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5101);
		ac_build_break(&ctx->ac);
		ac_build_endif(&ctx->ac, 5101);

		tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
		LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

		tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
		LLVMBuildStore(builder, i8_0, ngg_gs_get_emit_primflag_ptr(ctx, tmp, stream));

		ac_build_endloop(&ctx->ac, 5100);
	}

	/* Accumulate generated primitives counts across the entire threadgroup. */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		LLVMValueRef numprims =
			LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
		numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);

		tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5105);
		{
			LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
					   ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
							 LLVMConstInt(ctx->i32, stream, false)),
					   numprims, LLVMAtomicOrderingMonotonic, false);
		}
		ac_build_endif(&ctx->ac, 5105);
	}

	ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

	ac_build_s_barrier(&ctx->ac);

	const LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);

	/* Streamout */
	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = LLVMConstInt(ctx->i32, verts_per_prim, false);

		LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream), "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
			nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
		}

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			tmp = LLVMBuildSub(builder, tid,
					   LLVMConstInt(ctx->i32, verts_per_prim - i - 1, false), "");
			tmp = ngg_gs_vertex_ptr(ctx, tmp);
			nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
		}

		build_streamout(ctx, &nggso);
	}

	/* Write shader query data. */
	if (ctx->screen->use_ngg_streamout) {
		tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
		tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
		ac_build_ifcc(&ctx->ac, tmp, 5109); /* if (STREAMOUT_QUERY_ENABLED) */
		unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
		tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
				    LLVMConstInt(ctx->i32, num_query_comps, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5110);
		{
			LLVMValueRef offset;
			tmp = tid;
			if (sel->so.num_outputs)
				tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->i32, 3, false), "");
			offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 32, false), "");
			if (sel->so.num_outputs) {
				tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->i32, 2, false), "");
				tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 8, false), "");
				offset = LLVMBuildAdd(builder, offset, tmp, "");
			}

			tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
			LLVMValueRef args[] = {
				tmp,
				ngg_get_query_buf(ctx),
				offset,
				LLVMConstInt(ctx->i32, 16, false), /* soffset */
				ctx->i32_0, /* cachepolicy */
			};
			ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
					   ctx->i32, args, 5, 0);
		}
		ac_build_endif(&ctx->ac, 5110);
		ac_build_endif(&ctx->ac, 5109);
	}

	/* TODO: culling */

	/* Determine vertex liveness. */
	LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5120);
	{
		for (unsigned i = 0; i < verts_per_prim; ++i) {
			const LLVMValueRef primidx =
				LLVMBuildAdd(builder, tid,
					     LLVMConstInt(ctx->ac.i32, i, false), "");

			if (i > 0) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
				ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
			}

			/* Load primitive liveness */
			tmp = ngg_gs_vertex_ptr(ctx, primidx);
			tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
			const LLVMValueRef primlive =
				LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

			tmp = LLVMBuildLoad(builder, vertliveptr, "");
			tmp = LLVMBuildOr(builder, tmp, primlive, "");
			LLVMBuildStore(builder, tmp, vertliveptr);

			if (i > 0)
				ac_build_endif(&ctx->ac, 5121 + i);
		}
	}
	ac_build_endif(&ctx->ac, 5120);

	/* Inclusive scan addition across the current wave. */
	LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
	struct ac_wg_scan vertlive_scan = {};
	vertlive_scan.op = nir_op_iadd;
	vertlive_scan.enable_reduce = true;
	vertlive_scan.enable_exclusive = true;
	vertlive_scan.src = vertlive;
	vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->i32_0);
	vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
	vertlive_scan.numwaves = get_tgsize(ctx);
	vertlive_scan.maxwaves = 8;

	ac_build_wg_scan(&ctx->ac, &vertlive_scan);

	/* Skip all exports (including index exports) when possible. At least on
	 * early gfx10 revisions this is also to avoid hangs.
	 */
	LLVMValueRef have_exports =
		LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
	num_emit_threads =
		LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

	/* Allocate export space. Send this message as early as possible, to
	 * hide the latency of the SQ <-> SPI roundtrip.
	 *
	 * Note: We could consider compacting primitives for export as well.
	 *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
	 *       prim data per clock and skips null primitives at no additional
	 *       cost. So compacting primitives can only be beneficial when
	 *       there are 4 or more contiguous null primitives in the export
	 *       (in the common case of single-dword prim exports).
	 */
	ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
				      vertlive_scan.result_reduce, num_emit_threads);

	/* Setup the reverse vertex compaction permutation. We re-use stream 1
	 * of the primitive liveness flags, relying on the fact that each
	 * threadgroup can have at most 256 threads. */
	ac_build_ifcc(&ctx->ac, vertlive, 5130);
	{
		tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
		tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
		LLVMBuildStore(builder, tmp2, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1));
	}
	ac_build_endif(&ctx->ac, 5130);

	ac_build_s_barrier(&ctx->ac);

	/* Export primitive data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5140);
	{
		LLVMValueRef flags;
		struct ngg_prim prim = {};
		prim.num_vertices = verts_per_prim;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		flags = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
		prim.isnull = LLVMBuildNot(builder, LLVMBuildTrunc(builder, flags, ctx->i1, ""), "");

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
				LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
			prim.edgeflag[i] = ctx->ac.i1false;
		}

		/* Geometry shaders output triangle strips, but NGG expects triangles.
		 * We need to change the vertex order for odd triangles to get correct
		 * front/back facing by swapping 2 vertex indices, but we also have to
		 * keep the provoking vertex in the same place.
		 *
		 * If the first vertex is provoking, swap index 1 and 2.
		 * If the last vertex is provoking, swap index 0 and 1.
		 */
		if (verts_per_prim == 3) {
			LLVMValueRef is_odd = LLVMBuildLShr(builder, flags, ctx->ac.i8_1, "");
			is_odd = LLVMBuildTrunc(builder, is_odd, ctx->i1, "");
			LLVMValueRef flatshade_first =
				LLVMBuildICmp(builder, LLVMIntEQ,
					      si_unpack_param(ctx, ctx->vs_state_bits, 4, 2),
					      ctx->i32_0, "");

			struct ngg_prim in = prim;
			prim.index[0] = LLVMBuildSelect(builder, flatshade_first,
							in.index[0],
							LLVMBuildSelect(builder, is_odd,
									in.index[1], in.index[0], ""), "");
			prim.index[1] = LLVMBuildSelect(builder, flatshade_first,
							LLVMBuildSelect(builder, is_odd,
									in.index[2], in.index[1], ""),
							LLVMBuildSelect(builder, is_odd,
									in.index[0], in.index[1], ""), "");
			prim.index[2] = LLVMBuildSelect(builder, flatshade_first,
							LLVMBuildSelect(builder, is_odd,
									in.index[1], in.index[2], ""),
							in.index[2], "");
		}

		build_export_prim(ctx, &prim);
	}
	ac_build_endif(&ctx->ac, 5140);

	/* Export position and parameter data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
	ac_build_ifcc(&ctx->ac, tmp, 5145);
	{
		struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1), "");
		tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
		const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

		unsigned out_idx = 0;
		for (unsigned i = 0; i < info->num_outputs; i++) {
			outputs[i].semantic_name = info->output_semantic_name[i];
			outputs[i].semantic_index = info->output_semantic_index[i];

			for (unsigned j = 0; j < 4; j++, out_idx++) {
				tmp = ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx);
				tmp = LLVMBuildLoad(builder, tmp, "");
				outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
				outputs[i].vertex_stream[j] =
					(info->output_streams[i] >> (2 * j)) & 3;
			}
		}

		si_llvm_export_vs(ctx, outputs, info->num_outputs);
	}
	ac_build_endif(&ctx->ac, 5145);
}

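/*
 * With maximal vertex reuse, the first primitive consumes min_verts_per_prim
 * vertices and every further primitive needs at least one new vertex (two
 * with adjacency, where only every other vertex is a real one). Worked
 * example: max_esverts = 128 with triangle lists (min_verts_per_prim = 3,
 * no adjacency) gives max_reuse = 125, so at most 1 + 125 = 126 primitives.
 */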
static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
				     unsigned min_verts_per_prim, bool use_adjacency)
{
	unsigned max_reuse = max_esverts - min_verts_per_prim;
	if (use_adjacency)
		max_reuse /= 2;
	*max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}

/**
 * Determine subgroup information like maximum number of vertices and prims.
 *
 * This happens before the shader is uploaded, since LDS relocations during
 * upload depend on the subgroup size.
 */
void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
{
	const struct si_shader_selector *gs_sel = shader->selector;
	const struct si_shader_selector *es_sel =
		shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
	const enum pipe_shader_type gs_type = gs_sel->type;
	const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
	const unsigned input_prim = si_get_input_prim(gs_sel);
	const bool use_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
				   input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
	const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
	const unsigned min_verts_per_prim =
		gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space.
	 *
	 * TODO: We should really take the shader's internal LDS use into
	 *       account. The linker will fail if the size is greater than
	 *       8K dwords.
	 */
	const unsigned max_lds_size = 8 * 1024 - 768;
	const unsigned target_lds_size = max_lds_size;
	unsigned esvert_lds_size = 0;
	unsigned gsprim_lds_size = 0;

	/* All these are per subgroup: */
	bool max_vert_out_per_gs_instance = false;
	unsigned max_esverts_base = 128;
	unsigned max_gsprims_base = 128; /* default prim group size clamp */

	/* Hardware has the following non-natural restrictions on the value
	 * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of the draw:
	 * - at most 252 for any line input primitive type
	 * - at most 251 for any quad input primitive type
	 * - at most 251 for triangle strips with adjacency (this happens to
	 *   be the natural limit for triangle *lists* with adjacency)
	 */
	max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

	if (gs_type == PIPE_SHADER_GEOMETRY) {
		unsigned max_out_verts_per_gsprim =
			gs_sel->gs_max_out_vertices * gs_num_invocations;

		if (max_out_verts_per_gsprim <= 256) {
			if (max_out_verts_per_gsprim) {
				max_gsprims_base = MIN2(max_gsprims_base,
							256 / max_out_verts_per_gsprim);
			}
		} else {
			/* Use special multi-cycling mode in which each GS
			 * instance gets its own subgroup. Does not work with
			 * tessellation. */
			max_vert_out_per_gs_instance = true;
			max_gsprims_base = 1;
			max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
		}

		esvert_lds_size = es_sel->esgs_itemsize / 4;
		gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
	} else {
		/* VS and TES. */
		/* LDS size for passing data from ES to GS. */
		esvert_lds_size = ngg_nogs_vertex_size(shader);

		/* LDS size for passing data from GS to ES.
		 * GS stores Primitive IDs into LDS at the address corresponding
		 * to the ES thread of the provoking vertex. All ES threads
		 * load and export PrimitiveID for their thread.
		 */
		if (gs_sel->type == PIPE_SHADER_VERTEX &&
		    shader->key.mono.u.vs_export_prim_id)
			esvert_lds_size = MAX2(esvert_lds_size, 1);
	}

	unsigned max_gsprims = max_gsprims_base;
	unsigned max_esverts = max_esverts_base;

	if (esvert_lds_size)
		max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
	if (gsprim_lds_size)
		max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

	max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
	clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
	assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

	if (esvert_lds_size || gsprim_lds_size) {
		/* Now that we have a rough proportionality between esverts
		 * and gsprims based on the primitive type, scale both of them
		 * down simultaneously based on required LDS space.
		 *
		 * We could be smarter about this if we knew how much vertex
		 * reuse to expect.
		 */
		unsigned lds_total = max_esverts * esvert_lds_size +
				     max_gsprims * gsprim_lds_size;
		if (lds_total > target_lds_size) {
			max_esverts = max_esverts * target_lds_size / lds_total;
			max_gsprims = max_gsprims * target_lds_size / lds_total;

			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		}
	}

	/* Round up towards full wave sizes for better ALU utilization. */
	if (!max_vert_out_per_gs_instance) {
		const unsigned wavesize = gs_sel->screen->ge_wave_size;
		unsigned orig_max_esverts;
		unsigned orig_max_gsprims;
		do {
			orig_max_esverts = max_esverts;
			orig_max_gsprims = max_gsprims;

			max_esverts = align(max_esverts, wavesize);
			max_esverts = MIN2(max_esverts, max_esverts_base);
			if (esvert_lds_size)
				max_esverts = MIN2(max_esverts,
						   (max_lds_size - max_gsprims * gsprim_lds_size) /
						   esvert_lds_size);
			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

			max_gsprims = align(max_gsprims, wavesize);
			max_gsprims = MIN2(max_gsprims, max_gsprims_base);
			if (gsprim_lds_size)
				max_gsprims = MIN2(max_gsprims,
						   (max_lds_size - max_esverts * esvert_lds_size) /
						   gsprim_lds_size);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		} while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
	}

	/* Hardware restriction: minimum value of max_esverts */
	max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);

	unsigned max_out_vertices =
		max_vert_out_per_gs_instance ? gs_sel->gs_max_out_vertices :
		gs_type == PIPE_SHADER_GEOMETRY ?
		max_gsprims * gs_num_invocations * gs_sel->gs_max_out_vertices :
		max_esverts;
	assert(max_out_vertices <= 256);

	unsigned prim_amp_factor = 1;
	if (gs_type == PIPE_SHADER_GEOMETRY) {
		/* Number of output primitives per GS input primitive after
		 * GS instancing. */
		prim_amp_factor = gs_sel->gs_max_out_vertices;
	}

	/* The GE only checks against the maximum number of ES verts after
	 * allocating a full GS primitive. So we need to ensure that whenever
	 * this check passes, there is enough space for a full primitive without
	 * vertex reuse.
	 */
	shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
	shader->ngg.max_gsprims = max_gsprims;
	shader->ngg.max_out_verts = max_out_vertices;
	shader->ngg.prim_amp_factor = prim_amp_factor;
	shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;

	shader->gs_info.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
	shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;

	assert(shader->ngg.hw_max_esverts >= 24); /* HW limitation */
}