radeonsi/gfx10: implement streamout
[mesa.git] / src/gallium/drivers/radeonsi/gfx10_shader_ngg.c
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_shader_internal.h"

#include "sid.h"

#include "util/u_memory.h"
#include "util/u_prim.h"

static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->param_merged_wave_info, 24, 4);
}

static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->param_merged_wave_info, 28, 4);
}

static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
			   LLVMConstInt(ctx->ac.i32, 64, false), "");
	return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}

static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 12, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}

static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 22, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}

static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    ctx->i32_0,
			    LLVMConstInt(ctx->ac.i32, 11, false),
			    false);
}

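/* Sketch of the gs_tg_info layout implied by the extractions above (an
 * assumption inferred from the bfe offsets/widths, not a spec quote): the
 * ordered workgroup ID occupies the low bits, the threadgroup vertex count
 * starts at bit 12 and the threadgroup primitive count at bit 22.
 */
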
static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
{
	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
					    ctx->param_rw_buffers);

	return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
				     LLVMConstInt(ctx->i32, GFX10_GS_QUERY_BUF, false));
}

/* Send GS Alloc Req message from the first wave of the group to SPI.
 * Message payload is:
 * - bits 0..10: vertices in group
 * - bits 12..22: primitives in group
 */
static void build_sendmsg_gs_alloc_req(struct si_shader_context *ctx,
				       LLVMValueRef vtx_cnt,
				       LLVMValueRef prim_cnt)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5020);

	tmp = LLVMBuildShl(builder, prim_cnt, LLVMConstInt(ctx->ac.i32, 12, false), "");
	tmp = LLVMBuildOr(builder, tmp, vtx_cnt, "");
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_ALLOC_REQ, tmp);

	ac_build_endif(&ctx->ac, 5020);
}
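
/* Worked example for build_sendmsg_gs_alloc_req() above: a group with
 * 100 vertices and 50 primitives sends the payload
 * (50 << 12) | 100 = 0x32000 | 0x64 = 0x32064.
 */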

struct ngg_prim {
	unsigned num_vertices;
	LLVMValueRef isnull;
	LLVMValueRef index[3];
	LLVMValueRef edgeflag[3];
};

static void build_export_prim(struct si_shader_context *ctx,
			      const struct ngg_prim *prim)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	struct ac_export_args args;
	LLVMValueRef tmp;

	tmp = LLVMBuildZExt(builder, prim->isnull, ctx->ac.i32, "");
	args.out[0] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 31, false), "");

	for (unsigned i = 0; i < prim->num_vertices; ++i) {
		tmp = LLVMBuildShl(builder, prim->index[i],
				   LLVMConstInt(ctx->ac.i32, 10 * i, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
		tmp = LLVMBuildZExt(builder, prim->edgeflag[i], ctx->ac.i32, "");
		tmp = LLVMBuildShl(builder, tmp,
				   LLVMConstInt(ctx->ac.i32, 10 * i + 9, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
	}

	args.out[0] = LLVMBuildBitCast(builder, args.out[0], ctx->ac.f32, "");
	args.out[1] = LLVMGetUndef(ctx->ac.f32);
	args.out[2] = LLVMGetUndef(ctx->ac.f32);
	args.out[3] = LLVMGetUndef(ctx->ac.f32);

	args.target = V_008DFC_SQ_EXP_PRIM;
	args.enabled_channels = 1;
	args.done = true;
	args.valid_mask = false;
	args.compr = false;

	ac_build_export(&ctx->ac, &args);
}
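
/* Example for build_export_prim() above: a non-null triangle with vertex
 * indices 5, 6, 7 and no edge flags packs into
 * 5 | (6 << 10) | (7 << 20) = 0x00701805.
 */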

static void build_streamout_vertex(struct si_shader_context *ctx,
				   LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
				   unsigned stream, LLVMValueRef offset_vtx,
				   LLVMValueRef vertexptr)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef offset[4] = {};
	LLVMValueRef tmp;

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (!wg_offset_dw[buffer])
			continue;

		tmp = LLVMBuildMul(builder, offset_vtx,
				   LLVMConstInt(ctx->i32, so->stride[buffer], false), "");
		tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
		offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->i32, 2, false), "");
	}

	for (unsigned i = 0; i < so->num_outputs; ++i) {
		if (so->output[i].stream != stream)
			continue;

		unsigned reg = so->output[i].register_index;
		struct si_shader_output_values out;
		out.semantic_name = info->output_semantic_name[reg];
		out.semantic_index = info->output_semantic_index[reg];

		for (unsigned comp = 0; comp < 4; comp++) {
			tmp = ac_build_gep0(&ctx->ac, vertexptr,
					    LLVMConstInt(ctx->i32, 4 * reg + comp, false));
			out.values[comp] = LLVMBuildLoad(builder, tmp, "");
			out.vertex_stream[comp] =
				(info->output_streams[reg] >> (2 * comp)) & 3;
		}

		si_emit_streamout_output(ctx, so_buffer, offset, &so->output[i], &out);
	}
}

struct ngg_streamout {
	LLVMValueRef num_vertices;

	/* per-thread data */
	LLVMValueRef prim_enable[4]; /* i1 per stream */
	LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */

	/* Output */
	LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
};

/**
 * Build streamout logic.
 *
 * Implies a barrier.
 *
 * Writes number of emitted primitives to gs_ngg_scratch[4:8].
 *
 * Clobbers gs_ngg_scratch[8:].
 */
static void build_streamout(struct si_shader_context *ctx,
			    struct ngg_streamout *nggso)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp, tmp2;
	LLVMValueRef i32_2 = LLVMConstInt(ctx->i32, 2, false);
	LLVMValueRef i32_4 = LLVMConstInt(ctx->i32, 4, false);
	LLVMValueRef i32_8 = LLVMConstInt(ctx->i32, 8, false);
	LLVMValueRef so_buffer[4] = {};
	unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
				    (nggso->vertices[2] ? 1 : 0);
	LLVMValueRef prim_stride_dw[4] = {};
	LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->i32);
	int stream_for_buffer[4] = { -1, -1, -1, -1 };
	unsigned bufmask_for_stream[4] = {};
	bool isgs = ctx->type == PIPE_SHADER_GEOMETRY;
	unsigned scratch_emit_base = isgs ? 4 : 0;
	LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->i32_0;
	unsigned scratch_offset_base = isgs ? 8 : 4;
	LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;
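
	/* Sketch of the gs_ngg_scratch dword layout assumed here, derived from
	 * the bases above. For the GS case: dwords [0:4) accumulate generated
	 * primitives per stream, [4:8) receive the emit counts, [8:12) the
	 * streamout buffer offsets, and 12+ is scan scratch. Without a GS,
	 * emit counts live at [0:4) and buffer offsets at [4:8).
	 */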

	/* Determine the mapping of streamout buffers to vertex streams. */
	for (unsigned i = 0; i < so->num_outputs; ++i) {
		unsigned buf = so->output[i].output_buffer;
		unsigned stream = so->output[i].stream;
		assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
		stream_for_buffer[buf] = stream;
		bufmask_for_stream[stream] |= 1 << buf;
	}

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (stream_for_buffer[buffer] == -1)
			continue;

		assert(so->stride[buffer]);

		tmp = LLVMConstInt(ctx->i32, so->stride[buffer], false);
		prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
		prim_stride_dw_vgpr = ac_build_writelane(
			&ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
			LLVMConstInt(ctx->i32, buffer, false));

		so_buffer[buffer] = ac_build_load_to_sgpr(
			&ctx->ac, buf_ptr,
			LLVMConstInt(ctx->i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
	}

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5200);
	{
		LLVMTypeRef gdsptr = LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GDS);
		LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->i32_0, gdsptr, "");

		/* Advance the streamout offsets in GDS. */
		LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
		LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");

		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5210);
		{
			if (isgs) {
				tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
				tmp = LLVMBuildLoad(builder, tmp, "");
			} else {
				tmp = ac_build_writelane(&ctx->ac, ctx->i32_0,
							 ngg_get_prim_cnt(ctx), ctx->i32_0);
			}
			LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);

			unsigned swizzle[4];
			int unused_stream = -1;
			for (unsigned stream = 0; stream < 4; ++stream) {
				if (!info->num_stream_output_components[stream]) {
					unused_stream = stream;
					break;
				}
			}
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] >= 0) {
					swizzle[buffer] = stream_for_buffer[buffer];
				} else {
					assert(unused_stream >= 0);
					swizzle[buffer] = unused_stream;
				}
			}

			tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
				swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
			tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");

			LLVMValueRef args[] = {
				LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
				tmp,
				ctx->i32_0, // ordering
				ctx->i32_0, // scope
				ctx->ac.i1false, // isVolatile
				LLVMConstInt(ctx->i32, 4 << 24, false), // OA index
				ctx->ac.i1true, // wave release
				ctx->ac.i1true, // wave done
			};
			tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
						 ctx->i32, args, ARRAY_SIZE(args), 0);
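
			/* ds.ordered.add returns the running total before this
			 * workgroup's addition (workgroups are serialized by
			 * ordered ID), i.e. the base dword offset at which this
			 * group may write each streamout buffer.
			 */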

			/* Keep offsets in a VGPR for quick retrieval via readlane by
			 * the first wave for bounds checking, and also store in LDS
			 * for retrieval by all waves later. */
			LLVMBuildStore(builder, tmp, offsets_vgpr);

			tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					    scratch_offset_basev, "");
			tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
			LLVMBuildStore(builder, tmp, tmp2);
		}
		ac_build_endif(&ctx->ac, 5210);

		/* Determine the max emit per buffer. This is done via the SALU, in part
		 * because LLVM can't generate divide-by-multiply if we try to do this
		 * via VALU with one lane per buffer.
		 */
		LLVMValueRef max_emit[4] = {};
		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] == -1)
				continue;

			LLVMValueRef bufsize_dw =
				LLVMBuildLShr(builder,
					LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
					i32_2, "");

			tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
			LLVMValueRef offset_dw =
				ac_build_readlane(&ctx->ac, tmp,
						  LLVMConstInt(ctx->i32, buffer, false));

			tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
			tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");

			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
			max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->i32_0, tmp, "");
		}

		/* Determine the number of emitted primitives per stream and fixup the
		 * GDS counter if necessary.
		 *
		 * This is complicated by the fact that a single stream can emit to
		 * multiple buffers (but luckily not vice versa).
		 */
		LLVMValueRef emit_vgpr = ctx->i32_0;

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
			LLVMValueRef generated =
				ac_build_readlane(&ctx->ac, tmp,
						  LLVMConstInt(ctx->i32, stream, false));

			LLVMValueRef emit = generated;
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] == stream)
					emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
			}

			emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
						       LLVMConstInt(ctx->i32, stream, false));

			/* Fixup the offset using a plain GDS atomic if we overflowed. */
			tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
			ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
			tmp = LLVMBuildLShr(builder,
					    LLVMConstInt(ctx->i32, bufmask_for_stream[stream], false),
					    ac_get_thread_id(&ctx->ac), "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			ac_build_ifcc(&ctx->ac, tmp, 5222);
			{
				tmp = LLVMBuildSub(builder, generated, emit, "");
				tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
				tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
				LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
						   LLVMAtomicOrderingMonotonic, false);
			}
			ac_build_endif(&ctx->ac, 5222);
			ac_build_endif(&ctx->ac, 5221);
		}

		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5225);
		{
			tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					   scratch_emit_basev, "");
			tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
			LLVMBuildStore(builder, emit_vgpr, tmp);
		}
		ac_build_endif(&ctx->ac, 5225);
	}
	ac_build_endif(&ctx->ac, 5200);

	/* Determine the workgroup-relative per-thread / primitive offset into
	 * the streamout buffers */
	struct ac_wg_scan primemit_scan[4] = {};

	if (isgs) {
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			primemit_scan[stream].enable_exclusive = true;
			primemit_scan[stream].op = nir_op_iadd;
			primemit_scan[stream].src = nggso->prim_enable[stream];
			primemit_scan[stream].scratch =
				ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
					      LLVMConstInt(ctx->i32, 12 + 8 * stream, false));
			primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
			primemit_scan[stream].numwaves = get_tgsize(ctx);
			primemit_scan[stream].maxwaves = 8;
			ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
		}
	}

	ac_build_s_barrier(&ctx->ac);

	/* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
	LLVMValueRef wgoffset_dw[4] = {};

	{
		LLVMValueRef scratch_vgpr;

		tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
		scratch_vgpr = LLVMBuildLoad(builder, tmp, "");

		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] >= 0) {
				wgoffset_dw[buffer] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_offset_base + buffer, false));
			}
		}

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (info->num_stream_output_components[stream]) {
				nggso->emit[stream] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_emit_base + stream, false));
			}
		}
	}

	/* Write out primitive data */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		if (isgs) {
			ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
		} else {
			primemit_scan[stream].result_exclusive = tid;
		}

		tmp = LLVMBuildICmp(builder, LLVMIntULT,
				    primemit_scan[stream].result_exclusive,
				    nggso->emit[stream], "");
		tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
		ac_build_ifcc(&ctx->ac, tmp, 5240);
		{
			LLVMValueRef offset_vtx =
				LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
					     nggso->num_vertices, "");

			for (unsigned i = 0; i < max_num_vertices; ++i) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT,
						    LLVMConstInt(ctx->i32, i, false),
						    nggso->num_vertices, "");
				ac_build_ifcc(&ctx->ac, tmp, 5241);
				build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
						       stream, offset_vtx, nggso->vertices[i]);
				ac_build_endif(&ctx->ac, 5241);
				offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->i32_1, "");
			}
		}
		ac_build_endif(&ctx->ac, 5240);
	}
}

/**
 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
 * for the vertex outputs.
 */
static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx,
					LLVMValueRef vtxid)
{
	/* The extra dword is used to avoid LDS bank conflicts. */
	unsigned vertex_size = 4 * ctx->shader->selector->info.num_outputs + 1;
	LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, vertex_size);
	LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
	LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
	return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
}
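
/* Example for ngg_nogs_vertex_ptr() above: a shader with 8 vec4 outputs gets
 * a 4 * 8 + 1 = 33-dword vertex stride; the odd stride spreads consecutive
 * vertices across LDS banks instead of landing them all on the same bank.
 */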

/**
 * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
 */
void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
			     unsigned max_outputs,
			     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_selector *sel = ctx->shader->selector;
	struct tgsi_shader_info *info = &sel->info;
	struct si_shader_output_values *outputs = NULL;
	LLVMBuilderRef builder = ctx->ac.builder;
	struct lp_build_if_state if_state;
	LLVMValueRef tmp, tmp2;

	assert(!ctx->shader->is_gs_copy_shader);
	assert(info->num_outputs <= max_outputs);

	outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));

	LLVMValueRef vertex_ptr = NULL;

	if (sel->so.num_outputs)
		vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));

	for (unsigned i = 0; i < info->num_outputs; i++) {
		outputs[i].semantic_name = info->output_semantic_name[i];
		outputs[i].semantic_index = info->output_semantic_index[i];

		/* This is used only by streamout. */
		for (unsigned j = 0; j < 4; j++) {
			outputs[i].values[j] =
				LLVMBuildLoad(builder,
					      addrs[4 * i + j],
					      "");
			outputs[i].vertex_stream[j] =
				(info->output_streams[i] >> (2 * j)) & 3;

			if (vertex_ptr) {
				tmp = ac_build_gep0(&ctx->ac, vertex_ptr,
						    LLVMConstInt(ctx->i32, 4 * i + j, false));
				tmp2 = ac_to_integer(&ctx->ac, outputs[i].values[j]);
				LLVMBuildStore(builder, tmp2, tmp);
			}
		}
	}

	lp_build_endif(&ctx->merged_wrap_if_state);

	LLVMValueRef prims_in_wave = si_unpack_param(ctx, ctx->param_merged_wave_info, 8, 8);
	LLVMValueRef vtx_in_wave = si_unpack_param(ctx, ctx->param_merged_wave_info, 0, 8);
	LLVMValueRef is_gs_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), prims_in_wave, "");
	LLVMValueRef is_es_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), vtx_in_wave, "");
	LLVMValueRef vtxindex[] = {
		si_unpack_param(ctx, ctx->param_gs_vtx01_offset, 0, 16),
		si_unpack_param(ctx, ctx->param_gs_vtx01_offset, 16, 16),
		si_unpack_param(ctx, ctx->param_gs_vtx23_offset, 0, 16),
	};

	/* Determine the number of vertices per primitive. */
	unsigned num_vertices;
	LLVMValueRef num_vertices_val;

	if (ctx->type == PIPE_SHADER_VERTEX) {
		if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) {
			/* Blits always use axis-aligned rectangles with 3 vertices. */
			num_vertices = 3;
			num_vertices_val = LLVMConstInt(ctx->i32, 3, 0);
		} else {
			/* Extract OUTPRIM field. */
			tmp = si_unpack_param(ctx, ctx->param_vs_state_bits, 2, 2);
			num_vertices_val = LLVMBuildAdd(builder, tmp, ctx->i32_1, "");
			num_vertices = 3; /* TODO: optimize for points & lines */
		}
	} else {
		assert(ctx->type == PIPE_SHADER_TESS_EVAL);

		if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
			num_vertices = 1;
		else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
			num_vertices = 2;
		else
			num_vertices = 3;

		num_vertices_val = LLVMConstInt(ctx->i32, num_vertices, false);
	}

	/* Streamout */
	LLVMValueRef emitted_prims = NULL;

	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = num_vertices_val;
		nggso.prim_enable[0] = is_gs_thread;

		for (unsigned i = 0; i < num_vertices; ++i)
			nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);

		build_streamout(ctx, &nggso);
		emitted_prims = nggso.emit[0];
	}

	/* TODO: primitive culling */

	build_sendmsg_gs_alloc_req(ctx, ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));

	/* Update query buffer */
	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5030);
	tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
			    sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5031);
	{
		LLVMValueRef args[] = {
			ngg_get_prim_cnt(ctx),
			ngg_get_query_buf(ctx),
			LLVMConstInt(ctx->i32, 16, false), /* offset of stream[0].generated_primitives */
			ctx->i32_0, /* soffset */
			ctx->i32_0, /* cachepolicy */
		};

		if (sel->so.num_outputs) {
			args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->i32_1);
			args[2] = ac_build_writelane(&ctx->ac, args[2],
						     LLVMConstInt(ctx->i32, 24, false), ctx->i32_1);
		}

		/* TODO: should this be 64-bit atomics? */
		ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
				   ctx->i32, args, 5, 0);
	}
	ac_build_endif(&ctx->ac, 5031);
	ac_build_endif(&ctx->ac, 5030);

	/* Export primitive data to the index buffer. Format is:
	 * - bits 0..8: index 0
	 * - bit 9: edge flag 0
	 * - bits 10..18: index 1
	 * - bit 19: edge flag 1
	 * - bits 20..28: index 2
	 * - bit 29: edge flag 2
	 * - bit 31: null primitive (skip)
	 *
	 * For the first version, we will always build up all three indices
	 * independent of the primitive type. The additional garbage data
	 * shouldn't hurt.
	 *
	 * TODO: culling depends on the primitive type, so can have some
	 * interaction here.
	 */
	lp_build_if(&if_state, &ctx->gallivm, is_gs_thread);
	{
		struct ngg_prim prim = {};

		prim.num_vertices = num_vertices;
		prim.isnull = ctx->ac.i1false;
		memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

		for (unsigned i = 0; i < num_vertices; ++i) {
			tmp = LLVMBuildLShr(builder, ctx->abi.gs_invocation_id,
					    LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
			prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
		}

		build_export_prim(ctx, &prim);
	}
	lp_build_endif(&if_state);

	/* Export per-vertex data (positions and parameters). */
	lp_build_if(&if_state, &ctx->gallivm, is_es_thread);
	{
		unsigned i;

		/* Unconditionally (re-)load the values for proper SSA form. */
		for (i = 0; i < info->num_outputs; i++) {
			for (unsigned j = 0; j < 4; j++) {
				outputs[i].values[j] =
					LLVMBuildLoad(builder,
						      addrs[4 * i + j],
						      "");
			}
		}

		/* TODO: Vertex shaders have to get PrimitiveID from GS VGPRs. */
		if (ctx->type == PIPE_SHADER_TESS_EVAL &&
		    ctx->shader->key.mono.u.vs_export_prim_id) {
			outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
			outputs[i].semantic_index = 0;
			outputs[i].values[0] = ac_to_float(&ctx->ac, si_get_primitive_id(ctx, 0));
			for (unsigned j = 1; j < 4; j++)
				outputs[i].values[j] = LLVMGetUndef(ctx->f32);

			memset(outputs[i].vertex_stream, 0,
			       sizeof(outputs[i].vertex_stream));
			i++;
		}

		si_llvm_export_vs(ctx, outputs, i);
	}
	lp_build_endif(&if_state);

	FREE(outputs);
}

static LLVMValueRef
ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;

	LLVMTypeRef elements[2] = {
		LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
		LLVMArrayType(ctx->ac.i8, 4),
	};
	LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
	type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
	return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}

/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive
 * for rasterization.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
static LLVMValueRef
ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

	/* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
	unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
	if (write_stride_2exp) {
		LLVMValueRef row =
			LLVMBuildLShr(builder, vertexidx,
				      LLVMConstInt(ctx->ac.i32, 5, false), "");
		LLVMValueRef swizzle =
			LLVMBuildAnd(builder, row,
				     LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
						  false), "");
		vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
	}

	return ac_build_gep0(&ctx->ac, storage, vertexidx);
}
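
/* Worked example for the swizzle in ngg_gs_vertex_ptr() above: with
 * gs_max_out_vertices = 4, write_stride_2exp is 2, so row = vertexidx >> 5
 * and the low 2 bits of the row are XORed into the index. Vertex 32 maps
 * to slot 33 and vertex 64 to slot 66, rotating each group of 32 vertices
 * across the LDS banks.
 */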

static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
		       LLVMValueRef emitidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
	tmp = LLVMBuildMul(builder, tmp, gsthread, "");
	const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
	return ngg_gs_vertex_ptr(ctx, vertexidx);
}

void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
			      unsigned stream,
			      LLVMValueRef *addrs)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	struct lp_build_if_state if_state;
	LLVMValueRef tmp;
	const LLVMValueRef vertexidx =
		LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 */
	const LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
			      LLVMConstInt(ctx->i32, sel->gs_max_out_vertices, false), "");

	tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
	tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
	LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

	lp_build_if(&if_state, &ctx->gallivm, can_emit);

	const LLVMValueRef vertexptr =
		ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
	unsigned out_idx = 0;
	for (unsigned i = 0; i < info->num_outputs; i++) {
		for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implied C-style array */
				ctx->ac.i32_0, /* first entry of struct */
				LLVMConstInt(ctx->ac.i32, out_idx, false),
			};
			LLVMValueRef ptr = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			LLVMBuildStore(builder, out_val, ptr);
		}
	}
	assert(out_idx * 4 == sel->gsvs_vertex_size);

	/* Determine and store whether this vertex completed a primitive. */
	const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

	tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
	const LLVMValueRef iscompleteprim =
		LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

	tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
	LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_1, /* second struct entry */
		LLVMConstInt(ctx->ac.i32, stream, false),
	};
	const LLVMValueRef primflagptr =
		LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

	tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
	LLVMBuildStore(builder, tmp, primflagptr);

	tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
	tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
	LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);

	lp_build_endif(&if_state);
}
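
/* Example of the per-vertex primitive flag written above: for a GS with
 * triangle_strip output, u_vertices_per_prim() is 3, so the first two
 * vertices of each strip store primflag 0 and every vertex from the third
 * onwards stores 1, marking one completed primitive per emit.
 */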

void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
{
	/* Zero out the part of LDS scratch that is used to accumulate the
	 * per-stream generated primitive count.
	 */
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->i32, 4, false), "");
	ac_build_ifcc(&ctx->ac, tmp, 5090);
	{
		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
		LLVMBuildStore(builder, ctx->i32_0, ptr);
	}
	ac_build_endif(&ctx->ac, 5090);

	ac_build_s_barrier(&ctx->ac);
}

void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;
	const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
	LLVMValueRef tmp, tmp2;

	/* Zero out remaining (non-emitted) primitive flags.
	 *
	 * Note: Alternatively, we could pass the relevant gs_next_vertex to
	 *       the emit threads via LDS. This is likely worse in the expected
	 *       typical case where each GS thread emits the full set of
	 *       vertices.
	 */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

		ac_build_bgnloop(&ctx->ac, 5100);

		const LLVMValueRef vertexidx =
			LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
		tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
				    LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5101);
		ac_build_break(&ctx->ac);
		ac_build_endif(&ctx->ac, 5101);

		tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
		LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

		tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implied C-style array */
			ctx->ac.i32_1, /* second entry of struct */
			LLVMConstInt(ctx->ac.i32, stream, false),
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		LLVMBuildStore(builder, i8_0, tmp);

		ac_build_endloop(&ctx->ac, 5100);
	}

	/* Accumulate generated primitives counts across the entire threadgroup. */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		LLVMValueRef numprims =
			LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
		numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, 64);

		tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5105);
		{
			LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
					   ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
							 LLVMConstInt(ctx->i32, stream, false)),
					   numprims, LLVMAtomicOrderingMonotonic, false);
		}
		ac_build_endif(&ctx->ac, 5105);
	}

	lp_build_endif(&ctx->merged_wrap_if_state);

	ac_build_s_barrier(&ctx->ac);

	const LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);

	/* Streamout */
	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = LLVMConstInt(ctx->i32, verts_per_prim, false);

		LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			LLVMValueRef gep_idx[3] = {
				ctx->i32_0, /* implicit C-style array */
				ctx->i32_1, /* second value of struct */
				LLVMConstInt(ctx->i32, stream, false),
			};
			tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
			tmp = LLVMBuildLoad(builder, tmp, "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
			nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
		}

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			tmp = LLVMBuildSub(builder, tid,
					   LLVMConstInt(ctx->i32, verts_per_prim - i - 1, false), "");
			tmp = ngg_gs_vertex_ptr(ctx, tmp);
			nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
		}

		build_streamout(ctx, &nggso);
	}

	/* Write shader query data. */
	unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
			    LLVMConstInt(ctx->i32, num_query_comps, false), "");
	ac_build_ifcc(&ctx->ac, tmp, 5110);
	{
		LLVMValueRef offset;
		tmp = tid;
		if (sel->so.num_outputs)
			tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->i32, 3, false), "");
		offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 32, false), "");
		if (sel->so.num_outputs) {
			tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->i32, 2, false), "");
			tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 8, false), "");
			offset = LLVMBuildAdd(builder, offset, tmp, "");
		}
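
		/* Offset sketch (derived from the math above): with streamout,
		 * thread tid computes (tid & 3) * 32 + (tid >> 2) * 8, e.g.
		 * tid 5 -> 1 * 32 + 1 * 8 = 40; without streamout it is simply
		 * tid * 32.
		 */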

		tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
		LLVMValueRef args[] = {
			tmp,
			ngg_get_query_buf(ctx),
			offset,
			LLVMConstInt(ctx->i32, 16, false), /* soffset */
			ctx->i32_0, /* cachepolicy */
		};
		ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
				   ctx->i32, args, 5, 0);
	}
	ac_build_endif(&ctx->ac, 5110);

	/* TODO: culling */

	/* Determine vertex liveness. */
	LLVMValueRef vertliveptr = lp_build_alloca(&ctx->gallivm, ctx->ac.i1, "vertexlive");

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5120);
	{
		for (unsigned i = 0; i < verts_per_prim; ++i) {
			const LLVMValueRef primidx =
				LLVMBuildAdd(builder, tid,
					     LLVMConstInt(ctx->ac.i32, i, false), "");

			if (i > 0) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
				ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
			}

			/* Load primitive liveness */
			tmp = ngg_gs_vertex_ptr(ctx, primidx);
			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implicit C-style array */
				ctx->ac.i32_1, /* second value of struct */
				ctx->ac.i32_0, /* stream 0 */
			};
			tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
			tmp = LLVMBuildLoad(builder, tmp, "");
			const LLVMValueRef primlive =
				LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

			tmp = LLVMBuildLoad(builder, vertliveptr, "");
			tmp = LLVMBuildOr(builder, tmp, primlive, "");
			LLVMBuildStore(builder, tmp, vertliveptr);

			if (i > 0)
				ac_build_endif(&ctx->ac, 5121 + i);
		}
	}
	ac_build_endif(&ctx->ac, 5120);

	/* Inclusive scan addition across the current wave. */
	LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
	struct ac_wg_scan vertlive_scan = {};
	vertlive_scan.op = nir_op_iadd;
	vertlive_scan.enable_reduce = true;
	vertlive_scan.enable_exclusive = true;
	vertlive_scan.src = vertlive;
	vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->i32_0);
	vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
	vertlive_scan.numwaves = get_tgsize(ctx);
	vertlive_scan.maxwaves = 8;

	ac_build_wg_scan(&ctx->ac, &vertlive_scan);

	/* Skip all exports (including index exports) when possible. At least on
	 * early gfx10 revisions this is also to avoid hangs.
	 */
	LLVMValueRef have_exports =
		LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
	num_emit_threads =
		LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

	/* Allocate export space. Send this message as early as possible, to
	 * hide the latency of the SQ <-> SPI roundtrip.
	 *
	 * Note: We could consider compacting primitives for export as well.
	 *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
	 *       prim data per clock and skips null primitives at no additional
	 *       cost. So compacting primitives can only be beneficial when
	 *       there are 4 or more contiguous null primitives in the export
	 *       (in the common case of single-dword prim exports).
	 */
	build_sendmsg_gs_alloc_req(ctx, vertlive_scan.result_reduce, num_emit_threads);

	/* Setup the reverse vertex compaction permutation. We re-use stream 1
	 * of the primitive liveness flags, relying on the fact that each
	 * threadgroup can have at most 256 threads. */
	ac_build_ifcc(&ctx->ac, vertlive, 5130);
	{
		tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_1, /* stream 1 */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
		LLVMBuildStore(builder, tmp2, tmp);
	}
	ac_build_endif(&ctx->ac, 5130);
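
	/* Compaction example: if only threads 0, 2 and 3 produced live
	 * vertices, their exclusive scan results are 0, 1 and 2, so slots
	 * 0..2 record source threads 0, 2 and 3. Export thread i later reads
	 * slot i to find which thread's outputs to fetch.
	 */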

	ac_build_s_barrier(&ctx->ac);

	/* Export primitive data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5140);
	{
		struct ngg_prim prim = {};
		prim.num_vertices = verts_per_prim;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_0, /* primflag */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		prim.isnull = LLVMBuildICmp(builder, LLVMIntEQ, tmp,
					    LLVMConstInt(ctx->ac.i8, 0, false), "");

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
				LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
			prim.edgeflag[i] = ctx->ac.i1false;
		}

		build_export_prim(ctx, &prim);
	}
	ac_build_endif(&ctx->ac, 5140);

	/* Export position and parameter data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
	ac_build_ifcc(&ctx->ac, tmp, 5145);
	{
		struct si_shader_output_values *outputs = NULL;
		outputs = MALLOC(info->num_outputs * sizeof(outputs[0]));

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_1, /* stream 1: source data index */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
		const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

		unsigned out_idx = 0;
		gep_idx[1] = ctx->ac.i32_0;
		for (unsigned i = 0; i < info->num_outputs; i++) {
			outputs[i].semantic_name = info->output_semantic_name[i];
			outputs[i].semantic_index = info->output_semantic_index[i];

			for (unsigned j = 0; j < 4; j++, out_idx++) {
				gep_idx[2] = LLVMConstInt(ctx->ac.i32, out_idx, false);
				tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
				tmp = LLVMBuildLoad(builder, tmp, "");
				outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
				outputs[i].vertex_stream[j] =
					(info->output_streams[i] >> (2 * j)) & 3;
			}
		}

		si_llvm_export_vs(ctx, outputs, info->num_outputs);

		FREE(outputs);
	}
	ac_build_endif(&ctx->ac, 5145);
}

static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
				     unsigned min_verts_per_prim, bool use_adjacency)
{
	unsigned max_reuse = max_esverts - min_verts_per_prim;
	if (use_adjacency)
		max_reuse /= 2;
	*max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}
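
/* Example for clamp_gsprims_to_esverts() above: with max_esverts = 256 and
 * triangles (min_verts_per_prim = 3), max_reuse = 253 and max_gsprims is
 * clamped to 254; with adjacency the reuse is halved, giving a clamp of 127.
 */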

/**
 * Determine subgroup information like maximum number of vertices and prims.
 *
 * This happens before the shader is uploaded, since LDS relocations during
 * upload depend on the subgroup size.
 */
void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
{
	const struct si_shader_selector *gs_sel = shader->selector;
	const struct si_shader_selector *es_sel =
		shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
	const enum pipe_shader_type gs_type = gs_sel->type;
	const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
	/* TODO: Specialize for known primitive type without GS. */
	const unsigned input_prim = gs_type == PIPE_SHADER_GEOMETRY ?
				    gs_sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM] :
				    PIPE_PRIM_TRIANGLES;
	const bool use_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
				   input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
	const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
	const unsigned min_verts_per_prim =
		gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space.
	 *
	 * Streamout can increase the ESGS buffer size later on, so be more
	 * conservative with streamout and use 4K dwords. This may be suboptimal.
	 *
	 * Otherwise, use the limit of 7K dwords. The reason is that we need
	 * to leave some headroom for the max_esverts increase at the end.
	 *
	 * TODO: We should really take the shader's internal LDS use into
	 *       account. The linker will fail if the size is greater than
	 *       8K dwords.
	 */
	const unsigned max_lds_size = (gs_sel->so.num_outputs ? 4 : 7) * 1024 - 128;
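	/* Concretely: 7 * 1024 - 128 = 7040 dwords (27.5 KiB) without
	 * streamout, or 4 * 1024 - 128 = 3968 dwords (15.5 KiB) with it.
	 */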
	const unsigned target_lds_size = max_lds_size;
	unsigned esvert_lds_size = 0;
	unsigned gsprim_lds_size = 0;

	/* All these are per subgroup: */
	bool max_vert_out_per_gs_instance = false;
	unsigned max_esverts_base = 256;
	unsigned max_gsprims_base = 128; /* default prim group size clamp */

	/* Hardware has the following non-natural restrictions on the value
	 * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of the draw:
	 * - at most 252 for any line input primitive type
	 * - at most 251 for any quad input primitive type
	 * - at most 251 for triangle strips with adjacency (this happens to
	 *   be the natural limit for triangle *lists* with adjacency)
	 */
	max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

	if (gs_type == PIPE_SHADER_GEOMETRY) {
		unsigned max_out_verts_per_gsprim =
			gs_sel->gs_max_out_vertices * gs_num_invocations;

		if (max_out_verts_per_gsprim <= 256) {
			if (max_out_verts_per_gsprim) {
				max_gsprims_base = MIN2(max_gsprims_base,
							256 / max_out_verts_per_gsprim);
			}
		} else {
			/* Use special multi-cycling mode in which each GS
			 * instance gets its own subgroup. Does not work with
			 * tessellation. */
			max_vert_out_per_gs_instance = true;
			max_gsprims_base = 1;
			max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
		}

		esvert_lds_size = es_sel->esgs_itemsize / 4;
		gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
	} else {
		/* TODO: This needs to be adjusted once LDS use for compaction
		 * after culling is implemented. */
		if (es_sel->so.num_outputs)
			esvert_lds_size = 4 * es_sel->info.num_outputs + 1;
	}

	unsigned max_gsprims = max_gsprims_base;
	unsigned max_esverts = max_esverts_base;

	if (esvert_lds_size)
		max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
	if (gsprim_lds_size)
		max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

	max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
	clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
	assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

	if (esvert_lds_size || gsprim_lds_size) {
		/* Now that we have a rough proportionality between esverts
		 * and gsprims based on the primitive type, scale both of them
		 * down simultaneously based on required LDS space.
		 *
		 * We could be smarter about this if we knew how much vertex
		 * reuse to expect.
		 */
		unsigned lds_total = max_esverts * esvert_lds_size +
				     max_gsprims * gsprim_lds_size;
		if (lds_total > target_lds_size) {
			max_esverts = max_esverts * target_lds_size / lds_total;
			max_gsprims = max_gsprims * target_lds_size / lds_total;

			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		}
	}

	/* Round up towards full wave sizes for better ALU utilization. */
	if (!max_vert_out_per_gs_instance) {
		const unsigned wavesize = 64;
		unsigned orig_max_esverts;
		unsigned orig_max_gsprims;
		do {
			orig_max_esverts = max_esverts;
			orig_max_gsprims = max_gsprims;

			max_esverts = align(max_esverts, wavesize);
			max_esverts = MIN2(max_esverts, max_esverts_base);
			if (esvert_lds_size)
				max_esverts = MIN2(max_esverts,
						   (max_lds_size - max_gsprims * gsprim_lds_size) /
						   esvert_lds_size);
			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

			max_gsprims = align(max_gsprims, wavesize);
			max_gsprims = MIN2(max_gsprims, max_gsprims_base);
			if (gsprim_lds_size)
				max_gsprims = MIN2(max_gsprims,
						   (max_lds_size - max_esverts * esvert_lds_size) /
						   gsprim_lds_size);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		} while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
	}

	/* Hardware restriction: minimum value of max_esverts */
	max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);

	unsigned max_out_vertices =
		max_vert_out_per_gs_instance ? gs_sel->gs_max_out_vertices :
		gs_type == PIPE_SHADER_GEOMETRY ?
		max_gsprims * gs_num_invocations * gs_sel->gs_max_out_vertices :
		max_esverts;
	assert(max_out_vertices <= 256);

	unsigned prim_amp_factor = 1;
	if (gs_type == PIPE_SHADER_GEOMETRY) {
		/* Number of output primitives per GS input primitive after
		 * GS instancing. */
		prim_amp_factor = gs_sel->gs_max_out_vertices;
	}

	/* The GE only checks against the maximum number of ES verts after
	 * allocating a full GS primitive. So we need to ensure that whenever
	 * this check passes, there is enough space for a full primitive without
	 * vertex reuse.
	 */
	shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
	shader->ngg.max_gsprims = max_gsprims;
	shader->ngg.max_out_verts = max_out_vertices;
	shader->ngg.prim_amp_factor = prim_amp_factor;
	shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;

	shader->gs_info.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
	shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;

	assert(shader->ngg.hw_max_esverts >= 24); /* HW limitation */
}
1385 }