/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_shader_internal.h"

#include "sid.h"

#include "util/u_memory.h"
#include "util/u_prim.h"

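/* The merged_wave_info SGPR packs several per-threadgroup fields; the two
 * helpers below rely on the layout implied by their unpack offsets:
 *   bits [24:27] -- index of this wave within the threadgroup
 *   bits [28:31] -- number of waves in the threadgroup
 */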
static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
}

static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->merged_wave_info, 28, 4);
}

static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp;
   tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
                      LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
   return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}

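/* The gs_tg_info SGPR describes the NGG threadgroup as a whole. The
 * bitfields used below are inferred from the unpack offsets (the value is
 * assumed to be provided by the SPI):
 *   bits [0:11]  -- ordered wave ID, used for ordered GDS operations
 *   bits [12:20] -- number of vertices in the threadgroup
 *   bits [22:30] -- number of primitives in the threadgroup
 */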
static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->gs_tg_info, 12, 9);
}

static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->gs_tg_info, 22, 9);
}

static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
{
   return si_unpack_param(ctx, ctx->gs_tg_info, 0, 12);
}

static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
{
   LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);

   return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
                                LLVMConstInt(ctx->i32, GFX10_GS_QUERY_BUF, false));
}

static LLVMValueRef ngg_get_initial_edgeflag(struct si_shader_context *ctx, unsigned index)
{
   if (ctx->type == PIPE_SHADER_VERTEX) {
      LLVMValueRef tmp;
      tmp = LLVMBuildLShr(ctx->ac.builder,
                          ac_get_arg(&ctx->ac, ctx->args.gs_invocation_id),
                          LLVMConstInt(ctx->ac.i32, 8 + index, false), "");
      return LLVMBuildTrunc(ctx->ac.builder, tmp, ctx->ac.i1, "");
   }
   return ctx->i1false;
}
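/* Note: for an NGG VS, the initial edge flags of the primitive are taken
 * from bits [8 + index] of the gs_invocation_id argument; the
 * shift-and-truncate above extracts the flag for one vertex.
 */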

/**
 * Return the number of vertices as a constant in \p num_vertices,
 * and return a more precise value as LLVMValueRef from the function.
 */
static LLVMValueRef ngg_get_vertices_per_prim(struct si_shader_context *ctx,
                                              unsigned *num_vertices)
{
   const struct si_shader_info *info = &ctx->shader->selector->info;

   if (ctx->type == PIPE_SHADER_VERTEX) {
      if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
         /* Blits always use axis-aligned rectangles with 3 vertices. */
         *num_vertices = 3;
         return LLVMConstInt(ctx->i32, 3, 0);
      } else {
         /* We always build up all three indices for the prim export
          * independent of the primitive type. The additional garbage
          * data shouldn't hurt. This number doesn't matter with
          * NGG passthrough.
          */
         *num_vertices = 3;

         /* Extract OUTPRIM field. */
         LLVMValueRef num = si_unpack_param(ctx, ctx->vs_state_bits, 2, 2);
         return LLVMBuildAdd(ctx->ac.builder, num, ctx->i32_1, "");
      }
   } else {
      assert(ctx->type == PIPE_SHADER_TESS_EVAL);

      if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
         *num_vertices = 1;
      else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
         *num_vertices = 2;
      else
         *num_vertices = 3;

      return LLVMConstInt(ctx->i32, *num_vertices, false);
   }
}
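/* Illustrative mapping of the OUTPRIM field as used above: vertex count =
 * field value + 1, i.e. 0 -> points, 1 -> lines, 2 -> triangles.
 */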

void gfx10_ngg_build_sendmsg_gs_alloc_req(struct si_shader_context *ctx)
{
   ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
                                 ngg_get_vtx_cnt(ctx),
                                 ngg_get_prim_cnt(ctx));
}

static void build_streamout_vertex(struct si_shader_context *ctx,
                                   LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
                                   unsigned stream, LLVMValueRef offset_vtx,
                                   LLVMValueRef vertexptr)
{
   struct si_shader_info *info = &ctx->shader->selector->info;
   struct pipe_stream_output_info *so = &ctx->shader->selector->so;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef offset[4] = {};
   LLVMValueRef tmp;

   for (unsigned buffer = 0; buffer < 4; ++buffer) {
      if (!wg_offset_dw[buffer])
         continue;

      tmp = LLVMBuildMul(builder, offset_vtx,
                         LLVMConstInt(ctx->i32, so->stride[buffer], false), "");
      tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
      offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->i32, 2, false), "");
   }
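   /* Worked example of the offset math above (illustrative values): with
    * so->stride[buffer] = 4 dwords, wg_offset_dw[buffer] = 64 and
    * offset_vtx = 3, the vertex is written starting at dword
    * 64 + 3 * 4 = 76, i.e. byte offset 76 << 2 = 304.
    */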

   for (unsigned i = 0; i < so->num_outputs; ++i) {
      if (so->output[i].stream != stream)
         continue;

      unsigned reg = so->output[i].register_index;
      struct si_shader_output_values out;
      out.semantic_name = info->output_semantic_name[reg];
      out.semantic_index = info->output_semantic_index[reg];

      for (unsigned comp = 0; comp < 4; comp++) {
         tmp = ac_build_gep0(&ctx->ac, vertexptr,
                             LLVMConstInt(ctx->i32, 4 * reg + comp, false));
         out.values[comp] = LLVMBuildLoad(builder, tmp, "");
         out.vertex_stream[comp] =
            (info->output_streams[reg] >> (2 * comp)) & 3;
      }

      si_emit_streamout_output(ctx, so_buffer, offset, &so->output[i], &out);
   }
}

struct ngg_streamout {
   LLVMValueRef num_vertices;

   /* per-thread data */
   LLVMValueRef prim_enable[4]; /* i1 per stream */
   LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */

   /* Output */
   LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
};

/**
 * Build streamout logic.
 *
 * Implies a barrier.
 *
 * Writes number of emitted primitives to gs_ngg_scratch[4:8].
 *
 * Clobbers gs_ngg_scratch[8:].
 */
static void build_streamout(struct si_shader_context *ctx,
                            struct ngg_streamout *nggso)
{
   struct si_shader_info *info = &ctx->shader->selector->info;
   struct pipe_stream_output_info *so = &ctx->shader->selector->so;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
   LLVMValueRef tid = get_thread_id_in_tg(ctx);
   LLVMValueRef tmp, tmp2;
   LLVMValueRef i32_2 = LLVMConstInt(ctx->i32, 2, false);
   LLVMValueRef i32_4 = LLVMConstInt(ctx->i32, 4, false);
   LLVMValueRef i32_8 = LLVMConstInt(ctx->i32, 8, false);
   LLVMValueRef so_buffer[4] = {};
   unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
                               (nggso->vertices[2] ? 1 : 0);
   LLVMValueRef prim_stride_dw[4] = {};
   LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->i32);
   int stream_for_buffer[4] = { -1, -1, -1, -1 };
   unsigned bufmask_for_stream[4] = {};
   bool isgs = ctx->type == PIPE_SHADER_GEOMETRY;
   unsigned scratch_emit_base = isgs ? 4 : 0;
   LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->i32_0;
   unsigned scratch_offset_base = isgs ? 8 : 4;
   LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;
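   /* Layout of gs_ngg_scratch implied by the bases above: with an API GS,
    * dwords [0:3] hold the per-stream generated-primitive counts, [4:7] the
    * per-stream emit counts and [8:11] the per-buffer streamout offsets;
    * without an API GS, the emit counts start at 0 and the offsets at 4.
    */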

   ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);

   /* Determine the mapping of streamout buffers to vertex streams. */
   for (unsigned i = 0; i < so->num_outputs; ++i) {
      unsigned buf = so->output[i].output_buffer;
      unsigned stream = so->output[i].stream;
      assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
      stream_for_buffer[buf] = stream;
      bufmask_for_stream[stream] |= 1 << buf;
   }

   for (unsigned buffer = 0; buffer < 4; ++buffer) {
      if (stream_for_buffer[buffer] == -1)
         continue;

      assert(so->stride[buffer]);

      tmp = LLVMConstInt(ctx->i32, so->stride[buffer], false);
      prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
      prim_stride_dw_vgpr = ac_build_writelane(
         &ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
         LLVMConstInt(ctx->i32, buffer, false));

      so_buffer[buffer] = ac_build_load_to_sgpr(
         &ctx->ac, buf_ptr,
         LLVMConstInt(ctx->i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
   }

   tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->i32_0, "");
   ac_build_ifcc(&ctx->ac, tmp, 5200);
   {
      LLVMTypeRef gdsptr = LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GDS);
      LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->i32_0, gdsptr, "");

      /* Advance the streamout offsets in GDS. */
      LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
      LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");

      tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
      ac_build_ifcc(&ctx->ac, tmp, 5210);
      {
         if (isgs) {
            tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
            tmp = LLVMBuildLoad(builder, tmp, "");
         } else {
            tmp = ac_build_writelane(&ctx->ac, ctx->i32_0,
                                     ngg_get_prim_cnt(ctx), ctx->i32_0);
         }
         LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);

         unsigned swizzle[4];
         int unused_stream = -1;
         for (unsigned stream = 0; stream < 4; ++stream) {
            if (!info->num_stream_output_components[stream]) {
               unused_stream = stream;
               break;
            }
         }
         for (unsigned buffer = 0; buffer < 4; ++buffer) {
            if (stream_for_buffer[buffer] >= 0) {
               swizzle[buffer] = stream_for_buffer[buffer];
            } else {
               assert(unused_stream >= 0);
               swizzle[buffer] = unused_stream;
            }
         }

         tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
                                     swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
         tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");

         LLVMValueRef args[] = {
            LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
            tmp,
            ctx->i32_0, // ordering
            ctx->i32_0, // scope
            ctx->ac.i1false, // isVolatile
            LLVMConstInt(ctx->i32, 4 << 24, false), // OA index
            ctx->ac.i1true, // wave release
            ctx->ac.i1true, // wave done
         };
         tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
                                  ctx->i32, args, ARRAY_SIZE(args), 0);

         /* Keep offsets in a VGPR for quick retrieval via readlane by
          * the first wave for bounds checking, and also store in LDS
          * for retrieval by all waves later. */
         LLVMBuildStore(builder, tmp, offsets_vgpr);

         tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
                             scratch_offset_basev, "");
         tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
         LLVMBuildStore(builder, tmp, tmp2);
      }
      ac_build_endif(&ctx->ac, 5210);

      /* Determine the max emit per buffer. This is done via the SALU, in part
       * because LLVM can't generate divide-by-multiply if we try to do this
       * via VALU with one lane per buffer.
       */
      LLVMValueRef max_emit[4] = {};
      for (unsigned buffer = 0; buffer < 4; ++buffer) {
         if (stream_for_buffer[buffer] == -1)
            continue;

         LLVMValueRef bufsize_dw =
            LLVMBuildLShr(builder,
                          LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
                          i32_2, "");

         tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
         LLVMValueRef offset_dw =
            ac_build_readlane(&ctx->ac, tmp,
                              LLVMConstInt(ctx->i32, buffer, false));

         tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
         tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");

         tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
         max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->i32_0, tmp, "");
      }
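      /* In effect: max_emit = (bufsize_dw - offset_dw) / prim_stride_dw,
       * clamped to 0 when the buffer has already overflowed. E.g. with a
       * 1024 dword buffer, offset_dw = 1000 and a primitive stride of 12
       * dwords, floor(24 / 12) = 2 more primitives fit.
       */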

      /* Determine the number of emitted primitives per stream and fixup the
       * GDS counter if necessary.
       *
       * This is complicated by the fact that a single stream can emit to
       * multiple buffers (but luckily not vice versa).
       */
      LLVMValueRef emit_vgpr = ctx->i32_0;

      for (unsigned stream = 0; stream < 4; ++stream) {
         if (!info->num_stream_output_components[stream])
            continue;

         tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
         LLVMValueRef generated =
            ac_build_readlane(&ctx->ac, tmp,
                              LLVMConstInt(ctx->i32, stream, false));

         LLVMValueRef emit = generated;
         for (unsigned buffer = 0; buffer < 4; ++buffer) {
            if (stream_for_buffer[buffer] == stream)
               emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
         }

         emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
                                        LLVMConstInt(ctx->i32, stream, false));

         /* Fixup the offset using a plain GDS atomic if we overflowed. */
         tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
         ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
         tmp = LLVMBuildLShr(builder,
                             LLVMConstInt(ctx->i32, bufmask_for_stream[stream], false),
                             ac_get_thread_id(&ctx->ac), "");
         tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
         ac_build_ifcc(&ctx->ac, tmp, 5222);
         {
            tmp = LLVMBuildSub(builder, generated, emit, "");
            tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
            tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
            LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
                               LLVMAtomicOrderingMonotonic, false);
         }
         ac_build_endif(&ctx->ac, 5222);
         ac_build_endif(&ctx->ac, 5221);
      }

      tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
      ac_build_ifcc(&ctx->ac, tmp, 5225);
      {
         tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
                            scratch_emit_basev, "");
         tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
         LLVMBuildStore(builder, emit_vgpr, tmp);
      }
      ac_build_endif(&ctx->ac, 5225);
   }
   ac_build_endif(&ctx->ac, 5200);

   /* Determine the workgroup-relative per-thread / primitive offset into
    * the streamout buffers */
   struct ac_wg_scan primemit_scan[4] = {};

   if (isgs) {
      for (unsigned stream = 0; stream < 4; ++stream) {
         if (!info->num_stream_output_components[stream])
            continue;

         primemit_scan[stream].enable_exclusive = true;
         primemit_scan[stream].op = nir_op_iadd;
         primemit_scan[stream].src = nggso->prim_enable[stream];
         primemit_scan[stream].scratch =
            ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
                          LLVMConstInt(ctx->i32, 12 + 8 * stream, false));
         primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
         primemit_scan[stream].numwaves = get_tgsize(ctx);
         primemit_scan[stream].maxwaves = 8;
         ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
      }
   }

   ac_build_s_barrier(&ctx->ac);

   /* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
   LLVMValueRef wgoffset_dw[4] = {};

   {
      LLVMValueRef scratch_vgpr;

      tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
      scratch_vgpr = LLVMBuildLoad(builder, tmp, "");

      for (unsigned buffer = 0; buffer < 4; ++buffer) {
         if (stream_for_buffer[buffer] >= 0) {
            wgoffset_dw[buffer] = ac_build_readlane(
               &ctx->ac, scratch_vgpr,
               LLVMConstInt(ctx->i32, scratch_offset_base + buffer, false));
         }
      }

      for (unsigned stream = 0; stream < 4; ++stream) {
         if (info->num_stream_output_components[stream]) {
            nggso->emit[stream] = ac_build_readlane(
               &ctx->ac, scratch_vgpr,
               LLVMConstInt(ctx->i32, scratch_emit_base + stream, false));
         }
      }
   }

   /* Write out primitive data */
   for (unsigned stream = 0; stream < 4; ++stream) {
      if (!info->num_stream_output_components[stream])
         continue;

      if (isgs) {
         ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
      } else {
         primemit_scan[stream].result_exclusive = tid;
      }

      tmp = LLVMBuildICmp(builder, LLVMIntULT,
                          primemit_scan[stream].result_exclusive,
                          nggso->emit[stream], "");
      tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
      ac_build_ifcc(&ctx->ac, tmp, 5240);
      {
         LLVMValueRef offset_vtx =
            LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
                         nggso->num_vertices, "");

         for (unsigned i = 0; i < max_num_vertices; ++i) {
            tmp = LLVMBuildICmp(builder, LLVMIntULT,
                                LLVMConstInt(ctx->i32, i, false),
                                nggso->num_vertices, "");
            ac_build_ifcc(&ctx->ac, tmp, 5241);
            build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
                                   stream, offset_vtx, nggso->vertices[i]);
            ac_build_endif(&ctx->ac, 5241);
            offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->i32_1, "");
         }
      }
      ac_build_endif(&ctx->ac, 5240);
   }
}

static unsigned ngg_nogs_vertex_size(struct si_shader *shader)
{
   unsigned lds_vertex_size = 0;

   /* The edge flag is always stored in the last element, which also
    * serves as padding to reduce LDS bank conflicts. */
   if (shader->selector->so.num_outputs)
      lds_vertex_size = 4 * shader->selector->info.num_outputs + 1;
   if (shader->selector->info.writes_edgeflag)
      lds_vertex_size = MAX2(lds_vertex_size, 1);

   /* LDS size for passing data from GS to ES.
    * GS stores Primitive IDs into LDS at the address corresponding
    * to the ES thread of the provoking vertex. All ES threads
    * load and export PrimitiveID for their thread.
    */
   if (shader->selector->type == PIPE_SHADER_VERTEX &&
       shader->key.mono.u.vs_export_prim_id)
      lds_vertex_size = MAX2(lds_vertex_size, 1);

   return lds_vertex_size;
}
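/* Example: a VS with 5 outputs and streamout enabled uses 4 * 5 + 1 = 21
 * dwords per vertex; the odd size is what reduces LDS bank conflicts, and
 * the extra dword doubles as edge flag storage.
 */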

/**
 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
 * for the vertex outputs.
 */
static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx,
                                        LLVMValueRef vtxid)
{
   /* The extra dword is used to avoid LDS bank conflicts. */
   unsigned vertex_size = ngg_nogs_vertex_size(ctx->shader);
   LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, vertex_size);
   LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
   LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
   return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
}

/**
 * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
 */
void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
                             unsigned max_outputs,
                             LLVMValueRef *addrs)
{
   struct si_shader_context *ctx = si_shader_context_from_abi(abi);
   struct si_shader_selector *sel = ctx->shader->selector;
   struct si_shader_info *info = &sel->info;
   struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp, tmp2;

   assert(!ctx->shader->is_gs_copy_shader);
   assert(info->num_outputs <= max_outputs);

   LLVMValueRef vertex_ptr = NULL;

   if (sel->so.num_outputs || sel->info.writes_edgeflag)
      vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));

   for (unsigned i = 0; i < info->num_outputs; i++) {
      outputs[i].semantic_name = info->output_semantic_name[i];
      outputs[i].semantic_index = info->output_semantic_index[i];

      for (unsigned j = 0; j < 4; j++) {
         outputs[i].vertex_stream[j] =
            (info->output_streams[i] >> (2 * j)) & 3;

         /* TODO: we may store more outputs than streamout needs,
          * but streamout performance isn't that important.
          */
         if (sel->so.num_outputs) {
            tmp = ac_build_gep0(&ctx->ac, vertex_ptr,
                                LLVMConstInt(ctx->i32, 4 * i + j, false));
            tmp2 = LLVMBuildLoad(builder, addrs[4 * i + j], "");
            tmp2 = ac_to_integer(&ctx->ac, tmp2);
            LLVMBuildStore(builder, tmp2, tmp);
         }
      }

      /* Store the edgeflag at the end (if streamout is enabled) */
      if (info->output_semantic_name[i] == TGSI_SEMANTIC_EDGEFLAG &&
          sel->info.writes_edgeflag) {
         LLVMValueRef edgeflag = LLVMBuildLoad(builder, addrs[4 * i], "");
         /* The output is a float, but the hw expects a 1-bit integer. */
         edgeflag = LLVMBuildFPToUI(ctx->ac.builder, edgeflag, ctx->i32, "");
         edgeflag = ac_build_umin(&ctx->ac, edgeflag, ctx->i32_1);

         tmp = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
         tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
         LLVMBuildStore(builder, edgeflag, tmp);
      }
   }

   ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

   LLVMValueRef is_gs_thread = si_is_gs_thread(ctx);
   LLVMValueRef is_es_thread = si_is_es_thread(ctx);
   LLVMValueRef vtxindex[] = {
      si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16),
      si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16),
      si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16),
   };

   /* Determine the number of vertices per primitive. */
   unsigned num_vertices;
   LLVMValueRef num_vertices_val = ngg_get_vertices_per_prim(ctx, &num_vertices);

   /* Streamout */
   LLVMValueRef emitted_prims = NULL;

   if (sel->so.num_outputs) {
      struct ngg_streamout nggso = {};

      nggso.num_vertices = num_vertices_val;
      nggso.prim_enable[0] = is_gs_thread;

      for (unsigned i = 0; i < num_vertices; ++i)
         nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);

      build_streamout(ctx, &nggso);
      emitted_prims = nggso.emit[0];
   }

   LLVMValueRef user_edgeflags[3] = {};

   if (sel->info.writes_edgeflag) {
      /* Streamout already inserted the barrier, so don't insert it again. */
      if (!sel->so.num_outputs)
         ac_build_s_barrier(&ctx->ac);

      ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
      /* Load edge flags from ES threads and store them into VGPRs in GS threads. */
      for (unsigned i = 0; i < num_vertices; i++) {
         tmp = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
         tmp2 = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
         tmp = ac_build_gep0(&ctx->ac, tmp, tmp2);
         tmp = LLVMBuildLoad(builder, tmp, "");
         tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");

         user_edgeflags[i] = ac_build_alloca_undef(&ctx->ac, ctx->i1, "");
         LLVMBuildStore(builder, tmp, user_edgeflags[i]);
      }
      ac_build_endif(&ctx->ac, 5400);
   }

   /* Copy Primitive IDs from GS threads to the LDS address corresponding
    * to the ES thread of the provoking vertex.
    */
   if (ctx->type == PIPE_SHADER_VERTEX &&
       ctx->shader->key.mono.u.vs_export_prim_id) {
      /* Streamout and edge flags use LDS; wait until those uses are idle so we can reuse it. */
      if (sel->so.num_outputs || sel->info.writes_edgeflag)
         ac_build_s_barrier(&ctx->ac);

      ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
      /* Extract the PROVOKING_VTX_INDEX field. */
      LLVMValueRef provoking_vtx_in_prim =
         si_unpack_param(ctx, ctx->vs_state_bits, 4, 2);

      /* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
      LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
      LLVMValueRef provoking_vtx_index =
         LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");
      LLVMValueRef vertex_ptr = ngg_nogs_vertex_ptr(ctx, provoking_vtx_index);

      LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.gs_prim_id),
                     ac_build_gep0(&ctx->ac, vertex_ptr, ctx->i32_0));
      ac_build_endif(&ctx->ac, 5400);
   }

   /* Update query buffer */
   if (ctx->screen->use_ngg_streamout &&
       !info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
      tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
      tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
      ac_build_ifcc(&ctx->ac, tmp, 5029); /* if (STREAMOUT_QUERY_ENABLED) */
      tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
      ac_build_ifcc(&ctx->ac, tmp, 5030);
      tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
                          sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
      ac_build_ifcc(&ctx->ac, tmp, 5031);
      {
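         /* Lane 0 adds the generated-primitive count at buffer offset 16
          * (stream[0].generated_primitives, per the comment below); with
          * streamout enabled, lane 1 is set up to add the emitted count at
          * offset 24 instead.
          */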
         LLVMValueRef args[] = {
            ngg_get_prim_cnt(ctx),
            ngg_get_query_buf(ctx),
            LLVMConstInt(ctx->i32, 16, false), /* offset of stream[0].generated_primitives */
            ctx->i32_0, /* soffset */
            ctx->i32_0, /* cachepolicy */
         };

         if (sel->so.num_outputs) {
            args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->i32_1);
            args[2] = ac_build_writelane(&ctx->ac, args[2],
                                         LLVMConstInt(ctx->i32, 24, false), ctx->i32_1);
         }

         /* TODO: should this be 64-bit atomics? */
         ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
                            ctx->i32, args, 5, 0);
      }
      ac_build_endif(&ctx->ac, 5031);
      ac_build_endif(&ctx->ac, 5030);
      ac_build_endif(&ctx->ac, 5029);
   }

   /* Build the primitive export. */
   ac_build_ifcc(&ctx->ac, is_gs_thread, 6001);
   {
      struct ac_ngg_prim prim = {};

      if (gfx10_is_ngg_passthrough(ctx->shader)) {
         prim.passthrough = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset);
      } else {
         prim.num_vertices = num_vertices;
         prim.isnull = ctx->ac.i1false;
         memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

         for (unsigned i = 0; i < num_vertices; ++i) {
            prim.edgeflag[i] = ngg_get_initial_edgeflag(ctx, i);

            if (sel->info.writes_edgeflag) {
               tmp2 = LLVMBuildLoad(builder, user_edgeflags[i], "");
               prim.edgeflag[i] = LLVMBuildAnd(builder, prim.edgeflag[i],
                                               tmp2, "");
            }
         }
      }

      ac_build_export_prim(&ctx->ac, &prim);
   }
   ac_build_endif(&ctx->ac, 6001);

   /* Export per-vertex data (positions and parameters). */
   ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
   {
      unsigned i;

      /* Unconditionally (re-)load the values for proper SSA form. */
      for (i = 0; i < info->num_outputs; i++) {
         for (unsigned j = 0; j < 4; j++) {
            outputs[i].values[j] =
               LLVMBuildLoad(builder,
                             addrs[4 * i + j],
                             "");
         }
      }

      if (ctx->shader->key.mono.u.vs_export_prim_id) {
         outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
         outputs[i].semantic_index = 0;

         if (ctx->type == PIPE_SHADER_VERTEX) {
            /* Wait for GS stores to finish. */
            ac_build_s_barrier(&ctx->ac);

            tmp = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
            tmp = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
            outputs[i].values[0] = LLVMBuildLoad(builder, tmp, "");
         } else {
            assert(ctx->type == PIPE_SHADER_TESS_EVAL);
            outputs[i].values[0] = si_get_primitive_id(ctx, 0);
         }

         outputs[i].values[0] = ac_to_float(&ctx->ac, outputs[i].values[0]);
         for (unsigned j = 1; j < 4; j++)
            outputs[i].values[j] = LLVMGetUndef(ctx->f32);

         memset(outputs[i].vertex_stream, 0,
                sizeof(outputs[i].vertex_stream));
         i++;
      }

      si_llvm_export_vs(ctx, outputs, i);
   }
   ac_build_endif(&ctx->ac, 6002);
}

static LLVMValueRef
ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
{
   const struct si_shader_selector *sel = ctx->shader->selector;
   const struct si_shader_info *info = &sel->info;

   LLVMTypeRef elements[2] = {
      LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
      LLVMArrayType(ctx->ac.i8, 4),
   };
   LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
   type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
   return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}

/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive
 * for rasterization.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
static LLVMValueRef
ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
{
   struct si_shader_selector *sel = ctx->shader->selector;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

   /* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
   unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
   if (write_stride_2exp) {
      LLVMValueRef row =
         LLVMBuildLShr(builder, vertexidx,
                       LLVMConstInt(ctx->ac.i32, 5, false), "");
      LLVMValueRef swizzle =
         LLVMBuildAnd(builder, row,
                      LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
                                   false), "");
      vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
   }

   return ac_build_gep0(&ctx->ac, storage, vertexidx);
}
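/* Example of the swizzle above (illustrative): with gs_max_out_vertices = 4,
 * write_stride_2exp is 2, so vertices in row r (rows are groups of 32) get
 * their index XORed with (r & 3), staggering consecutive rows across LDS
 * banks.
 */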

static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
                       LLVMValueRef emitidx)
{
   struct si_shader_selector *sel = ctx->shader->selector;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp;

   tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
   tmp = LLVMBuildMul(builder, tmp, gsthread, "");
   const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
   return ngg_gs_vertex_ptr(ctx, vertexidx);
}

static LLVMValueRef
ngg_gs_get_emit_output_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
                           unsigned out_idx)
{
   LLVMValueRef gep_idx[3] = {
      ctx->ac.i32_0, /* implied C-style array */
      ctx->ac.i32_0, /* first struct entry */
      LLVMConstInt(ctx->ac.i32, out_idx, false),
   };
   return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}

static LLVMValueRef
ngg_gs_get_emit_primflag_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
                             unsigned stream)
{
   LLVMValueRef gep_idx[3] = {
      ctx->ac.i32_0, /* implied C-style array */
      ctx->ac.i32_1, /* second struct entry */
      LLVMConstInt(ctx->ac.i32, stream, false),
   };
   return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}

void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
                              unsigned stream,
                              LLVMValueRef *addrs)
{
   const struct si_shader_selector *sel = ctx->shader->selector;
   const struct si_shader_info *info = &sel->info;
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef tmp;
   const LLVMValueRef vertexidx =
      LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

   /* If this thread has already emitted the declared maximum number of
    * vertices, skip the write: excessive vertex emissions are not
    * supposed to have any effect.
    */
   const LLVMValueRef can_emit =
      LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
                    LLVMConstInt(ctx->i32, sel->gs_max_out_vertices, false), "");

   tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
   tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
   LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

   ac_build_ifcc(&ctx->ac, can_emit, 9001);

   const LLVMValueRef vertexptr =
      ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
   unsigned out_idx = 0;
   for (unsigned i = 0; i < info->num_outputs; i++) {
      for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
         if (!(info->output_usagemask[i] & (1 << chan)) ||
             ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
            continue;

         LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
         out_val = ac_to_integer(&ctx->ac, out_val);
         LLVMBuildStore(builder, out_val,
                        ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx));
      }
   }
   assert(out_idx * 4 == sel->gsvs_vertex_size);

   /* Determine and store whether this vertex completed a primitive. */
   const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

   tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
   const LLVMValueRef iscompleteprim =
      LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

   /* Since the geometry shader emits triangle strips, we need to
    * track which primitive is odd and swap vertex indices to get
    * the correct vertex order.
    */
   LLVMValueRef is_odd = ctx->i1false;
   if (stream == 0 && u_vertices_per_prim(sel->gs_output_prim) == 3) {
      tmp = LLVMBuildAnd(builder, curverts, ctx->i32_1, "");
      is_odd = LLVMBuildICmp(builder, LLVMIntEQ, tmp, ctx->i32_1, "");
   }

   tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
   LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

   /* The per-vertex primitive flag encoding:
    *   bit 0: whether this vertex finishes a primitive
    *   bit 1: whether the primitive is odd (if we are emitting triangle strips)
    */
   tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
   tmp = LLVMBuildOr(builder, tmp,
                     LLVMBuildShl(builder,
                                  LLVMBuildZExt(builder, is_odd, ctx->ac.i8, ""),
                                  ctx->ac.i8_1, ""), "");
   LLVMBuildStore(builder, tmp, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream));
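   /* Example: when the 4th vertex of a triangle strip is emitted, curverts
    * is 3, so the vertex completes a primitive (bit 0 set) and 3 & 1 == 1
    * marks that triangle as odd (bit 1 set), i.e. primflag = 0b11.
    */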

   tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
   tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
   LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);

   ac_build_endif(&ctx->ac, 9001);
}

void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
{
   /* Zero out the part of LDS scratch that is used to accumulate the
    * per-stream generated primitive count.
    */
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
   LLVMValueRef tid = get_thread_id_in_tg(ctx);
   LLVMValueRef tmp;

   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->i32, 4, false), "");
   ac_build_ifcc(&ctx->ac, tmp, 5090);
   {
      LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
      LLVMBuildStore(builder, ctx->i32_0, ptr);
   }
   ac_build_endif(&ctx->ac, 5090);

   ac_build_s_barrier(&ctx->ac);
}

void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
{
   const struct si_shader_selector *sel = ctx->shader->selector;
   const struct si_shader_info *info = &sel->info;
   const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
   LLVMValueRef tmp, tmp2;

   /* Zero out remaining (non-emitted) primitive flags.
    *
    * Note: Alternatively, we could pass the relevant gs_next_vertex to
    *       the emit threads via LDS. This is likely worse in the expected
    *       typical case where each GS thread emits the full set of
    *       vertices.
    */
   for (unsigned stream = 0; stream < 4; ++stream) {
      if (!info->num_stream_output_components[stream])
         continue;

      const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

      ac_build_bgnloop(&ctx->ac, 5100);

      const LLVMValueRef vertexidx =
         LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
      tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
                          LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
      ac_build_ifcc(&ctx->ac, tmp, 5101);
      ac_build_break(&ctx->ac);
      ac_build_endif(&ctx->ac, 5101);

      tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
      LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

      tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
      LLVMBuildStore(builder, i8_0, ngg_gs_get_emit_primflag_ptr(ctx, tmp, stream));

      ac_build_endloop(&ctx->ac, 5100);
   }

   /* Accumulate generated primitives counts across the entire threadgroup. */
   for (unsigned stream = 0; stream < 4; ++stream) {
      if (!info->num_stream_output_components[stream])
         continue;

      LLVMValueRef numprims =
         LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
      numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);

      tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->i32_0, "");
      ac_build_ifcc(&ctx->ac, tmp, 5105);
      {
         LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
                            ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
                                          LLVMConstInt(ctx->i32, stream, false)),
                            numprims, LLVMAtomicOrderingMonotonic, false);
      }
      ac_build_endif(&ctx->ac, 5105);
   }

   ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

   ac_build_s_barrier(&ctx->ac);

   const LLVMValueRef tid = get_thread_id_in_tg(ctx);
   LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);

   /* Streamout */
   if (sel->so.num_outputs) {
      struct ngg_streamout nggso = {};

      nggso.num_vertices = LLVMConstInt(ctx->i32, verts_per_prim, false);

      LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
      for (unsigned stream = 0; stream < 4; ++stream) {
         if (!info->num_stream_output_components[stream])
            continue;

         tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream), "");
         tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
         tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
         nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
      }

      for (unsigned i = 0; i < verts_per_prim; ++i) {
         tmp = LLVMBuildSub(builder, tid,
                            LLVMConstInt(ctx->i32, verts_per_prim - i - 1, false), "");
         tmp = ngg_gs_vertex_ptr(ctx, tmp);
         nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
      }

      build_streamout(ctx, &nggso);
   }

   /* Write shader query data. */
   if (ctx->screen->use_ngg_streamout) {
      tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
      tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
      ac_build_ifcc(&ctx->ac, tmp, 5109); /* if (STREAMOUT_QUERY_ENABLED) */
      unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
      tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
                          LLVMConstInt(ctx->i32, num_query_comps, false), "");
      ac_build_ifcc(&ctx->ac, tmp, 5110);
      {
         LLVMValueRef offset;
         tmp = tid;
         if (sel->so.num_outputs)
            tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->i32, 3, false), "");
         offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 32, false), "");
         if (sel->so.num_outputs) {
            tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->i32, 2, false), "");
            tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 8, false), "");
            offset = LLVMBuildAdd(builder, offset, tmp, "");
         }

         tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
         LLVMValueRef args[] = {
            tmp,
            ngg_get_query_buf(ctx),
            offset,
            LLVMConstInt(ctx->i32, 16, false), /* soffset */
            ctx->i32_0, /* cachepolicy */
         };
         ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
                            ctx->i32, args, 5, 0);
      }
      ac_build_endif(&ctx->ac, 5110);
      ac_build_endif(&ctx->ac, 5109);
   }

   /* Determine vertex liveness. */
   LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");

   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
   ac_build_ifcc(&ctx->ac, tmp, 5120);
   {
      for (unsigned i = 0; i < verts_per_prim; ++i) {
         const LLVMValueRef primidx =
            LLVMBuildAdd(builder, tid,
                         LLVMConstInt(ctx->ac.i32, i, false), "");

         if (i > 0) {
            tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
            ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
         }

         /* Load primitive liveness */
         tmp = ngg_gs_vertex_ptr(ctx, primidx);
         tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
         const LLVMValueRef primlive =
            LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

         tmp = LLVMBuildLoad(builder, vertliveptr, "");
         tmp = LLVMBuildOr(builder, tmp, primlive, "");
         LLVMBuildStore(builder, tmp, vertliveptr);

         if (i > 0)
            ac_build_endif(&ctx->ac, 5121 + i);
      }
   }
   ac_build_endif(&ctx->ac, 5120);

   /* Inclusive scan addition across the current wave. */
   LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
   struct ac_wg_scan vertlive_scan = {};
   vertlive_scan.op = nir_op_iadd;
   vertlive_scan.enable_reduce = true;
   vertlive_scan.enable_exclusive = true;
   vertlive_scan.src = vertlive;
   vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->i32_0);
   vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
   vertlive_scan.numwaves = get_tgsize(ctx);
   vertlive_scan.maxwaves = 8;

   ac_build_wg_scan(&ctx->ac, &vertlive_scan);

   /* Skip all exports (including index exports) when possible. At least on
    * early gfx10 revisions this is also required to avoid hangs.
    */
   LLVMValueRef have_exports =
      LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
   num_emit_threads =
      LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

   /* Allocate export space. Send this message as early as possible, to
    * hide the latency of the SQ <-> SPI roundtrip.
    *
    * Note: We could consider compacting primitives for export as well.
    *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
    *       prim data per clock and skips null primitives at no additional
    *       cost. So compacting primitives can only be beneficial when
    *       there are 4 or more contiguous null primitives in the export
    *       (in the common case of single-dword prim exports).
    */
   ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
                                 vertlive_scan.result_reduce, num_emit_threads);

   /* Setup the reverse vertex compaction permutation. We re-use stream 1
    * of the primitive liveness flags, relying on the fact that each
    * threadgroup can have at most 256 threads. */
   ac_build_ifcc(&ctx->ac, vertlive, 5130);
   {
      tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
      tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
      LLVMBuildStore(builder, tmp2, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1));
   }
   ac_build_endif(&ctx->ac, 5130);

   ac_build_s_barrier(&ctx->ac);

   /* Export primitive data */
   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
   ac_build_ifcc(&ctx->ac, tmp, 5140);
   {
      LLVMValueRef flags;
      struct ac_ngg_prim prim = {};
      prim.num_vertices = verts_per_prim;

      tmp = ngg_gs_vertex_ptr(ctx, tid);
      flags = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
      prim.isnull = LLVMBuildNot(builder, LLVMBuildTrunc(builder, flags, ctx->i1, ""), "");

      for (unsigned i = 0; i < verts_per_prim; ++i) {
         prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
                                      LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
         prim.edgeflag[i] = ctx->ac.i1false;
      }

      /* Geometry shaders output triangle strips, but NGG expects triangles.
       * We need to change the vertex order for odd triangles to get correct
       * front/back facing by swapping 2 vertex indices, but we also have to
       * keep the provoking vertex in the same place.
       *
       * If the first vertex is provoking, swap index 1 and 2.
       * If the last vertex is provoking, swap index 0 and 1.
       */
      if (verts_per_prim == 3) {
         LLVMValueRef is_odd = LLVMBuildLShr(builder, flags, ctx->ac.i8_1, "");
         is_odd = LLVMBuildTrunc(builder, is_odd, ctx->i1, "");
         LLVMValueRef flatshade_first =
            LLVMBuildICmp(builder, LLVMIntEQ,
                          si_unpack_param(ctx, ctx->vs_state_bits, 4, 2),
                          ctx->i32_0, "");

         struct ac_ngg_prim in = prim;
         prim.index[0] = LLVMBuildSelect(builder, flatshade_first,
                                         in.index[0],
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[1], in.index[0], ""), "");
         prim.index[1] = LLVMBuildSelect(builder, flatshade_first,
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[2], in.index[1], ""),
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[0], in.index[1], ""), "");
         prim.index[2] = LLVMBuildSelect(builder, flatshade_first,
                                         LLVMBuildSelect(builder, is_odd,
                                                         in.index[1], in.index[2], ""),
                                         in.index[2], "");
      }
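      /* Net effect of the selects above for an odd triangle:
       *   flatshade_first (first vertex provoking): (0, 1, 2) -> (0, 2, 1)
       *   otherwise (last vertex provoking):        (0, 1, 2) -> (1, 0, 2)
       * Even triangles keep their original vertex order.
       */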

      ac_build_export_prim(&ctx->ac, &prim);
   }
   ac_build_endif(&ctx->ac, 5140);

   /* Export position and parameter data */
   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
   ac_build_ifcc(&ctx->ac, tmp, 5145);
   {
      struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];

      tmp = ngg_gs_vertex_ptr(ctx, tid);
      tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1), "");
      tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
      const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

      unsigned out_idx = 0;
      for (unsigned i = 0; i < info->num_outputs; i++) {
         outputs[i].semantic_name = info->output_semantic_name[i];
         outputs[i].semantic_index = info->output_semantic_index[i];

         for (unsigned j = 0; j < 4; j++, out_idx++) {
            tmp = ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx);
            tmp = LLVMBuildLoad(builder, tmp, "");
            outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
            outputs[i].vertex_stream[j] =
               (info->output_streams[i] >> (2 * j)) & 3;
         }
      }

      si_llvm_export_vs(ctx, outputs, info->num_outputs);
   }
   ac_build_endif(&ctx->ac, 5145);
}

static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
                                     unsigned min_verts_per_prim, bool use_adjacency)
{
   unsigned max_reuse = max_esverts - min_verts_per_prim;
   if (use_adjacency)
      max_reuse /= 2;
   *max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}
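/* Example: with max_esverts = 128 and triangles (min_verts_per_prim = 3),
 * at most 1 + (128 - 3) = 126 primitives fit, since each primitive beyond
 * the first consumes at least one new vertex; adjacency halves the
 * reusable vertices.
 */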

/**
 * Determine subgroup information like maximum number of vertices and prims.
 *
 * This happens before the shader is uploaded, since LDS relocations during
 * upload depend on the subgroup size.
 */
void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
{
   const struct si_shader_selector *gs_sel = shader->selector;
   const struct si_shader_selector *es_sel =
      shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
   const enum pipe_shader_type gs_type = gs_sel->type;
   const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
   const unsigned input_prim = si_get_input_prim(gs_sel);
   const bool use_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
                              input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
   const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
   const unsigned min_verts_per_prim =
      gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;

   /* All these are in dwords: */
   /* We can't allow using the whole LDS, because GS waves compete with
    * other shader stages for LDS space.
    *
    * TODO: We should really take the shader's internal LDS use into
    *       account. The linker will fail if the size is greater than
    *       8K dwords.
    */
   const unsigned max_lds_size = 8 * 1024 - 768;
   const unsigned target_lds_size = max_lds_size;
   unsigned esvert_lds_size = 0;
   unsigned gsprim_lds_size = 0;

   /* All these are per subgroup: */
   bool max_vert_out_per_gs_instance = false;
   unsigned max_esverts_base = 128;
   unsigned max_gsprims_base = 128; /* default prim group size clamp */

   /* Hardware has the following non-natural restrictions on the value
    * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of the draw:
    * - at most 252 for any line input primitive type
    * - at most 251 for any quad input primitive type
    * - at most 251 for triangle strips with adjacency (this happens to
    *   be the natural limit for triangle *lists* with adjacency)
    */
   max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

   if (gs_type == PIPE_SHADER_GEOMETRY) {
      unsigned max_out_verts_per_gsprim =
         gs_sel->gs_max_out_vertices * gs_num_invocations;

      if (max_out_verts_per_gsprim <= 256) {
         if (max_out_verts_per_gsprim) {
            max_gsprims_base = MIN2(max_gsprims_base,
                                    256 / max_out_verts_per_gsprim);
         }
      } else {
         /* Use special multi-cycling mode in which each GS
          * instance gets its own subgroup. Does not work with
          * tessellation. */
         max_vert_out_per_gs_instance = true;
         max_gsprims_base = 1;
         max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
      }

      esvert_lds_size = es_sel->esgs_itemsize / 4;
      gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
   } else {
      /* VS and TES. */
      /* LDS size for passing data from ES to GS. */
      esvert_lds_size = ngg_nogs_vertex_size(shader);
   }

   unsigned max_gsprims = max_gsprims_base;
   unsigned max_esverts = max_esverts_base;

   if (esvert_lds_size)
      max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
   if (gsprim_lds_size)
      max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

   max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
   clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
   assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

   if (esvert_lds_size || gsprim_lds_size) {
      /* Now that we have a rough proportionality between esverts
       * and gsprims based on the primitive type, scale both of them
       * down simultaneously based on required LDS space.
       *
       * We could be smarter about this if we knew how much vertex
       * reuse to expect.
       */
      unsigned lds_total = max_esverts * esvert_lds_size +
                           max_gsprims * gsprim_lds_size;
      if (lds_total > target_lds_size) {
         max_esverts = max_esverts * target_lds_size / lds_total;
         max_gsprims = max_gsprims * target_lds_size / lds_total;

         max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
         clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
                                  min_verts_per_prim, use_adjacency);
         assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
      }
   }

   /* Round up towards full wave sizes for better ALU utilization. */
   if (!max_vert_out_per_gs_instance) {
      const unsigned wavesize = gs_sel->screen->ge_wave_size;
      unsigned orig_max_esverts;
      unsigned orig_max_gsprims;
      do {
         orig_max_esverts = max_esverts;
         orig_max_gsprims = max_gsprims;

         max_esverts = align(max_esverts, wavesize);
         max_esverts = MIN2(max_esverts, max_esverts_base);
         if (esvert_lds_size)
            max_esverts = MIN2(max_esverts,
                               (max_lds_size - max_gsprims * gsprim_lds_size) /
                               esvert_lds_size);
         max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

         max_gsprims = align(max_gsprims, wavesize);
         max_gsprims = MIN2(max_gsprims, max_gsprims_base);
         if (gsprim_lds_size)
            max_gsprims = MIN2(max_gsprims,
                               (max_lds_size - max_esverts * esvert_lds_size) /
                               gsprim_lds_size);
         clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
                                  min_verts_per_prim, use_adjacency);
         assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
      } while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
   }
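   /* The loop above runs to a fixed point: rounding one limit up to a wave
    * multiple can shrink the other limit through the shared LDS budget, so
    * the clamps are repeated until neither value changes.
    */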

   /* Hardware restriction: minimum value of max_esverts */
   max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);

   unsigned max_out_vertices =
      max_vert_out_per_gs_instance ? gs_sel->gs_max_out_vertices :
      gs_type == PIPE_SHADER_GEOMETRY ?
      max_gsprims * gs_num_invocations * gs_sel->gs_max_out_vertices :
      max_esverts;
   assert(max_out_vertices <= 256);

   unsigned prim_amp_factor = 1;
   if (gs_type == PIPE_SHADER_GEOMETRY) {
      /* Number of output primitives per GS input primitive after
       * GS instancing. */
      prim_amp_factor = gs_sel->gs_max_out_vertices;
   }

   /* The GE only checks against the maximum number of ES verts after
    * allocating a full GS primitive. So we need to ensure that whenever
    * this check passes, there is enough space for a full primitive without
    * vertex reuse.
    */
   shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
   shader->ngg.max_gsprims = max_gsprims;
   shader->ngg.max_out_verts = max_out_vertices;
   shader->ngg.prim_amp_factor = prim_amp_factor;
   shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;

   shader->gs_info.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
   shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;

   assert(shader->ngg.hw_max_esverts >= 24); /* HW limitation */
}