radeonsi/gfx10: separate code for getting edgeflags from the gs_invocation_id VGPR
[mesa.git] src/gallium/drivers/radeonsi/gfx10_shader_ngg.c
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_shader_internal.h"

#include "sid.h"

#include "util/u_memory.h"
#include "util/u_prim.h"

static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
}

static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->merged_wave_info, 28, 4);
}

static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
			   LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
	return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}

static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->gs_tg_info, 12, 9);
}

static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->gs_tg_info, 22, 9);
}

static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->gs_tg_info, 0, 12);
}

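/* A note on the gs_tg_info layout, as implied by the unpack offsets above
 * (a reader's summary derived from this code, not an authoritative hardware
 * description): bits [0..11] hold the ordered wave ID used for streamout
 * ordering, bits [12..20] the subgroup vertex count, and bits [22..30] the
 * subgroup primitive count.
 */
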
static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
{
	LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);

	return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
				     LLVMConstInt(ctx->i32, GFX10_GS_QUERY_BUF, false));
}

static LLVMValueRef ngg_get_initial_edgeflag(struct si_shader_context *ctx, unsigned index)
{
	if (ctx->type == PIPE_SHADER_VERTEX) {
		LLVMValueRef tmp;
		tmp = LLVMBuildLShr(ctx->ac.builder,
				    ac_get_arg(&ctx->ac, ctx->args.gs_invocation_id),
				    LLVMConstInt(ctx->ac.i32, 8 + index, false), "");
		return LLVMBuildTrunc(ctx->ac.builder, tmp, ctx->ac.i1, "");
	}
	return ctx->i1false;
}

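/* Worked example of the extraction above (my reading of the code, not a
 * hardware spec): the initial edge flags are packed into the
 * gs_invocation_id VGPR starting at bit 8, one bit per primitive vertex,
 * so for a triangle, bits 8, 9 and 10 hold the flags of vertices 0, 1
 * and 2. ngg_get_initial_edgeflag(ctx, 1) therefore shifts right by 9
 * and truncates to i1 to isolate the flag of the second vertex.
 */
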
static void build_streamout_vertex(struct si_shader_context *ctx,
				   LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
				   unsigned stream, LLVMValueRef offset_vtx,
				   LLVMValueRef vertexptr)
{
	struct si_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef offset[4] = {};
	LLVMValueRef tmp;

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (!wg_offset_dw[buffer])
			continue;

		tmp = LLVMBuildMul(builder, offset_vtx,
				   LLVMConstInt(ctx->i32, so->stride[buffer], false), "");
		tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
		offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->i32, 2, false), "");
	}

	for (unsigned i = 0; i < so->num_outputs; ++i) {
		if (so->output[i].stream != stream)
			continue;

		unsigned reg = so->output[i].register_index;
		struct si_shader_output_values out;
		out.semantic_name = info->output_semantic_name[reg];
		out.semantic_index = info->output_semantic_index[reg];

		for (unsigned comp = 0; comp < 4; comp++) {
			tmp = ac_build_gep0(&ctx->ac, vertexptr,
					    LLVMConstInt(ctx->i32, 4 * reg + comp, false));
			out.values[comp] = LLVMBuildLoad(builder, tmp, "");
			out.vertex_stream[comp] =
				(info->output_streams[reg] >> (2 * comp)) & 3;
		}

		si_emit_streamout_output(ctx, so_buffer, offset, &so->output[i], &out);
	}
}

struct ngg_streamout {
	LLVMValueRef num_vertices;

	/* per-thread data */
	LLVMValueRef prim_enable[4]; /* i1 per stream */
	LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */

	/* Output */
	LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
};

/**
 * Build streamout logic.
 *
 * Implies a barrier.
 *
 * Writes number of emitted primitives to gs_ngg_scratch[4:8].
 *
 * Clobbers gs_ngg_scratch[8:].
 */
static void build_streamout(struct si_shader_context *ctx,
			    struct ngg_streamout *nggso)
{
	struct si_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp, tmp2;
	LLVMValueRef i32_2 = LLVMConstInt(ctx->i32, 2, false);
	LLVMValueRef i32_4 = LLVMConstInt(ctx->i32, 4, false);
	LLVMValueRef i32_8 = LLVMConstInt(ctx->i32, 8, false);
	LLVMValueRef so_buffer[4] = {};
	unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
				    (nggso->vertices[2] ? 1 : 0);
	LLVMValueRef prim_stride_dw[4] = {};
	LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->i32);
	int stream_for_buffer[4] = { -1, -1, -1, -1 };
	unsigned bufmask_for_stream[4] = {};
	bool isgs = ctx->type == PIPE_SHADER_GEOMETRY;
	unsigned scratch_emit_base = isgs ? 4 : 0;
	LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->i32_0;
	unsigned scratch_offset_base = isgs ? 8 : 4;
	LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;

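	/* Sketch of the gs_ngg_scratch layout implied by the bases above (my
	 * summary, not from a spec): for GS, dwords [0..3] hold the per-stream
	 * generated-primitive counts, [4..7] the per-stream emit counts and
	 * [8..11] the streamout buffer offsets; for VS/TES, which only use
	 * stream 0, the emit counts start at dword 0 and the offsets at 4.
	 */
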
	ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);

	/* Determine the mapping of streamout buffers to vertex streams. */
	for (unsigned i = 0; i < so->num_outputs; ++i) {
		unsigned buf = so->output[i].output_buffer;
		unsigned stream = so->output[i].stream;
		assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
		stream_for_buffer[buf] = stream;
		bufmask_for_stream[stream] |= 1 << buf;
	}

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (stream_for_buffer[buffer] == -1)
			continue;

		assert(so->stride[buffer]);

		tmp = LLVMConstInt(ctx->i32, so->stride[buffer], false);
		prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
		prim_stride_dw_vgpr = ac_build_writelane(
			&ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
			LLVMConstInt(ctx->i32, buffer, false));

		so_buffer[buffer] = ac_build_load_to_sgpr(
			&ctx->ac, buf_ptr,
			LLVMConstInt(ctx->i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
	}

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5200);
	{
		LLVMTypeRef gdsptr = LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GDS);
		LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->i32_0, gdsptr, "");

		/* Advance the streamout offsets in GDS. */
		LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
		LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");

		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5210);
		{
			if (isgs) {
				tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
				tmp = LLVMBuildLoad(builder, tmp, "");
			} else {
				tmp = ac_build_writelane(&ctx->ac, ctx->i32_0,
							 ngg_get_prim_cnt(ctx), ctx->i32_0);
			}
			LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);

			unsigned swizzle[4];
			int unused_stream = -1;
			for (unsigned stream = 0; stream < 4; ++stream) {
				if (!info->num_stream_output_components[stream]) {
					unused_stream = stream;
					break;
				}
			}
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] >= 0) {
					swizzle[buffer] = stream_for_buffer[buffer];
				} else {
					assert(unused_stream >= 0);
					swizzle[buffer] = unused_stream;
				}
			}

			tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
						    swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
			tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");

			LLVMValueRef args[] = {
				LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
				tmp,
				ctx->i32_0, // ordering
				ctx->i32_0, // scope
				ctx->ac.i1false, // isVolatile
				LLVMConstInt(ctx->i32, 4 << 24, false), // OA index
				ctx->ac.i1true, // wave release
				ctx->ac.i1true, // wave done
			};
			tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
						 ctx->i32, args, ARRAY_SIZE(args), 0);

			/* Keep offsets in a VGPR for quick retrieval via readlane by
			 * the first wave for bounds checking, and also store in LDS
			 * for retrieval by all waves later. */
			LLVMBuildStore(builder, tmp, offsets_vgpr);

			tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					    scratch_offset_basev, "");
			tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
			LLVMBuildStore(builder, tmp, tmp2);
		}
		ac_build_endif(&ctx->ac, 5210);

		/* Determine the max emit per buffer. This is done via the SALU, in part
		 * because LLVM can't generate divide-by-multiply if we try to do this
		 * via VALU with one lane per buffer.
		 */
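		/* In other words (a restatement of the computation below): with
		 * bufsize_dw the buffer size and offset_dw the current write
		 * offset, each buffer can accept
		 *   max_emit = (bufsize_dw - offset_dw) / prim_stride_dw
		 * more primitives, clamped to 0 when the offset has already
		 * passed the end of the buffer.
		 */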
		LLVMValueRef max_emit[4] = {};
		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] == -1)
				continue;

			LLVMValueRef bufsize_dw =
				LLVMBuildLShr(builder,
					      LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
					      i32_2, "");

			tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
			LLVMValueRef offset_dw =
				ac_build_readlane(&ctx->ac, tmp,
						  LLVMConstInt(ctx->i32, buffer, false));

			tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
			tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");

			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
			max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->i32_0, tmp, "");
		}

		/* Determine the number of emitted primitives per stream and fixup the
		 * GDS counter if necessary.
		 *
		 * This is complicated by the fact that a single stream can emit to
		 * multiple buffers (but luckily not vice versa).
		 */
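		/* For example (illustrative only): if stream 0 feeds buffers 0
		 * and 1, then emit[0] = min(generated[0], max_emit[0],
		 * max_emit[1]), so the stream only emits as many primitives as
		 * its most constrained buffer can hold.
		 */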
		LLVMValueRef emit_vgpr = ctx->i32_0;

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
			LLVMValueRef generated =
				ac_build_readlane(&ctx->ac, tmp,
						  LLVMConstInt(ctx->i32, stream, false));

			LLVMValueRef emit = generated;
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] == stream)
					emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
			}

			emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
						       LLVMConstInt(ctx->i32, stream, false));

			/* Fixup the offset using a plain GDS atomic if we overflowed. */
			tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
			ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
			tmp = LLVMBuildLShr(builder,
					    LLVMConstInt(ctx->i32, bufmask_for_stream[stream], false),
					    ac_get_thread_id(&ctx->ac), "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			ac_build_ifcc(&ctx->ac, tmp, 5222);
			{
				tmp = LLVMBuildSub(builder, generated, emit, "");
				tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
				tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
				LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
						   LLVMAtomicOrderingMonotonic, false);
			}
			ac_build_endif(&ctx->ac, 5222);
			ac_build_endif(&ctx->ac, 5221);
		}

		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5225);
		{
			tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					   scratch_emit_basev, "");
			tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
			LLVMBuildStore(builder, emit_vgpr, tmp);
		}
		ac_build_endif(&ctx->ac, 5225);
	}
	ac_build_endif(&ctx->ac, 5200);

	/* Determine the workgroup-relative per-thread / primitive offset into
	 * the streamout buffers */
	struct ac_wg_scan primemit_scan[4] = {};

	if (isgs) {
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			primemit_scan[stream].enable_exclusive = true;
			primemit_scan[stream].op = nir_op_iadd;
			primemit_scan[stream].src = nggso->prim_enable[stream];
			primemit_scan[stream].scratch =
				ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
					      LLVMConstInt(ctx->i32, 12 + 8 * stream, false));
			primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
			primemit_scan[stream].numwaves = get_tgsize(ctx);
			primemit_scan[stream].maxwaves = 8;
			ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
		}
	}

	ac_build_s_barrier(&ctx->ac);

	/* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
	LLVMValueRef wgoffset_dw[4] = {};

	{
		LLVMValueRef scratch_vgpr;

		tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
		scratch_vgpr = LLVMBuildLoad(builder, tmp, "");

		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] >= 0) {
				wgoffset_dw[buffer] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_offset_base + buffer, false));
			}
		}

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (info->num_stream_output_components[stream]) {
				nggso->emit[stream] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_emit_base + stream, false));
			}
		}
	}

	/* Write out primitive data */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		if (isgs) {
			ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
		} else {
			primemit_scan[stream].result_exclusive = tid;
		}

		tmp = LLVMBuildICmp(builder, LLVMIntULT,
				    primemit_scan[stream].result_exclusive,
				    nggso->emit[stream], "");
		tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
		ac_build_ifcc(&ctx->ac, tmp, 5240);
		{
			LLVMValueRef offset_vtx =
				LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
					     nggso->num_vertices, "");

			for (unsigned i = 0; i < max_num_vertices; ++i) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT,
						    LLVMConstInt(ctx->i32, i, false),
						    nggso->num_vertices, "");
				ac_build_ifcc(&ctx->ac, tmp, 5241);
				build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
						       stream, offset_vtx, nggso->vertices[i]);
				ac_build_endif(&ctx->ac, 5241);
				offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->i32_1, "");
			}
		}
		ac_build_endif(&ctx->ac, 5240);
	}
}

static unsigned ngg_nogs_vertex_size(struct si_shader *shader)
{
	unsigned lds_vertex_size = 0;

	/* The edgeflag is always stored in the last element that's also
	 * used for padding to reduce LDS bank conflicts. */
	if (shader->selector->so.num_outputs)
		lds_vertex_size = 4 * shader->selector->info.num_outputs + 1;
	if (shader->selector->info.writes_edgeflag)
		lds_vertex_size = MAX2(lds_vertex_size, 1);

	return lds_vertex_size;
}

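/* Worked example (illustrative numbers): a VS with 8 outputs and streamout
 * enabled gets 4 * 8 + 1 = 33 dwords per vertex. The odd stride is what
 * spreads consecutive vertices across LDS banks, and the extra dword
 * doubles as storage for the user edge flag.
 */
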
/**
 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
 * for the vertex outputs.
 */
static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx,
					LLVMValueRef vtxid)
{
	/* The extra dword is used to avoid LDS bank conflicts. */
	unsigned vertex_size = ngg_nogs_vertex_size(ctx->shader);
	LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, vertex_size);
	LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
	LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
	return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
}

/**
 * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
 */
void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
			     unsigned max_outputs,
			     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_selector *sel = ctx->shader->selector;
	struct si_shader_info *info = &sel->info;
	struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp, tmp2;

	assert(!ctx->shader->is_gs_copy_shader);
	assert(info->num_outputs <= max_outputs);

	LLVMValueRef vertex_ptr = NULL;

	if (sel->so.num_outputs || sel->info.writes_edgeflag)
		vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));

	for (unsigned i = 0; i < info->num_outputs; i++) {
		outputs[i].semantic_name = info->output_semantic_name[i];
		outputs[i].semantic_index = info->output_semantic_index[i];

		for (unsigned j = 0; j < 4; j++) {
			outputs[i].vertex_stream[j] =
				(info->output_streams[i] >> (2 * j)) & 3;

			/* TODO: we may store more outputs than streamout needs,
			 * but streamout performance isn't that important.
			 */
			if (sel->so.num_outputs) {
				tmp = ac_build_gep0(&ctx->ac, vertex_ptr,
						    LLVMConstInt(ctx->i32, 4 * i + j, false));
				tmp2 = LLVMBuildLoad(builder, addrs[4 * i + j], "");
				tmp2 = ac_to_integer(&ctx->ac, tmp2);
				LLVMBuildStore(builder, tmp2, tmp);
			}
		}

		/* Store the edgeflag at the end (if streamout is enabled) */
		if (info->output_semantic_name[i] == TGSI_SEMANTIC_EDGEFLAG &&
		    sel->info.writes_edgeflag) {
			LLVMValueRef edgeflag = LLVMBuildLoad(builder, addrs[4 * i], "");
			/* The output is a float, but the hw expects a 1-bit integer. */
			edgeflag = LLVMBuildFPToUI(ctx->ac.builder, edgeflag, ctx->i32, "");
			edgeflag = ac_build_umin(&ctx->ac, edgeflag, ctx->i32_1);

			tmp = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
			tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
			LLVMBuildStore(builder, edgeflag, tmp);
		}
	}

	ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

	LLVMValueRef is_gs_thread = si_is_gs_thread(ctx);
	LLVMValueRef is_es_thread = si_is_es_thread(ctx);
	LLVMValueRef vtxindex[] = {
		si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16),
		si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16),
		si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16),
	};

	/* Determine the number of vertices per primitive. */
	unsigned num_vertices;
	LLVMValueRef num_vertices_val;

	if (ctx->type == PIPE_SHADER_VERTEX) {
		if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
			/* Blits always use axis-aligned rectangles with 3 vertices. */
			num_vertices = 3;
			num_vertices_val = LLVMConstInt(ctx->i32, 3, 0);
		} else {
			/* Extract OUTPRIM field. */
			tmp = si_unpack_param(ctx, ctx->vs_state_bits, 2, 2);
			num_vertices_val = LLVMBuildAdd(builder, tmp, ctx->i32_1, "");
			num_vertices = 3; /* TODO: optimize for points & lines */
		}
	} else {
		assert(ctx->type == PIPE_SHADER_TESS_EVAL);

		if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
			num_vertices = 1;
		else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
			num_vertices = 2;
		else
			num_vertices = 3;

		num_vertices_val = LLVMConstInt(ctx->i32, num_vertices, false);
	}

	/* Streamout */
	LLVMValueRef emitted_prims = NULL;

	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = num_vertices_val;
		nggso.prim_enable[0] = is_gs_thread;

		for (unsigned i = 0; i < num_vertices; ++i)
			nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);

		build_streamout(ctx, &nggso);
		emitted_prims = nggso.emit[0];
	}

	LLVMValueRef user_edgeflags[3] = {};

	if (sel->info.writes_edgeflag) {
		/* Streamout already inserted the barrier, so don't insert it again. */
		if (!sel->so.num_outputs)
			ac_build_s_barrier(&ctx->ac);

		ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
		/* Load edge flags from ES threads and store them into VGPRs in GS threads. */
		for (unsigned i = 0; i < num_vertices; i++) {
			tmp = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
			tmp2 = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
			tmp = ac_build_gep0(&ctx->ac, tmp, tmp2);
			tmp = LLVMBuildLoad(builder, tmp, "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");

			user_edgeflags[i] = ac_build_alloca_undef(&ctx->ac, ctx->i1, "");
			LLVMBuildStore(builder, tmp, user_edgeflags[i]);
		}
		ac_build_endif(&ctx->ac, 5400);
	}

	/* Copy Primitive IDs from GS threads to the LDS address corresponding
	 * to the ES thread of the provoking vertex.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX &&
	    ctx->shader->key.mono.u.vs_export_prim_id) {
		/* Streamout and edge flags use LDS. Make it idle, so that we can reuse it. */
		if (sel->so.num_outputs || sel->info.writes_edgeflag)
			ac_build_s_barrier(&ctx->ac);

		ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
		/* Extract the PROVOKING_VTX_INDEX field. */
		LLVMValueRef provoking_vtx_in_prim =
			si_unpack_param(ctx, ctx->vs_state_bits, 4, 2);

		/* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
		LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
		LLVMValueRef provoking_vtx_index =
			LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");

		LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.gs_prim_id),
			       ac_build_gep0(&ctx->ac, ctx->esgs_ring, provoking_vtx_index));
		ac_build_endif(&ctx->ac, 5400);
	}

	ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
				      ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));

	/* Update query buffer */
	/* TODO: this won't catch 96-bit clear_buffer via transform feedback. */
	if (ctx->screen->use_ngg_streamout &&
	    !info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
		tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
		tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
		ac_build_ifcc(&ctx->ac, tmp, 5029); /* if (STREAMOUT_QUERY_ENABLED) */
		tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5030);
		tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
				    sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5031);
		{
			LLVMValueRef args[] = {
				ngg_get_prim_cnt(ctx),
				ngg_get_query_buf(ctx),
				LLVMConstInt(ctx->i32, 16, false), /* offset of stream[0].generated_primitives */
				ctx->i32_0, /* soffset */
				ctx->i32_0, /* cachepolicy */
			};

			if (sel->so.num_outputs) {
				args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->i32_1);
				args[2] = ac_build_writelane(&ctx->ac, args[2],
							     LLVMConstInt(ctx->i32, 24, false), ctx->i32_1);
			}

			/* TODO: should this be 64-bit atomics? */
			ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
					   ctx->i32, args, 5, 0);
		}
		ac_build_endif(&ctx->ac, 5031);
		ac_build_endif(&ctx->ac, 5030);
		ac_build_endif(&ctx->ac, 5029);
	}

	/* Build the primitive export.
	 *
	 * For the first version, we will always build up all three indices
	 * independent of the primitive type. The additional garbage data
	 * shouldn't hurt.
	 *
	 * TODO: culling depends on the primitive type, so can have some
	 * interaction here.
	 */
	ac_build_ifcc(&ctx->ac, is_gs_thread, 6001);
	{
		struct ac_ngg_prim prim = {};

		if (gfx10_is_ngg_passthrough(ctx->shader)) {
			prim.passthrough = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset);
		} else {
			prim.num_vertices = num_vertices;
			prim.isnull = ctx->ac.i1false;
			memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

			for (unsigned i = 0; i < num_vertices; ++i) {
				prim.edgeflag[i] = ngg_get_initial_edgeflag(ctx, i);

				if (sel->info.writes_edgeflag) {
					tmp2 = LLVMBuildLoad(builder, user_edgeflags[i], "");
					prim.edgeflag[i] = LLVMBuildAnd(builder, prim.edgeflag[i],
									tmp2, "");
				}
			}
		}

		ac_build_export_prim(&ctx->ac, &prim);
	}
	ac_build_endif(&ctx->ac, 6001);

	/* Export per-vertex data (positions and parameters). */
	ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
	{
		unsigned i;

		/* Unconditionally (re-)load the values for proper SSA form. */
		for (i = 0; i < info->num_outputs; i++) {
			for (unsigned j = 0; j < 4; j++) {
				outputs[i].values[j] =
					LLVMBuildLoad(builder,
						      addrs[4 * i + j],
						      "");
			}
		}

		if (ctx->shader->key.mono.u.vs_export_prim_id) {
			outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
			outputs[i].semantic_index = 0;

			if (ctx->type == PIPE_SHADER_VERTEX) {
				/* Wait for GS stores to finish. */
				ac_build_s_barrier(&ctx->ac);

				tmp = ac_build_gep0(&ctx->ac, ctx->esgs_ring,
						    get_thread_id_in_tg(ctx));
				outputs[i].values[0] = LLVMBuildLoad(builder, tmp, "");
			} else {
				assert(ctx->type == PIPE_SHADER_TESS_EVAL);
				outputs[i].values[0] = si_get_primitive_id(ctx, 0);
			}

			outputs[i].values[0] = ac_to_float(&ctx->ac, outputs[i].values[0]);
			for (unsigned j = 1; j < 4; j++)
				outputs[i].values[j] = LLVMGetUndef(ctx->f32);

			memset(outputs[i].vertex_stream, 0,
			       sizeof(outputs[i].vertex_stream));
			i++;
		}

		si_llvm_export_vs(ctx, outputs, i);
	}
	ac_build_endif(&ctx->ac, 6002);
}

static LLVMValueRef
ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct si_shader_info *info = &sel->info;

	LLVMTypeRef elements[2] = {
		LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
		LLVMArrayType(ctx->ac.i8, 4),
	};
	LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
	type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
	return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}

/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive
 * for rasterization.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
static LLVMValueRef
ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

	/* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
	unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
	if (write_stride_2exp) {
		LLVMValueRef row =
			LLVMBuildLShr(builder, vertexidx,
				      LLVMConstInt(ctx->ac.i32, 5, false), "");
		LLVMValueRef swizzle =
			LLVMBuildAnd(builder, row,
				     LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
						  false), "");
		vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
	}

	return ac_build_gep0(&ctx->ac, storage, vertexidx);
}

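/* Worked example of the swizzle above (illustrative numbers): with
 * gs_max_out_vertices = 4, write_stride_2exp is 2, so each group of 32
 * vertices (row = vertexidx >> 5) XORs the low 2 bits of its row into the
 * index. Vertex 32 (row 1) maps to slot 33 and vertex 64 (row 2) to slot
 * 66, which staggers the rows across LDS banks while keeping the mapping
 * a per-row permutation.
 */
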
static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
		       LLVMValueRef emitidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
	tmp = LLVMBuildMul(builder, tmp, gsthread, "");
	const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
	return ngg_gs_vertex_ptr(ctx, vertexidx);
}

static LLVMValueRef
ngg_gs_get_emit_output_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
			   unsigned out_idx)
{
	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_0, /* first struct entry */
		LLVMConstInt(ctx->ac.i32, out_idx, false),
	};
	return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}

static LLVMValueRef
ngg_gs_get_emit_primflag_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
			     unsigned stream)
{
	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_1, /* second struct entry */
		LLVMConstInt(ctx->ac.i32, stream, false),
	};
	return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
}

void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
			      unsigned stream,
			      LLVMValueRef *addrs)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct si_shader_info *info = &sel->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	const LLVMValueRef vertexidx =
		LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 */
	const LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
			      LLVMConstInt(ctx->i32, sel->gs_max_out_vertices, false), "");

	tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
	tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
	LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

	ac_build_ifcc(&ctx->ac, can_emit, 9001);

	const LLVMValueRef vertexptr =
		ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
	unsigned out_idx = 0;
	for (unsigned i = 0; i < info->num_outputs; i++) {
		for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
			out_val = ac_to_integer(&ctx->ac, out_val);
			LLVMBuildStore(builder, out_val,
				       ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx));
		}
	}
	assert(out_idx * 4 == sel->gsvs_vertex_size);

	/* Determine and store whether this vertex completed a primitive. */
	const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

	tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
	const LLVMValueRef iscompleteprim =
		LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

	/* Since the geometry shader emits triangle strips, we need to
	 * track which primitive is odd and swap vertex indices to get
	 * the correct vertex order.
	 */
	LLVMValueRef is_odd = ctx->i1false;
	if (stream == 0 && u_vertices_per_prim(sel->gs_output_prim) == 3) {
		tmp = LLVMBuildAnd(builder, curverts, ctx->i32_1, "");
		is_odd = LLVMBuildICmp(builder, LLVMIntEQ, tmp, ctx->i32_1, "");
	}

	tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
	LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

	/* The per-vertex primitive flag encoding:
	 * bit 0: whether this vertex finishes a primitive
	 * bit 1: whether the primitive is odd (if we are emitting triangle strips)
	 */
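	/* For instance (just restating the encoding): a vertex that completes
	 * an odd strip triangle (e.g. the fourth vertex of a triangle strip)
	 * stores 0b11, the third vertex (completing the first, even triangle)
	 * stores 0b01, and a vertex that finishes no primitive stores 0b00.
	 */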
	tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
	tmp = LLVMBuildOr(builder, tmp,
			  LLVMBuildShl(builder,
				       LLVMBuildZExt(builder, is_odd, ctx->ac.i8, ""),
				       ctx->ac.i8_1, ""), "");
	LLVMBuildStore(builder, tmp, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream));

	tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
	tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
	LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);

	ac_build_endif(&ctx->ac, 9001);
}

void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
{
	/* Zero out the part of LDS scratch that is used to accumulate the
	 * per-stream generated primitive count.
	 */
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->i32, 4, false), "");
	ac_build_ifcc(&ctx->ac, tmp, 5090);
	{
		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
		LLVMBuildStore(builder, ctx->i32_0, ptr);
	}
	ac_build_endif(&ctx->ac, 5090);

	ac_build_s_barrier(&ctx->ac);
}

void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct si_shader_info *info = &sel->info;
	const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
	LLVMValueRef tmp, tmp2;

	/* Zero out remaining (non-emitted) primitive flags.
	 *
	 * Note: Alternatively, we could pass the relevant gs_next_vertex to
	 *       the emit threads via LDS. This is likely worse in the expected
	 *       typical case where each GS thread emits the full set of
	 *       vertices.
	 */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

		ac_build_bgnloop(&ctx->ac, 5100);

		const LLVMValueRef vertexidx =
			LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
		tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
				    LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5101);
		ac_build_break(&ctx->ac);
		ac_build_endif(&ctx->ac, 5101);

		tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
		LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

		tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
		LLVMBuildStore(builder, i8_0, ngg_gs_get_emit_primflag_ptr(ctx, tmp, stream));

		ac_build_endloop(&ctx->ac, 5100);
	}

	/* Accumulate generated primitives counts across the entire threadgroup. */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		LLVMValueRef numprims =
			LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
		numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);

		tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5105);
		{
			LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
					   ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
							 LLVMConstInt(ctx->i32, stream, false)),
					   numprims, LLVMAtomicOrderingMonotonic, false);
		}
		ac_build_endif(&ctx->ac, 5105);
	}

	ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

	ac_build_s_barrier(&ctx->ac);

	const LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);

	/* Streamout */
	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = LLVMConstInt(ctx->i32, verts_per_prim, false);

		LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream), "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
			nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
		}

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			tmp = LLVMBuildSub(builder, tid,
					   LLVMConstInt(ctx->i32, verts_per_prim - i - 1, false), "");
			tmp = ngg_gs_vertex_ptr(ctx, tmp);
			nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
		}

		build_streamout(ctx, &nggso);
	}

	/* Write shader query data. */
	if (ctx->screen->use_ngg_streamout) {
		tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
		tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
		ac_build_ifcc(&ctx->ac, tmp, 5109); /* if (STREAMOUT_QUERY_ENABLED) */
		unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
		tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
				    LLVMConstInt(ctx->i32, num_query_comps, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5110);
		{
			LLVMValueRef offset;
			tmp = tid;
			if (sel->so.num_outputs)
				tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->i32, 3, false), "");
			offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 32, false), "");
			if (sel->so.num_outputs) {
				tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->i32, 2, false), "");
				tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 8, false), "");
				offset = LLVMBuildAdd(builder, offset, tmp, "");
			}

			tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
			LLVMValueRef args[] = {
				tmp,
				ngg_get_query_buf(ctx),
				offset,
				LLVMConstInt(ctx->i32, 16, false), /* soffset */
				ctx->i32_0, /* cachepolicy */
			};
			ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
					   ctx->i32, args, 5, 0);
		}
		ac_build_endif(&ctx->ac, 5110);
		ac_build_endif(&ctx->ac, 5109);
	}

	/* TODO: culling */

	/* Determine vertex liveness. */
	LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5120);
	{
		for (unsigned i = 0; i < verts_per_prim; ++i) {
			const LLVMValueRef primidx =
				LLVMBuildAdd(builder, tid,
					     LLVMConstInt(ctx->ac.i32, i, false), "");

			if (i > 0) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
				ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
			}

			/* Load primitive liveness */
			tmp = ngg_gs_vertex_ptr(ctx, primidx);
			tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
			const LLVMValueRef primlive =
				LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

			tmp = LLVMBuildLoad(builder, vertliveptr, "");
			tmp = LLVMBuildOr(builder, tmp, primlive, "");
			LLVMBuildStore(builder, tmp, vertliveptr);

			if (i > 0)
				ac_build_endif(&ctx->ac, 5121 + i);
		}
	}
	ac_build_endif(&ctx->ac, 5120);

	/* Inclusive scan addition across the current wave. */
	LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
	struct ac_wg_scan vertlive_scan = {};
	vertlive_scan.op = nir_op_iadd;
	vertlive_scan.enable_reduce = true;
	vertlive_scan.enable_exclusive = true;
	vertlive_scan.src = vertlive;
	vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->i32_0);
	vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
	vertlive_scan.numwaves = get_tgsize(ctx);
	vertlive_scan.maxwaves = 8;

	ac_build_wg_scan(&ctx->ac, &vertlive_scan);

	/* Skip all exports (including index exports) when possible. At least on
	 * early gfx10 revisions this is also to avoid hangs.
	 */
	LLVMValueRef have_exports =
		LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
	num_emit_threads =
		LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

	/* Allocate export space. Send this message as early as possible, to
	 * hide the latency of the SQ <-> SPI roundtrip.
	 *
	 * Note: We could consider compacting primitives for export as well.
	 *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
	 *       prim data per clock and skips null primitives at no additional
	 *       cost. So compacting primitives can only be beneficial when
	 *       there are 4 or more contiguous null primitives in the export
	 *       (in the common case of single-dword prim exports).
	 */
	ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
				      vertlive_scan.result_reduce, num_emit_threads);

	/* Setup the reverse vertex compaction permutation. We re-use stream 1
	 * of the primitive liveness flags, relying on the fact that each
	 * threadgroup can have at most 256 threads. */
	ac_build_ifcc(&ctx->ac, vertlive, 5130);
	{
		tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
		tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
		LLVMBuildStore(builder, tmp2, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1));
	}
	ac_build_endif(&ctx->ac, 5130);

	ac_build_s_barrier(&ctx->ac);

	/* Export primitive data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5140);
	{
		LLVMValueRef flags;
		struct ac_ngg_prim prim = {};
		prim.num_vertices = verts_per_prim;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		flags = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
		prim.isnull = LLVMBuildNot(builder, LLVMBuildTrunc(builder, flags, ctx->i1, ""), "");

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
						     LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
			prim.edgeflag[i] = ctx->ac.i1false;
		}

		/* Geometry shaders output triangle strips, but NGG expects triangles.
		 * We need to change the vertex order for odd triangles to get correct
		 * front/back facing by swapping 2 vertex indices, but we also have to
		 * keep the provoking vertex in the same place.
		 *
		 * If the first vertex is provoking, swap index 1 and 2.
		 * If the last vertex is provoking, swap index 0 and 1.
		 */
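		/* As a truth table (restating the comment above): for an odd
		 * triangle (i0, i1, i2), flatshade_first emits (i0, i2, i1)
		 * and provoking-last emits (i1, i0, i2); even triangles pass
		 * through unchanged.
		 */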
		if (verts_per_prim == 3) {
			LLVMValueRef is_odd = LLVMBuildLShr(builder, flags, ctx->ac.i8_1, "");
			is_odd = LLVMBuildTrunc(builder, is_odd, ctx->i1, "");
			LLVMValueRef flatshade_first =
				LLVMBuildICmp(builder, LLVMIntEQ,
					      si_unpack_param(ctx, ctx->vs_state_bits, 4, 2),
					      ctx->i32_0, "");

			struct ac_ngg_prim in = prim;
			prim.index[0] = LLVMBuildSelect(builder, flatshade_first,
							in.index[0],
							LLVMBuildSelect(builder, is_odd,
									in.index[1], in.index[0], ""), "");
			prim.index[1] = LLVMBuildSelect(builder, flatshade_first,
							LLVMBuildSelect(builder, is_odd,
									in.index[2], in.index[1], ""),
							LLVMBuildSelect(builder, is_odd,
									in.index[0], in.index[1], ""), "");
			prim.index[2] = LLVMBuildSelect(builder, flatshade_first,
							LLVMBuildSelect(builder, is_odd,
									in.index[1], in.index[2], ""),
							in.index[2], "");
		}

		ac_build_export_prim(&ctx->ac, &prim);
	}
	ac_build_endif(&ctx->ac, 5140);

	/* Export position and parameter data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
	ac_build_ifcc(&ctx->ac, tmp, 5145);
	{
		struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1), "");
		tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
		const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

		unsigned out_idx = 0;
		for (unsigned i = 0; i < info->num_outputs; i++) {
			outputs[i].semantic_name = info->output_semantic_name[i];
			outputs[i].semantic_index = info->output_semantic_index[i];

			for (unsigned j = 0; j < 4; j++, out_idx++) {
				tmp = ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx);
				tmp = LLVMBuildLoad(builder, tmp, "");
				outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
				outputs[i].vertex_stream[j] =
					(info->output_streams[i] >> (2 * j)) & 3;
			}
		}

		si_llvm_export_vs(ctx, outputs, info->num_outputs);
	}
	ac_build_endif(&ctx->ac, 5145);
}

static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
				     unsigned min_verts_per_prim, bool use_adjacency)
{
	unsigned max_reuse = max_esverts - min_verts_per_prim;
	if (use_adjacency)
		max_reuse /= 2;
	*max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}

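/* Worked example (illustrative numbers): with max_esverts = 128 and
 * triangles with adjacency (min_verts_per_prim = 6, use_adjacency = true),
 * max_reuse = (128 - 6) / 2 = 61, so max_gsprims is clamped to 62: even
 * with perfect vertex reuse, each additional primitive needs new vertices
 * beyond the first one's worth.
 */
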
/**
 * Determine subgroup information like maximum number of vertices and prims.
 *
 * This happens before the shader is uploaded, since LDS relocations during
 * upload depend on the subgroup size.
 */
void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
{
	const struct si_shader_selector *gs_sel = shader->selector;
	const struct si_shader_selector *es_sel =
		shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
	const enum pipe_shader_type gs_type = gs_sel->type;
	const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
	const unsigned input_prim = si_get_input_prim(gs_sel);
	const bool use_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
				   input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
	const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
	const unsigned min_verts_per_prim =
		gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space.
	 *
	 * TODO: We should really take the shader's internal LDS use into
	 *       account. The linker will fail if the size is greater than
	 *       8K dwords.
	 */
	const unsigned max_lds_size = 8 * 1024 - 768;
	const unsigned target_lds_size = max_lds_size;
	unsigned esvert_lds_size = 0;
	unsigned gsprim_lds_size = 0;

	/* All these are per subgroup: */
	bool max_vert_out_per_gs_instance = false;
	unsigned max_esverts_base = 128;
	unsigned max_gsprims_base = 128; /* default prim group size clamp */

	/* Hardware has the following non-natural restrictions on the value
	 * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of the draw:
	 * - at most 252 for any line input primitive type
	 * - at most 251 for any quad input primitive type
	 * - at most 251 for triangle strips with adjacency (this happens to
	 *   be the natural limit for triangle *lists* with adjacency)
	 */
	max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

	if (gs_type == PIPE_SHADER_GEOMETRY) {
		unsigned max_out_verts_per_gsprim =
			gs_sel->gs_max_out_vertices * gs_num_invocations;

		if (max_out_verts_per_gsprim <= 256) {
			if (max_out_verts_per_gsprim) {
				max_gsprims_base = MIN2(max_gsprims_base,
							256 / max_out_verts_per_gsprim);
			}
		} else {
			/* Use special multi-cycling mode in which each GS
			 * instance gets its own subgroup. Does not work with
			 * tessellation. */
			max_vert_out_per_gs_instance = true;
			max_gsprims_base = 1;
			max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
		}

		esvert_lds_size = es_sel->esgs_itemsize / 4;
		gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
	} else {
		/* VS and TES. */
		/* LDS size for passing data from ES to GS. */
		esvert_lds_size = ngg_nogs_vertex_size(shader);

		/* LDS size for passing data from GS to ES.
		 * GS stores Primitive IDs into LDS at the address corresponding
		 * to the ES thread of the provoking vertex. All ES threads
		 * load and export PrimitiveID for their thread.
		 */
		if (gs_sel->type == PIPE_SHADER_VERTEX &&
		    shader->key.mono.u.vs_export_prim_id)
			esvert_lds_size = MAX2(esvert_lds_size, 1);
	}

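	/* Worked example for the GS sizing above (illustrative numbers): a GS
	 * with gs_max_out_vertices = 4, one invocation and a 16-dword output
	 * vertex (gsvs_vertex_size = 64 bytes) gets max_gsprims_base =
	 * MIN2(128, 256 / 4) = 64 and gsprim_lds_size = (16 + 1) * 4 = 68
	 * dwords per input primitive, the extra dword per vertex being the
	 * primitive flag byte array.
	 */
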
	unsigned max_gsprims = max_gsprims_base;
	unsigned max_esverts = max_esverts_base;

	if (esvert_lds_size)
		max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
	if (gsprim_lds_size)
		max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

	max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
	clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
	assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

	if (esvert_lds_size || gsprim_lds_size) {
		/* Now that we have a rough proportionality between esverts
		 * and gsprims based on the primitive type, scale both of them
		 * down simultaneously based on required LDS space.
		 *
		 * We could be smarter about this if we knew how much vertex
		 * reuse to expect.
		 */
		unsigned lds_total = max_esverts * esvert_lds_size +
				     max_gsprims * gsprim_lds_size;
		if (lds_total > target_lds_size) {
			max_esverts = max_esverts * target_lds_size / lds_total;
			max_gsprims = max_gsprims * target_lds_size / lds_total;

			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		}
	}

	/* Round up towards full wave sizes for better ALU utilization. */
	if (!max_vert_out_per_gs_instance) {
		const unsigned wavesize = gs_sel->screen->ge_wave_size;
		unsigned orig_max_esverts;
		unsigned orig_max_gsprims;
		do {
			orig_max_esverts = max_esverts;
			orig_max_gsprims = max_gsprims;

			max_esverts = align(max_esverts, wavesize);
			max_esverts = MIN2(max_esverts, max_esverts_base);
			if (esvert_lds_size)
				max_esverts = MIN2(max_esverts,
						   (max_lds_size - max_gsprims * gsprim_lds_size) /
						   esvert_lds_size);
			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

			max_gsprims = align(max_gsprims, wavesize);
			max_gsprims = MIN2(max_gsprims, max_gsprims_base);
			if (gsprim_lds_size)
				max_gsprims = MIN2(max_gsprims,
						   (max_lds_size - max_esverts * esvert_lds_size) /
						   gsprim_lds_size);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		} while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
	}

	/* Hardware restriction: minimum value of max_esverts */
	max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);

	unsigned max_out_vertices =
		max_vert_out_per_gs_instance ? gs_sel->gs_max_out_vertices :
		gs_type == PIPE_SHADER_GEOMETRY ?
		max_gsprims * gs_num_invocations * gs_sel->gs_max_out_vertices :
		max_esverts;
	assert(max_out_vertices <= 256);

	unsigned prim_amp_factor = 1;
	if (gs_type == PIPE_SHADER_GEOMETRY) {
		/* Number of output primitives per GS input primitive after
		 * GS instancing. */
		prim_amp_factor = gs_sel->gs_max_out_vertices;
	}

	/* The GE only checks against the maximum number of ES verts after
	 * allocating a full GS primitive. So we need to ensure that whenever
	 * this check passes, there is enough space for a full primitive without
	 * vertex reuse.
	 */
	shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
	shader->ngg.max_gsprims = max_gsprims;
	shader->ngg.max_out_verts = max_out_vertices;
	shader->ngg.prim_amp_factor = prim_amp_factor;
	shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;

	shader->gs_info.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
	shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;

	assert(shader->ngg.hw_max_esverts >= 24); /* HW limitation */
}