radeonsi/gfx10: implement Wave32
[mesa.git] / src / gallium / drivers / radeonsi / gfx10_shader_ngg.c
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_shader_internal.h"

#include "sid.h"

#include "util/u_memory.h"
#include "util/u_prim.h"

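/* Layout of merged_wave_info, as implied by the si_unpack_param calls in
 * this file (a reading aid, not authoritative):
 * - bits 0..7:   number of ES (vertex) threads in this wave
 * - bits 8..15:  number of GS (primitive) threads in this wave
 * - bits 24..27: wave index within the threadgroup
 * - bits 28..31: number of waves in the threadgroup
 */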
static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->param_merged_wave_info, 24, 4);
}

static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->param_merged_wave_info, 28, 4);
}

static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;
	tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
			   LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
	return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
}

static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 12, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}

static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    LLVMConstInt(ctx->ac.i32, 22, false),
			    LLVMConstInt(ctx->ac.i32, 9, false),
			    false);
}

static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
{
	return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
			    ctx->i32_0,
			    LLVMConstInt(ctx->ac.i32, 11, false),
			    false);
}

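/* Layout of gs_tg_info as decoded above (a reading aid, not authoritative):
 * - bits 0..10:  ordered ID, consumed by the ordered GDS add in streamout
 * - bits 12..20: number of ES vertices in the threadgroup
 * - bits 22..30: number of GS primitives in the threadgroup
 */
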
static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
{
	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn,
					    ctx->param_rw_buffers);

	return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
			LLVMConstInt(ctx->i32, GFX10_GS_QUERY_BUF, false));
}

/* Send GS Alloc Req message from the first wave of the group to SPI.
 * Message payload is:
 * - bits 0..10: vertices in group
 * - bits 12..22: primitives in group
 */
static void build_sendmsg_gs_alloc_req(struct si_shader_context *ctx,
				       LLVMValueRef vtx_cnt,
				       LLVMValueRef prim_cnt)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5020);

	tmp = LLVMBuildShl(builder, prim_cnt, LLVMConstInt(ctx->ac.i32, 12, false), "");
	tmp = LLVMBuildOr(builder, tmp, vtx_cnt, "");
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_ALLOC_REQ, tmp);

	ac_build_endif(&ctx->ac, 5020);
}
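
/* Worked example (illustration only): a group with 100 vertices and 40
 * primitives sends the payload (40 << 12) | 100 = 0x28064.
 */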

struct ngg_prim {
	unsigned num_vertices;
	LLVMValueRef isnull;
	LLVMValueRef index[3];
	LLVMValueRef edgeflag[3];
};

static void build_export_prim(struct si_shader_context *ctx,
			      const struct ngg_prim *prim)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	struct ac_export_args args;
	LLVMValueRef tmp;

	tmp = LLVMBuildZExt(builder, prim->isnull, ctx->ac.i32, "");
	args.out[0] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 31, false), "");

	for (unsigned i = 0; i < prim->num_vertices; ++i) {
		tmp = LLVMBuildShl(builder, prim->index[i],
				   LLVMConstInt(ctx->ac.i32, 10 * i, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
		tmp = LLVMBuildZExt(builder, prim->edgeflag[i], ctx->ac.i32, "");
		tmp = LLVMBuildShl(builder, tmp,
				   LLVMConstInt(ctx->ac.i32, 10 * i + 9, false), "");
		args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
	}

	args.out[0] = LLVMBuildBitCast(builder, args.out[0], ctx->ac.f32, "");
	args.out[1] = LLVMGetUndef(ctx->ac.f32);
	args.out[2] = LLVMGetUndef(ctx->ac.f32);
	args.out[3] = LLVMGetUndef(ctx->ac.f32);

	args.target = V_008DFC_SQ_EXP_PRIM;
	args.enabled_channels = 1;
	args.done = true;
	args.valid_mask = false;
	args.compr = false;

	ac_build_export(&ctx->ac, &args);
}
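
/* Worked example (illustration only): a non-null triangle with vertex
 * indices 1, 2, 3 and all edge flags cleared is exported as
 * 1 | (2 << 10) | (3 << 20) = 0x300801.
 */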

static void build_streamout_vertex(struct si_shader_context *ctx,
				   LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
				   unsigned stream, LLVMValueRef offset_vtx,
				   LLVMValueRef vertexptr)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef offset[4] = {};
	LLVMValueRef tmp;

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (!wg_offset_dw[buffer])
			continue;

		tmp = LLVMBuildMul(builder, offset_vtx,
				   LLVMConstInt(ctx->i32, so->stride[buffer], false), "");
		tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
		offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->i32, 2, false), "");
	}

	for (unsigned i = 0; i < so->num_outputs; ++i) {
		if (so->output[i].stream != stream)
			continue;

		unsigned reg = so->output[i].register_index;
		struct si_shader_output_values out;
		out.semantic_name = info->output_semantic_name[reg];
		out.semantic_index = info->output_semantic_index[reg];

		for (unsigned comp = 0; comp < 4; comp++) {
			tmp = ac_build_gep0(&ctx->ac, vertexptr,
					    LLVMConstInt(ctx->i32, 4 * reg + comp, false));
			out.values[comp] = LLVMBuildLoad(builder, tmp, "");
			out.vertex_stream[comp] =
				(info->output_streams[reg] >> (2 * comp)) & 3;
		}

		si_emit_streamout_output(ctx, so_buffer, offset, &so->output[i], &out);
	}
}

struct ngg_streamout {
	LLVMValueRef num_vertices;

	/* per-thread data */
	LLVMValueRef prim_enable[4]; /* i1 per stream */
	LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */

	/* Output */
	LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
};

/**
 * Build streamout logic.
 *
 * Implies a barrier.
 *
 * Writes number of emitted primitives to gs_ngg_scratch[4:8].
 *
 * Clobbers gs_ngg_scratch[8:].
 */
static void build_streamout(struct si_shader_context *ctx,
			    struct ngg_streamout *nggso)
{
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct pipe_stream_output_info *so = &ctx->shader->selector->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef buf_ptr = LLVMGetParam(ctx->main_fn, ctx->param_rw_buffers);
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp, tmp2;
	LLVMValueRef i32_2 = LLVMConstInt(ctx->i32, 2, false);
	LLVMValueRef i32_4 = LLVMConstInt(ctx->i32, 4, false);
	LLVMValueRef i32_8 = LLVMConstInt(ctx->i32, 8, false);
	LLVMValueRef so_buffer[4] = {};
	unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
				    (nggso->vertices[2] ? 1 : 0);
	LLVMValueRef prim_stride_dw[4] = {};
	LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->i32);
	int stream_for_buffer[4] = { -1, -1, -1, -1 };
	unsigned bufmask_for_stream[4] = {};
	bool isgs = ctx->type == PIPE_SHADER_GEOMETRY;
	unsigned scratch_emit_base = isgs ? 4 : 0;
	LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->i32_0;
	unsigned scratch_offset_base = isgs ? 8 : 4;
	LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;

	ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);

	/* Determine the mapping of streamout buffers to vertex streams. */
	for (unsigned i = 0; i < so->num_outputs; ++i) {
		unsigned buf = so->output[i].output_buffer;
		unsigned stream = so->output[i].stream;
		assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
		stream_for_buffer[buf] = stream;
		bufmask_for_stream[stream] |= 1 << buf;
	}

	for (unsigned buffer = 0; buffer < 4; ++buffer) {
		if (stream_for_buffer[buffer] == -1)
			continue;

		assert(so->stride[buffer]);

		tmp = LLVMConstInt(ctx->i32, so->stride[buffer], false);
		prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
		prim_stride_dw_vgpr = ac_build_writelane(
			&ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
			LLVMConstInt(ctx->i32, buffer, false));

		so_buffer[buffer] = ac_build_load_to_sgpr(
			&ctx->ac, buf_ptr,
			LLVMConstInt(ctx->i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
	}

	tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->i32_0, "");
	ac_build_ifcc(&ctx->ac, tmp, 5200);
	{
		LLVMTypeRef gdsptr = LLVMPointerType(ctx->i32, AC_ADDR_SPACE_GDS);
		LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->i32_0, gdsptr, "");

		/* Advance the streamout offsets in GDS. */
		LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
		LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->i32, "");

		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5210);
		{
			if (isgs) {
				tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
				tmp = LLVMBuildLoad(builder, tmp, "");
			} else {
				tmp = ac_build_writelane(&ctx->ac, ctx->i32_0,
						ngg_get_prim_cnt(ctx), ctx->i32_0);
			}
			LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);

			unsigned swizzle[4];
			int unused_stream = -1;
			for (unsigned stream = 0; stream < 4; ++stream) {
				if (!info->num_stream_output_components[stream]) {
					unused_stream = stream;
					break;
				}
			}
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] >= 0) {
					swizzle[buffer] = stream_for_buffer[buffer];
				} else {
					assert(unused_stream >= 0);
					swizzle[buffer] = unused_stream;
				}
			}

			tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
				swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
			tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");

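			/* llvm.amdgcn.ds.ordered.add adds this group's primitive
			 * data size to the GDS streamout offset counters (one
			 * lane per buffer) in threadgroup launch order and
			 * returns the previous value, i.e. the dword offsets at
			 * which this group starts writing.
			 */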
			LLVMValueRef args[] = {
				LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
				tmp,
				ctx->i32_0, // ordering
				ctx->i32_0, // scope
				ctx->ac.i1false, // isVolatile
				LLVMConstInt(ctx->i32, 4 << 24, false), // OA index
				ctx->ac.i1true, // wave release
				ctx->ac.i1true, // wave done
			};
			tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
						 ctx->i32, args, ARRAY_SIZE(args), 0);

			/* Keep offsets in a VGPR for quick retrieval via readlane by
			 * the first wave for bounds checking, and also store in LDS
			 * for retrieval by all waves later. */
			LLVMBuildStore(builder, tmp, offsets_vgpr);

			tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					    scratch_offset_basev, "");
			tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
			LLVMBuildStore(builder, tmp, tmp2);
		}
		ac_build_endif(&ctx->ac, 5210);

		/* Determine the max emit per buffer. This is done via the SALU, in part
		 * because LLVM can't generate divide-by-multiply if we try to do this
		 * via VALU with one lane per buffer.
		 */
		LLVMValueRef max_emit[4] = {};
		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] == -1)
				continue;

			LLVMValueRef bufsize_dw =
				LLVMBuildLShr(builder,
					LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
					i32_2, "");

			tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
			LLVMValueRef offset_dw =
				ac_build_readlane(&ctx->ac, tmp,
						LLVMConstInt(ctx->i32, buffer, false));

			tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
			tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");

			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
			max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->i32_0, tmp, "");
		}

		/* Determine the number of emitted primitives per stream and fixup the
		 * GDS counter if necessary.
		 *
		 * This is complicated by the fact that a single stream can emit to
		 * multiple buffers (but luckily not vice versa).
		 */
		LLVMValueRef emit_vgpr = ctx->i32_0;

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
			LLVMValueRef generated =
				ac_build_readlane(&ctx->ac, tmp,
						  LLVMConstInt(ctx->i32, stream, false));

			LLVMValueRef emit = generated;
			for (unsigned buffer = 0; buffer < 4; ++buffer) {
				if (stream_for_buffer[buffer] == stream)
					emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
			}

			emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
						       LLVMConstInt(ctx->i32, stream, false));

			/* Fixup the offset using a plain GDS atomic if we overflowed. */
			tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
			ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
			tmp = LLVMBuildLShr(builder,
					    LLVMConstInt(ctx->i32, bufmask_for_stream[stream], false),
					    ac_get_thread_id(&ctx->ac), "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			ac_build_ifcc(&ctx->ac, tmp, 5222);
			{
				tmp = LLVMBuildSub(builder, generated, emit, "");
				tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
				tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
				LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
						   LLVMAtomicOrderingMonotonic, false);
			}
			ac_build_endif(&ctx->ac, 5222);
			ac_build_endif(&ctx->ac, 5221);
		}

		tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
		ac_build_ifcc(&ctx->ac, tmp, 5225);
		{
			tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
					   scratch_emit_basev, "");
			tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
			LLVMBuildStore(builder, emit_vgpr, tmp);
		}
		ac_build_endif(&ctx->ac, 5225);
	}
	ac_build_endif(&ctx->ac, 5200);

	/* Determine the workgroup-relative per-thread / primitive offset into
	 * the streamout buffers */
	struct ac_wg_scan primemit_scan[4] = {};

	if (isgs) {
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			primemit_scan[stream].enable_exclusive = true;
			primemit_scan[stream].op = nir_op_iadd;
			primemit_scan[stream].src = nggso->prim_enable[stream];
			primemit_scan[stream].scratch =
				ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
					LLVMConstInt(ctx->i32, 12 + 8 * stream, false));
			primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
			primemit_scan[stream].numwaves = get_tgsize(ctx);
			primemit_scan[stream].maxwaves = 8;
			ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
		}
	}

	ac_build_s_barrier(&ctx->ac);

	/* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
	LLVMValueRef wgoffset_dw[4] = {};

	{
		LLVMValueRef scratch_vgpr;

		tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
		scratch_vgpr = LLVMBuildLoad(builder, tmp, "");

		for (unsigned buffer = 0; buffer < 4; ++buffer) {
			if (stream_for_buffer[buffer] >= 0) {
				wgoffset_dw[buffer] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_offset_base + buffer, false));
			}
		}

		for (unsigned stream = 0; stream < 4; ++stream) {
			if (info->num_stream_output_components[stream]) {
				nggso->emit[stream] = ac_build_readlane(
					&ctx->ac, scratch_vgpr,
					LLVMConstInt(ctx->i32, scratch_emit_base + stream, false));
			}
		}
	}

	/* Write out primitive data */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		if (isgs) {
			ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
		} else {
			primemit_scan[stream].result_exclusive = tid;
		}

		tmp = LLVMBuildICmp(builder, LLVMIntULT,
				    primemit_scan[stream].result_exclusive,
				    nggso->emit[stream], "");
		tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
		ac_build_ifcc(&ctx->ac, tmp, 5240);
		{
			LLVMValueRef offset_vtx =
				LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
					     nggso->num_vertices, "");

			for (unsigned i = 0; i < max_num_vertices; ++i) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT,
						    LLVMConstInt(ctx->i32, i, false),
						    nggso->num_vertices, "");
				ac_build_ifcc(&ctx->ac, tmp, 5241);
				build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
						       stream, offset_vtx, nggso->vertices[i]);
				ac_build_endif(&ctx->ac, 5241);
				offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->i32_1, "");
			}
		}
		ac_build_endif(&ctx->ac, 5240);
	}
}

static unsigned ngg_nogs_vertex_size(struct si_shader *shader)
{
	unsigned lds_vertex_size = 0;

	/* The edgeflag is always stored in the last element that's also
	 * used for padding to reduce LDS bank conflicts. */
	if (shader->selector->so.num_outputs)
		lds_vertex_size = 4 * shader->selector->info.num_outputs + 1;
	if (shader->selector->ngg_writes_edgeflag)
		lds_vertex_size = MAX2(lds_vertex_size, 1);

	return lds_vertex_size;
}
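
/* Worked example (illustration only): with streamout enabled and 5 shader
 * outputs, a vertex occupies 4 * 5 + 1 = 21 dwords; the odd size also
 * helps avoid LDS bank conflicts (see ngg_nogs_vertex_ptr).
 */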

/**
 * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
 * for the vertex outputs.
 */
static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx,
					LLVMValueRef vtxid)
{
	/* The extra dword is used to avoid LDS bank conflicts. */
	unsigned vertex_size = ngg_nogs_vertex_size(ctx->shader);
	LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, vertex_size);
	LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
	LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
	return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
}

/**
 * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
 */
void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
			     unsigned max_outputs,
			     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_selector *sel = ctx->shader->selector;
	struct tgsi_shader_info *info = &sel->info;
	struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
	LLVMBuilderRef builder = ctx->ac.builder;
	struct lp_build_if_state if_state;
	LLVMValueRef tmp, tmp2;

	assert(!ctx->shader->is_gs_copy_shader);
	assert(info->num_outputs <= max_outputs);

	LLVMValueRef vertex_ptr = NULL;

	if (sel->so.num_outputs || sel->ngg_writes_edgeflag)
		vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));

	for (unsigned i = 0; i < info->num_outputs; i++) {
		outputs[i].semantic_name = info->output_semantic_name[i];
		outputs[i].semantic_index = info->output_semantic_index[i];

		for (unsigned j = 0; j < 4; j++) {
			outputs[i].vertex_stream[j] =
				(info->output_streams[i] >> (2 * j)) & 3;

			/* TODO: we may store more outputs than streamout needs,
			 * but streamout performance isn't that important.
			 */
			if (sel->so.num_outputs) {
				tmp = ac_build_gep0(&ctx->ac, vertex_ptr,
						    LLVMConstInt(ctx->i32, 4 * i + j, false));
				tmp2 = LLVMBuildLoad(builder, addrs[4 * i + j], "");
				tmp2 = ac_to_integer(&ctx->ac, tmp2);
				LLVMBuildStore(builder, tmp2, tmp);
			}
		}

		/* Store the edgeflag at the end (if streamout is enabled) */
		if (info->output_semantic_name[i] == TGSI_SEMANTIC_EDGEFLAG &&
		    sel->ngg_writes_edgeflag) {
			LLVMValueRef edgeflag = LLVMBuildLoad(builder, addrs[4 * i], "");
			/* The output is a float, but the hw expects a 1-bit integer. */
			edgeflag = LLVMBuildFPToUI(ctx->ac.builder, edgeflag, ctx->i32, "");
			edgeflag = ac_build_umin(&ctx->ac, edgeflag, ctx->i32_1);

			tmp = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
			tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
			LLVMBuildStore(builder, edgeflag, tmp);
		}
	}

	lp_build_endif(&ctx->merged_wrap_if_state);

	LLVMValueRef prims_in_wave = si_unpack_param(ctx, ctx->param_merged_wave_info, 8, 8);
	LLVMValueRef vtx_in_wave = si_unpack_param(ctx, ctx->param_merged_wave_info, 0, 8);
	LLVMValueRef is_gs_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), prims_in_wave, "");
	LLVMValueRef is_es_thread = LLVMBuildICmp(builder, LLVMIntULT,
						  ac_get_thread_id(&ctx->ac), vtx_in_wave, "");
	LLVMValueRef vtxindex[] = {
		si_unpack_param(ctx, ctx->param_gs_vtx01_offset, 0, 16),
		si_unpack_param(ctx, ctx->param_gs_vtx01_offset, 16, 16),
		si_unpack_param(ctx, ctx->param_gs_vtx23_offset, 0, 16),
	};

	/* Determine the number of vertices per primitive. */
	unsigned num_vertices;
	LLVMValueRef num_vertices_val;

	if (ctx->type == PIPE_SHADER_VERTEX) {
		if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) {
			/* Blits always use axis-aligned rectangles with 3 vertices. */
			num_vertices = 3;
			num_vertices_val = LLVMConstInt(ctx->i32, 3, 0);
		} else {
			/* Extract OUTPRIM field. */
			tmp = si_unpack_param(ctx, ctx->param_vs_state_bits, 2, 2);
			num_vertices_val = LLVMBuildAdd(builder, tmp, ctx->i32_1, "");
			num_vertices = 3; /* TODO: optimize for points & lines */
		}
	} else {
		assert(ctx->type == PIPE_SHADER_TESS_EVAL);

		if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
			num_vertices = 1;
		else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
			num_vertices = 2;
		else
			num_vertices = 3;

		num_vertices_val = LLVMConstInt(ctx->i32, num_vertices, false);
	}

	/* Streamout */
	LLVMValueRef emitted_prims = NULL;

	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = num_vertices_val;
		nggso.prim_enable[0] = is_gs_thread;

		for (unsigned i = 0; i < num_vertices; ++i)
			nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);

		build_streamout(ctx, &nggso);
		emitted_prims = nggso.emit[0];
	}

	LLVMValueRef user_edgeflags[3] = {};

	if (sel->ngg_writes_edgeflag) {
		/* Streamout already inserted the barrier, so don't insert it again. */
		if (!sel->so.num_outputs)
			ac_build_s_barrier(&ctx->ac);

		ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
		/* Load edge flags from ES threads and store them into VGPRs in GS threads. */
		for (unsigned i = 0; i < num_vertices; i++) {
			tmp = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
			tmp2 = LLVMConstInt(ctx->i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
			tmp = ac_build_gep0(&ctx->ac, tmp, tmp2);
			tmp = LLVMBuildLoad(builder, tmp, "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");

			user_edgeflags[i] = ac_build_alloca_undef(&ctx->ac, ctx->i1, "");
			LLVMBuildStore(builder, tmp, user_edgeflags[i]);
		}
		ac_build_endif(&ctx->ac, 5400);
	}

	/* Copy Primitive IDs from GS threads to the LDS address corresponding
	 * to the ES thread of the provoking vertex.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX &&
	    ctx->shader->key.mono.u.vs_export_prim_id) {
		/* Streamout and edge flags use LDS. Make it idle, so that we can reuse it. */
		if (sel->so.num_outputs || sel->ngg_writes_edgeflag)
			ac_build_s_barrier(&ctx->ac);

		ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
		/* Extract the PROVOKING_VTX_INDEX field. */
		LLVMValueRef provoking_vtx_in_prim =
			si_unpack_param(ctx, ctx->param_vs_state_bits, 4, 2);

		/* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
		LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
		LLVMValueRef provoking_vtx_index =
			LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");

		LLVMBuildStore(builder, ctx->abi.gs_prim_id,
			       ac_build_gep0(&ctx->ac, ctx->esgs_ring, provoking_vtx_index));
		ac_build_endif(&ctx->ac, 5400);
	}

	build_sendmsg_gs_alloc_req(ctx, ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));

	/* Update query buffer */
	/* TODO: this won't catch 96-bit clear_buffer via transform feedback. */
	if (!info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) {
		tmp = si_unpack_param(ctx, ctx->param_vs_state_bits, 6, 1);
		tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
		ac_build_ifcc(&ctx->ac, tmp, 5029); /* if (STREAMOUT_QUERY_ENABLED) */
		tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5030);
		tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
				    sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5031);
		{
			LLVMValueRef args[] = {
				ngg_get_prim_cnt(ctx),
				ngg_get_query_buf(ctx),
				LLVMConstInt(ctx->i32, 16, false), /* offset of stream[0].generated_primitives */
				ctx->i32_0, /* soffset */
				ctx->i32_0, /* cachepolicy */
			};

			if (sel->so.num_outputs) {
				args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->i32_1);
				args[2] = ac_build_writelane(&ctx->ac, args[2],
						LLVMConstInt(ctx->i32, 24, false), ctx->i32_1);
			}

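			/* Lane 0 thus adds the generated count at dword offset
			 * 16; with streamout, lane 1 adds the emitted count at
			 * offset 24 (set up via the writelanes above).
			 */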
			/* TODO: should this be 64-bit atomics? */
			ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
					   ctx->i32, args, 5, 0);
		}
		ac_build_endif(&ctx->ac, 5031);
		ac_build_endif(&ctx->ac, 5030);
		ac_build_endif(&ctx->ac, 5029);
	}

	/* Export primitive data to the index buffer. Format is:
	 * - bits 0..8: index 0
	 * - bit 9: edge flag 0
	 * - bits 10..18: index 1
	 * - bit 19: edge flag 1
	 * - bits 20..28: index 2
	 * - bit 29: edge flag 2
	 * - bit 31: null primitive (skip)
	 *
	 * For the first version, we will always build up all three indices
	 * independent of the primitive type. The additional garbage data
	 * shouldn't hurt.
	 *
	 * TODO: culling depends on the primitive type, so can have some
	 * interaction here.
	 */
	lp_build_if(&if_state, &ctx->gallivm, is_gs_thread);
	{
		struct ngg_prim prim = {};

		prim.num_vertices = num_vertices;
		prim.isnull = ctx->ac.i1false;
		memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);

		for (unsigned i = 0; i < num_vertices; ++i) {
			if (ctx->type != PIPE_SHADER_VERTEX) {
				prim.edgeflag[i] = ctx->i1false;
				continue;
			}

			tmp = LLVMBuildLShr(builder, ctx->abi.gs_invocation_id,
					    LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
			prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

			if (sel->ngg_writes_edgeflag) {
				tmp2 = LLVMBuildLoad(builder, user_edgeflags[i], "");
				prim.edgeflag[i] = LLVMBuildAnd(builder, prim.edgeflag[i],
								tmp2, "");
			}
		}

		build_export_prim(ctx, &prim);
	}
	lp_build_endif(&if_state);

	/* Export per-vertex data (positions and parameters). */
	lp_build_if(&if_state, &ctx->gallivm, is_es_thread);
	{
		unsigned i;

		/* Unconditionally (re-)load the values for proper SSA form. */
		for (i = 0; i < info->num_outputs; i++) {
			for (unsigned j = 0; j < 4; j++) {
				outputs[i].values[j] =
					LLVMBuildLoad(builder,
						      addrs[4 * i + j],
						      "");
			}
		}

		if (ctx->shader->key.mono.u.vs_export_prim_id) {
			outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
			outputs[i].semantic_index = 0;

			if (ctx->type == PIPE_SHADER_VERTEX) {
				/* Wait for GS stores to finish. */
				ac_build_s_barrier(&ctx->ac);

				tmp = ac_build_gep0(&ctx->ac, ctx->esgs_ring,
						    get_thread_id_in_tg(ctx));
				outputs[i].values[0] = LLVMBuildLoad(builder, tmp, "");
			} else {
				assert(ctx->type == PIPE_SHADER_TESS_EVAL);
				outputs[i].values[0] = si_get_primitive_id(ctx, 0);
			}

			outputs[i].values[0] = ac_to_float(&ctx->ac, outputs[i].values[0]);
			for (unsigned j = 1; j < 4; j++)
				outputs[i].values[j] = LLVMGetUndef(ctx->f32);

			memset(outputs[i].vertex_stream, 0,
			       sizeof(outputs[i].vertex_stream));
			i++;
		}

		si_llvm_export_vs(ctx, outputs, i);
	}
	lp_build_endif(&if_state);
}

static LLVMValueRef
ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;

	LLVMTypeRef elements[2] = {
		LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
		LLVMArrayType(ctx->ac.i8, 4),
	};
	LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
	type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
	return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
}

/**
 * Return a pointer to the LDS storage reserved for the N'th vertex, where N
 * is in emit order; that is:
 * - during the epilogue, N is the threadidx (relative to the entire threadgroup)
 * - during vertex emit, i.e. while the API GS shader invocation is running,
 *   N = threadidx * gs_max_out_vertices + emitidx
 *
 * Goals of the LDS memory layout:
 * 1. Eliminate bank conflicts on write for geometry shaders that have all emits
 *    in uniform control flow
 * 2. Eliminate bank conflicts on read for export if, additionally, there is no
 *    culling
 * 3. Agnostic to the number of waves (since we don't know it before compiling)
 * 4. Allow coalescing of LDS instructions (ds_write_b128 etc.)
 * 5. Avoid wasting memory.
 *
 * We use an AoS layout due to point 4 (this also helps point 3). In an AoS
 * layout, elimination of bank conflicts requires that each vertex occupy an
 * odd number of dwords. We use the additional dword to store the output stream
 * index as well as a flag to indicate whether this vertex ends a primitive
 * for rasterization.
 *
 * Swizzling is required to satisfy points 1 and 2 simultaneously.
 *
 * Vertices are stored in export order (gsthread * gs_max_out_vertices + emitidx).
 * Indices are swizzled in groups of 32, which ensures point 1 without
 * disturbing point 2.
 *
 * \return an LDS pointer to type {[N x i32], [4 x i8]}
 */
static LLVMValueRef
ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);

	/* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
	unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
	if (write_stride_2exp) {
		LLVMValueRef row =
			LLVMBuildLShr(builder, vertexidx,
				      LLVMConstInt(ctx->ac.i32, 5, false), "");
		LLVMValueRef swizzle =
			LLVMBuildAnd(builder, row,
				     LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
						  false), "");
		vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
	}

	return ac_build_gep0(&ctx->ac, storage, vertexidx);
}
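
/* Worked example (illustration only): gs_max_out_vertices = 4 gives
 * write_stride_2exp = 2, so vertex indices 32..63 are XORed with 1,
 * indices 64..95 with 2, and so on (row & 3), spreading writes across
 * LDS banks.
 */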

static LLVMValueRef
ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
		       LLVMValueRef emitidx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef tmp;

	tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
	tmp = LLVMBuildMul(builder, tmp, gsthread, "");
	const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
	return ngg_gs_vertex_ptr(ctx, vertexidx);
}

void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
			      unsigned stream,
			      LLVMValueRef *addrs)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	struct lp_build_if_state if_state;
	LLVMValueRef tmp;
	const LLVMValueRef vertexidx =
		LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 */
	const LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
			      LLVMConstInt(ctx->i32, sel->gs_max_out_vertices, false), "");

	tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
	tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
	LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

	lp_build_if(&if_state, &ctx->gallivm, can_emit);

	const LLVMValueRef vertexptr =
		ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
	unsigned out_idx = 0;
	for (unsigned i = 0; i < info->num_outputs; i++) {
		for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implied C-style array */
				ctx->ac.i32_0, /* first entry of struct */
				LLVMConstInt(ctx->ac.i32, out_idx, false),
			};
			LLVMValueRef ptr = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			LLVMBuildStore(builder, out_val, ptr);
		}
	}
	assert(out_idx * 4 == sel->gsvs_vertex_size);

	/* Determine and store whether this vertex completed a primitive. */
	const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");

	tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
	const LLVMValueRef iscompleteprim =
		LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");

	tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
	LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);

	LLVMValueRef gep_idx[3] = {
		ctx->ac.i32_0, /* implied C-style array */
		ctx->ac.i32_1, /* second struct entry */
		LLVMConstInt(ctx->ac.i32, stream, false),
	};
	const LLVMValueRef primflagptr =
		LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");

	tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
	LLVMBuildStore(builder, tmp, primflagptr);

	tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
	tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
	LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);

	lp_build_endif(&if_state);
}

void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
{
	/* Zero out the part of LDS scratch that is used to accumulate the
	 * per-stream generated primitive count.
	 */
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
	LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef tmp;

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->i32, 4, false), "");
	ac_build_ifcc(&ctx->ac, tmp, 5090);
	{
		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
		LLVMBuildStore(builder, ctx->i32_0, ptr);
	}
	ac_build_endif(&ctx->ac, 5090);

	ac_build_s_barrier(&ctx->ac);
}

void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
{
	const struct si_shader_selector *sel = ctx->shader->selector;
	const struct tgsi_shader_info *info = &sel->info;
	const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
	LLVMValueRef tmp, tmp2;

	/* Zero out remaining (non-emitted) primitive flags.
	 *
	 * Note: Alternatively, we could pass the relevant gs_next_vertex to
	 *       the emit threads via LDS. This is likely worse in the expected
	 *       typical case where each GS thread emits the full set of
	 *       vertices.
	 */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);

		ac_build_bgnloop(&ctx->ac, 5100);

		const LLVMValueRef vertexidx =
			LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
		tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
			LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
		ac_build_ifcc(&ctx->ac, tmp, 5101);
		ac_build_break(&ctx->ac);
		ac_build_endif(&ctx->ac, 5101);

		tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
		LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);

		tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implied C-style array */
			ctx->ac.i32_1, /* second entry of struct */
			LLVMConstInt(ctx->ac.i32, stream, false),
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		LLVMBuildStore(builder, i8_0, tmp);

		ac_build_endloop(&ctx->ac, 5100);
	}

	/* Accumulate generated primitive counts across the entire threadgroup. */
	for (unsigned stream = 0; stream < 4; ++stream) {
		if (!info->num_stream_output_components[stream])
			continue;

		LLVMValueRef numprims =
			LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
		numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);

		tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->i32_0, "");
		ac_build_ifcc(&ctx->ac, tmp, 5105);
		{
			LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
					   ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
							 LLVMConstInt(ctx->i32, stream, false)),
					   numprims, LLVMAtomicOrderingMonotonic, false);
		}
		ac_build_endif(&ctx->ac, 5105);
	}

	lp_build_endif(&ctx->merged_wrap_if_state);

	ac_build_s_barrier(&ctx->ac);

	const LLVMValueRef tid = get_thread_id_in_tg(ctx);
	LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);

	/* Streamout */
	if (sel->so.num_outputs) {
		struct ngg_streamout nggso = {};

		nggso.num_vertices = LLVMConstInt(ctx->i32, verts_per_prim, false);

		LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
		for (unsigned stream = 0; stream < 4; ++stream) {
			if (!info->num_stream_output_components[stream])
				continue;

			LLVMValueRef gep_idx[3] = {
				ctx->i32_0, /* implicit C-style array */
				ctx->i32_1, /* second value of struct */
				LLVMConstInt(ctx->i32, stream, false),
			};
			tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
			tmp = LLVMBuildLoad(builder, tmp, "");
			tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
			tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
			nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
		}

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			tmp = LLVMBuildSub(builder, tid,
				LLVMConstInt(ctx->i32, verts_per_prim - i - 1, false), "");
			tmp = ngg_gs_vertex_ptr(ctx, tmp);
			nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->i32_0);
		}

		build_streamout(ctx, &nggso);
	}

	/* Write shader query data. */
	tmp = si_unpack_param(ctx, ctx->param_vs_state_bits, 6, 1);
	tmp = LLVMBuildTrunc(builder, tmp, ctx->i1, "");
	ac_build_ifcc(&ctx->ac, tmp, 5109); /* if (STREAMOUT_QUERY_ENABLED) */
	unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
			    LLVMConstInt(ctx->i32, num_query_comps, false), "");
	ac_build_ifcc(&ctx->ac, tmp, 5110);
	{
		LLVMValueRef offset;
		tmp = tid;
		if (sel->so.num_outputs)
			tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->i32, 3, false), "");
		offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 32, false), "");
		if (sel->so.num_outputs) {
			tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->i32, 2, false), "");
			tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->i32, 8, false), "");
			offset = LLVMBuildAdd(builder, offset, tmp, "");
		}

		tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
		LLVMValueRef args[] = {
			tmp,
			ngg_get_query_buf(ctx),
			offset,
			LLVMConstInt(ctx->i32, 16, false), /* soffset */
			ctx->i32_0, /* cachepolicy */
		};
		ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
				   ctx->i32, args, 5, 0);
	}
	ac_build_endif(&ctx->ac, 5110);
	ac_build_endif(&ctx->ac, 5109);

	/* TODO: culling */

	/* Determine vertex liveness. */
	LLVMValueRef vertliveptr = lp_build_alloca(&ctx->gallivm, ctx->ac.i1, "vertexlive");

	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5120);
	{
		for (unsigned i = 0; i < verts_per_prim; ++i) {
			const LLVMValueRef primidx =
				LLVMBuildAdd(builder, tid,
					     LLVMConstInt(ctx->ac.i32, i, false), "");

			if (i > 0) {
				tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
				ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
			}

			/* Load primitive liveness */
			tmp = ngg_gs_vertex_ptr(ctx, primidx);
			LLVMValueRef gep_idx[3] = {
				ctx->ac.i32_0, /* implicit C-style array */
				ctx->ac.i32_1, /* second value of struct */
				ctx->ac.i32_0, /* stream 0 */
			};
			tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
			tmp = LLVMBuildLoad(builder, tmp, "");
			const LLVMValueRef primlive =
				LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");

			tmp = LLVMBuildLoad(builder, vertliveptr, "");
1168 tmp = LLVMBuildOr(builder, tmp, primlive, ""),
1169 LLVMBuildStore(builder, tmp, vertliveptr);

			if (i > 0)
				ac_build_endif(&ctx->ac, 5121 + i);
		}
	}
	ac_build_endif(&ctx->ac, 5120);

	/* Inclusive scan addition across the current wave. */
	LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
	struct ac_wg_scan vertlive_scan = {};
	vertlive_scan.op = nir_op_iadd;
	vertlive_scan.enable_reduce = true;
	vertlive_scan.enable_exclusive = true;
	vertlive_scan.src = vertlive;
	vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->i32_0);
	vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
	vertlive_scan.numwaves = get_tgsize(ctx);
	vertlive_scan.maxwaves = 8;

	ac_build_wg_scan(&ctx->ac, &vertlive_scan);
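
	/* After the scan: result_exclusive is this thread's compacted slot
	 * among live vertices, and result_reduce is the total number of live
	 * vertices in the threadgroup (a reading aid, not authoritative).
	 */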

	/* Skip all exports (including index exports) when possible. At least on
	 * early gfx10 revisions this is also to avoid hangs.
	 */
	LLVMValueRef have_exports =
		LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
	num_emit_threads =
		LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");

	/* Allocate export space. Send this message as early as possible, to
	 * hide the latency of the SQ <-> SPI roundtrip.
	 *
	 * Note: We could consider compacting primitives for export as well.
	 *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
	 *       prim data per clock and skips null primitives at no additional
	 *       cost. So compacting primitives can only be beneficial when
	 *       there are 4 or more contiguous null primitives in the export
	 *       (in the common case of single-dword prim exports).
	 */
	build_sendmsg_gs_alloc_req(ctx, vertlive_scan.result_reduce, num_emit_threads);

	/* Setup the reverse vertex compaction permutation. We re-use stream 1
	 * of the primitive liveness flags, relying on the fact that each
	 * threadgroup can have at most 256 threads. */
	ac_build_ifcc(&ctx->ac, vertlive, 5130);
	{
		tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_1, /* stream 1 */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
		LLVMBuildStore(builder, tmp2, tmp);
	}
	ac_build_endif(&ctx->ac, 5130);

	ac_build_s_barrier(&ctx->ac);

	/* Export primitive data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
	ac_build_ifcc(&ctx->ac, tmp, 5140);
	{
		struct ngg_prim prim = {};
		prim.num_vertices = verts_per_prim;

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_0, /* primflag */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		prim.isnull = LLVMBuildICmp(builder, LLVMIntEQ, tmp,
					    LLVMConstInt(ctx->ac.i8, 0, false), "");

		for (unsigned i = 0; i < verts_per_prim; ++i) {
			prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
				LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
			prim.edgeflag[i] = ctx->ac.i1false;
		}

		build_export_prim(ctx, &prim);
	}
	ac_build_endif(&ctx->ac, 5140);

	/* Export position and parameter data */
	tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
	ac_build_ifcc(&ctx->ac, tmp, 5145);
	{
		struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];

		tmp = ngg_gs_vertex_ptr(ctx, tid);
		LLVMValueRef gep_idx[3] = {
			ctx->ac.i32_0, /* implicit C-style array */
			ctx->ac.i32_1, /* second value of struct */
			ctx->ac.i32_1, /* stream 1: source data index */
		};
		tmp = LLVMBuildGEP(builder, tmp, gep_idx, 3, "");
		tmp = LLVMBuildLoad(builder, tmp, "");
		tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
		const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);

		unsigned out_idx = 0;
		gep_idx[1] = ctx->ac.i32_0;
		for (unsigned i = 0; i < info->num_outputs; i++) {
			outputs[i].semantic_name = info->output_semantic_name[i];
			outputs[i].semantic_index = info->output_semantic_index[i];

			for (unsigned j = 0; j < 4; j++, out_idx++) {
				gep_idx[2] = LLVMConstInt(ctx->ac.i32, out_idx, false);
				tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
				tmp = LLVMBuildLoad(builder, tmp, "");
				outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
				outputs[i].vertex_stream[j] =
					(info->output_streams[i] >> (2 * j)) & 3;
			}
		}

		si_llvm_export_vs(ctx, outputs, info->num_outputs);
	}
	ac_build_endif(&ctx->ac, 5145);
}

static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
				     unsigned min_verts_per_prim, bool use_adjacency)
{
	unsigned max_reuse = max_esverts - min_verts_per_prim;
	if (use_adjacency)
		max_reuse /= 2;
	*max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
}
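
/* Worked example (illustration only): max_esverts = 128 with triangle
 * lists (min_verts_per_prim = 3, no adjacency) gives max_reuse = 125,
 * so max_gsprims is clamped to 126.
 */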

/**
 * Determine subgroup information like maximum number of vertices and prims.
 *
 * This happens before the shader is uploaded, since LDS relocations during
 * upload depend on the subgroup size.
 */
void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
{
	const struct si_shader_selector *gs_sel = shader->selector;
	const struct si_shader_selector *es_sel =
		shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
	const enum pipe_shader_type gs_type = gs_sel->type;
	const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
	/* TODO: Use QUADS as the worst case because of reuse, but triangles
	 * will always have 1 additional unoccupied vector lane. We could use
	 * that lane if the worst case was TRIANGLES. */
	const unsigned input_prim = si_get_input_prim(gs_sel, PIPE_PRIM_QUADS);
	const bool use_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
				   input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
	const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
	const unsigned min_verts_per_prim =
		gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;

	/* All these are in dwords: */
	/* We can't allow using the whole LDS, because GS waves compete with
	 * other shader stages for LDS space.
	 *
	 * TODO: We should really take the shader's internal LDS use into
	 *       account. The linker will fail if the size is greater than
	 *       8K dwords.
	 */
	const unsigned max_lds_size = 8 * 1024 - 768;
	const unsigned target_lds_size = max_lds_size;
	unsigned esvert_lds_size = 0;
	unsigned gsprim_lds_size = 0;

	/* All these are per subgroup: */
	bool max_vert_out_per_gs_instance = false;
	unsigned max_esverts_base = 128;
	unsigned max_gsprims_base = 128; /* default prim group size clamp */

	/* Hardware has the following non-natural restrictions on the value
	 * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of
	 * the draw:
	 *  - at most 252 for any line input primitive type
	 *  - at most 251 for any quad input primitive type
	 *  - at most 251 for triangle strips with adjacency (this happens to
	 *    be the natural limit for triangle *lists* with adjacency)
	 */
	max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);

	if (gs_type == PIPE_SHADER_GEOMETRY) {
		unsigned max_out_verts_per_gsprim =
			gs_sel->gs_max_out_vertices * gs_num_invocations;

		if (max_out_verts_per_gsprim <= 256) {
			if (max_out_verts_per_gsprim) {
				max_gsprims_base = MIN2(max_gsprims_base,
							256 / max_out_verts_per_gsprim);
			}
		} else {
			/* Use special multi-cycling mode in which each GS
			 * instance gets its own subgroup. Does not work with
			 * tessellation. */
			max_vert_out_per_gs_instance = true;
			max_gsprims_base = 1;
			max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
		}

		esvert_lds_size = es_sel->esgs_itemsize / 4;
		gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
	} else {
		/* VS and TES. */
		/* LDS size for passing data from ES to GS. */
		esvert_lds_size = ngg_nogs_vertex_size(shader);

		/* LDS size for passing data from GS to ES.
		 * GS stores Primitive IDs into LDS at the address corresponding
		 * to the ES thread of the provoking vertex. All ES threads
		 * load and export PrimitiveID for their thread.
		 */
		if (gs_sel->type == PIPE_SHADER_VERTEX &&
		    shader->key.mono.u.vs_export_prim_id)
			esvert_lds_size = MAX2(esvert_lds_size, 1);
	}

	unsigned max_gsprims = max_gsprims_base;
	unsigned max_esverts = max_esverts_base;

	if (esvert_lds_size)
		max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
	if (gsprim_lds_size)
		max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);

	max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
	clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
	assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);

	if (esvert_lds_size || gsprim_lds_size) {
		/* Now that we have a rough proportionality between esverts
		 * and gsprims based on the primitive type, scale both of them
		 * down simultaneously based on required LDS space.
		 *
		 * We could be smarter about this if we knew how much vertex
		 * reuse to expect.
		 */
		unsigned lds_total = max_esverts * esvert_lds_size +
				     max_gsprims * gsprim_lds_size;
		if (lds_total > target_lds_size) {
			max_esverts = max_esverts * target_lds_size / lds_total;
			max_gsprims = max_gsprims * target_lds_size / lds_total;

			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		}
	}

	/* Round up towards full wave sizes for better ALU utilization. */
	if (!max_vert_out_per_gs_instance) {
		const unsigned wavesize = gs_sel->screen->ge_wave_size;
		unsigned orig_max_esverts;
		unsigned orig_max_gsprims;
		do {
			orig_max_esverts = max_esverts;
			orig_max_gsprims = max_gsprims;

			max_esverts = align(max_esverts, wavesize);
			max_esverts = MIN2(max_esverts, max_esverts_base);
			if (esvert_lds_size)
				max_esverts = MIN2(max_esverts,
						   (max_lds_size - max_gsprims * gsprim_lds_size) /
						   esvert_lds_size);
			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);

			max_gsprims = align(max_gsprims, wavesize);
			max_gsprims = MIN2(max_gsprims, max_gsprims_base);
			if (gsprim_lds_size)
				max_gsprims = MIN2(max_gsprims,
						   (max_lds_size - max_esverts * esvert_lds_size) /
						   gsprim_lds_size);
			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
						 min_verts_per_prim, use_adjacency);
			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
		} while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
	}

	/* Hardware restriction: minimum value of max_esverts */
	max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);

	unsigned max_out_vertices =
		max_vert_out_per_gs_instance ? gs_sel->gs_max_out_vertices :
		gs_type == PIPE_SHADER_GEOMETRY ?
		max_gsprims * gs_num_invocations * gs_sel->gs_max_out_vertices :
		max_esverts;
	assert(max_out_vertices <= 256);

	unsigned prim_amp_factor = 1;
	if (gs_type == PIPE_SHADER_GEOMETRY) {
		/* Number of output primitives per GS input primitive after
		 * GS instancing. */
		prim_amp_factor = gs_sel->gs_max_out_vertices;
	}

	/* The GE only checks against the maximum number of ES verts after
	 * allocating a full GS primitive. So we need to ensure that whenever
	 * this check passes, there is enough space for a full primitive without
	 * vertex reuse.
	 */
	shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
	shader->ngg.max_gsprims = max_gsprims;
	shader->ngg.max_out_verts = max_out_vertices;
	shader->ngg.prim_amp_factor = prim_amp_factor;
	shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;

	shader->gs_info.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
	shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;

	assert(shader->ngg.hw_max_esverts >= 24); /* HW limitation */
}