radeonsi: move non-LLVM code out of si_shader_llvm.c
[mesa.git] src/gallium/drivers/radeonsi/si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "util/u_memory.h"
26 #include "tgsi/tgsi_strings.h"
27 #include "tgsi/tgsi_from_mesa.h"
28
29 #include "ac_exp_param.h"
30 #include "ac_rtld.h"
31 #include "si_shader_internal.h"
32 #include "si_pipe.h"
33 #include "sid.h"
34
35 #include "compiler/nir/nir.h"
36 #include "compiler/nir/nir_serialize.h"
37
38 static const char scratch_rsrc_dword0_symbol[] =
39 "SCRATCH_RSRC_DWORD0";
40
41 static const char scratch_rsrc_dword1_symbol[] =
42 "SCRATCH_RSRC_DWORD1";
43
44 static void si_dump_shader_key(const struct si_shader *shader, FILE *f);
45
46 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
47 union si_shader_part_key *key);
48
49 /** Whether the shader runs as a combination of multiple API shaders */
50 static bool is_multi_part_shader(struct si_shader_context *ctx)
51 {
52 if (ctx->screen->info.chip_class <= GFX8)
53 return false;
54
55 return ctx->shader->key.as_ls ||
56 ctx->shader->key.as_es ||
57 ctx->type == PIPE_SHADER_TESS_CTRL ||
58 ctx->type == PIPE_SHADER_GEOMETRY;
59 }
60
61 /** Whether the shader runs on a merged HW stage (LSHS or ESGS) */
62 bool si_is_merged_shader(struct si_shader_context *ctx)
63 {
64 return ctx->shader->key.as_ngg || is_multi_part_shader(ctx);
65 }
66
67 /**
68 * Returns a unique index for a per-patch semantic name and index. The index
69 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
70 * can be calculated.
71 */
72 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
73 {
74 switch (semantic_name) {
75 case TGSI_SEMANTIC_TESSOUTER:
76 return 0;
77 case TGSI_SEMANTIC_TESSINNER:
78 return 1;
79 case TGSI_SEMANTIC_PATCH:
80 assert(index < 30);
81 return 2 + index;
82
83 default:
84 assert(!"invalid semantic name");
85 return 0;
86 }
87 }
88
89 /**
90 * Returns a unique index for a semantic name and index. The index must be
91 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
92 * calculated.
93 */
94 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index,
95 unsigned is_varying)
96 {
97 switch (semantic_name) {
98 case TGSI_SEMANTIC_POSITION:
99 return 0;
100 case TGSI_SEMANTIC_GENERIC:
 101	 * Some shader stages use the highest used IO index
 102	 * to determine the size to allocate for inputs/outputs
 103	 * (in LDS, tess and GS rings), so GENERIC should be placed
 104	 * right after POSITION to make that size as small as possible.
105 */
106 if (index < SI_MAX_IO_GENERIC)
107 return 1 + index;
108
109 assert(!"invalid generic index");
110 return 0;
111 case TGSI_SEMANTIC_FOG:
112 return SI_MAX_IO_GENERIC + 1;
113 case TGSI_SEMANTIC_COLOR:
114 assert(index < 2);
115 return SI_MAX_IO_GENERIC + 2 + index;
116 case TGSI_SEMANTIC_BCOLOR:
117 assert(index < 2);
118 /* If it's a varying, COLOR and BCOLOR alias. */
119 if (is_varying)
120 return SI_MAX_IO_GENERIC + 2 + index;
121 else
122 return SI_MAX_IO_GENERIC + 4 + index;
123 case TGSI_SEMANTIC_TEXCOORD:
124 assert(index < 8);
125 return SI_MAX_IO_GENERIC + 6 + index;
126
127 /* These are rarely used between LS and HS or ES and GS. */
128 case TGSI_SEMANTIC_CLIPDIST:
129 assert(index < 2);
130 return SI_MAX_IO_GENERIC + 6 + 8 + index;
131 case TGSI_SEMANTIC_CLIPVERTEX:
132 return SI_MAX_IO_GENERIC + 6 + 8 + 2;
133 case TGSI_SEMANTIC_PSIZE:
134 return SI_MAX_IO_GENERIC + 6 + 8 + 3;
135
136 /* These can't be written by LS, HS, and ES. */
137 case TGSI_SEMANTIC_LAYER:
138 return SI_MAX_IO_GENERIC + 6 + 8 + 4;
139 case TGSI_SEMANTIC_VIEWPORT_INDEX:
140 return SI_MAX_IO_GENERIC + 6 + 8 + 5;
141 case TGSI_SEMANTIC_PRIMID:
142 STATIC_ASSERT(SI_MAX_IO_GENERIC + 6 + 8 + 6 <= 63);
143 return SI_MAX_IO_GENERIC + 6 + 8 + 6;
144 default:
145 fprintf(stderr, "invalid semantic name = %u\n", semantic_name);
146 assert(!"invalid semantic name");
147 return 0;
148 }
149 }
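/* Illustrative layout of the resulting index space, assuming
 * SI_MAX_IO_GENERIC == 32 (its value in si_shader.h at the time of writing):
 *
 *   POSITION         -> 0
 *   GENERIC[0..31]   -> 1..32
 *   FOG              -> 33
 *   COLOR[0..1]      -> 34..35
 *   BCOLOR[0..1]     -> 34..35 (as a varying) or 36..37 (as an output)
 *   TEXCOORD[0..7]   -> 38..45
 *   CLIPDIST[0..1]   -> 46..47
 *   CLIPVERTEX       -> 48
 *   PSIZE            -> 49
 *   LAYER            -> 50
 *   VIEWPORT_INDEX   -> 51
 *   PRIMID           -> 52
 *
 * Every index fits in a 64-bit mask such as key.opt.kill_outputs.
 */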
150
151 /**
152 * Get the value of a shader input parameter and extract a bitfield.
153 */
154 static LLVMValueRef unpack_llvm_param(struct si_shader_context *ctx,
155 LLVMValueRef value, unsigned rshift,
156 unsigned bitwidth)
157 {
158 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
159 value = ac_to_integer(&ctx->ac, value);
160
161 if (rshift)
162 value = LLVMBuildLShr(ctx->ac.builder, value,
163 LLVMConstInt(ctx->ac.i32, rshift, 0), "");
164
165 if (rshift + bitwidth < 32) {
166 unsigned mask = (1 << bitwidth) - 1;
167 value = LLVMBuildAnd(ctx->ac.builder, value,
168 LLVMConstInt(ctx->ac.i32, mask, 0), "");
169 }
170
171 return value;
172 }
173
174 LLVMValueRef si_unpack_param(struct si_shader_context *ctx,
175 struct ac_arg param, unsigned rshift,
176 unsigned bitwidth)
177 {
178 LLVMValueRef value = ac_get_arg(&ctx->ac, param);
179
180 return unpack_llvm_param(ctx, value, rshift, bitwidth);
181 }
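/* Usage sketch: si_llvm_emit_streamout() below extracts the streamout
 * vertex count from bits [22:16] of the streamout config SGPR with
 *
 *   si_unpack_param(ctx, ctx->streamout_config, 16, 7);
 *
 * which emits an LSHR by 16 followed by an AND with 0x7f.
 */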
182
183 static LLVMValueRef unpack_sint16(struct si_shader_context *ctx,
184 LLVMValueRef i32, unsigned index)
185 {
186 assert(index <= 1);
187
188 if (index == 1)
189 return LLVMBuildAShr(ctx->ac.builder, i32,
190 LLVMConstInt(ctx->ac.i32, 16, 0), "");
191
192 return LLVMBuildSExt(ctx->ac.builder,
193 LLVMBuildTrunc(ctx->ac.builder, i32,
194 ctx->ac.i16, ""),
195 ctx->ac.i32, "");
196 }
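/* Example (illustrative): for i32 = 0xFFFE0005,
 *   index 0: trunc to i16 (0x0005), then sext -> 5
 *   index 1: ashr by 16 -> 0xFFFFFFFE -> -2
 */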
197
198 void si_llvm_load_input_vs(
199 struct si_shader_context *ctx,
200 unsigned input_index,
201 LLVMValueRef out[4])
202 {
203 const struct si_shader_info *info = &ctx->shader->selector->info;
204 unsigned vs_blit_property = info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
205
206 if (vs_blit_property) {
207 LLVMValueRef vertex_id = ctx->abi.vertex_id;
208 LLVMValueRef sel_x1 = LLVMBuildICmp(ctx->ac.builder,
209 LLVMIntULE, vertex_id,
210 ctx->ac.i32_1, "");
211 /* Use LLVMIntNE, because we have 3 vertices and only
212 * the middle one should use y2.
213 */
214 LLVMValueRef sel_y1 = LLVMBuildICmp(ctx->ac.builder,
215 LLVMIntNE, vertex_id,
216 ctx->ac.i32_1, "");
217
218 unsigned param_vs_blit_inputs = ctx->vs_blit_inputs.arg_index;
219 if (input_index == 0) {
220 /* Position: */
221 LLVMValueRef x1y1 = LLVMGetParam(ctx->main_fn,
222 param_vs_blit_inputs);
223 LLVMValueRef x2y2 = LLVMGetParam(ctx->main_fn,
224 param_vs_blit_inputs + 1);
225
226 LLVMValueRef x1 = unpack_sint16(ctx, x1y1, 0);
227 LLVMValueRef y1 = unpack_sint16(ctx, x1y1, 1);
228 LLVMValueRef x2 = unpack_sint16(ctx, x2y2, 0);
229 LLVMValueRef y2 = unpack_sint16(ctx, x2y2, 1);
230
231 LLVMValueRef x = LLVMBuildSelect(ctx->ac.builder, sel_x1,
232 x1, x2, "");
233 LLVMValueRef y = LLVMBuildSelect(ctx->ac.builder, sel_y1,
234 y1, y2, "");
235
236 out[0] = LLVMBuildSIToFP(ctx->ac.builder, x, ctx->ac.f32, "");
237 out[1] = LLVMBuildSIToFP(ctx->ac.builder, y, ctx->ac.f32, "");
238 out[2] = LLVMGetParam(ctx->main_fn,
239 param_vs_blit_inputs + 2);
240 out[3] = ctx->ac.f32_1;
241 return;
242 }
243
244 /* Color or texture coordinates: */
245 assert(input_index == 1);
246
247 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
248 for (int i = 0; i < 4; i++) {
249 out[i] = LLVMGetParam(ctx->main_fn,
250 param_vs_blit_inputs + 3 + i);
251 }
252 } else {
253 assert(vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD);
254 LLVMValueRef x1 = LLVMGetParam(ctx->main_fn,
255 param_vs_blit_inputs + 3);
256 LLVMValueRef y1 = LLVMGetParam(ctx->main_fn,
257 param_vs_blit_inputs + 4);
258 LLVMValueRef x2 = LLVMGetParam(ctx->main_fn,
259 param_vs_blit_inputs + 5);
260 LLVMValueRef y2 = LLVMGetParam(ctx->main_fn,
261 param_vs_blit_inputs + 6);
262
263 out[0] = LLVMBuildSelect(ctx->ac.builder, sel_x1,
264 x1, x2, "");
265 out[1] = LLVMBuildSelect(ctx->ac.builder, sel_y1,
266 y1, y2, "");
267 out[2] = LLVMGetParam(ctx->main_fn,
268 param_vs_blit_inputs + 7);
269 out[3] = LLVMGetParam(ctx->main_fn,
270 param_vs_blit_inputs + 8);
271 }
272 return;
273 }
274
275 unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
276 union si_vs_fix_fetch fix_fetch;
277 LLVMValueRef vb_desc;
278 LLVMValueRef vertex_index;
279 LLVMValueRef tmp;
280
281 if (input_index < num_vbos_in_user_sgprs) {
282 vb_desc = ac_get_arg(&ctx->ac, ctx->vb_descriptors[input_index]);
283 } else {
 284		unsigned index = input_index - num_vbos_in_user_sgprs;
285 vb_desc = ac_build_load_to_sgpr(&ctx->ac,
286 ac_get_arg(&ctx->ac, ctx->vertex_buffers),
287 LLVMConstInt(ctx->ac.i32, index, 0));
288 }
289
290 vertex_index = LLVMGetParam(ctx->main_fn,
291 ctx->vertex_index0.arg_index +
292 input_index);
293
294 /* Use the open-coded implementation for all loads of doubles and
295 * of dword-sized data that needs fixups. We need to insert conversion
296 * code anyway, and the amd/common code does it for us.
297 *
298 * Note: On LLVM <= 8, we can only open-code formats with
299 * channel size >= 4 bytes.
300 */
301 bool opencode = ctx->shader->key.mono.vs_fetch_opencode & (1 << input_index);
302 fix_fetch.bits = ctx->shader->key.mono.vs_fix_fetch[input_index].bits;
303 if (opencode ||
304 (fix_fetch.u.log_size == 3 && fix_fetch.u.format == AC_FETCH_FORMAT_FLOAT) ||
 305	    (fix_fetch.u.log_size == 2 && fix_fetch.u.format != AC_FETCH_FORMAT_FLOAT)) {
306 tmp = ac_build_opencoded_load_format(
307 &ctx->ac, fix_fetch.u.log_size, fix_fetch.u.num_channels_m1 + 1,
308 fix_fetch.u.format, fix_fetch.u.reverse, !opencode,
309 vb_desc, vertex_index, ctx->ac.i32_0, ctx->ac.i32_0, 0, true);
310 for (unsigned i = 0; i < 4; ++i)
311 out[i] = LLVMBuildExtractElement(ctx->ac.builder, tmp, LLVMConstInt(ctx->ac.i32, i, false), "");
312 return;
313 }
314
315 /* Do multiple loads for special formats. */
316 unsigned required_channels = util_last_bit(info->input_usage_mask[input_index]);
317 LLVMValueRef fetches[4];
318 unsigned num_fetches;
319 unsigned fetch_stride;
320 unsigned channels_per_fetch;
321
322 if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2) {
323 num_fetches = MIN2(required_channels, 3);
324 fetch_stride = 1 << fix_fetch.u.log_size;
325 channels_per_fetch = 1;
326 } else {
327 num_fetches = 1;
328 fetch_stride = 0;
329 channels_per_fetch = required_channels;
330 }
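	/* Illustrative cases: a 3-channel 8- or 16-bit format (log_size <= 1,
	 * num_channels_m1 == 2) takes the first branch and is split into up
	 * to 3 single-channel fetches, 1 or 2 bytes apart, because the
	 * hardware can't fetch it in one load. Everything else, including
	 * attributes with no recorded fixup (fix_fetch.bits == 0), is
	 * fetched once with all required channels.
	 */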
331
332 for (unsigned i = 0; i < num_fetches; ++i) {
333 LLVMValueRef voffset = LLVMConstInt(ctx->ac.i32, fetch_stride * i, 0);
334 fetches[i] = ac_build_buffer_load_format(&ctx->ac, vb_desc, vertex_index, voffset,
335 channels_per_fetch, 0, true);
336 }
337
338 if (num_fetches == 1 && channels_per_fetch > 1) {
339 LLVMValueRef fetch = fetches[0];
340 for (unsigned i = 0; i < channels_per_fetch; ++i) {
341 tmp = LLVMConstInt(ctx->ac.i32, i, false);
342 fetches[i] = LLVMBuildExtractElement(
343 ctx->ac.builder, fetch, tmp, "");
344 }
345 num_fetches = channels_per_fetch;
346 channels_per_fetch = 1;
347 }
348
349 for (unsigned i = num_fetches; i < 4; ++i)
350 fetches[i] = LLVMGetUndef(ctx->ac.f32);
351
352 if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2 &&
353 required_channels == 4) {
354 if (fix_fetch.u.format == AC_FETCH_FORMAT_UINT || fix_fetch.u.format == AC_FETCH_FORMAT_SINT)
355 fetches[3] = ctx->ac.i32_1;
356 else
357 fetches[3] = ctx->ac.f32_1;
358 } else if (fix_fetch.u.log_size == 3 &&
359 (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ||
360 fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED ||
361 fix_fetch.u.format == AC_FETCH_FORMAT_SINT) &&
362 required_channels == 4) {
363 /* For 2_10_10_10, the hardware returns an unsigned value;
364 * convert it to a signed one.
365 */
366 LLVMValueRef tmp = fetches[3];
367 LLVMValueRef c30 = LLVMConstInt(ctx->ac.i32, 30, 0);
368
369 /* First, recover the sign-extended signed integer value. */
370 if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED)
371 tmp = LLVMBuildFPToUI(ctx->ac.builder, tmp, ctx->ac.i32, "");
372 else
373 tmp = ac_to_integer(&ctx->ac, tmp);
374
375 /* For the integer-like cases, do a natural sign extension.
376 *
377 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
378 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
379 * exponent.
380 */
381 tmp = LLVMBuildShl(ctx->ac.builder, tmp,
382 fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ?
383 LLVMConstInt(ctx->ac.i32, 7, 0) : c30, "");
384 tmp = LLVMBuildAShr(ctx->ac.builder, tmp, c30, "");
385
386 /* Convert back to the right type. */
387 if (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM) {
388 LLVMValueRef clamp;
389 LLVMValueRef neg_one = LLVMConstReal(ctx->ac.f32, -1.0);
390 tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->ac.f32, "");
391 clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, tmp, neg_one, "");
392 tmp = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, tmp, "");
393 } else if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED) {
394 tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->ac.f32, "");
395 }
396
397 fetches[3] = tmp;
398 }
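	/* Worked example (illustrative): for an SINT alpha field holding the
	 * 2-bit value 0b11, shl 30 gives 0xC0000000 and ashr 30 yields -1,
	 * i.e. a natural sign extension. In the SNORM case, shl 7 moves the
	 * two exponent LSBs (bits 24:23 of the float encoding) into bits
	 * 31:30 before the same ashr.
	 */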
399
400 for (unsigned i = 0; i < 4; ++i)
401 out[i] = ac_to_float(&ctx->ac, fetches[i]);
402 }
403
404 LLVMValueRef si_get_primitive_id(struct si_shader_context *ctx,
405 unsigned swizzle)
406 {
407 if (swizzle > 0)
408 return ctx->ac.i32_0;
409
410 switch (ctx->type) {
411 case PIPE_SHADER_VERTEX:
412 return ac_get_arg(&ctx->ac, ctx->vs_prim_id);
413 case PIPE_SHADER_TESS_CTRL:
414 return ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id);
415 case PIPE_SHADER_TESS_EVAL:
416 return ac_get_arg(&ctx->ac, ctx->args.tes_patch_id);
417 case PIPE_SHADER_GEOMETRY:
418 return ac_get_arg(&ctx->ac, ctx->args.gs_prim_id);
419 default:
420 assert(0);
421 return ctx->ac.i32_0;
422 }
423 }
424
425 static LLVMValueRef get_base_vertex(struct ac_shader_abi *abi)
426 {
427 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
428
429 /* For non-indexed draws, the base vertex set by the driver
430 * (for direct draws) or the CP (for indirect draws) is the
431 * first vertex ID, but GLSL expects 0 to be returned.
432 */
433 LLVMValueRef vs_state = ac_get_arg(&ctx->ac,
434 ctx->vs_state_bits);
435 LLVMValueRef indexed;
436
437 indexed = LLVMBuildLShr(ctx->ac.builder, vs_state, ctx->ac.i32_1, "");
438 indexed = LLVMBuildTrunc(ctx->ac.builder, indexed, ctx->ac.i1, "");
439
440 return LLVMBuildSelect(ctx->ac.builder, indexed,
441 ac_get_arg(&ctx->ac, ctx->args.base_vertex),
442 ctx->ac.i32_0, "");
443 }
444
445 static LLVMValueRef get_block_size(struct ac_shader_abi *abi)
446 {
447 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
448
449 LLVMValueRef values[3];
450 LLVMValueRef result;
451 unsigned i;
452 unsigned *properties = ctx->shader->selector->info.properties;
453
454 if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
455 unsigned sizes[3] = {
456 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
457 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
458 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
459 };
460
461 for (i = 0; i < 3; ++i)
462 values[i] = LLVMConstInt(ctx->ac.i32, sizes[i], 0);
463
464 result = ac_build_gather_values(&ctx->ac, values, 3);
465 } else {
466 result = ac_get_arg(&ctx->ac, ctx->block_size);
467 }
468
469 return result;
470 }
471
472 void si_declare_compute_memory(struct si_shader_context *ctx)
473 {
474 struct si_shader_selector *sel = ctx->shader->selector;
475 unsigned lds_size = sel->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE];
476
477 LLVMTypeRef i8p = LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_LDS);
478 LLVMValueRef var;
479
480 assert(!ctx->ac.lds);
481
482 var = LLVMAddGlobalInAddressSpace(ctx->ac.module,
483 LLVMArrayType(ctx->ac.i8, lds_size),
484 "compute_lds",
485 AC_ADDR_SPACE_LDS);
486 LLVMSetAlignment(var, 64 * 1024);
487
488 ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, var, i8p, "");
489 }
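/* Roughly the LLVM IR this produces (a sketch, not verbatim output):
 *
 *   @compute_lds = addrspace(3) global [N x i8] undef, align 65536
 *
 * where N is TGSI_PROPERTY_CS_LOCAL_SIZE in bytes. The bitcast result is
 * what the rest of the compiler sees as the LDS base pointer.
 */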
490
491 /* Initialize arguments for the shader export intrinsic */
492 static void si_llvm_init_vs_export_args(struct si_shader_context *ctx,
493 LLVMValueRef *values,
494 unsigned target,
495 struct ac_export_args *args)
496 {
497 args->enabled_channels = 0xf; /* writemask - default is 0xf */
498 args->valid_mask = 0; /* Specify whether the EXEC mask represents the valid mask */
499 args->done = 0; /* Specify whether this is the last export */
500 args->target = target; /* Specify the target we are exporting */
501 args->compr = false;
502
503 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
504 }
505
506 static void si_llvm_emit_clipvertex(struct si_shader_context *ctx,
507 struct ac_export_args *pos, LLVMValueRef *out_elts)
508 {
509 unsigned reg_index;
510 unsigned chan;
511 unsigned const_chan;
512 LLVMValueRef base_elt;
513 LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
514 LLVMValueRef constbuf_index = LLVMConstInt(ctx->ac.i32,
515 SI_VS_CONST_CLIP_PLANES, 0);
516 LLVMValueRef const_resource = ac_build_load_to_sgpr(&ctx->ac, ptr, constbuf_index);
517
518 for (reg_index = 0; reg_index < 2; reg_index ++) {
519 struct ac_export_args *args = &pos[2 + reg_index];
520
521 args->out[0] =
522 args->out[1] =
523 args->out[2] =
524 args->out[3] = LLVMConstReal(ctx->ac.f32, 0.0f);
525
526 /* Compute dot products of position and user clip plane vectors */
527 for (chan = 0; chan < 4; chan++) {
528 for (const_chan = 0; const_chan < 4; const_chan++) {
529 LLVMValueRef addr =
530 LLVMConstInt(ctx->ac.i32, ((reg_index * 4 + chan) * 4 +
531 const_chan) * 4, 0);
532 base_elt = si_buffer_load_const(ctx, const_resource,
533 addr);
534 args->out[chan] = ac_build_fmad(&ctx->ac, base_elt,
535 out_elts[const_chan], args->out[chan]);
536 }
537 }
538
539 args->enabled_channels = 0xf;
540 args->valid_mask = 0;
541 args->done = 0;
542 args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
543 args->compr = 0;
544 }
545 }
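/* Address arithmetic (illustrative): the clip-plane constant buffer holds
 * 8 float4 planes. For plane p = reg_index * 4 + chan, component
 * const_chan, the byte offset is (p * 4 + const_chan) * 4; e.g. plane 6
 * (reg_index 1, chan 2), component w -> (6*4 + 3) * 4 = 108.
 */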
546
547 static void si_dump_streamout(struct pipe_stream_output_info *so)
548 {
549 unsigned i;
550
551 if (so->num_outputs)
552 fprintf(stderr, "STREAMOUT\n");
553
554 for (i = 0; i < so->num_outputs; i++) {
555 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
556 so->output[i].start_component;
557 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
558 i, so->output[i].output_buffer,
559 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
560 so->output[i].register_index,
561 mask & 1 ? "x" : "",
562 mask & 2 ? "y" : "",
563 mask & 4 ? "z" : "",
564 mask & 8 ? "w" : "");
565 }
566 }
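/* Example output (illustrative):
 *   STREAMOUT
 *     0: BUF0[0..3] <- OUT[2].xyzw
 *     1: BUF1[4..5] <- OUT[3].xy
 */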
567
568 void si_emit_streamout_output(struct si_shader_context *ctx,
569 LLVMValueRef const *so_buffers,
570 LLVMValueRef const *so_write_offsets,
571 struct pipe_stream_output *stream_out,
572 struct si_shader_output_values *shader_out)
573 {
574 unsigned buf_idx = stream_out->output_buffer;
575 unsigned start = stream_out->start_component;
576 unsigned num_comps = stream_out->num_components;
577 LLVMValueRef out[4];
578
579 assert(num_comps && num_comps <= 4);
580 if (!num_comps || num_comps > 4)
581 return;
582
583 /* Load the output as int. */
584 for (int j = 0; j < num_comps; j++) {
585 assert(stream_out->stream == shader_out->vertex_stream[start + j]);
586
587 out[j] = ac_to_integer(&ctx->ac, shader_out->values[start + j]);
588 }
589
590 /* Pack the output. */
591 LLVMValueRef vdata = NULL;
592
593 switch (num_comps) {
594 case 1: /* as i32 */
595 vdata = out[0];
596 break;
597 case 2: /* as v2i32 */
598 case 3: /* as v3i32 */
599 if (ac_has_vec3_support(ctx->screen->info.chip_class, false)) {
600 vdata = ac_build_gather_values(&ctx->ac, out, num_comps);
601 break;
602 }
603 /* as v4i32 (aligned to 4) */
604 out[3] = LLVMGetUndef(ctx->ac.i32);
605 /* fall through */
606 case 4: /* as v4i32 */
607 vdata = ac_build_gather_values(&ctx->ac, out, util_next_power_of_two(num_comps));
608 break;
609 }
610
611 ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
612 vdata, num_comps,
613 so_write_offsets[buf_idx],
614 ctx->ac.i32_0,
615 stream_out->dst_offset * 4, ac_glc | ac_slc);
616 }
617
618 /**
619 * Write streamout data to buffers for vertex stream @p stream (different
620 * vertex streams can occur for GS copy shaders).
621 */
622 void si_llvm_emit_streamout(struct si_shader_context *ctx,
623 struct si_shader_output_values *outputs,
624 unsigned noutput, unsigned stream)
625 {
626 struct si_shader_selector *sel = ctx->shader->selector;
627 struct pipe_stream_output_info *so = &sel->so;
628 LLVMBuilderRef builder = ctx->ac.builder;
629 int i;
630
631 /* Get bits [22:16], i.e. (so_param >> 16) & 127; */
632 LLVMValueRef so_vtx_count =
633 si_unpack_param(ctx, ctx->streamout_config, 16, 7);
634
635 LLVMValueRef tid = ac_get_thread_id(&ctx->ac);
636
637 /* can_emit = tid < so_vtx_count; */
638 LLVMValueRef can_emit =
639 LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");
640
641 /* Emit the streamout code conditionally. This actually avoids
642 * out-of-bounds buffer access. The hw tells us via the SGPR
643 * (so_vtx_count) which threads are allowed to emit streamout data. */
644 ac_build_ifcc(&ctx->ac, can_emit, 6501);
645 {
646 /* The buffer offset is computed as follows:
647 * ByteOffset = streamout_offset[buffer_id]*4 +
648 * (streamout_write_index + thread_id)*stride[buffer_id] +
649 * attrib_offset
650 */
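		/* Numeric sketch (illustrative): with streamout_offset = 64
		 * (in dwords), stride = 4 dwords (16 bytes), this thread
		 * writing vertex index 10, and dst_offset = 2, the store
		 * lands at 64*4 + 10*16 + 2*4 = 424 bytes into the buffer.
		 */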
651
652 LLVMValueRef so_write_index =
653 ac_get_arg(&ctx->ac,
654 ctx->streamout_write_index);
655
656 /* Compute (streamout_write_index + thread_id). */
657 so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");
658
659 /* Load the descriptor and compute the write offset for each
660 * enabled buffer. */
661 LLVMValueRef so_write_offset[4] = {};
662 LLVMValueRef so_buffers[4];
663 LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac,
664 ctx->rw_buffers);
665
666 for (i = 0; i < 4; i++) {
667 if (!so->stride[i])
668 continue;
669
670 LLVMValueRef offset = LLVMConstInt(ctx->ac.i32,
671 SI_VS_STREAMOUT_BUF0 + i, 0);
672
673 so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
674
675 LLVMValueRef so_offset = ac_get_arg(&ctx->ac,
676 ctx->streamout_offset[i]);
677 so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->ac.i32, 4, 0), "");
678
679 so_write_offset[i] = ac_build_imad(&ctx->ac, so_write_index,
680 LLVMConstInt(ctx->ac.i32, so->stride[i]*4, 0),
681 so_offset);
682 }
683
684 /* Write streamout data. */
685 for (i = 0; i < so->num_outputs; i++) {
686 unsigned reg = so->output[i].register_index;
687
688 if (reg >= noutput)
689 continue;
690
691 if (stream != so->output[i].stream)
692 continue;
693
694 si_emit_streamout_output(ctx, so_buffers, so_write_offset,
695 &so->output[i], &outputs[reg]);
696 }
697 }
698 ac_build_endif(&ctx->ac, 6501);
699 }
700
701 static void si_export_param(struct si_shader_context *ctx, unsigned index,
702 LLVMValueRef *values)
703 {
704 struct ac_export_args args;
705
706 si_llvm_init_vs_export_args(ctx, values,
707 V_008DFC_SQ_EXP_PARAM + index, &args);
708 ac_build_export(&ctx->ac, &args);
709 }
710
711 static void si_build_param_exports(struct si_shader_context *ctx,
712 struct si_shader_output_values *outputs,
713 unsigned noutput)
714 {
715 struct si_shader *shader = ctx->shader;
716 unsigned param_count = 0;
717
718 for (unsigned i = 0; i < noutput; i++) {
719 unsigned semantic_name = outputs[i].semantic_name;
720 unsigned semantic_index = outputs[i].semantic_index;
721
722 if (outputs[i].vertex_stream[0] != 0 &&
723 outputs[i].vertex_stream[1] != 0 &&
724 outputs[i].vertex_stream[2] != 0 &&
725 outputs[i].vertex_stream[3] != 0)
726 continue;
727
728 switch (semantic_name) {
729 case TGSI_SEMANTIC_LAYER:
730 case TGSI_SEMANTIC_VIEWPORT_INDEX:
731 case TGSI_SEMANTIC_CLIPDIST:
732 case TGSI_SEMANTIC_COLOR:
733 case TGSI_SEMANTIC_BCOLOR:
734 case TGSI_SEMANTIC_PRIMID:
735 case TGSI_SEMANTIC_FOG:
736 case TGSI_SEMANTIC_TEXCOORD:
737 case TGSI_SEMANTIC_GENERIC:
738 break;
739 default:
740 continue;
741 }
742
743 if ((semantic_name != TGSI_SEMANTIC_GENERIC ||
744 semantic_index < SI_MAX_IO_GENERIC) &&
745 shader->key.opt.kill_outputs &
746 (1ull << si_shader_io_get_unique_index(semantic_name,
747 semantic_index, true)))
748 continue;
749
750 si_export_param(ctx, param_count, outputs[i].values);
751
752 assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
753 shader->info.vs_output_param_offset[i] = param_count++;
754 }
755
756 shader->info.nr_param_exports = param_count;
757 }
758
759 /**
760 * Vertex color clamping.
761 *
 762 * This loads a state constant from a user data SGPR and adds
 763 * an IF statement that clamps all colors if the constant
 764 * is true.
765 */
766 static void si_vertex_color_clamping(struct si_shader_context *ctx,
767 struct si_shader_output_values *outputs,
768 unsigned noutput)
769 {
770 LLVMValueRef addr[SI_MAX_VS_OUTPUTS][4];
771 bool has_colors = false;
772
773 /* Store original colors to alloca variables. */
774 for (unsigned i = 0; i < noutput; i++) {
775 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
776 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
777 continue;
778
779 for (unsigned j = 0; j < 4; j++) {
780 addr[i][j] = ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
781 LLVMBuildStore(ctx->ac.builder, outputs[i].values[j], addr[i][j]);
782 }
783 has_colors = true;
784 }
785
786 if (!has_colors)
787 return;
788
789 /* The state is in the first bit of the user SGPR. */
790 LLVMValueRef cond = ac_get_arg(&ctx->ac, ctx->vs_state_bits);
791 cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->ac.i1, "");
792
793 ac_build_ifcc(&ctx->ac, cond, 6502);
794
795 /* Store clamped colors to alloca variables within the conditional block. */
796 for (unsigned i = 0; i < noutput; i++) {
797 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
798 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
799 continue;
800
801 for (unsigned j = 0; j < 4; j++) {
802 LLVMBuildStore(ctx->ac.builder,
803 ac_build_clamp(&ctx->ac, outputs[i].values[j]),
804 addr[i][j]);
805 }
806 }
807 ac_build_endif(&ctx->ac, 6502);
808
809 /* Load clamped colors */
810 for (unsigned i = 0; i < noutput; i++) {
811 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
812 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
813 continue;
814
815 for (unsigned j = 0; j < 4; j++) {
816 outputs[i].values[j] =
817 LLVMBuildLoad(ctx->ac.builder, addr[i][j], "");
818 }
819 }
820 }
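/* Note on the alloca pattern above: storing the original colors, then
 * conditionally overwriting them inside the ifcc block and reloading them
 * afterwards lets LLVM's mem2reg pass build the phi nodes for the
 * conditional merge, instead of constructing them by hand here.
 */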
821
822 /* Generate export instructions for hardware VS shader stage or NGG GS stage
823 * (position and parameter data only).
824 */
825 void si_llvm_export_vs(struct si_shader_context *ctx,
826 struct si_shader_output_values *outputs,
827 unsigned noutput)
828 {
829 struct si_shader *shader = ctx->shader;
830 struct ac_export_args pos_args[4] = {};
831 LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
832 unsigned pos_idx;
833 int i;
834
835 si_vertex_color_clamping(ctx, outputs, noutput);
836
837 /* Build position exports. */
838 for (i = 0; i < noutput; i++) {
839 switch (outputs[i].semantic_name) {
840 case TGSI_SEMANTIC_POSITION:
841 si_llvm_init_vs_export_args(ctx, outputs[i].values,
842 V_008DFC_SQ_EXP_POS, &pos_args[0]);
843 break;
844 case TGSI_SEMANTIC_PSIZE:
845 psize_value = outputs[i].values[0];
846 break;
847 case TGSI_SEMANTIC_LAYER:
848 layer_value = outputs[i].values[0];
849 break;
850 case TGSI_SEMANTIC_VIEWPORT_INDEX:
851 viewport_index_value = outputs[i].values[0];
852 break;
853 case TGSI_SEMANTIC_EDGEFLAG:
854 edgeflag_value = outputs[i].values[0];
855 break;
856 case TGSI_SEMANTIC_CLIPDIST:
857 if (!shader->key.opt.clip_disable) {
858 unsigned index = 2 + outputs[i].semantic_index;
859 si_llvm_init_vs_export_args(ctx, outputs[i].values,
860 V_008DFC_SQ_EXP_POS + index,
861 &pos_args[index]);
862 }
863 break;
864 case TGSI_SEMANTIC_CLIPVERTEX:
865 if (!shader->key.opt.clip_disable) {
866 si_llvm_emit_clipvertex(ctx, pos_args,
867 outputs[i].values);
868 }
869 break;
870 }
871 }
872
873 /* We need to add the position output manually if it's missing. */
874 if (!pos_args[0].out[0]) {
875 pos_args[0].enabled_channels = 0xf; /* writemask */
876 pos_args[0].valid_mask = 0; /* EXEC mask */
877 pos_args[0].done = 0; /* last export? */
878 pos_args[0].target = V_008DFC_SQ_EXP_POS;
879 pos_args[0].compr = 0; /* COMPR flag */
880 pos_args[0].out[0] = ctx->ac.f32_0; /* X */
881 pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
882 pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
883 pos_args[0].out[3] = ctx->ac.f32_1; /* W */
884 }
885
886 bool pos_writes_edgeflag = shader->selector->info.writes_edgeflag &&
887 !shader->key.as_ngg;
888
889 /* Write the misc vector (point size, edgeflag, layer, viewport). */
890 if (shader->selector->info.writes_psize ||
891 pos_writes_edgeflag ||
892 shader->selector->info.writes_viewport_index ||
893 shader->selector->info.writes_layer) {
894 pos_args[1].enabled_channels = shader->selector->info.writes_psize |
895 (pos_writes_edgeflag << 1) |
896 (shader->selector->info.writes_layer << 2);
897
898 pos_args[1].valid_mask = 0; /* EXEC mask */
899 pos_args[1].done = 0; /* last export? */
900 pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
901 pos_args[1].compr = 0; /* COMPR flag */
902 pos_args[1].out[0] = ctx->ac.f32_0; /* X */
903 pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
904 pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
905 pos_args[1].out[3] = ctx->ac.f32_0; /* W */
906
907 if (shader->selector->info.writes_psize)
908 pos_args[1].out[0] = psize_value;
909
910 if (pos_writes_edgeflag) {
911 /* The output is a float, but the hw expects an integer
912 * with the first bit containing the edge flag. */
913 edgeflag_value = LLVMBuildFPToUI(ctx->ac.builder,
914 edgeflag_value,
915 ctx->ac.i32, "");
916 edgeflag_value = ac_build_umin(&ctx->ac,
917 edgeflag_value,
918 ctx->ac.i32_1);
919
920 /* The LLVM intrinsic expects a float. */
921 pos_args[1].out[1] = ac_to_float(&ctx->ac, edgeflag_value);
922 }
923
924 if (ctx->screen->info.chip_class >= GFX9) {
925 /* GFX9 has the layer in out.z[10:0] and the viewport
926 * index in out.z[19:16].
927 */
928 if (shader->selector->info.writes_layer)
929 pos_args[1].out[2] = layer_value;
930
931 if (shader->selector->info.writes_viewport_index) {
932 LLVMValueRef v = viewport_index_value;
933
934 v = ac_to_integer(&ctx->ac, v);
935 v = LLVMBuildShl(ctx->ac.builder, v,
936 LLVMConstInt(ctx->ac.i32, 16, 0), "");
937 v = LLVMBuildOr(ctx->ac.builder, v,
938 ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
939 pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
940 pos_args[1].enabled_channels |= 1 << 2;
941 }
942 } else {
943 if (shader->selector->info.writes_layer)
944 pos_args[1].out[2] = layer_value;
945
946 if (shader->selector->info.writes_viewport_index) {
947 pos_args[1].out[3] = viewport_index_value;
948 pos_args[1].enabled_channels |= 1 << 3;
949 }
950 }
951 }
952
953 for (i = 0; i < 4; i++)
954 if (pos_args[i].out[0])
955 shader->info.nr_pos_exports++;
956
957 /* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
958 * Setting valid_mask=1 prevents it and has no other effect.
959 */
960 if (ctx->screen->info.family == CHIP_NAVI10 ||
961 ctx->screen->info.family == CHIP_NAVI12 ||
962 ctx->screen->info.family == CHIP_NAVI14)
963 pos_args[0].valid_mask = 1;
964
965 pos_idx = 0;
966 for (i = 0; i < 4; i++) {
967 if (!pos_args[i].out[0])
968 continue;
969
970 /* Specify the target we are exporting */
971 pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
972
973 if (pos_idx == shader->info.nr_pos_exports)
974 /* Specify that this is the last export */
975 pos_args[i].done = 1;
976
977 ac_build_export(&ctx->ac, &pos_args[i]);
978 }
979
980 /* Build parameter exports. */
981 si_build_param_exports(ctx, outputs, noutput);
982 }
983
984 static void si_llvm_emit_vs_epilogue(struct ac_shader_abi *abi,
985 unsigned max_outputs,
986 LLVMValueRef *addrs)
987 {
988 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
989 struct si_shader_info *info = &ctx->shader->selector->info;
990 struct si_shader_output_values *outputs = NULL;
 991	int i, j;
992
993 assert(!ctx->shader->is_gs_copy_shader);
994 assert(info->num_outputs <= max_outputs);
995
996 outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));
997
998 for (i = 0; i < info->num_outputs; i++) {
999 outputs[i].semantic_name = info->output_semantic_name[i];
1000 outputs[i].semantic_index = info->output_semantic_index[i];
1001
1002 for (j = 0; j < 4; j++) {
1003 outputs[i].values[j] =
1004 LLVMBuildLoad(ctx->ac.builder,
1005 addrs[4 * i + j],
1006 "");
1007 outputs[i].vertex_stream[j] =
1008 (info->output_streams[i] >> (2 * j)) & 3;
1009 }
1010 }
1011
1012 if (!ctx->screen->use_ngg_streamout &&
1013 ctx->shader->selector->so.num_outputs)
1014 si_llvm_emit_streamout(ctx, outputs, i, 0);
1015
1016 /* Export PrimitiveID. */
1017 if (ctx->shader->key.mono.u.vs_export_prim_id) {
1018 outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
1019 outputs[i].semantic_index = 0;
1020 outputs[i].values[0] = ac_to_float(&ctx->ac, si_get_primitive_id(ctx, 0));
1021 for (j = 1; j < 4; j++)
1022 outputs[i].values[j] = LLVMConstReal(ctx->ac.f32, 0);
1023
1024 memset(outputs[i].vertex_stream, 0,
1025 sizeof(outputs[i].vertex_stream));
1026 i++;
1027 }
1028
1029 si_llvm_export_vs(ctx, outputs, i);
1030 FREE(outputs);
1031 }
1032
1033 static void si_llvm_emit_prim_discard_cs_epilogue(struct ac_shader_abi *abi,
1034 unsigned max_outputs,
1035 LLVMValueRef *addrs)
1036 {
1037 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1038 struct si_shader_info *info = &ctx->shader->selector->info;
1039 LLVMValueRef pos[4] = {};
1040
1041 assert(info->num_outputs <= max_outputs);
1042
1043 for (unsigned i = 0; i < info->num_outputs; i++) {
1044 if (info->output_semantic_name[i] != TGSI_SEMANTIC_POSITION)
1045 continue;
1046
1047 for (unsigned chan = 0; chan < 4; chan++)
1048 pos[chan] = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
1049 break;
1050 }
1051 assert(pos[0] != NULL);
1052
1053 /* Return the position output. */
1054 LLVMValueRef ret = ctx->return_value;
1055 for (unsigned chan = 0; chan < 4; chan++)
1056 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, pos[chan], chan, "");
1057 ctx->return_value = ret;
1058 }
1059
1060 static void declare_streamout_params(struct si_shader_context *ctx,
1061 struct pipe_stream_output_info *so)
1062 {
1063 if (ctx->screen->use_ngg_streamout) {
1064 if (ctx->type == PIPE_SHADER_TESS_EVAL)
1065 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1066 return;
1067 }
1068
1069 /* Streamout SGPRs. */
1070 if (so->num_outputs) {
1071 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_config);
1072 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_write_index);
1073 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
1074 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1075 }
1076
1077 /* A streamout buffer offset is loaded if the stride is non-zero. */
1078 for (int i = 0; i < 4; i++) {
1079 if (!so->stride[i])
1080 continue;
1081
1082 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_offset[i]);
1083 }
1084 }
1085
1086 static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
1087 {
1088 switch (shader->selector->type) {
1089 case PIPE_SHADER_VERTEX:
1090 case PIPE_SHADER_TESS_EVAL:
1091 return shader->key.as_ngg ? 128 : 0;
1092
1093 case PIPE_SHADER_TESS_CTRL:
1094 /* Return this so that LLVM doesn't remove s_barrier
1095 * instructions on chips where we use s_barrier. */
1096 return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 0;
1097
1098 case PIPE_SHADER_GEOMETRY:
1099 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 0;
1100
1101 case PIPE_SHADER_COMPUTE:
1102 break; /* see below */
1103
1104 default:
1105 return 0;
1106 }
1107
1108 const unsigned *properties = shader->selector->info.properties;
1109 unsigned max_work_group_size =
1110 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
1111 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
1112 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
1113
1114 if (!max_work_group_size) {
 1115		/* This is a variable group size compute shader;
1116 * compile it for the maximum possible group size.
1117 */
1118 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
1119 }
1120 return max_work_group_size;
1121 }
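/* E.g. a compute shader declared with a fixed 8x8x1 block returns 64;
 * one with no fixed block size is compiled for
 * SI_MAX_VARIABLE_THREADS_PER_BLOCK threads (1024 in si_shader.h at the
 * time of writing).
 */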
1122
1123 static void declare_const_and_shader_buffers(struct si_shader_context *ctx,
1124 bool assign_params)
1125 {
1126 enum ac_arg_type const_shader_buf_type;
1127
1128 if (ctx->shader->selector->info.const_buffers_declared == 1 &&
1129 ctx->shader->selector->info.shader_buffers_declared == 0)
1130 const_shader_buf_type = AC_ARG_CONST_FLOAT_PTR;
1131 else
1132 const_shader_buf_type = AC_ARG_CONST_DESC_PTR;
1133
1134 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, const_shader_buf_type,
1135 assign_params ? &ctx->const_and_shader_buffers :
1136 &ctx->other_const_and_shader_buffers);
1137 }
1138
1139 static void declare_samplers_and_images(struct si_shader_context *ctx,
1140 bool assign_params)
1141 {
1142 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
1143 assign_params ? &ctx->samplers_and_images :
1144 &ctx->other_samplers_and_images);
1145 }
1146
1147 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
1148 bool assign_params)
1149 {
1150 declare_const_and_shader_buffers(ctx, assign_params);
1151 declare_samplers_and_images(ctx, assign_params);
1152 }
1153
1154 static void declare_global_desc_pointers(struct si_shader_context *ctx)
1155 {
1156 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
1157 &ctx->rw_buffers);
1158 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
1159 &ctx->bindless_samplers_and_images);
1160 }
1161
1162 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx)
1163 {
1164 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
1165 if (!ctx->shader->is_gs_copy_shader) {
1166 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.base_vertex);
1167 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.start_instance);
1168 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.draw_id);
1169 }
1170 }
1171
1172 static void declare_vb_descriptor_input_sgprs(struct si_shader_context *ctx)
1173 {
1174 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->vertex_buffers);
1175
1176 unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
1177 if (num_vbos_in_user_sgprs) {
1178 unsigned user_sgprs = ctx->args.num_sgprs_used;
1179
1180 if (si_is_merged_shader(ctx))
1181 user_sgprs -= 8;
1182 assert(user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
1183
1184 /* Declare unused SGPRs to align VB descriptors to 4 SGPRs (hw requirement). */
1185 for (unsigned i = user_sgprs; i < SI_SGPR_VS_VB_DESCRIPTOR_FIRST; i++)
1186 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
1187
1188 assert(num_vbos_in_user_sgprs <= ARRAY_SIZE(ctx->vb_descriptors));
1189 for (unsigned i = 0; i < num_vbos_in_user_sgprs; i++)
1190 ac_add_arg(&ctx->args, AC_ARG_SGPR, 4, AC_ARG_INT, &ctx->vb_descriptors[i]);
1191 }
1192 }
1193
1194 static void declare_vs_input_vgprs(struct si_shader_context *ctx,
1195 unsigned *num_prolog_vgprs,
1196 bool ngg_cull_shader)
1197 {
1198 struct si_shader *shader = ctx->shader;
1199
1200 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.vertex_id);
1201 if (shader->key.as_ls) {
1202 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->rel_auto_id);
1203 if (ctx->screen->info.chip_class >= GFX10) {
1204 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
1205 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
1206 } else {
1207 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
1208 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
1209 }
1210 } else if (ctx->screen->info.chip_class >= GFX10) {
1211 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
1212 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
1213 &ctx->vs_prim_id); /* user vgpr or PrimID (legacy) */
1214 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
1215 } else {
1216 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
1217 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->vs_prim_id);
1218 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
1219 }
1220
1221 if (!shader->is_gs_copy_shader) {
1222 if (shader->key.opt.ngg_culling && !ngg_cull_shader) {
1223 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
1224 &ctx->ngg_old_thread_id);
1225 }
1226
1227 /* Vertex load indices. */
1228 if (shader->selector->info.num_inputs) {
1229 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
1230 &ctx->vertex_index0);
1231 for (unsigned i = 1; i < shader->selector->info.num_inputs; i++)
1232 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
1233 }
1234 *num_prolog_vgprs += shader->selector->info.num_inputs;
1235 }
1236 }
1237
1238 static void declare_vs_blit_inputs(struct si_shader_context *ctx,
1239 unsigned vs_blit_property)
1240 {
1241 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
1242 &ctx->vs_blit_inputs); /* i16 x1, y1 */
 1243	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* i16 x2, y2 */
1244 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* depth */
1245
1246 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
1247 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color0 */
1248 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color1 */
1249 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color2 */
1250 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color3 */
1251 } else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
1252 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x1 */
1253 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y1 */
1254 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x2 */
1255 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y2 */
1256 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.z */
1257 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.w */
1258 }
1259 }
1260
1261 static void declare_tes_input_vgprs(struct si_shader_context *ctx, bool ngg_cull_shader)
1262 {
1263 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_u);
1264 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_v);
1265 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->tes_rel_patch_id);
1266 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tes_patch_id);
1267
1268 if (ctx->shader->key.opt.ngg_culling && !ngg_cull_shader) {
1269 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
1270 &ctx->ngg_old_thread_id);
1271 }
1272 }
1273
1274 enum {
1275 /* Convenient merged shader definitions. */
1276 SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
1277 SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
1278 };
1279
1280 void si_add_arg_checked(struct ac_shader_args *args,
1281 enum ac_arg_regfile file,
1282 unsigned registers, enum ac_arg_type type,
1283 struct ac_arg *arg,
1284 unsigned idx)
1285 {
1286 assert(args->arg_count == idx);
1287 ac_add_arg(args, file, registers, type, arg);
1288 }
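/* This wrapper is for arguments whose positions are dictated by the
 * hardware ABI rather than chosen by the driver: e.g. the fragment shader
 * case below passes SI_PARAM_ALPHA_REF, SI_PARAM_PRIM_MASK, etc., and the
 * assert fires if a newly added argument shifts them out of place.
 */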
1289
1290 void si_create_function(struct si_shader_context *ctx, bool ngg_cull_shader)
1291 {
1292 struct si_shader *shader = ctx->shader;
1293 LLVMTypeRef returns[AC_MAX_ARGS];
1294 unsigned i, num_return_sgprs;
1295 unsigned num_returns = 0;
1296 unsigned num_prolog_vgprs = 0;
1297 unsigned type = ctx->type;
1298 unsigned vs_blit_property =
1299 shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
1300
1301 memset(&ctx->args, 0, sizeof(ctx->args));
1302
1303 /* Set MERGED shaders. */
1304 if (ctx->screen->info.chip_class >= GFX9) {
1305 if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
1306 type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
1307 else if (shader->key.as_es || shader->key.as_ngg || type == PIPE_SHADER_GEOMETRY)
1308 type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
1309 }
1310
1311 switch (type) {
1312 case PIPE_SHADER_VERTEX:
1313 declare_global_desc_pointers(ctx);
1314
1315 if (vs_blit_property) {
1316 declare_vs_blit_inputs(ctx, vs_blit_property);
1317
1318 /* VGPRs */
1319 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
1320 break;
1321 }
1322
1323 declare_per_stage_desc_pointers(ctx, true);
1324 declare_vs_specific_input_sgprs(ctx);
1325 if (!shader->is_gs_copy_shader)
1326 declare_vb_descriptor_input_sgprs(ctx);
1327
1328 if (shader->key.as_es) {
1329 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
1330 &ctx->es2gs_offset);
1331 } else if (shader->key.as_ls) {
1332 /* no extra parameters */
1333 } else {
1334 /* The locations of the other parameters are assigned dynamically. */
1335 declare_streamout_params(ctx, &shader->selector->so);
1336 }
1337
1338 /* VGPRs */
1339 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
1340
1341 /* Return values */
1342 if (shader->key.opt.vs_as_prim_discard_cs) {
1343 for (i = 0; i < 4; i++)
1344 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
1345 }
1346 break;
1347
1348 case PIPE_SHADER_TESS_CTRL: /* GFX6-GFX8 */
1349 declare_global_desc_pointers(ctx);
1350 declare_per_stage_desc_pointers(ctx, true);
1351 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
1352 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
1353 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
1354 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
1355 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
1356 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
1357
1358 /* VGPRs */
1359 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
1360 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
1361
1362 /* param_tcs_offchip_offset and param_tcs_factor_offset are
1363 * placed after the user SGPRs.
1364 */
1365 for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
1366 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
1367 for (i = 0; i < 11; i++)
1368 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
1369 break;
1370
1371 case SI_SHADER_MERGED_VERTEX_TESSCTRL:
1372 /* Merged stages have 8 system SGPRs at the beginning. */
1373 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_HS */
1374 declare_per_stage_desc_pointers(ctx,
1375 ctx->type == PIPE_SHADER_TESS_CTRL);
1376 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
1377 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
1378 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
1379 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
1380 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
1381 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
1382
1383 declare_global_desc_pointers(ctx);
1384 declare_per_stage_desc_pointers(ctx,
1385 ctx->type == PIPE_SHADER_VERTEX);
1386 declare_vs_specific_input_sgprs(ctx);
1387
1388 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
1389 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
1390 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
1391 declare_vb_descriptor_input_sgprs(ctx);
1392
1393 /* VGPRs (first TCS, then VS) */
1394 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
1395 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
1396
1397 if (ctx->type == PIPE_SHADER_VERTEX) {
1398 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
1399
1400 /* LS return values are inputs to the TCS main shader part. */
1401 for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
1402 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
1403 for (i = 0; i < 2; i++)
1404 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
1405 } else {
1406 /* TCS return values are inputs to the TCS epilog.
1407 *
1408 * param_tcs_offchip_offset, param_tcs_factor_offset,
1409 * param_tcs_offchip_layout, and param_rw_buffers
1410 * should be passed to the epilog.
1411 */
1412 for (i = 0; i <= 8 + GFX9_SGPR_TCS_OUT_LAYOUT; i++)
1413 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
1414 for (i = 0; i < 11; i++)
1415 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
1416 }
1417 break;
1418
1419 case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
1420 /* Merged stages have 8 system SGPRs at the beginning. */
1421 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_GS */
1422 declare_per_stage_desc_pointers(ctx,
1423 ctx->type == PIPE_SHADER_GEOMETRY);
1424
1425 if (ctx->shader->key.as_ngg)
1426 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_tg_info);
1427 else
1428 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
1429
1430 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
1431 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
1432 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
1433 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
1434 &ctx->small_prim_cull_info); /* SPI_SHADER_PGM_LO_GS << 8 */
1435 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
1436
1437 declare_global_desc_pointers(ctx);
1438 if (ctx->type != PIPE_SHADER_VERTEX || !vs_blit_property) {
1439 declare_per_stage_desc_pointers(ctx,
1440 (ctx->type == PIPE_SHADER_VERTEX ||
1441 ctx->type == PIPE_SHADER_TESS_EVAL));
1442 }
1443
1444 if (ctx->type == PIPE_SHADER_VERTEX) {
1445 if (vs_blit_property)
1446 declare_vs_blit_inputs(ctx, vs_blit_property);
1447 else
1448 declare_vs_specific_input_sgprs(ctx);
1449 } else {
1450 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
1451 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
1452 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
1453 /* Declare as many input SGPRs as the VS has. */
1454 }
1455
1456 if (ctx->type == PIPE_SHADER_VERTEX)
1457 declare_vb_descriptor_input_sgprs(ctx);
1458
1459 /* VGPRs (first GS, then VS/TES) */
1460 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx01_offset);
1461 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx23_offset);
1462 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
1463 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
1464 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx45_offset);
1465
1466 if (ctx->type == PIPE_SHADER_VERTEX) {
1467 declare_vs_input_vgprs(ctx, &num_prolog_vgprs, ngg_cull_shader);
1468 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
1469 declare_tes_input_vgprs(ctx, ngg_cull_shader);
1470 }
1471
1472 if ((ctx->shader->key.as_es || ngg_cull_shader) &&
1473 (ctx->type == PIPE_SHADER_VERTEX ||
1474 ctx->type == PIPE_SHADER_TESS_EVAL)) {
1475 unsigned num_user_sgprs, num_vgprs;
1476
1477 if (ctx->type == PIPE_SHADER_VERTEX) {
1478 /* For the NGG cull shader, add 1 SGPR to hold
1479 * the vertex buffer pointer.
1480 */
1481 num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR + ngg_cull_shader;
1482
1483 if (ngg_cull_shader && shader->selector->num_vbos_in_user_sgprs) {
1484 assert(num_user_sgprs <= 8 + SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
1485 num_user_sgprs = SI_SGPR_VS_VB_DESCRIPTOR_FIRST +
1486 shader->selector->num_vbos_in_user_sgprs * 4;
1487 }
1488 } else {
1489 num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
1490 }
1491
1492 /* The NGG cull shader has to return all 9 VGPRs + the old thread ID.
1493 *
1494 * The normal merged ESGS shader only has to return the 5 VGPRs
1495 * for the GS stage.
1496 */
1497 num_vgprs = ngg_cull_shader ? 10 : 5;
1498
1499 /* ES return values are inputs to GS. */
1500 for (i = 0; i < 8 + num_user_sgprs; i++)
1501 returns[num_returns++] = ctx->ac.i32; /* SGPRs */
1502 for (i = 0; i < num_vgprs; i++)
1503 returns[num_returns++] = ctx->ac.f32; /* VGPRs */
1504 }
1505 break;
1506
1507 case PIPE_SHADER_TESS_EVAL:
1508 declare_global_desc_pointers(ctx);
1509 declare_per_stage_desc_pointers(ctx, true);
1510 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
1511 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
1512 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
1513
1514 if (shader->key.as_es) {
1515 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
1516 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1517 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->es2gs_offset);
1518 } else {
1519 declare_streamout_params(ctx, &shader->selector->so);
1520 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
1521 }
1522
1523 /* VGPRs */
1524 declare_tes_input_vgprs(ctx, ngg_cull_shader);
1525 break;
1526
1527 case PIPE_SHADER_GEOMETRY:
1528 declare_global_desc_pointers(ctx);
1529 declare_per_stage_desc_pointers(ctx, true);
1530 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
1531 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_wave_id);
1532
1533 /* VGPRs */
1534 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[0]);
1535 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[1]);
1536 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
1537 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[2]);
1538 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[3]);
1539 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[4]);
1540 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[5]);
1541 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
1542 break;
1543
1544 case PIPE_SHADER_FRAGMENT:
1545 declare_global_desc_pointers(ctx);
1546 declare_per_stage_desc_pointers(ctx, true);
1547 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL,
1548 SI_PARAM_ALPHA_REF);
1549 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
1550 &ctx->args.prim_mask, SI_PARAM_PRIM_MASK);
1551
1552 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_sample,
1553 SI_PARAM_PERSP_SAMPLE);
1554 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
1555 &ctx->args.persp_center, SI_PARAM_PERSP_CENTER);
1556 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
1557 &ctx->args.persp_centroid, SI_PARAM_PERSP_CENTROID);
1558 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
1559 NULL, SI_PARAM_PERSP_PULL_MODEL);
1560 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
1561 &ctx->args.linear_sample, SI_PARAM_LINEAR_SAMPLE);
1562 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
1563 &ctx->args.linear_center, SI_PARAM_LINEAR_CENTER);
1564 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
1565 &ctx->args.linear_centroid, SI_PARAM_LINEAR_CENTROID);
1566 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_FLOAT,
1567 NULL, SI_PARAM_LINE_STIPPLE_TEX);
1568 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
1569 &ctx->args.frag_pos[0], SI_PARAM_POS_X_FLOAT);
1570 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
1571 &ctx->args.frag_pos[1], SI_PARAM_POS_Y_FLOAT);
1572 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
1573 &ctx->args.frag_pos[2], SI_PARAM_POS_Z_FLOAT);
1574 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
1575 &ctx->args.frag_pos[3], SI_PARAM_POS_W_FLOAT);
1576 shader->info.face_vgpr_index = ctx->args.num_vgprs_used;
1577 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
1578 &ctx->args.front_face, SI_PARAM_FRONT_FACE);
1579 shader->info.ancillary_vgpr_index = ctx->args.num_vgprs_used;
1580 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
1581 &ctx->args.ancillary, SI_PARAM_ANCILLARY);
1582 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
1583 &ctx->args.sample_coverage, SI_PARAM_SAMPLE_COVERAGE);
1584 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
1585 &ctx->pos_fixed_pt, SI_PARAM_POS_FIXED_PT);
1586
1587 /* Color inputs from the prolog. */
1588 if (shader->selector->info.colors_read) {
1589 unsigned num_color_elements =
1590 util_bitcount(shader->selector->info.colors_read);
1591
1592 for (i = 0; i < num_color_elements; i++)
1593 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, NULL);
1594
1595 num_prolog_vgprs += num_color_elements;
1596 }
1597
1598 /* Outputs for the epilog. */
1599 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
1600 num_returns =
1601 num_return_sgprs +
1602 util_bitcount(shader->selector->info.colors_written) * 4 +
1603 shader->selector->info.writes_z +
1604 shader->selector->info.writes_stencil +
1605 shader->selector->info.writes_samplemask +
1606 1 /* SampleMaskIn */;
1607
1608 num_returns = MAX2(num_returns,
1609 num_return_sgprs +
1610 PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
1611
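/* Illustrative count: a PS writing two color outputs plus depth returns
 * num_return_sgprs + 2 * 4 + 1 + 1 (SampleMaskIn) values; the MAX2 above
 * still reserves slots up to the fixed sample-mask location that the
 * epilog expects even when the shader writes very little.
 */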
1612 for (i = 0; i < num_return_sgprs; i++)
1613 returns[i] = ctx->ac.i32;
1614 for (; i < num_returns; i++)
1615 returns[i] = ctx->ac.f32;
1616 break;
1617
1618 case PIPE_SHADER_COMPUTE:
1619 declare_global_desc_pointers(ctx);
1620 declare_per_stage_desc_pointers(ctx, true);
1621 if (shader->selector->info.uses_grid_size)
1622 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT,
1623 &ctx->args.num_work_groups);
1624 if (shader->selector->info.uses_block_size &&
1625 shader->selector->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
1626 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT, &ctx->block_size);
1627
1628 unsigned cs_user_data_dwords =
1629 shader->selector->info.properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD];
1630 if (cs_user_data_dwords) {
1631 ac_add_arg(&ctx->args, AC_ARG_SGPR, cs_user_data_dwords, AC_ARG_INT,
1632 &ctx->cs_user_data);
1633 }
1634
1635 /* Hardware SGPRs. */
1636 for (i = 0; i < 3; i++) {
1637 if (shader->selector->info.uses_block_id[i]) {
1638 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
1639 &ctx->args.workgroup_ids[i]);
1640 }
1641 }
1642 if (shader->selector->info.uses_subgroup_info)
1643 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tg_size);
1644
1645 /* Hardware VGPRs. */
1646 ac_add_arg(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
1647 &ctx->args.local_invocation_ids);
1648 break;
1649 default:
1650 assert(0 && "unimplemented shader");
1651 return;
1652 }
1653
1654 si_llvm_create_func(ctx, ngg_cull_shader ? "ngg_cull_main" : "main",
1655 returns, num_returns, si_get_max_workgroup_size(shader));
1656
1657 /* Reserve register locations for VGPR inputs the PS prolog may need. */
1658 if (ctx->type == PIPE_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
1659 ac_llvm_add_target_dep_function_attr(ctx->main_fn,
1660 "InitialPSInputAddr",
1661 S_0286D0_PERSP_SAMPLE_ENA(1) |
1662 S_0286D0_PERSP_CENTER_ENA(1) |
1663 S_0286D0_PERSP_CENTROID_ENA(1) |
1664 S_0286D0_LINEAR_SAMPLE_ENA(1) |
1665 S_0286D0_LINEAR_CENTER_ENA(1) |
1666 S_0286D0_LINEAR_CENTROID_ENA(1) |
1667 S_0286D0_FRONT_FACE_ENA(1) |
1668 S_0286D0_ANCILLARY_ENA(1) |
1669 S_0286D0_POS_FIXED_PT_ENA(1));
1670 }
1671
1672 shader->info.num_input_sgprs = ctx->args.num_sgprs_used;
1673 shader->info.num_input_vgprs = ctx->args.num_vgprs_used;
1674
1675 assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
1676 shader->info.num_input_vgprs -= num_prolog_vgprs;
1677
1678 if (shader->key.as_ls || ctx->type == PIPE_SHADER_TESS_CTRL) {
1679 if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
1680 /* The LSHS size is not known until draw time, so we append it
1681 * at the end of whatever LDS use there may be in the rest of
1682 * the shader (currently none, unless LLVM decides to do its
1683 * own LDS-based lowering).
1684 */
1685 ctx->ac.lds = LLVMAddGlobalInAddressSpace(
1686 ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0),
1687 "__lds_end", AC_ADDR_SPACE_LDS);
1688 LLVMSetAlignment(ctx->ac.lds, 256);
1689 } else {
1690 ac_declare_lds_as_pointer(&ctx->ac);
1691 }
1692 }
1693
1694 /* Unlike radv, we override these arguments in the prolog, so they
1695 * appear to the API shader as normal arguments.
1696 */
1697 if (ctx->type == PIPE_SHADER_VERTEX) {
1698 ctx->abi.vertex_id = ac_get_arg(&ctx->ac, ctx->args.vertex_id);
1699 ctx->abi.instance_id = ac_get_arg(&ctx->ac, ctx->args.instance_id);
1700 } else if (ctx->type == PIPE_SHADER_FRAGMENT) {
1701 ctx->abi.persp_centroid = ac_get_arg(&ctx->ac, ctx->args.persp_centroid);
1702 ctx->abi.linear_centroid = ac_get_arg(&ctx->ac, ctx->args.linear_centroid);
1703 }
1704 }
1705
1706 /* For the UMR disassembler. */
1707 #define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
1708 #define DEBUGGER_NUM_MARKERS 5
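/* A minimal sketch (hypothetical, not the driver's actual upload path) of
 * what the markers are for: a few copies of the invalid opcode are appended
 * after the last real instruction, e.g.
 *
 *    uint32_t *end = code + code_size / 4;
 *    for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
 *       *end++ = DEBUGGER_END_OF_CODE_MARKER;
 *
 * so the disassembler can stop when it reaches the marker run instead of
 * decoding past the end of the shader.
 */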
1709
1710 static bool si_shader_binary_open(struct si_screen *screen,
1711 struct si_shader *shader,
1712 struct ac_rtld_binary *rtld)
1713 {
1714 const struct si_shader_selector *sel = shader->selector;
1715 const char *part_elfs[5];
1716 size_t part_sizes[5];
1717 unsigned num_parts = 0;
1718
1719 #define add_part(shader_or_part) \
1720 if (shader_or_part) { \
1721 part_elfs[num_parts] = (shader_or_part)->binary.elf_buffer; \
1722 part_sizes[num_parts] = (shader_or_part)->binary.elf_size; \
1723 num_parts++; \
1724 }
1725
1726 add_part(shader->prolog);
1727 add_part(shader->previous_stage);
1728 add_part(shader->prolog2);
1729 add_part(shader);
1730 add_part(shader->epilog);
1731
1732 #undef add_part
1733
1734 struct ac_rtld_symbol lds_symbols[2];
1735 unsigned num_lds_symbols = 0;
1736
1737 if (sel && screen->info.chip_class >= GFX9 && !shader->is_gs_copy_shader &&
1738 (sel->type == PIPE_SHADER_GEOMETRY || shader->key.as_ngg)) {
1739 /* We add this symbol even on LLVM <= 8 to ensure that
1740 * shader->config.lds_size is set correctly below.
1741 */
1742 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
1743 sym->name = "esgs_ring";
1744 sym->size = shader->gs_info.esgs_ring_size;
1745 sym->align = 64 * 1024;
1746 }
1747
1748 if (shader->key.as_ngg && sel->type == PIPE_SHADER_GEOMETRY) {
1749 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
1750 sym->name = "ngg_emit";
1751 sym->size = shader->ngg.ngg_emit_size * 4;
1752 sym->align = 4;
1753 }
1754
1755 bool ok = ac_rtld_open(rtld, (struct ac_rtld_open_info){
1756 .info = &screen->info,
1757 .options = {
1758 .halt_at_entry = screen->options.halt_shaders,
1759 },
1760 .shader_type = tgsi_processor_to_shader_stage(sel->type),
1761 .wave_size = si_get_shader_wave_size(shader),
1762 .num_parts = num_parts,
1763 .elf_ptrs = part_elfs,
1764 .elf_sizes = part_sizes,
1765 .num_shared_lds_symbols = num_lds_symbols,
1766 .shared_lds_symbols = lds_symbols });
1767
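/* The register field programmed below is in allocation units, not bytes.
 * A worked example with illustrative numbers: on GFX9 (512-byte
 * granularity), rtld->lds_size = 5000 bytes is aligned up to 5120 and
 * then divided down, giving config.lds_size = 5120 / 512 = 10 units.
 */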
1768 if (rtld->lds_size > 0) {
1769 unsigned alloc_granularity = screen->info.chip_class >= GFX7 ? 512 : 256;
1770 shader->config.lds_size =
1771 align(rtld->lds_size, alloc_granularity) / alloc_granularity;
1772 }
1773
1774 return ok;
1775 }
1776
1777 static unsigned si_get_shader_binary_size(struct si_screen *screen, struct si_shader *shader)
1778 {
1779 struct ac_rtld_binary rtld;
1780 si_shader_binary_open(screen, shader, &rtld);
1781 unsigned size = rtld.exec_size;
ac_rtld_close(&rtld); /* only the size was needed; don't leak the open binary */
return size;
1782 }
1783
1784 static bool si_get_external_symbol(void *data, const char *name, uint64_t *value)
1785 {
1786 uint64_t *scratch_va = data;
1787
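/* Worked example with an illustrative address: for
 * scratch_va = 0x0000012345600000 the two symbols resolve to
 *
 *    SCRATCH_RSRC_DWORD0 = 0x45600000   (low 32 bits of the VA)
 *    SCRATCH_RSRC_DWORD1 = S_008F04_BASE_ADDRESS_HI(0x123) |
 *                          S_008F04_SWIZZLE_ENABLE(1)
 *
 * i.e. the buffer resource points at the scratch buffer with swizzling
 * enabled so that per-lane scratch accesses can coalesce.
 */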
1788 if (!strcmp(scratch_rsrc_dword0_symbol, name)) {
1789 *value = (uint32_t)*scratch_va;
1790 return true;
1791 }
1792 if (!strcmp(scratch_rsrc_dword1_symbol, name)) {
1793 /* Enable scratch coalescing. */
1794 *value = S_008F04_BASE_ADDRESS_HI(*scratch_va >> 32) |
1795 S_008F04_SWIZZLE_ENABLE(1);
1796 return true;
1797 }
1798
1799 return false;
1800 }
1801
1802 bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader,
1803 uint64_t scratch_va)
1804 {
1805 struct ac_rtld_binary binary;
1806 if (!si_shader_binary_open(sscreen, shader, &binary))
1807 return false;
1808
1809 si_resource_reference(&shader->bo, NULL);
1810 shader->bo = si_aligned_buffer_create(&sscreen->b,
1811 sscreen->info.cpdma_prefetch_writes_memory ?
1812 0 : SI_RESOURCE_FLAG_READ_ONLY,
1813 PIPE_USAGE_IMMUTABLE,
1814 align(binary.rx_size, SI_CPDMA_ALIGNMENT),
1815 256);
1816 if (!shader->bo)
1817 return false;
1818
1819 /* Upload. */
1820 struct ac_rtld_upload_info u = {};
1821 u.binary = &binary;
1822 u.get_external_symbol = si_get_external_symbol;
1823 u.cb_data = &scratch_va;
1824 u.rx_va = shader->bo->gpu_address;
1825 u.rx_ptr = sscreen->ws->buffer_map(shader->bo->buf, NULL,
1826 PIPE_TRANSFER_READ_WRITE |
1827 PIPE_TRANSFER_UNSYNCHRONIZED |
1828 RADEON_TRANSFER_TEMPORARY);
1829 if (!u.rx_ptr)
1830 return false;
1831
1832 bool ok = ac_rtld_upload(&u);
1833
1834 sscreen->ws->buffer_unmap(shader->bo->buf);
1835 ac_rtld_close(&binary);
1836
1837 return ok;
1838 }
1839
1840 static void si_shader_dump_disassembly(struct si_screen *screen,
1841 const struct si_shader_binary *binary,
1842 enum pipe_shader_type shader_type,
1843 unsigned wave_size,
1844 struct pipe_debug_callback *debug,
1845 const char *name, FILE *file)
1846 {
1847 struct ac_rtld_binary rtld_binary;
1848
1849 if (!ac_rtld_open(&rtld_binary, (struct ac_rtld_open_info){
1850 .info = &screen->info,
1851 .shader_type = tgsi_processor_to_shader_stage(shader_type),
1852 .wave_size = wave_size,
1853 .num_parts = 1,
1854 .elf_ptrs = &binary->elf_buffer,
1855 .elf_sizes = &binary->elf_size }))
1856 return;
1857
1858 const char *disasm;
1859 size_t nbytes;
1860
1861 if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm, &nbytes))
1862 goto out;
1863
1864 if (nbytes > INT_MAX)
1865 goto out;
1866
1867 if (debug && debug->debug_message) {
1868 /* Very long debug messages are cut off, so send the
1869 * disassembly one line at a time. This causes more
1870 * overhead, but on the plus side it simplifies
1871 * parsing of resulting logs.
1872 */
1873 pipe_debug_message(debug, SHADER_INFO,
1874 "Shader Disassembly Begin");
1875
1876 uint64_t line = 0;
1877 while (line < nbytes) {
1878 int count = nbytes - line;
1879 const char *nl = memchr(disasm + line, '\n', nbytes - line);
1880 if (nl)
1881 count = nl - (disasm + line);
1882
1883 if (count) {
1884 pipe_debug_message(debug, SHADER_INFO,
1885 "%.*s", count, disasm + line);
1886 }
1887
1888 line += count + 1;
1889 }
1890
1891 pipe_debug_message(debug, SHADER_INFO,
1892 "Shader Disassembly End");
1893 }
1894
1895 if (file) {
1896 fprintf(file, "Shader %s disassembly:\n", name);
1897 fprintf(file, "%.*s", (int)nbytes, disasm); /* disasm may not be NUL-terminated */
1898 }
1899
1900 out:
1901 ac_rtld_close(&rtld_binary);
1902 }
1903
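/* Estimate how many waves of this shader can be resident on one SIMD.
 * A worked example with illustrative (not chip-exact) limits: with 10
 * waves, 800 SGPRs and 256 Wave64 VGPRs per SIMD and 16KB of LDS per
 * SIMD, a shader using 100 SGPRs, 64 VGPRs and 6KB of LDS per wave gets
 *
 *    min(10, 800 / 100, 256 / 64, 16384 / 6144) = min(10, 8, 4, 2) = 2
 *
 * waves per SIMD; the scarcest resource decides occupancy.
 */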
1904 static void si_calculate_max_simd_waves(struct si_shader *shader)
1905 {
1906 struct si_screen *sscreen = shader->selector->screen;
1907 struct ac_shader_config *conf = &shader->config;
1908 unsigned num_inputs = shader->selector->info.num_inputs;
1909 unsigned lds_increment = sscreen->info.chip_class >= GFX7 ? 512 : 256;
1910 unsigned lds_per_wave = 0;
1911 unsigned max_simd_waves;
1912
1913 max_simd_waves = sscreen->info.max_wave64_per_simd;
1914
1915 /* Compute LDS usage for PS. */
1916 switch (shader->selector->type) {
1917 case PIPE_SHADER_FRAGMENT:
1918 /* The minimum usage per wave is (num_inputs * 48). The maximum
1919 * usage is (num_inputs * 48 * 16).
1920 * We can get anything in between and it varies between waves.
1921 *
1922 * The 48 bytes per input for a single primitive is equal to
1923 * 4 bytes/component * 4 components/input * 3 points.
1924 *
1925 * Other stages either don't know the size at compile time or
1926 * allocate LDS per thread group rather than per wave.
1927 */
1928 lds_per_wave = conf->lds_size * lds_increment +
1929 align(num_inputs * 48, lds_increment);
1930 break;
1931 case PIPE_SHADER_COMPUTE:
1932 if (shader->selector) {
1933 unsigned max_workgroup_size =
1934 si_get_max_workgroup_size(shader);
1935 lds_per_wave = (conf->lds_size * lds_increment) /
1936 DIV_ROUND_UP(max_workgroup_size,
1937 sscreen->compute_wave_size);
1938 }
1939 break;
1940 default:;
1941 }
1942
1943 /* Compute the per-SIMD wave counts. */
1944 if (conf->num_sgprs) {
1945 max_simd_waves =
1946 MIN2(max_simd_waves,
1947 sscreen->info.num_physical_sgprs_per_simd / conf->num_sgprs);
1948 }
1949
1950 if (conf->num_vgprs) {
1951 /* Always print wave limits as Wave64, so that we can compare
1952 * Wave32 and Wave64 with shader-db fairly. */
1953 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd;
1954 max_simd_waves = MIN2(max_simd_waves, max_vgprs / conf->num_vgprs);
1955 }
1956
1957 /* LDS is 64KB per CU (4 SIMDs) on GFX6-9, which is 16KB per SIMD (usage above
1958 * 16KB makes some SIMDs unoccupied).
1959 *
1960 * On GFX10, LDS is 128KB in WGP mode and 64KB in CU mode. Assume the WGP mode is used.
1961 */
1962 unsigned max_lds_size = sscreen->info.chip_class >= GFX10 ? 128*1024 : 64*1024;
1963 unsigned max_lds_per_simd = max_lds_size / 4;
1964 if (lds_per_wave)
1965 max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);
1966
1967 shader->info.max_simd_waves = max_simd_waves;
1968 }
1969
1970 void si_shader_dump_stats_for_shader_db(struct si_screen *screen,
1971 struct si_shader *shader,
1972 struct pipe_debug_callback *debug)
1973 {
1974 const struct ac_shader_config *conf = &shader->config;
1975
1976 if (screen->options.debug_disassembly)
1977 si_shader_dump_disassembly(screen, &shader->binary,
1978 shader->selector->type,
1979 si_get_shader_wave_size(shader),
1980 debug, "main", NULL);
1981
1982 pipe_debug_message(debug, SHADER_INFO,
1983 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
1984 "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
1985 "Spilled VGPRs: %d PrivMem VGPRs: %d",
1986 conf->num_sgprs, conf->num_vgprs,
1987 si_get_shader_binary_size(screen, shader),
1988 conf->lds_size, conf->scratch_bytes_per_wave,
1989 shader->info.max_simd_waves, conf->spilled_sgprs,
1990 conf->spilled_vgprs, shader->info.private_mem_vgprs);
1991 }
1992
1993 static void si_shader_dump_stats(struct si_screen *sscreen,
1994 struct si_shader *shader,
1995 FILE *file,
1996 bool check_debug_option)
1997 {
1998 const struct ac_shader_config *conf = &shader->config;
1999
2000 if (!check_debug_option ||
2001 si_can_dump_shader(sscreen, shader->selector->type)) {
2002 if (shader->selector->type == PIPE_SHADER_FRAGMENT) {
2003 fprintf(file, "*** SHADER CONFIG ***\n"
2004 "SPI_PS_INPUT_ADDR = 0x%04x\n"
2005 "SPI_PS_INPUT_ENA = 0x%04x\n",
2006 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
2007 }
2008
2009 fprintf(file, "*** SHADER STATS ***\n"
2010 "SGPRS: %d\n"
2011 "VGPRS: %d\n"
2012 "Spilled SGPRs: %d\n"
2013 "Spilled VGPRs: %d\n"
2014 "Private memory VGPRs: %d\n"
2015 "Code Size: %d bytes\n"
2016 "LDS: %d blocks\n"
2017 "Scratch: %d bytes per wave\n"
2018 "Max Waves: %d\n"
2019 "********************\n\n\n",
2020 conf->num_sgprs, conf->num_vgprs,
2021 conf->spilled_sgprs, conf->spilled_vgprs,
2022 shader->info.private_mem_vgprs,
2023 si_get_shader_binary_size(sscreen, shader),
2024 conf->lds_size, conf->scratch_bytes_per_wave,
2025 shader->info.max_simd_waves);
2026 }
2027 }
2028
2029 const char *si_get_shader_name(const struct si_shader *shader)
2030 {
2031 switch (shader->selector->type) {
2032 case PIPE_SHADER_VERTEX:
2033 if (shader->key.as_es)
2034 return "Vertex Shader as ES";
2035 else if (shader->key.as_ls)
2036 return "Vertex Shader as LS";
2037 else if (shader->key.opt.vs_as_prim_discard_cs)
2038 return "Vertex Shader as Primitive Discard CS";
2039 else if (shader->key.as_ngg)
2040 return "Vertex Shader as ESGS";
2041 else
2042 return "Vertex Shader as VS";
2043 case PIPE_SHADER_TESS_CTRL:
2044 return "Tessellation Control Shader";
2045 case PIPE_SHADER_TESS_EVAL:
2046 if (shader->key.as_es)
2047 return "Tessellation Evaluation Shader as ES";
2048 else if (shader->key.as_ngg)
2049 return "Tessellation Evaluation Shader as ESGS";
2050 else
2051 return "Tessellation Evaluation Shader as VS";
2052 case PIPE_SHADER_GEOMETRY:
2053 if (shader->is_gs_copy_shader)
2054 return "GS Copy Shader as VS";
2055 else
2056 return "Geometry Shader";
2057 case PIPE_SHADER_FRAGMENT:
2058 return "Pixel Shader";
2059 case PIPE_SHADER_COMPUTE:
2060 return "Compute Shader";
2061 default:
2062 return "Unknown Shader";
2063 }
2064 }
2065
2066 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
2067 struct pipe_debug_callback *debug,
2068 FILE *file, bool check_debug_option)
2069 {
2070 enum pipe_shader_type shader_type = shader->selector->type;
2071
2072 if (!check_debug_option ||
2073 si_can_dump_shader(sscreen, shader_type))
2074 si_dump_shader_key(shader, file);
2075
2076 if (!check_debug_option && shader->binary.llvm_ir_string) {
2077 if (shader->previous_stage &&
2078 shader->previous_stage->binary.llvm_ir_string) {
2079 fprintf(file, "\n%s - previous stage - LLVM IR:\n\n",
2080 si_get_shader_name(shader));
2081 fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
2082 }
2083
2084 fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
2085 si_get_shader_name(shader));
2086 fprintf(file, "%s\n", shader->binary.llvm_ir_string);
2087 }
2088
2089 if (!check_debug_option ||
2090 (si_can_dump_shader(sscreen, shader_type) &&
2091 !(sscreen->debug_flags & DBG(NO_ASM)))) {
2092 unsigned wave_size = si_get_shader_wave_size(shader);
2093
2094 fprintf(file, "\n%s:\n", si_get_shader_name(shader));
2095
2096 if (shader->prolog)
2097 si_shader_dump_disassembly(sscreen, &shader->prolog->binary,
2098 shader_type, wave_size, debug, "prolog", file);
2099 if (shader->previous_stage)
2100 si_shader_dump_disassembly(sscreen, &shader->previous_stage->binary,
2101 shader_type, wave_size, debug, "previous stage", file);
2102 if (shader->prolog2)
2103 si_shader_dump_disassembly(sscreen, &shader->prolog2->binary,
2104 shader_type, wave_size, debug, "prolog2", file);
2105
2106 si_shader_dump_disassembly(sscreen, &shader->binary, shader_type,
2107 wave_size, debug, "main", file);
2108
2109 if (shader->epilog)
2110 si_shader_dump_disassembly(sscreen, &shader->epilog->binary,
2111 shader_type, wave_size, debug, "epilog", file);
2112 fprintf(file, "\n");
2113 }
2114
2115 si_shader_dump_stats(sscreen, shader, file, check_debug_option);
2116 }
2117
2118 static void si_dump_shader_key_vs(const struct si_shader_key *key,
2119 const struct si_vs_prolog_bits *prolog,
2120 const char *prefix, FILE *f)
2121 {
2122 fprintf(f, " %s.instance_divisor_is_one = %u\n",
2123 prefix, prolog->instance_divisor_is_one);
2124 fprintf(f, " %s.instance_divisor_is_fetched = %u\n",
2125 prefix, prolog->instance_divisor_is_fetched);
2126 fprintf(f, " %s.unpack_instance_id_from_vertex_id = %u\n",
2127 prefix, prolog->unpack_instance_id_from_vertex_id);
2128 fprintf(f, " %s.ls_vgpr_fix = %u\n",
2129 prefix, prolog->ls_vgpr_fix);
2130
2131 fprintf(f, " mono.vs.fetch_opencode = %x\n", key->mono.vs_fetch_opencode);
2132 fprintf(f, " mono.vs.fix_fetch = {");
2133 for (int i = 0; i < SI_MAX_ATTRIBS; i++) {
2134 union si_vs_fix_fetch fix = key->mono.vs_fix_fetch[i];
2135 if (i)
2136 fprintf(f, ", ");
2137 if (!fix.bits)
2138 fprintf(f, "0");
2139 else
2140 fprintf(f, "%u.%u.%u.%u", fix.u.reverse, fix.u.log_size,
2141 fix.u.num_channels_m1, fix.u.format);
2142 }
2143 fprintf(f, "}\n");
2144 }
2145
2146 static void si_dump_shader_key(const struct si_shader *shader, FILE *f)
2147 {
2148 const struct si_shader_key *key = &shader->key;
2149 enum pipe_shader_type shader_type = shader->selector->type;
2150
2151 fprintf(f, "SHADER KEY\n");
2152
2153 switch (shader_type) {
2154 case PIPE_SHADER_VERTEX:
2155 si_dump_shader_key_vs(key, &key->part.vs.prolog,
2156 "part.vs.prolog", f);
2157 fprintf(f, " as_es = %u\n", key->as_es);
2158 fprintf(f, " as_ls = %u\n", key->as_ls);
2159 fprintf(f, " as_ngg = %u\n", key->as_ngg);
2160 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
2161 key->mono.u.vs_export_prim_id);
2162 fprintf(f, " opt.vs_as_prim_discard_cs = %u\n",
2163 key->opt.vs_as_prim_discard_cs);
2164 fprintf(f, " opt.cs_prim_type = %s\n",
2165 tgsi_primitive_names[key->opt.cs_prim_type]);
2166 fprintf(f, " opt.cs_indexed = %u\n",
2167 key->opt.cs_indexed);
2168 fprintf(f, " opt.cs_instancing = %u\n",
2169 key->opt.cs_instancing);
2170 fprintf(f, " opt.cs_primitive_restart = %u\n",
2171 key->opt.cs_primitive_restart);
2172 fprintf(f, " opt.cs_provoking_vertex_first = %u\n",
2173 key->opt.cs_provoking_vertex_first);
2174 fprintf(f, " opt.cs_need_correct_orientation = %u\n",
2175 key->opt.cs_need_correct_orientation);
2176 fprintf(f, " opt.cs_cull_front = %u\n",
2177 key->opt.cs_cull_front);
2178 fprintf(f, " opt.cs_cull_back = %u\n",
2179 key->opt.cs_cull_back);
2180 fprintf(f, " opt.cs_cull_z = %u\n",
2181 key->opt.cs_cull_z);
2182 fprintf(f, " opt.cs_halfz_clip_space = %u\n",
2183 key->opt.cs_halfz_clip_space);
2184 break;
2185
2186 case PIPE_SHADER_TESS_CTRL:
2187 if (shader->selector->screen->info.chip_class >= GFX9) {
2188 si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
2189 "part.tcs.ls_prolog", f);
2190 }
2191 fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
2192 fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
2193 break;
2194
2195 case PIPE_SHADER_TESS_EVAL:
2196 fprintf(f, " as_es = %u\n", key->as_es);
2197 fprintf(f, " as_ngg = %u\n", key->as_ngg);
2198 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
2199 key->mono.u.vs_export_prim_id);
2200 break;
2201
2202 case PIPE_SHADER_GEOMETRY:
2203 if (shader->is_gs_copy_shader)
2204 break;
2205
2206 if (shader->selector->screen->info.chip_class >= GFX9 &&
2207 key->part.gs.es->type == PIPE_SHADER_VERTEX) {
2208 si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
2209 "part.gs.vs_prolog", f);
2210 }
2211 fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
2212 fprintf(f, " part.gs.prolog.gfx9_prev_is_vs = %u\n", key->part.gs.prolog.gfx9_prev_is_vs);
2213 fprintf(f, " as_ngg = %u\n", key->as_ngg);
2214 break;
2215
2216 case PIPE_SHADER_COMPUTE:
2217 break;
2218
2219 case PIPE_SHADER_FRAGMENT:
2220 fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
2221 fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
2222 fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
2223 fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
2224 fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
2225 fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
2226 fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
2227 fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
2228 fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
2229 fprintf(f, " part.ps.prolog.samplemask_log_ps_iter = %u\n", key->part.ps.prolog.samplemask_log_ps_iter);
2230 fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
2231 fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
2232 fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
2233 fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
2234 fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
2235 fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
2236 fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
2237 fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
2238 fprintf(f, " mono.u.ps.interpolate_at_sample_force_center = %u\n", key->mono.u.ps.interpolate_at_sample_force_center);
2239 fprintf(f, " mono.u.ps.fbfetch_msaa = %u\n", key->mono.u.ps.fbfetch_msaa);
2240 fprintf(f, " mono.u.ps.fbfetch_is_1D = %u\n", key->mono.u.ps.fbfetch_is_1D);
2241 fprintf(f, " mono.u.ps.fbfetch_layered = %u\n", key->mono.u.ps.fbfetch_layered);
2242 break;
2243
2244 default:
2245 assert(0);
2246 }
2247
2248 if ((shader_type == PIPE_SHADER_GEOMETRY ||
2249 shader_type == PIPE_SHADER_TESS_EVAL ||
2250 shader_type == PIPE_SHADER_VERTEX) &&
2251 !key->as_es && !key->as_ls) {
2252 fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
2253 fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
2254 if (shader_type != PIPE_SHADER_GEOMETRY)
2255 fprintf(f, " opt.ngg_culling = 0x%x\n", key->opt.ngg_culling);
2256 }
2257 }
2258
2259 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
2260 {
2261 struct si_shader *shader = ctx->shader;
2262 struct si_shader_info *info = &shader->selector->info;
2263
2264 if ((ctx->type != PIPE_SHADER_VERTEX &&
2265 ctx->type != PIPE_SHADER_TESS_EVAL) ||
2266 shader->key.as_ls ||
2267 shader->key.as_es)
2268 return;
2269
2270 ac_optimize_vs_outputs(&ctx->ac,
2271 ctx->main_fn,
2272 shader->info.vs_output_param_offset,
2273 info->num_outputs,
2274 &shader->info.nr_param_exports);
2275 }
2276
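/* Set EXEC from a thread count packed into an input SGPR. The intrinsic
 * reads the count starting at the given bit offset and enables that many
 * leading lanes, roughly EXEC = (1ull << count) - 1. In this file it is
 * fed merged_wave_info at bit offset 0, whose low bits hold the thread
 * count of the first stage of a merged shader.
 */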
2277 static void si_init_exec_from_input(struct si_shader_context *ctx,
2278 struct ac_arg param, unsigned bitoffset)
2279 {
2280 LLVMValueRef args[] = {
2281 ac_get_arg(&ctx->ac, param),
2282 LLVMConstInt(ctx->ac.i32, bitoffset, 0),
2283 };
2284 ac_build_intrinsic(&ctx->ac,
2285 "llvm.amdgcn.init.exec.from.input",
2286 ctx->ac.voidt, args, 2, AC_FUNC_ATTR_CONVERGENT);
2287 }
2288
2289 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
2290 const struct si_vs_prolog_bits *prolog_key,
2291 const struct si_shader_key *key,
2292 bool ngg_cull_shader)
2293 {
2294 /* VGPR initialization fixup for Vega10 and Raven is always done in the
2295 * VS prolog. */
2296 return sel->vs_needs_prolog ||
2297 prolog_key->ls_vgpr_fix ||
2298 prolog_key->unpack_instance_id_from_vertex_id ||
2299 (ngg_cull_shader && key->opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_ALL);
2300 }
2301
2302 static bool si_build_main_function(struct si_shader_context *ctx,
2303 struct nir_shader *nir, bool free_nir,
2304 bool ngg_cull_shader)
2305 {
2306 struct si_shader *shader = ctx->shader;
2307 struct si_shader_selector *sel = shader->selector;
2308
2309 si_llvm_init_resource_callbacks(ctx);
2310
2311 switch (ctx->type) {
2312 case PIPE_SHADER_VERTEX:
2313 if (shader->key.as_ls)
2314 ctx->abi.emit_outputs = si_llvm_emit_ls_epilogue;
2315 else if (shader->key.as_es)
2316 ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
2317 else if (shader->key.opt.vs_as_prim_discard_cs)
2318 ctx->abi.emit_outputs = si_llvm_emit_prim_discard_cs_epilogue;
2319 else if (ngg_cull_shader)
2320 ctx->abi.emit_outputs = gfx10_emit_ngg_culling_epilogue_4x_wave32;
2321 else if (shader->key.as_ngg)
2322 ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
2323 else
2324 ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
2325 ctx->abi.load_base_vertex = get_base_vertex;
2326 break;
2327 case PIPE_SHADER_TESS_CTRL:
2328 si_llvm_init_tcs_callbacks(ctx);
2329 break;
2330 case PIPE_SHADER_TESS_EVAL:
2331 si_llvm_init_tes_callbacks(ctx);
2332
2333 if (shader->key.as_es)
2334 ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
2335 else if (ngg_cull_shader)
2336 ctx->abi.emit_outputs = gfx10_emit_ngg_culling_epilogue_4x_wave32;
2337 else if (shader->key.as_ngg)
2338 ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
2339 else
2340 ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
2341 break;
2342 case PIPE_SHADER_GEOMETRY:
2343 si_llvm_init_gs_callbacks(ctx);
2344 break;
2345 case PIPE_SHADER_FRAGMENT:
2346 si_llvm_init_ps_callbacks(ctx);
2347 break;
2348 case PIPE_SHADER_COMPUTE:
2349 ctx->abi.load_local_group_size = get_block_size;
2350 break;
2351 default:
2352 assert(!"Unsupported shader type");
2353 return false;
2354 }
2355
2356 si_create_function(ctx, ngg_cull_shader);
2357
2358 if (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)
2359 si_preload_esgs_ring(ctx);
2360
2361 if (ctx->type == PIPE_SHADER_GEOMETRY)
2362 si_preload_gs_rings(ctx);
2363 else if (ctx->type == PIPE_SHADER_TESS_EVAL)
2364 si_llvm_preload_tes_rings(ctx);
2365
2366 if (ctx->type == PIPE_SHADER_TESS_CTRL &&
2367 sel->info.tessfactors_are_def_in_all_invocs) {
2368 for (unsigned i = 0; i < 6; i++) {
2369 ctx->invoc0_tess_factors[i] =
2370 ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
2371 }
2372 }
2373
2374 if (ctx->type == PIPE_SHADER_GEOMETRY) {
2375 for (unsigned i = 0; i < 4; i++) {
2376 ctx->gs_next_vertex[i] =
2377 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
2378 }
2379 if (shader->key.as_ngg) {
2380 for (unsigned i = 0; i < 4; ++i) {
2381 ctx->gs_curprim_verts[i] =
2382 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
2383 ctx->gs_generated_prims[i] =
2384 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
2385 }
2386
2387 unsigned scratch_size = 8;
2388 if (sel->so.num_outputs)
2389 scratch_size = 44;
2390
2391 assert(!ctx->gs_ngg_scratch);
2392 LLVMTypeRef ai32 = LLVMArrayType(ctx->ac.i32, scratch_size);
2393 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
2394 ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
2395 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(ai32));
2396 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
2397
2398 ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(ctx->ac.module,
2399 LLVMArrayType(ctx->ac.i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
2400 LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
2401 LLVMSetAlignment(ctx->gs_ngg_emit, 4);
2402 }
2403 }
2404
2405 if (ctx->type != PIPE_SHADER_GEOMETRY &&
2406 (shader->key.as_ngg && !shader->key.as_es)) {
2407 /* Unconditionally declare scratch space base for streamout and
2408 * vertex compaction. Whether space is actually allocated is
2409 * determined during linking / PM4 creation.
2410 *
2411 * Add an extra dword per vertex to ensure an odd stride, which
2412 * avoids bank conflicts for SoA accesses.
2413 */
2414 if (!gfx10_is_ngg_passthrough(shader))
2415 si_llvm_declare_esgs_ring(ctx);
2416
2417 /* This is really only needed when streamout and/or vertex
2418 * compaction is enabled.
2419 */
2420 if (!ctx->gs_ngg_scratch &&
2421 (sel->so.num_outputs || shader->key.opt.ngg_culling)) {
2422 LLVMTypeRef asi32 = LLVMArrayType(ctx->ac.i32, 8);
2423 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
2424 asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
2425 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(asi32));
2426 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
2427 }
2428 }
2429
2430 /* For GFX9 merged shaders:
2431 * - Set EXEC for the first shader. If the prolog is present, set
2432 * EXEC there instead.
2433 * - Add a barrier before the second shader.
2434 * - In the second shader, reset EXEC to ~0 and wrap the main part in
2435 * an if-statement. This is required for correctness in geometry
2436 * shaders, to ensure that empty GS waves do not send GS_EMIT and
2437 * GS_CUT messages.
2438 *
2439 * For monolithic merged shaders, the first shader is wrapped in an
2440 * if-block together with its prolog in si_build_wrapper_function.
2441 *
2442 * NGG vertex and tess eval shaders running as the last
2443 * vertex/geometry stage handle execution explicitly using
2444 * if-statements.
2445 */
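/* In rough pseudocode (an illustrative summary of the cases handled
 * below, not additional behavior):
 *
 *    if (first-stage part, non-monolithic, no prolog)
 *       EXEC = thread count taken from merged_wave_info;
 *    else if (second-stage part, or NGG VS/TES as the last stage)
 *       EXEC = ~0;
 *       if (this thread is enabled) {   // merged_wrap_if
 *          optional barrier;
 *          ... main shader body ...
 *       }
 */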
2446 if (ctx->screen->info.chip_class >= GFX9) {
2447 if (!shader->is_monolithic &&
2448 (shader->key.as_es || shader->key.as_ls) &&
2449 (ctx->type == PIPE_SHADER_TESS_EVAL ||
2450 (ctx->type == PIPE_SHADER_VERTEX &&
2451 !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog,
2452 &shader->key, ngg_cull_shader)))) {
2453 si_init_exec_from_input(ctx,
2454 ctx->merged_wave_info, 0);
2455 } else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
2456 ctx->type == PIPE_SHADER_GEOMETRY ||
2457 (shader->key.as_ngg && !shader->key.as_es)) {
2458 LLVMValueRef thread_enabled;
2459 bool nested_barrier;
2460
2461 if (!shader->is_monolithic ||
2462 (ctx->type == PIPE_SHADER_TESS_EVAL &&
2463 shader->key.as_ngg && !shader->key.as_es &&
2464 !shader->key.opt.ngg_culling))
2465 ac_init_exec_full_mask(&ctx->ac);
2466
2467 if ((ctx->type == PIPE_SHADER_VERTEX ||
2468 ctx->type == PIPE_SHADER_TESS_EVAL) &&
2469 shader->key.as_ngg && !shader->key.as_es &&
2470 !shader->key.opt.ngg_culling) {
2471 gfx10_ngg_build_sendmsg_gs_alloc_req(ctx);
2472
2473 /* Build the primitive export at the beginning
2474 * of the shader if possible.
2475 */
2476 if (gfx10_ngg_export_prim_early(shader))
2477 gfx10_ngg_build_export_prim(ctx, NULL, NULL);
2478 }
2479
2480 if (ctx->type == PIPE_SHADER_TESS_CTRL ||
2481 ctx->type == PIPE_SHADER_GEOMETRY) {
2482 if (ctx->type == PIPE_SHADER_GEOMETRY && shader->key.as_ngg) {
2483 gfx10_ngg_gs_emit_prologue(ctx);
2484 nested_barrier = false;
2485 } else {
2486 nested_barrier = true;
2487 }
2488
2489 thread_enabled = si_is_gs_thread(ctx);
2490 } else {
2491 thread_enabled = si_is_es_thread(ctx);
2492 nested_barrier = false;
2493 }
2494
2495 ctx->merged_wrap_if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
2496 ctx->merged_wrap_if_label = 11500;
2497 ac_build_ifcc(&ctx->ac, thread_enabled, ctx->merged_wrap_if_label);
2498
2499 if (nested_barrier) {
2500 /* Execute a barrier before the second shader in
2501 * a merged shader.
2502 *
2503 * Execute the barrier inside the conditional block,
2504 * so that empty waves can jump directly to s_endpgm,
2505 * which will also signal the barrier.
2506 *
2507 * This is possible in gfx9, because an empty wave
2508 * for the second shader does not participate in
2509 * the epilogue. With NGG, empty waves may still
2510 * be required to export data (e.g. GS output vertices),
2511 * so we cannot let them exit early.
2512 *
2513 * If the shader is TCS and the TCS epilog is present
2514 * and contains a barrier, it will wait there and then
2515 * reach s_endpgm.
2516 */
2517 si_llvm_emit_barrier(ctx);
2518 }
2519 }
2520 }
2521
2522 if (sel->force_correct_derivs_after_kill) {
2523 ctx->postponed_kill = ac_build_alloca_undef(&ctx->ac, ctx->ac.i1, "");
2524 /* true = don't kill. */
2525 LLVMBuildStore(ctx->ac.builder, ctx->ac.i1true,
2526 ctx->postponed_kill);
2527 }
2528
2529 bool success = si_nir_build_llvm(ctx, nir);
2530 if (free_nir)
2531 ralloc_free(nir);
2532 if (!success) {
2533 fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
2534 return false;
2535 }
2536
2537 si_llvm_build_ret(ctx, ctx->return_value);
2538 return true;
2539 }
2540
2541 /**
2542 * Compute the VS prolog key, which contains all the information needed to
2543 * build the VS prolog function, and set shader->info bits where needed.
2544 *
2545 * \param info Shader info of the vertex shader.
2546 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
2547 * \param ngg_cull_shader Whether the preceding shader part is the NGG cull shader.
2548 * \param prolog_key Key of the VS prolog.
2549 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
2550 * \param key Output shader part key.
2551 */
2552 static void si_get_vs_prolog_key(const struct si_shader_info *info,
2553 unsigned num_input_sgprs,
2554 bool ngg_cull_shader,
2555 const struct si_vs_prolog_bits *prolog_key,
2556 struct si_shader *shader_out,
2557 union si_shader_part_key *key)
2558 {
2559 memset(key, 0, sizeof(*key));
2560 key->vs_prolog.states = *prolog_key;
2561 key->vs_prolog.num_input_sgprs = num_input_sgprs;
2562 key->vs_prolog.num_inputs = info->num_inputs;
2563 key->vs_prolog.as_ls = shader_out->key.as_ls;
2564 key->vs_prolog.as_es = shader_out->key.as_es;
2565 key->vs_prolog.as_ngg = shader_out->key.as_ngg;
2566
2567 if (ngg_cull_shader) {
2568 key->vs_prolog.gs_fast_launch_tri_list = !!(shader_out->key.opt.ngg_culling &
2569 SI_NGG_CULL_GS_FAST_LAUNCH_TRI_LIST);
2570 key->vs_prolog.gs_fast_launch_tri_strip = !!(shader_out->key.opt.ngg_culling &
2571 SI_NGG_CULL_GS_FAST_LAUNCH_TRI_STRIP);
2572 } else {
2573 key->vs_prolog.has_ngg_cull_inputs = !!shader_out->key.opt.ngg_culling;
2574 }
2575
2576 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
2577 key->vs_prolog.as_ls = 1;
2578 key->vs_prolog.num_merged_next_stage_vgprs = 2;
2579 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
2580 key->vs_prolog.as_es = 1;
2581 key->vs_prolog.num_merged_next_stage_vgprs = 5;
2582 } else if (shader_out->key.as_ngg) {
2583 key->vs_prolog.num_merged_next_stage_vgprs = 5;
2584 }
2585
2586 /* Enable loading the InstanceID VGPR. */
2587 uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);
2588
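/* Illustrative example: with num_inputs = 3 the mask is 0b111, so
 * InstanceID becomes needed as soon as any of the first three vertex
 * attributes uses an instance divisor of one or fetches its divisor
 * from memory.
 */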
2589 if ((key->vs_prolog.states.instance_divisor_is_one |
2590 key->vs_prolog.states.instance_divisor_is_fetched) & input_mask)
2591 shader_out->info.uses_instanceid = true;
2592 }
2593
2594 /**
2595 * Given a list of shader part functions, build a wrapper function that
2596 * runs them in sequence to form a monolithic shader.
2597 */
2598 void si_build_wrapper_function(struct si_shader_context *ctx, LLVMValueRef *parts,
2599 unsigned num_parts, unsigned main_part,
2600 unsigned next_shader_first_part)
2601 {
2602 LLVMBuilderRef builder = ctx->ac.builder;
2603 /* PS epilog has one arg per color component; gfx9 merged shader
2604 * prologs need to forward 40 SGPRs.
2605 */
2606 LLVMValueRef initial[AC_MAX_ARGS], out[AC_MAX_ARGS];
2607 LLVMTypeRef function_type;
2608 unsigned num_first_params;
2609 unsigned num_out, initial_num_out;
2610 ASSERTED unsigned num_out_sgpr; /* used in debug checks */
2611 ASSERTED unsigned initial_num_out_sgpr; /* used in debug checks */
2612 unsigned num_sgprs, num_vgprs;
2613 unsigned gprs;
2614
2615 memset(&ctx->args, 0, sizeof(ctx->args));
2616
2617 for (unsigned i = 0; i < num_parts; ++i) {
2618 ac_add_function_attr(ctx->ac.context, parts[i], -1,
2619 AC_FUNC_ATTR_ALWAYSINLINE);
2620 LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
2621 }
2622
2623 /* The parameters of the wrapper function correspond to those of the
2624 * first part in terms of SGPRs and VGPRs, but we use the types of the
2625 * main part to get the right types. This is relevant for the
2626 * dereferenceable attribute on descriptor table pointers.
2627 */
2628 num_sgprs = 0;
2629 num_vgprs = 0;
2630
2631 function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
2632 num_first_params = LLVMCountParamTypes(function_type);
2633
2634 for (unsigned i = 0; i < num_first_params; ++i) {
2635 LLVMValueRef param = LLVMGetParam(parts[0], i);
2636
2637 if (ac_is_sgpr_param(param)) {
2638 assert(num_vgprs == 0);
2639 num_sgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
2640 } else {
2641 num_vgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
2642 }
2643 }
2644
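/* Illustrative tally: a first part taking six SGPR dwords followed by
 * three VGPR dwords yields num_sgprs = 6 and num_vgprs = 3; the loop
 * below then re-declares those nine dwords one argument at a time using
 * the main part's types, so descriptor-table pointers keep their
 * pointer-ness and the dereferenceable attribute.
 */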
2645 gprs = 0;
2646 while (gprs < num_sgprs + num_vgprs) {
2647 LLVMValueRef param = LLVMGetParam(parts[main_part], ctx->args.arg_count);
2648 LLVMTypeRef type = LLVMTypeOf(param);
2649 unsigned size = ac_get_type_size(type) / 4;
2650
2651 /* This is going to get cast anyway, so we don't have to
2652 * use the exact same type. But we do have to preserve the
2653 * pointer-ness so that LLVM knows about it.
2654 */
2655 enum ac_arg_type arg_type = AC_ARG_INT;
2656 if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) {
2657 type = LLVMGetElementType(type);
2658
2659 if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
2660 if (LLVMGetVectorSize(type) == 4)
2661 arg_type = AC_ARG_CONST_DESC_PTR;
2662 else if (LLVMGetVectorSize(type) == 8)
2663 arg_type = AC_ARG_CONST_IMAGE_PTR;
2664 else
2665 assert(0);
2666 } else if (type == ctx->ac.f32) {
2667 arg_type = AC_ARG_CONST_FLOAT_PTR;
2668 } else {
2669 assert(0);
2670 }
2671 }
2672
2673 ac_add_arg(&ctx->args, gprs < num_sgprs ? AC_ARG_SGPR : AC_ARG_VGPR,
2674 size, arg_type, NULL);
2675
2676 assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
2677 assert(gprs + size <= num_sgprs + num_vgprs &&
2678 (gprs >= num_sgprs || gprs + size <= num_sgprs));
2679
2680 gprs += size;
2681 }
2682
2683 /* Prepare the return type. */
2684 unsigned num_returns = 0;
2685 LLVMTypeRef returns[AC_MAX_ARGS], last_func_type, return_type;
2686
2687 last_func_type = LLVMGetElementType(LLVMTypeOf(parts[num_parts - 1]));
2688 return_type = LLVMGetReturnType(last_func_type);
2689
2690 switch (LLVMGetTypeKind(return_type)) {
2691 case LLVMStructTypeKind:
2692 num_returns = LLVMCountStructElementTypes(return_type);
2693 assert(num_returns <= ARRAY_SIZE(returns));
2694 LLVMGetStructElementTypes(return_type, returns);
2695 break;
2696 case LLVMVoidTypeKind:
2697 break;
2698 default:
2699 unreachable("unexpected type");
2700 }
2701
2702 si_llvm_create_func(ctx, "wrapper", returns, num_returns,
2703 si_get_max_workgroup_size(ctx->shader));
2704
2705 if (si_is_merged_shader(ctx))
2706 ac_init_exec_full_mask(&ctx->ac);
2707
2708 /* Record the arguments of the function as if they were an output of
2709 * a previous part.
2710 */
2711 num_out = 0;
2712 num_out_sgpr = 0;
2713
2714 for (unsigned i = 0; i < ctx->args.arg_count; ++i) {
2715 LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
2716 LLVMTypeRef param_type = LLVMTypeOf(param);
2717 LLVMTypeRef out_type = ctx->args.args[i].file == AC_ARG_SGPR ? ctx->ac.i32 : ctx->ac.f32;
2718 unsigned size = ac_get_type_size(param_type) / 4;
2719
2720 if (size == 1) {
2721 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
2722 param = LLVMBuildPtrToInt(builder, param, ctx->ac.i32, "");
2723 param_type = ctx->ac.i32;
2724 }
2725
2726 if (param_type != out_type)
2727 param = LLVMBuildBitCast(builder, param, out_type, "");
2728 out[num_out++] = param;
2729 } else {
2730 LLVMTypeRef vector_type = LLVMVectorType(out_type, size);
2731
2732 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
2733 param = LLVMBuildPtrToInt(builder, param, ctx->ac.i64, "");
2734 param_type = ctx->ac.i64;
2735 }
2736
2737 if (param_type != vector_type)
2738 param = LLVMBuildBitCast(builder, param, vector_type, "");
2739
2740 for (unsigned j = 0; j < size; ++j)
2741 out[num_out++] = LLVMBuildExtractElement(
2742 builder, param, LLVMConstInt(ctx->ac.i32, j, 0), "");
2743 }
2744
2745 if (ctx->args.args[i].file == AC_ARG_SGPR)
2746 num_out_sgpr = num_out;
2747 }
2748
2749 memcpy(initial, out, sizeof(out));
2750 initial_num_out = num_out;
2751 initial_num_out_sgpr = num_out_sgpr;
2752
2753 /* Now chain the parts. */
2754 LLVMValueRef ret = NULL;
2755 for (unsigned part = 0; part < num_parts; ++part) {
2756 LLVMValueRef in[AC_MAX_ARGS];
2757 LLVMTypeRef ret_type;
2758 unsigned out_idx = 0;
2759 unsigned num_params = LLVMCountParams(parts[part]);
2760
2761 /* Merged shaders are executed conditionally depending
2762 * on the number of enabled threads passed in the input SGPRs. */
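/* Illustrative example: if initial[3] holds 0x20 in its low 7 bits,
 * count = 32 and only lanes 0..31 execute this first part; the other
 * lanes of the wave stay disabled until the conditional is closed.
 */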
2763 if (is_multi_part_shader(ctx) && part == 0) {
2764 LLVMValueRef ena, count = initial[3];
2765
2766 count = LLVMBuildAnd(builder, count,
2767 LLVMConstInt(ctx->ac.i32, 0x7f, 0), "");
2768 ena = LLVMBuildICmp(builder, LLVMIntULT,
2769 ac_get_thread_id(&ctx->ac), count, "");
2770 ac_build_ifcc(&ctx->ac, ena, 6506);
2771 }
2772
2773 /* Derive arguments for the next part from outputs of the
2774 * previous one.
2775 */
2776 for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
2777 LLVMValueRef param;
2778 LLVMTypeRef param_type;
2779 bool is_sgpr;
2780 unsigned param_size;