24f744ba5cd99ab388a283922dfcfe9491cb8beb
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "util/u_memory.h"
26 #include "tgsi/tgsi_strings.h"
27 #include "tgsi/tgsi_from_mesa.h"
28
29 #include "ac_exp_param.h"
30 #include "ac_rtld.h"
31 #include "si_shader_internal.h"
32 #include "si_pipe.h"
33 #include "sid.h"
34
35 #include "compiler/nir/nir.h"
36 #include "compiler/nir/nir_serialize.h"
37
38 static const char scratch_rsrc_dword0_symbol[] =
39 "SCRATCH_RSRC_DWORD0";
40
41 static const char scratch_rsrc_dword1_symbol[] =
42 "SCRATCH_RSRC_DWORD1";
43
44 static void si_dump_shader_key(const struct si_shader *shader, FILE *f);
45
46 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
47 union si_shader_part_key *key);
48
49 /** Whether the shader runs as a combination of multiple API shaders */
50 static bool is_multi_part_shader(struct si_shader_context *ctx)
51 {
52 if (ctx->screen->info.chip_class <= GFX8)
53 return false;
54
55 return ctx->shader->key.as_ls ||
56 ctx->shader->key.as_es ||
57 ctx->type == PIPE_SHADER_TESS_CTRL ||
58 ctx->type == PIPE_SHADER_GEOMETRY;
59 }
60
61 /** Whether the shader runs on a merged HW stage (LSHS or ESGS) */
62 bool si_is_merged_shader(struct si_shader_context *ctx)
63 {
64 return ctx->shader->key.as_ngg || is_multi_part_shader(ctx);
65 }
66
67 /**
68 * Returns a unique index for a per-patch semantic name and index. The index
69 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
70 * can be calculated.
71 */
72 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
73 {
74 switch (semantic_name) {
75 case TGSI_SEMANTIC_TESSOUTER:
76 return 0;
77 case TGSI_SEMANTIC_TESSINNER:
78 return 1;
79 case TGSI_SEMANTIC_PATCH:
80 assert(index < 30);
81 return 2 + index;
82
83 default:
84 assert(!"invalid semantic name");
85 return 0;
86 }
87 }
88
/**
 * Returns a unique index for a semantic name and index. The index must be
 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
 * calculated.
 *
 * \param semantic_name  TGSI semantic of the input/output
 * \param index          semantic index (e.g. GENERIC[index])
 * \param is_varying     true when the slot is a PS varying; lets COLOR and
 *                       BCOLOR share the same slot
 */
unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index,
				       unsigned is_varying)
{
	switch (semantic_name) {
	case TGSI_SEMANTIC_POSITION:
		return 0;
	case TGSI_SEMANTIC_GENERIC:
		/* Since some shader stages use the highest used IO index
		 * to determine the size to allocate for inputs/outputs
		 * (in LDS, tess and GS rings), GENERIC should be placed right
		 * after POSITION to make that size as small as possible.
		 */
		if (index < SI_MAX_IO_GENERIC)
			return 1 + index;

		assert(!"invalid generic index");
		return 0;
	case TGSI_SEMANTIC_FOG:
		return SI_MAX_IO_GENERIC + 1;
	case TGSI_SEMANTIC_COLOR:
		assert(index < 2);
		return SI_MAX_IO_GENERIC + 2 + index;
	case TGSI_SEMANTIC_BCOLOR:
		assert(index < 2);
		/* If it's a varying, COLOR and BCOLOR alias. */
		if (is_varying)
			return SI_MAX_IO_GENERIC + 2 + index;
		else
			return SI_MAX_IO_GENERIC + 4 + index;
	case TGSI_SEMANTIC_TEXCOORD:
		assert(index < 8);
		return SI_MAX_IO_GENERIC + 6 + index;

	/* These are rarely used between LS and HS or ES and GS. */
	case TGSI_SEMANTIC_CLIPDIST:
		assert(index < 2);
		return SI_MAX_IO_GENERIC + 6 + 8 + index;
	case TGSI_SEMANTIC_CLIPVERTEX:
		return SI_MAX_IO_GENERIC + 6 + 8 + 2;
	case TGSI_SEMANTIC_PSIZE:
		return SI_MAX_IO_GENERIC + 6 + 8 + 3;

	/* These can't be written by LS, HS, and ES. */
	case TGSI_SEMANTIC_LAYER:
		return SI_MAX_IO_GENERIC + 6 + 8 + 4;
	case TGSI_SEMANTIC_VIEWPORT_INDEX:
		return SI_MAX_IO_GENERIC + 6 + 8 + 5;
	case TGSI_SEMANTIC_PRIMID:
		/* Verify the highest slot still fits in the 64-bit mask. */
		STATIC_ASSERT(SI_MAX_IO_GENERIC + 6 + 8 + 6 <= 63);
		return SI_MAX_IO_GENERIC + 6 + 8 + 6;
	default:
		fprintf(stderr, "invalid semantic name = %u\n", semantic_name);
		assert(!"invalid semantic name");
		return 0;
	}
}
150
151 /**
152 * Get the value of a shader input parameter and extract a bitfield.
153 */
154 static LLVMValueRef unpack_llvm_param(struct si_shader_context *ctx,
155 LLVMValueRef value, unsigned rshift,
156 unsigned bitwidth)
157 {
158 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
159 value = ac_to_integer(&ctx->ac, value);
160
161 if (rshift)
162 value = LLVMBuildLShr(ctx->ac.builder, value,
163 LLVMConstInt(ctx->i32, rshift, 0), "");
164
165 if (rshift + bitwidth < 32) {
166 unsigned mask = (1 << bitwidth) - 1;
167 value = LLVMBuildAnd(ctx->ac.builder, value,
168 LLVMConstInt(ctx->i32, mask, 0), "");
169 }
170
171 return value;
172 }
173
174 LLVMValueRef si_unpack_param(struct si_shader_context *ctx,
175 struct ac_arg param, unsigned rshift,
176 unsigned bitwidth)
177 {
178 LLVMValueRef value = ac_get_arg(&ctx->ac, param);
179
180 return unpack_llvm_param(ctx, value, rshift, bitwidth);
181 }
182
183 static LLVMValueRef unpack_sint16(struct si_shader_context *ctx,
184 LLVMValueRef i32, unsigned index)
185 {
186 assert(index <= 1);
187
188 if (index == 1)
189 return LLVMBuildAShr(ctx->ac.builder, i32,
190 LLVMConstInt(ctx->i32, 16, 0), "");
191
192 return LLVMBuildSExt(ctx->ac.builder,
193 LLVMBuildTrunc(ctx->ac.builder, i32,
194 ctx->ac.i16, ""),
195 ctx->i32, "");
196 }
197
/**
 * Load a vertex shader input into out[0..3].
 *
 * For blit shaders (VS_BLIT_SGPRS_AMD property set), inputs come from user
 * SGPRs rather than vertex buffers. Otherwise the attribute is fetched from
 * its vertex buffer descriptor, with format fixups applied as requested by
 * the shader key (mono.vs_fix_fetch / mono.vs_fetch_opencode).
 *
 * \param input_index  index of the VS input attribute
 * \param out          receives the four components of the loaded input
 */
void si_llvm_load_input_vs(
	struct si_shader_context *ctx,
	unsigned input_index,
	LLVMValueRef out[4])
{
	const struct si_shader_info *info = &ctx->shader->selector->info;
	unsigned vs_blit_property = info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];

	if (vs_blit_property) {
		/* Blit shaders: 3 vertices; the selects below pick between
		 * the two rectangle corners per component.
		 * NOTE(review): exact vertex->corner mapping inferred from
		 * the comparisons — confirm against the blit draw code. */
		LLVMValueRef vertex_id = ctx->abi.vertex_id;
		LLVMValueRef sel_x1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntULE, vertex_id,
						    ctx->i32_1, "");
		/* Use LLVMIntNE, because we have 3 vertices and only
		 * the middle one should use y2.
		 */
		LLVMValueRef sel_y1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntNE, vertex_id,
						    ctx->i32_1, "");

		unsigned param_vs_blit_inputs = ctx->vs_blit_inputs.arg_index;
		if (input_index == 0) {
			/* Position: x1/y1 and x2/y2 are two packed sint16
			 * pairs; depth is the third SGPR. */
			LLVMValueRef x1y1 = LLVMGetParam(ctx->main_fn,
							 param_vs_blit_inputs);
			LLVMValueRef x2y2 = LLVMGetParam(ctx->main_fn,
							 param_vs_blit_inputs + 1);

			LLVMValueRef x1 = unpack_sint16(ctx, x1y1, 0);
			LLVMValueRef y1 = unpack_sint16(ctx, x1y1, 1);
			LLVMValueRef x2 = unpack_sint16(ctx, x2y2, 0);
			LLVMValueRef y2 = unpack_sint16(ctx, x2y2, 1);

			LLVMValueRef x = LLVMBuildSelect(ctx->ac.builder, sel_x1,
							 x1, x2, "");
			LLVMValueRef y = LLVMBuildSelect(ctx->ac.builder, sel_y1,
							 y1, y2, "");

			out[0] = LLVMBuildSIToFP(ctx->ac.builder, x, ctx->f32, "");
			out[1] = LLVMBuildSIToFP(ctx->ac.builder, y, ctx->f32, "");
			out[2] = LLVMGetParam(ctx->main_fn,
					      param_vs_blit_inputs + 2);
			out[3] = ctx->ac.f32_1;
			return;
		}

		/* Color or texture coordinates: */
		assert(input_index == 1);

		if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
			/* A constant color in four SGPRs. */
			for (int i = 0; i < 4; i++) {
				out[i] = LLVMGetParam(ctx->main_fn,
						      param_vs_blit_inputs + 3 + i);
			}
		} else {
			assert(vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD);
			/* Texcoords mirror the position selects; z/w are
			 * passed through from two more SGPRs. */
			LLVMValueRef x1 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 3);
			LLVMValueRef y1 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 4);
			LLVMValueRef x2 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 5);
			LLVMValueRef y2 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 6);

			out[0] = LLVMBuildSelect(ctx->ac.builder, sel_x1,
						 x1, x2, "");
			out[1] = LLVMBuildSelect(ctx->ac.builder, sel_y1,
						 y1, y2, "");
			out[2] = LLVMGetParam(ctx->main_fn,
					      param_vs_blit_inputs + 7);
			out[3] = LLVMGetParam(ctx->main_fn,
					      param_vs_blit_inputs + 8);
		}
		return;
	}

	unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
	union si_vs_fix_fetch fix_fetch;
	LLVMValueRef vb_desc;
	LLVMValueRef vertex_index;
	LLVMValueRef tmp;

	/* The first few vertex buffer descriptors live in user SGPRs; the
	 * rest are loaded from the descriptor array in memory. */
	if (input_index < num_vbos_in_user_sgprs) {
		vb_desc = ac_get_arg(&ctx->ac, ctx->vb_descriptors[input_index]);
	} else {
		unsigned index= input_index - num_vbos_in_user_sgprs;
		vb_desc = ac_build_load_to_sgpr(&ctx->ac,
						ac_get_arg(&ctx->ac, ctx->vertex_buffers),
						LLVMConstInt(ctx->i32, index, 0));
	}

	vertex_index = LLVMGetParam(ctx->main_fn,
				    ctx->vertex_index0.arg_index +
				    input_index);

	/* Use the open-coded implementation for all loads of doubles and
	 * of dword-sized data that needs fixups. We need to insert conversion
	 * code anyway, and the amd/common code does it for us.
	 *
	 * Note: On LLVM <= 8, we can only open-code formats with
	 * channel size >= 4 bytes.
	 */
	bool opencode = ctx->shader->key.mono.vs_fetch_opencode & (1 << input_index);
	fix_fetch.bits = ctx->shader->key.mono.vs_fix_fetch[input_index].bits;
	if (opencode ||
	    (fix_fetch.u.log_size == 3 && fix_fetch.u.format == AC_FETCH_FORMAT_FLOAT) ||
	    (fix_fetch.u.log_size == 2)) {
		tmp = ac_build_opencoded_load_format(
				&ctx->ac, fix_fetch.u.log_size, fix_fetch.u.num_channels_m1 + 1,
				fix_fetch.u.format, fix_fetch.u.reverse, !opencode,
				vb_desc, vertex_index, ctx->ac.i32_0, ctx->ac.i32_0, 0, true);
		for (unsigned i = 0; i < 4; ++i)
			out[i] = LLVMBuildExtractElement(ctx->ac.builder, tmp, LLVMConstInt(ctx->i32, i, false), "");
		return;
	}

	/* Do multiple loads for special formats. */
	unsigned required_channels = util_last_bit(info->input_usage_mask[input_index]);
	LLVMValueRef fetches[4];
	unsigned num_fetches;
	unsigned fetch_stride;
	unsigned channels_per_fetch;

	if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2) {
		/* 3-channel formats with 1- or 2-byte channels: fetch each
		 * channel separately. */
		num_fetches = MIN2(required_channels, 3);
		fetch_stride = 1 << fix_fetch.u.log_size;
		channels_per_fetch = 1;
	} else {
		num_fetches = 1;
		fetch_stride = 0;
		channels_per_fetch = required_channels;
	}

	for (unsigned i = 0; i < num_fetches; ++i) {
		LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);
		fetches[i] = ac_build_buffer_load_format(&ctx->ac, vb_desc, vertex_index, voffset,
							 channels_per_fetch, 0, true);
	}

	/* Scalarize a single vector fetch so the code below can treat
	 * fetches[] uniformly as one value per channel. */
	if (num_fetches == 1 && channels_per_fetch > 1) {
		LLVMValueRef fetch = fetches[0];
		for (unsigned i = 0; i < channels_per_fetch; ++i) {
			tmp = LLVMConstInt(ctx->i32, i, false);
			fetches[i] = LLVMBuildExtractElement(
				ctx->ac.builder, fetch, tmp, "");
		}
		num_fetches = channels_per_fetch;
		channels_per_fetch = 1;
	}

	for (unsigned i = num_fetches; i < 4; ++i)
		fetches[i] = LLVMGetUndef(ctx->f32);

	if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2 &&
	    required_channels == 4) {
		/* The 3-channel fetch left W unset; supply the format's
		 * default W component (integer 1 or float 1.0). */
		if (fix_fetch.u.format == AC_FETCH_FORMAT_UINT || fix_fetch.u.format == AC_FETCH_FORMAT_SINT)
			fetches[3] = ctx->ac.i32_1;
		else
			fetches[3] = ctx->ac.f32_1;
	} else if (fix_fetch.u.log_size == 3 &&
		   (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ||
		    fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED ||
		    fix_fetch.u.format == AC_FETCH_FORMAT_SINT) &&
		   required_channels == 4) {
		/* For 2_10_10_10, the hardware returns an unsigned value;
		 * convert it to a signed one.
		 */
		LLVMValueRef tmp = fetches[3];
		LLVMValueRef c30 = LLVMConstInt(ctx->i32, 30, 0);

		/* First, recover the sign-extended signed integer value. */
		if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED)
			tmp = LLVMBuildFPToUI(ctx->ac.builder, tmp, ctx->i32, "");
		else
			tmp = ac_to_integer(&ctx->ac, tmp);

		/* For the integer-like cases, do a natural sign extension.
		 *
		 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
		 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
		 * exponent.
		 */
		tmp = LLVMBuildShl(ctx->ac.builder, tmp,
				   fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ?
				   LLVMConstInt(ctx->i32, 7, 0) : c30, "");
		tmp = LLVMBuildAShr(ctx->ac.builder, tmp, c30, "");

		/* Convert back to the right type. */
		if (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM) {
			LLVMValueRef clamp;
			LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
			/* Clamp the result to the SNORM minimum of -1. */
			clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, tmp, neg_one, "");
			tmp = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, tmp, "");
		} else if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED) {
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
		}

		fetches[3] = tmp;
	}

	for (unsigned i = 0; i < 4; ++i)
		out[i] = ac_to_float(&ctx->ac, fetches[i]);
}
403
404 LLVMValueRef si_get_primitive_id(struct si_shader_context *ctx,
405 unsigned swizzle)
406 {
407 if (swizzle > 0)
408 return ctx->i32_0;
409
410 switch (ctx->type) {
411 case PIPE_SHADER_VERTEX:
412 return ac_get_arg(&ctx->ac, ctx->vs_prim_id);
413 case PIPE_SHADER_TESS_CTRL:
414 return ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id);
415 case PIPE_SHADER_TESS_EVAL:
416 return ac_get_arg(&ctx->ac, ctx->args.tes_patch_id);
417 case PIPE_SHADER_GEOMETRY:
418 return ac_get_arg(&ctx->ac, ctx->args.gs_prim_id);
419 default:
420 assert(0);
421 return ctx->i32_0;
422 }
423 }
424
425 static LLVMValueRef get_base_vertex(struct ac_shader_abi *abi)
426 {
427 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
428
429 /* For non-indexed draws, the base vertex set by the driver
430 * (for direct draws) or the CP (for indirect draws) is the
431 * first vertex ID, but GLSL expects 0 to be returned.
432 */
433 LLVMValueRef vs_state = ac_get_arg(&ctx->ac,
434 ctx->vs_state_bits);
435 LLVMValueRef indexed;
436
437 indexed = LLVMBuildLShr(ctx->ac.builder, vs_state, ctx->i32_1, "");
438 indexed = LLVMBuildTrunc(ctx->ac.builder, indexed, ctx->i1, "");
439
440 return LLVMBuildSelect(ctx->ac.builder, indexed,
441 ac_get_arg(&ctx->ac, ctx->args.base_vertex),
442 ctx->i32_0, "");
443 }
444
445 static LLVMValueRef get_block_size(struct ac_shader_abi *abi)
446 {
447 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
448
449 LLVMValueRef values[3];
450 LLVMValueRef result;
451 unsigned i;
452 unsigned *properties = ctx->shader->selector->info.properties;
453
454 if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
455 unsigned sizes[3] = {
456 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
457 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
458 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
459 };
460
461 for (i = 0; i < 3; ++i)
462 values[i] = LLVMConstInt(ctx->i32, sizes[i], 0);
463
464 result = ac_build_gather_values(&ctx->ac, values, 3);
465 } else {
466 result = ac_get_arg(&ctx->ac, ctx->block_size);
467 }
468
469 return result;
470 }
471
/**
 * Declare the compute shader's LDS (shared memory) block.
 *
 * Creates an i8 array global in the LDS address space, sized by the
 * TGSI_PROPERTY_CS_LOCAL_SIZE property, and stores an i8* to it in
 * ctx->ac.lds for later shared-variable accesses. Must only be called
 * once per shader (asserts ctx->ac.lds is unset).
 */
void si_declare_compute_memory(struct si_shader_context *ctx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	/* Declared shared-memory size in bytes. */
	unsigned lds_size = sel->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE];

	LLVMTypeRef i8p = LLVMPointerType(ctx->i8, AC_ADDR_SPACE_LDS);
	LLVMValueRef var;

	assert(!ctx->ac.lds);

	var = LLVMAddGlobalInAddressSpace(ctx->ac.module,
					  LLVMArrayType(ctx->i8, lds_size),
					  "compute_lds",
					  AC_ADDR_SPACE_LDS);
	/* NOTE(review): the 64K alignment presumably pins the block at LDS
	 * offset 0 — confirm against ac_llvm LDS allocation rules. */
	LLVMSetAlignment(var, 64 * 1024);

	ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, var, i8p, "");
}
490
491 /* Initialize arguments for the shader export intrinsic */
492 static void si_llvm_init_vs_export_args(struct si_shader_context *ctx,
493 LLVMValueRef *values,
494 unsigned target,
495 struct ac_export_args *args)
496 {
497 args->enabled_channels = 0xf; /* writemask - default is 0xf */
498 args->valid_mask = 0; /* Specify whether the EXEC mask represents the valid mask */
499 args->done = 0; /* Specify whether this is the last export */
500 args->target = target; /* Specify the target we are exporting */
501 args->compr = false;
502
503 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
504 }
505
/**
 * Compute user clip distances from a clip vertex and fill the corresponding
 * position exports.
 *
 * Each clip distance is the dot product of the clip vertex with one user
 * clip plane loaded from the SI_VS_CONST_CLIP_PLANES constant buffer.
 *
 * \param pos       position export args; entries [2] and [3] receive the
 *                  two clip-distance exports (4 distances each)
 * \param out_elts  the four components of the clip vertex
 */
static void si_llvm_emit_clipvertex(struct si_shader_context *ctx,
				    struct ac_export_args *pos, LLVMValueRef *out_elts)
{
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
	LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
						   SI_VS_CONST_CLIP_PLANES, 0);
	LLVMValueRef const_resource = ac_build_load_to_sgpr(&ctx->ac, ptr, constbuf_index);

	for (reg_index = 0; reg_index < 2; reg_index ++) {
		struct ac_export_args *args = &pos[2 + reg_index];

		/* Start every distance at 0 and accumulate below. */
		args->out[0] =
		args->out[1] =
		args->out[2] =
		args->out[3] = LLVMConstReal(ctx->f32, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < 4; chan++) {
			for (const_chan = 0; const_chan < 4; const_chan++) {
				/* Byte offset of component const_chan of clip
				 * plane (reg_index * 4 + chan). */
				LLVMValueRef addr =
					LLVMConstInt(ctx->i32, ((reg_index * 4 + chan) * 4 +
								const_chan) * 4, 0);
				base_elt = si_buffer_load_const(ctx, const_resource,
								addr);
				args->out[chan] = ac_build_fmad(&ctx->ac, base_elt,
								out_elts[const_chan], args->out[chan]);
			}
		}

		args->enabled_channels = 0xf;
		args->valid_mask = 0;
		args->done = 0;
		args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
		args->compr = 0;
	}
}
546
547 static void si_dump_streamout(struct pipe_stream_output_info *so)
548 {
549 unsigned i;
550
551 if (so->num_outputs)
552 fprintf(stderr, "STREAMOUT\n");
553
554 for (i = 0; i < so->num_outputs; i++) {
555 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
556 so->output[i].start_component;
557 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
558 i, so->output[i].output_buffer,
559 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
560 so->output[i].register_index,
561 mask & 1 ? "x" : "",
562 mask & 2 ? "y" : "",
563 mask & 4 ? "z" : "",
564 mask & 8 ? "w" : "");
565 }
566 }
567
/**
 * Store one shader output to its streamout buffer.
 *
 * Packs the written components (1-4 dwords) into a single value and issues
 * one buffer store at the precomputed per-buffer write offset.
 *
 * \param so_buffers        buffer descriptors, indexed by output_buffer
 * \param so_write_offsets  per-buffer byte write offsets for this vertex
 * \param stream_out        the streamout slot being written
 * \param shader_out        the shader output values feeding this slot
 */
void si_emit_streamout_output(struct si_shader_context *ctx,
			      LLVMValueRef const *so_buffers,
			      LLVMValueRef const *so_write_offsets,
			      struct pipe_stream_output *stream_out,
			      struct si_shader_output_values *shader_out)
{
	unsigned buf_idx = stream_out->output_buffer;
	unsigned start = stream_out->start_component;
	unsigned num_comps = stream_out->num_components;
	LLVMValueRef out[4];

	/* Defensive release-build check mirroring the assert. */
	assert(num_comps && num_comps <= 4);
	if (!num_comps || num_comps > 4)
		return;

	/* Load the output as int. */
	for (int j = 0; j < num_comps; j++) {
		assert(stream_out->stream == shader_out->vertex_stream[start + j]);

		out[j] = ac_to_integer(&ctx->ac, shader_out->values[start + j]);
	}

	/* Pack the output. */
	LLVMValueRef vdata = NULL;

	switch (num_comps) {
	case 1: /* as i32 */
		vdata = out[0];
		break;
	case 2: /* as v2i32 */
	case 3: /* as v3i32 */
		if (ac_has_vec3_support(ctx->screen->info.chip_class, false)) {
			vdata = ac_build_gather_values(&ctx->ac, out, num_comps);
			break;
		}
		/* as v4i32 (aligned to 4) */
		out[3] = LLVMGetUndef(ctx->i32);
		/* fall through */
	case 4: /* as v4i32 */
		vdata = ac_build_gather_values(&ctx->ac, out, util_next_power_of_two(num_comps));
		break;
	}

	ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
				    vdata, num_comps,
				    so_write_offsets[buf_idx],
				    ctx->i32_0,
				    stream_out->dst_offset * 4, ac_glc | ac_slc);
}
617
/**
 * Write streamout data to buffers for vertex stream @p stream (different
 * vertex streams can occur for GS copy shaders).
 *
 * \param outputs  all shader outputs with their values and stream masks
 * \param noutput  number of entries in @p outputs
 * \param stream   which vertex stream to write out
 */
void si_llvm_emit_streamout(struct si_shader_context *ctx,
			    struct si_shader_output_values *outputs,
			    unsigned noutput, unsigned stream)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	struct pipe_stream_output_info *so = &sel->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	int i;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		si_unpack_param(ctx, ctx->streamout_config, 16, 7);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	ac_build_ifcc(&ctx->ac, can_emit, 6501);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */

		LLVMValueRef so_write_index =
			ac_get_arg(&ctx->ac,
				   ctx->streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Load the descriptor and compute the write offset for each
		 * enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		LLVMValueRef so_buffers[4];
		LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac,
						  ctx->rw_buffers);

		for (i = 0; i < 4; i++) {
			/* A zero stride means the buffer slot is unused. */
			if (!so->stride[i])
				continue;

			LLVMValueRef offset = LLVMConstInt(ctx->i32,
							   SI_VS_STREAMOUT_BUF0 + i, 0);

			so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);

			LLVMValueRef so_offset = ac_get_arg(&ctx->ac,
							    ctx->streamout_offset[i]);
			/* Convert the DWORD offset to a byte offset. */
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");

			so_write_offset[i] = ac_build_imad(&ctx->ac, so_write_index,
							   LLVMConstInt(ctx->i32, so->stride[i]*4, 0),
							   so_offset);
		}

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned reg = so->output[i].register_index;

			if (reg >= noutput)
				continue;

			if (stream != so->output[i].stream)
				continue;

			si_emit_streamout_output(ctx, so_buffers, so_write_offset,
						 &so->output[i], &outputs[reg]);
		}
	}
	ac_build_endif(&ctx->ac, 6501);
}
700
701 static void si_export_param(struct si_shader_context *ctx, unsigned index,
702 LLVMValueRef *values)
703 {
704 struct ac_export_args args;
705
706 si_llvm_init_vs_export_args(ctx, values,
707 V_008DFC_SQ_EXP_PARAM + index, &args);
708 ac_build_export(&ctx->ac, &args);
709 }
710
/**
 * Emit all parameter (non-position) exports in order and record each
 * output's PARAM slot in shader->info.vs_output_param_offset.
 *
 * Outputs are skipped when they are written only to non-zero vertex
 * streams, are not in the exportable semantic set, or are killed by the
 * shader key (kill_outputs). The total count is stored in
 * shader->info.nr_param_exports.
 */
static void si_build_param_exports(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput)
{
	struct si_shader *shader = ctx->shader;
	unsigned param_count = 0;

	for (unsigned i = 0; i < noutput; i++) {
		unsigned semantic_name = outputs[i].semantic_name;
		unsigned semantic_index = outputs[i].semantic_index;

		/* Skip outputs whose components all go to non-zero
		 * vertex streams. */
		if (outputs[i].vertex_stream[0] != 0 &&
		    outputs[i].vertex_stream[1] != 0 &&
		    outputs[i].vertex_stream[2] != 0 &&
		    outputs[i].vertex_stream[3] != 0)
			continue;

		/* Only these semantics are exported as parameters. */
		switch (semantic_name) {
		case TGSI_SEMANTIC_LAYER:
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
		case TGSI_SEMANTIC_CLIPDIST:
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			break;
		default:
			continue;
		}

		/* Skip outputs the shader key marked as killed (unused). */
		if ((semantic_name != TGSI_SEMANTIC_GENERIC ||
		     semantic_index < SI_MAX_IO_GENERIC) &&
		    shader->key.opt.kill_outputs &
		    (1ull << si_shader_io_get_unique_index(semantic_name,
							   semantic_index, true)))
			continue;

		si_export_param(ctx, param_count, outputs[i].values);

		assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
		shader->info.vs_output_param_offset[i] = param_count++;
	}

	shader->info.nr_param_exports = param_count;
}
758
/**
 * Vertex color clamping.
 *
 * This uses a state constant loaded in a user data SGPR and
 * an IF statement is added that clamps all colors if the constant
 * is true.
 *
 * Colors are routed through alloca variables so the conditionally
 * clamped values can be read back after the IF block, replacing the
 * entries in @p outputs in place.
 */
static void si_vertex_color_clamping(struct si_shader_context *ctx,
				     struct si_shader_output_values *outputs,
				     unsigned noutput)
{
	LLVMValueRef addr[SI_MAX_VS_OUTPUTS][4];
	bool has_colors = false;

	/* Store original colors to alloca variables. */
	for (unsigned i = 0; i < noutput; i++) {
		if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
		    outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
			continue;

		for (unsigned j = 0; j < 4; j++) {
			addr[i][j] = ac_build_alloca_undef(&ctx->ac, ctx->f32, "");
			LLVMBuildStore(ctx->ac.builder, outputs[i].values[j], addr[i][j]);
		}
		has_colors = true;
	}

	/* Nothing to clamp if the shader writes no colors. */
	if (!has_colors)
		return;

	/* The state is in the first bit of the user SGPR. */
	LLVMValueRef cond = ac_get_arg(&ctx->ac, ctx->vs_state_bits);
	cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->i1, "");

	ac_build_ifcc(&ctx->ac, cond, 6502);

	/* Store clamped colors to alloca variables within the conditional block. */
	for (unsigned i = 0; i < noutput; i++) {
		if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
		    outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
			continue;

		for (unsigned j = 0; j < 4; j++) {
			LLVMBuildStore(ctx->ac.builder,
				       ac_build_clamp(&ctx->ac, outputs[i].values[j]),
				       addr[i][j]);
		}
	}
	ac_build_endif(&ctx->ac, 6502);

	/* Load clamped colors */
	for (unsigned i = 0; i < noutput; i++) {
		if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
		    outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
			continue;

		for (unsigned j = 0; j < 4; j++) {
			outputs[i].values[j] =
				LLVMBuildLoad(ctx->ac.builder, addr[i][j], "");
		}
	}
}
821
/* Generate export instructions for hardware VS shader stage or NGG GS stage
 * (position and parameter data only).
 *
 * Builds the POS0..POS3 exports (position, misc vector, clip distances),
 * marks the last one with the done bit, then emits all parameter exports.
 *
 * \param outputs  the shader's output values with semantics and streams
 * \param noutput  number of entries in \p outputs
 */
void si_llvm_export_vs(struct si_shader_context *ctx,
		       struct si_shader_output_values *outputs,
		       unsigned noutput)
{
	struct si_shader *shader = ctx->shader;
	struct ac_export_args pos_args[4] = {};
	LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	unsigned pos_idx;
	int i;

	/* Apply the optional color clamping state before exporting. */
	si_vertex_color_clamping(ctx, outputs, noutput);

	/* Build position exports. */
	for (i = 0; i < noutput; i++) {
		switch (outputs[i].semantic_name) {
		case TGSI_SEMANTIC_POSITION:
			si_llvm_init_vs_export_args(ctx, outputs[i].values,
						    V_008DFC_SQ_EXP_POS, &pos_args[0]);
			break;
		case TGSI_SEMANTIC_PSIZE:
			psize_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_LAYER:
			layer_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			viewport_index_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_EDGEFLAG:
			edgeflag_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			if (!shader->key.opt.clip_disable) {
				unsigned index = 2 + outputs[i].semantic_index;
				si_llvm_init_vs_export_args(ctx, outputs[i].values,
							    V_008DFC_SQ_EXP_POS + index,
							    &pos_args[index]);
			}
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			if (!shader->key.opt.clip_disable) {
				si_llvm_emit_clipvertex(ctx, pos_args,
							outputs[i].values);
			}
			break;
		}
	}

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0].out[0]) {
		pos_args[0].enabled_channels = 0xf; /* writemask */
		pos_args[0].valid_mask = 0; /* EXEC mask */
		pos_args[0].done = 0; /* last export? */
		pos_args[0].target = V_008DFC_SQ_EXP_POS;
		pos_args[0].compr = 0; /* COMPR flag */
		pos_args[0].out[0] = ctx->ac.f32_0; /* X */
		pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[0].out[3] = ctx->ac.f32_1; /* W */
	}

	/* NGG handles the edge flag elsewhere, so exclude it here. */
	bool pos_writes_edgeflag = shader->selector->info.writes_edgeflag &&
				   !shader->key.as_ngg;

	/* Write the misc vector (point size, edgeflag, layer, viewport). */
	if (shader->selector->info.writes_psize ||
	    pos_writes_edgeflag ||
	    shader->selector->info.writes_viewport_index ||
	    shader->selector->info.writes_layer) {
		pos_args[1].enabled_channels = shader->selector->info.writes_psize |
					       (pos_writes_edgeflag << 1) |
					       (shader->selector->info.writes_layer << 2);

		pos_args[1].valid_mask = 0; /* EXEC mask */
		pos_args[1].done = 0; /* last export? */
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0; /* COMPR flag */
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0; /* W */

		if (shader->selector->info.writes_psize)
			pos_args[1].out[0] = psize_value;

		if (pos_writes_edgeflag) {
			/* The output is a float, but the hw expects an integer
			 * with the first bit containing the edge flag. */
			edgeflag_value = LLVMBuildFPToUI(ctx->ac.builder,
							 edgeflag_value,
							 ctx->i32, "");
			edgeflag_value = ac_build_umin(&ctx->ac,
						       edgeflag_value,
						       ctx->i32_1);

			/* The LLVM intrinsic expects a float. */
			pos_args[1].out[1] = ac_to_float(&ctx->ac, edgeflag_value);
		}

		if (ctx->screen->info.chip_class >= GFX9) {
			/* GFX9 has the layer in out.z[10:0] and the viewport
			 * index in out.z[19:16].
			 */
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				LLVMValueRef v = viewport_index_value;

				/* OR the viewport index into bits [19:16] of
				 * the Z channel, alongside the layer. */
				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->i32, 16, 0), "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			}
		} else {
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				pos_args[1].out[3] = viewport_index_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	/* Count the populated POS exports so we know which gets the done bit. */
	for (i = 0; i < 4; i++)
		if (pos_args[i].out[0])
			shader->info.nr_pos_exports++;

	/* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
	 * Setting valid_mask=1 prevents it and has no other effect.
	 */
	if (ctx->screen->info.family == CHIP_NAVI10 ||
	    ctx->screen->info.family == CHIP_NAVI12 ||
	    ctx->screen->info.family == CHIP_NAVI14)
		pos_args[0].valid_mask = 1;

	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;

		if (pos_idx == shader->info.nr_pos_exports)
			/* Specify that this is the last export */
			pos_args[i].done = 1;

		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	/* Build parameter exports. */
	si_build_param_exports(ctx, outputs, noutput);
}
983
984 static void si_llvm_emit_vs_epilogue(struct ac_shader_abi *abi,
985 unsigned max_outputs,
986 LLVMValueRef *addrs)
987 {
988 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
989 struct si_shader_info *info = &ctx->shader->selector->info;
990 struct si_shader_output_values *outputs = NULL;
991 int i,j;
992
993 assert(!ctx->shader->is_gs_copy_shader);
994 assert(info->num_outputs <= max_outputs);
995
996 outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));
997
998 for (i = 0; i < info->num_outputs; i++) {
999 outputs[i].semantic_name = info->output_semantic_name[i];
1000 outputs[i].semantic_index = info->output_semantic_index[i];
1001
1002 for (j = 0; j < 4; j++) {
1003 outputs[i].values[j] =
1004 LLVMBuildLoad(ctx->ac.builder,
1005 addrs[4 * i + j],
1006 "");
1007 outputs[i].vertex_stream[j] =
1008 (info->output_streams[i] >> (2 * j)) & 3;
1009 }
1010 }
1011
1012 if (!ctx->screen->use_ngg_streamout &&
1013 ctx->shader->selector->so.num_outputs)
1014 si_llvm_emit_streamout(ctx, outputs, i, 0);
1015
1016 /* Export PrimitiveID. */
1017 if (ctx->shader->key.mono.u.vs_export_prim_id) {
1018 outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
1019 outputs[i].semantic_index = 0;
1020 outputs[i].values[0] = ac_to_float(&ctx->ac, si_get_primitive_id(ctx, 0));
1021 for (j = 1; j < 4; j++)
1022 outputs[i].values[j] = LLVMConstReal(ctx->f32, 0);
1023
1024 memset(outputs[i].vertex_stream, 0,
1025 sizeof(outputs[i].vertex_stream));
1026 i++;
1027 }
1028
1029 si_llvm_export_vs(ctx, outputs, i);
1030 FREE(outputs);
1031 }
1032
/* Epilogue for the primitive-discard compute-shader variant of a VS:
 * locate the TGSI_SEMANTIC_POSITION output and hand its four channels
 * back through the function's return value instead of exporting them.
 */
static void si_llvm_emit_prim_discard_cs_epilogue(struct ac_shader_abi *abi,
						  unsigned max_outputs,
						  LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef pos[4] = {};

	assert(info->num_outputs <= max_outputs);

	/* Only the position output is needed; stop at the first match. */
	for (unsigned i = 0; i < info->num_outputs; i++) {
		if (info->output_semantic_name[i] != TGSI_SEMANTIC_POSITION)
			continue;

		for (unsigned chan = 0; chan < 4; chan++)
			pos[chan] = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
		break;
	}
	/* The shader must have written a position output. */
	assert(pos[0] != NULL);

	/* Return the position output. */
	LLVMValueRef ret = ctx->return_value;
	for (unsigned chan = 0; chan < 4; chan++)
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, pos[chan], chan, "");
	ctx->return_value = ret;
}
1059
1060 static void declare_streamout_params(struct si_shader_context *ctx,
1061 struct pipe_stream_output_info *so)
1062 {
1063 if (ctx->screen->use_ngg_streamout) {
1064 if (ctx->type == PIPE_SHADER_TESS_EVAL)
1065 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1066 return;
1067 }
1068
1069 /* Streamout SGPRs. */
1070 if (so->num_outputs) {
1071 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_config);
1072 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_write_index);
1073 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
1074 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1075 }
1076
1077 /* A streamout buffer offset is loaded if the stride is non-zero. */
1078 for (int i = 0; i < 4; i++) {
1079 if (!so->stride[i])
1080 continue;
1081
1082 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_offset[i]);
1083 }
1084 }
1085
1086 static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
1087 {
1088 switch (shader->selector->type) {
1089 case PIPE_SHADER_VERTEX:
1090 case PIPE_SHADER_TESS_EVAL:
1091 return shader->key.as_ngg ? 128 : 0;
1092
1093 case PIPE_SHADER_TESS_CTRL:
1094 /* Return this so that LLVM doesn't remove s_barrier
1095 * instructions on chips where we use s_barrier. */
1096 return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 0;
1097
1098 case PIPE_SHADER_GEOMETRY:
1099 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 0;
1100
1101 case PIPE_SHADER_COMPUTE:
1102 break; /* see below */
1103
1104 default:
1105 return 0;
1106 }
1107
1108 const unsigned *properties = shader->selector->info.properties;
1109 unsigned max_work_group_size =
1110 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
1111 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
1112 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
1113
1114 if (!max_work_group_size) {
1115 /* This is a variable group size compute shader,
1116 * compile it for the maximum possible group size.
1117 */
1118 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
1119 }
1120 return max_work_group_size;
1121 }
1122
1123 static void declare_const_and_shader_buffers(struct si_shader_context *ctx,
1124 bool assign_params)
1125 {
1126 enum ac_arg_type const_shader_buf_type;
1127
1128 if (ctx->shader->selector->info.const_buffers_declared == 1 &&
1129 ctx->shader->selector->info.shader_buffers_declared == 0)
1130 const_shader_buf_type = AC_ARG_CONST_FLOAT_PTR;
1131 else
1132 const_shader_buf_type = AC_ARG_CONST_DESC_PTR;
1133
1134 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, const_shader_buf_type,
1135 assign_params ? &ctx->const_and_shader_buffers :
1136 &ctx->other_const_and_shader_buffers);
1137 }
1138
1139 static void declare_samplers_and_images(struct si_shader_context *ctx,
1140 bool assign_params)
1141 {
1142 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
1143 assign_params ? &ctx->samplers_and_images :
1144 &ctx->other_samplers_and_images);
1145 }
1146
/* Declare the per-stage descriptor SGPRs: const/shader buffers first,
 * then samplers/images. \p assign_params selects whether they populate
 * the current stage's fields or the "other stage" fields.
 */
static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
					    bool assign_params)
{
	declare_const_and_shader_buffers(ctx, assign_params);
	declare_samplers_and_images(ctx, assign_params);
}
1153
/* Declare descriptor SGPRs common to all stages: the RW buffer
 * descriptor array and the bindless samplers/images array.
 */
static void declare_global_desc_pointers(struct si_shader_context *ctx)
{
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
		   &ctx->rw_buffers);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
		   &ctx->bindless_samplers_and_images);
}
1161
1162 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx)
1163 {
1164 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
1165 if (!ctx->shader->is_gs_copy_shader) {
1166 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.base_vertex);
1167 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.start_instance);
1168 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.draw_id);
1169 }
1170 }
1171
/* Declare vertex-buffer descriptor inputs: a pointer to the VB descriptor
 * array, optionally followed by inlined VB descriptors passed directly in
 * user SGPRs (4 SGPRs each, padded to start at
 * SI_SGPR_VS_VB_DESCRIPTOR_FIRST).
 */
static void declare_vb_descriptor_input_sgprs(struct si_shader_context *ctx)
{
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->vertex_buffers);

	unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
	if (num_vbos_in_user_sgprs) {
		unsigned user_sgprs = ctx->args.num_sgprs_used;

		/* Merged shaders have 8 system SGPRs before the user SGPRs;
		 * subtract them so only real user SGPRs are counted. */
		if (si_is_merged_shader(ctx))
			user_sgprs -= 8;
		assert(user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST);

		/* Declare unused SGPRs to align VB descriptors to 4 SGPRs (hw requirement). */
		for (unsigned i = user_sgprs; i < SI_SGPR_VS_VB_DESCRIPTOR_FIRST; i++)
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */

		assert(num_vbos_in_user_sgprs <= ARRAY_SIZE(ctx->vb_descriptors));
		for (unsigned i = 0; i < num_vbos_in_user_sgprs; i++)
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 4, AC_ARG_INT, &ctx->vb_descriptors[i]);
	}
}
1193
/* Declare the VS input VGPRs. The exact VGPR order is the hardware ABI
 * and differs between LS mode (VS before TCS), GFX10+, and older chips.
 * Vertex load indices are filled in by the prolog, so they are counted
 * into \p num_prolog_vgprs rather than treated as API inputs.
 */
static void declare_vs_input_vgprs(struct si_shader_context *ctx,
				   unsigned *num_prolog_vgprs)
{
	struct si_shader *shader = ctx->shader;

	ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.vertex_id);
	if (shader->key.as_ls) {
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->rel_auto_id);
		if (ctx->screen->info.chip_class >= GFX10) {
			ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
			ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
		} else {
			ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
			ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
		}
	} else if (ctx->screen->info.chip_class >= GFX10) {
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
			   &ctx->vs_prim_id); /* user vgpr or PrimID (legacy) */
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
	} else {
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->vs_prim_id);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
	}

	if (!shader->is_gs_copy_shader) {
		/* Vertex load indices. */
		if (shader->selector->info.num_inputs) {
			/* Only index 0 gets a named argument; the remaining
			 * indices occupy consecutive anonymous VGPRs. */
			ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
				   &ctx->vertex_index0);
			for (unsigned i = 1; i < shader->selector->info.num_inputs; i++)
				ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
		}
		*num_prolog_vgprs += shader->selector->info.num_inputs;
	}
}
1231
1232 static void declare_vs_blit_inputs(struct si_shader_context *ctx,
1233 unsigned vs_blit_property)
1234 {
1235 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
1236 &ctx->vs_blit_inputs); /* i16 x1, y1 */
1237 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* i16 x1, y1 */
1238 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* depth */
1239
1240 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
1241 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color0 */
1242 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color1 */
1243 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color2 */
1244 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color3 */
1245 } else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
1246 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x1 */
1247 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y1 */
1248 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x2 */
1249 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y2 */
1250 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.z */
1251 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.w */
1252 }
1253 }
1254
/* Declare the fixed TES input VGPRs: tessellation coordinates u/v,
 * the relative patch id, and the patch id.
 */
static void declare_tes_input_vgprs(struct si_shader_context *ctx)
{
	ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_u);
	ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_v);
	ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->tes_rel_patch_id);
	ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tes_patch_id);
}
1262
enum {
	/* Convenient merged shader definitions. Pseudo shader types for
	 * GFX9+ merged stages; they extend pipe_shader_type past
	 * PIPE_SHADER_TYPES and are only used inside si_create_function. */
	SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
	SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
};
1268
/* Like ac_add_arg, but asserts that the argument is declared at index
 * \p idx. Used for inputs whose positions are fixed by the hardware
 * (e.g. the PS SI_PARAM_* locations).
 */
void si_add_arg_checked(struct ac_shader_args *args,
			enum ac_arg_regfile file,
			unsigned registers, enum ac_arg_type type,
			struct ac_arg *arg,
			unsigned idx)
{
	assert(args->arg_count == idx);
	ac_add_arg(args, file, registers, type, arg);
}
1278
/* Build the LLVM function ("main") for the current shader stage: declare
 * all SGPR and VGPR inputs in the exact order the hardware and the
 * radeonsi ABI expect, set up return values for multi-part/merged
 * shaders, create the function, and initialize related context state
 * (PS input attributes, input register counts, LDS, ABI system values).
 *
 * The declaration order within each case is the ABI contract with the
 * prolog/epilog parts and the hardware — do not reorder.
 */
void si_create_function(struct si_shader_context *ctx)
{
	struct si_shader *shader = ctx->shader;
	LLVMTypeRef returns[AC_MAX_ARGS];
	unsigned i, num_return_sgprs;
	unsigned num_returns = 0;
	unsigned num_prolog_vgprs = 0;
	unsigned type = ctx->type;
	unsigned vs_blit_property =
		shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];

	memset(&ctx->args, 0, sizeof(ctx->args));

	/* Set MERGED shaders. On GFX9+, LS/HS merge into one hardware stage
	 * and ES/GS (or NGG) into another; remap 'type' accordingly. */
	if (ctx->screen->info.chip_class >= GFX9) {
		if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
			type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
		else if (shader->key.as_es || shader->key.as_ngg || type == PIPE_SHADER_GEOMETRY)
			type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
	}

	switch (type) {
	case PIPE_SHADER_VERTEX:
		declare_global_desc_pointers(ctx);

		/* Blit shaders take their inputs from SGPRs and skip the
		 * normal descriptor/draw-parameter declarations. */
		if (vs_blit_property) {
			declare_vs_blit_inputs(ctx, vs_blit_property);

			/* VGPRs */
			declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
			break;
		}

		declare_per_stage_desc_pointers(ctx, true);
		declare_vs_specific_input_sgprs(ctx);
		if (!shader->is_gs_copy_shader)
			declare_vb_descriptor_input_sgprs(ctx);

		if (shader->key.as_es) {
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
				   &ctx->es2gs_offset);
		} else if (shader->key.as_ls) {
			/* no extra parameters */
		} else {
			/* The locations of the other parameters are assigned dynamically. */
			declare_streamout_params(ctx, &shader->selector->so);
		}

		/* VGPRs */
		declare_vs_input_vgprs(ctx, &num_prolog_vgprs);

		/* Return values */
		if (shader->key.opt.vs_as_prim_discard_cs) {
			/* The 4 position channels returned by
			 * si_llvm_emit_prim_discard_cs_epilogue. */
			for (i = 0; i < 4; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case PIPE_SHADER_TESS_CTRL: /* GFX6-GFX8 */
		declare_global_desc_pointers(ctx);
		declare_per_stage_desc_pointers(ctx, true);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);

		/* VGPRs */
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);

		/* param_tcs_offchip_offset and param_tcs_factor_offset are
		 * placed after the user SGPRs.
		 */
		for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
			returns[num_returns++] = ctx->i32; /* SGPRs */
		for (i = 0; i < 11; i++)
			returns[num_returns++] = ctx->f32; /* VGPRs */
		break;

	case SI_SHADER_MERGED_VERTEX_TESSCTRL:
		/* Merged stages have 8 system SGPRs at the beginning. */
		/* SPI_SHADER_USER_DATA_ADDR_LO/HI_HS */
		declare_per_stage_desc_pointers(ctx,
						ctx->type == PIPE_SHADER_TESS_CTRL);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */

		declare_global_desc_pointers(ctx);
		declare_per_stage_desc_pointers(ctx,
						ctx->type == PIPE_SHADER_VERTEX);
		declare_vs_specific_input_sgprs(ctx);

		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
		declare_vb_descriptor_input_sgprs(ctx);

		/* VGPRs (first TCS, then VS) */
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);

		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_input_vgprs(ctx, &num_prolog_vgprs);

			/* LS return values are inputs to the TCS main shader part. */
			for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 2; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		} else {
			/* TCS return values are inputs to the TCS epilog.
			 *
			 * param_tcs_offchip_offset, param_tcs_factor_offset,
			 * param_tcs_offchip_layout, and param_rw_buffers
			 * should be passed to the epilog.
			 */
			for (i = 0; i <= 8 + GFX9_SGPR_TCS_OUT_LAYOUT; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 11; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
		/* Merged stages have 8 system SGPRs at the beginning. */
		/* SPI_SHADER_USER_DATA_ADDR_LO/HI_GS */
		declare_per_stage_desc_pointers(ctx,
						ctx->type == PIPE_SHADER_GEOMETRY);

		if (ctx->shader->key.as_ngg)
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_tg_info);
		else
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);

		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */

		declare_global_desc_pointers(ctx);
		if (ctx->type != PIPE_SHADER_VERTEX || !vs_blit_property) {
			declare_per_stage_desc_pointers(ctx,
							(ctx->type == PIPE_SHADER_VERTEX ||
							 ctx->type == PIPE_SHADER_TESS_EVAL));
		}

		if (ctx->type == PIPE_SHADER_VERTEX) {
			if (vs_blit_property)
				declare_vs_blit_inputs(ctx, vs_blit_property);
			else
				declare_vs_specific_input_sgprs(ctx);
		} else {
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
			/* Declare as many input SGPRs as the VS has. */
		}

		if (ctx->type == PIPE_SHADER_VERTEX)
			declare_vb_descriptor_input_sgprs(ctx);

		/* VGPRs (first GS, then VS/TES) */
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx01_offset);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx23_offset);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx45_offset);

		if (ctx->type == PIPE_SHADER_VERTEX) {
			declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
		} else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
			declare_tes_input_vgprs(ctx);
		}

		if (ctx->shader->key.as_es &&
		    (ctx->type == PIPE_SHADER_VERTEX ||
		     ctx->type == PIPE_SHADER_TESS_EVAL)) {
			unsigned num_user_sgprs;

			if (ctx->type == PIPE_SHADER_VERTEX)
				num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR;
			else
				num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;

			/* ES return values are inputs to GS. */
			for (i = 0; i < 8 + num_user_sgprs; i++)
				returns[num_returns++] = ctx->i32; /* SGPRs */
			for (i = 0; i < 5; i++)
				returns[num_returns++] = ctx->f32; /* VGPRs */
		}
		break;

	case PIPE_SHADER_TESS_EVAL:
		declare_global_desc_pointers(ctx);
		declare_per_stage_desc_pointers(ctx, true);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);

		if (shader->key.as_es) {
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->es2gs_offset);
		} else {
			declare_streamout_params(ctx, &shader->selector->so);
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
		}

		/* VGPRs */
		declare_tes_input_vgprs(ctx);
		break;

	case PIPE_SHADER_GEOMETRY:
		declare_global_desc_pointers(ctx);
		declare_per_stage_desc_pointers(ctx, true);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_wave_id);

		/* VGPRs */
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[0]);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[1]);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[2]);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[3]);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[4]);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[5]);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
		break;

	case PIPE_SHADER_FRAGMENT:
		declare_global_desc_pointers(ctx);
		declare_per_stage_desc_pointers(ctx, true);
		/* PS inputs are at hardware-fixed indices; si_add_arg_checked
		 * asserts each lands at its SI_PARAM_* location. */
		si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL,
				   SI_PARAM_ALPHA_REF);
		si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
				   &ctx->args.prim_mask, SI_PARAM_PRIM_MASK);

		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_sample,
				   SI_PARAM_PERSP_SAMPLE);
		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
				   &ctx->args.persp_center, SI_PARAM_PERSP_CENTER);
		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
				   &ctx->args.persp_centroid, SI_PARAM_PERSP_CENTROID);
		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
				   NULL, SI_PARAM_PERSP_PULL_MODEL);
		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
				   &ctx->args.linear_sample, SI_PARAM_LINEAR_SAMPLE);
		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
				   &ctx->args.linear_center, SI_PARAM_LINEAR_CENTER);
		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
				   &ctx->args.linear_centroid, SI_PARAM_LINEAR_CENTROID);
		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_FLOAT,
				   NULL, SI_PARAM_LINE_STIPPLE_TEX);
		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
				   &ctx->args.frag_pos[0], SI_PARAM_POS_X_FLOAT);
		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
				   &ctx->args.frag_pos[1], SI_PARAM_POS_Y_FLOAT);
		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
				   &ctx->args.frag_pos[2], SI_PARAM_POS_Z_FLOAT);
		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
				   &ctx->args.frag_pos[3], SI_PARAM_POS_W_FLOAT);
		/* Remember where FRONT_FACE/ANCILLARY land so the PS prolog
		 * can patch them. */
		shader->info.face_vgpr_index = ctx->args.num_vgprs_used;
		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
				   &ctx->args.front_face, SI_PARAM_FRONT_FACE);
		shader->info.ancillary_vgpr_index = ctx->args.num_vgprs_used;
		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
				   &ctx->args.ancillary, SI_PARAM_ANCILLARY);
		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
				   &ctx->args.sample_coverage, SI_PARAM_SAMPLE_COVERAGE);
		si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
				   &ctx->pos_fixed_pt, SI_PARAM_POS_FIXED_PT);

		/* Color inputs from the prolog. */
		if (shader->selector->info.colors_read) {
			unsigned num_color_elements =
				util_bitcount(shader->selector->info.colors_read);

			for (i = 0; i < num_color_elements; i++)
				ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, NULL);

			num_prolog_vgprs += num_color_elements;
		}

		/* Outputs for the epilog. */
		num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
		num_returns =
			num_return_sgprs +
			util_bitcount(shader->selector->info.colors_written) * 4 +
			shader->selector->info.writes_z +
			shader->selector->info.writes_stencil +
			shader->selector->info.writes_samplemask +
			1 /* SampleMaskIn */;

		num_returns = MAX2(num_returns,
				   num_return_sgprs +
				   PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

		for (i = 0; i < num_return_sgprs; i++)
			returns[i] = ctx->i32;
		for (; i < num_returns; i++)
			returns[i] = ctx->f32;
		break;

	case PIPE_SHADER_COMPUTE:
		declare_global_desc_pointers(ctx);
		declare_per_stage_desc_pointers(ctx, true);
		if (shader->selector->info.uses_grid_size)
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT,
				   &ctx->args.num_work_groups);
		if (shader->selector->info.uses_block_size &&
		    shader->selector->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT, &ctx->block_size);

		unsigned cs_user_data_dwords =
			shader->selector->info.properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD];
		if (cs_user_data_dwords) {
			ac_add_arg(&ctx->args, AC_ARG_SGPR, cs_user_data_dwords, AC_ARG_INT,
				   &ctx->cs_user_data);
		}

		/* Hardware SGPRs. */
		for (i = 0; i < 3; i++) {
			if (shader->selector->info.uses_block_id[i]) {
				ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
					   &ctx->args.workgroup_ids[i]);
			}
		}
		if (shader->selector->info.uses_subgroup_info)
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tg_size);

		/* Hardware VGPRs. */
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
			   &ctx->args.local_invocation_ids);
		break;
	default:
		assert(0 && "unimplemented shader");
		return;
	}

	si_llvm_create_func(ctx, "main", returns, num_returns,
			    si_get_max_workgroup_size(shader));

	/* Reserve register locations for VGPR inputs the PS prolog may need. */
	if (ctx->type == PIPE_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
		ac_llvm_add_target_dep_function_attr(ctx->main_fn,
						     "InitialPSInputAddr",
						     S_0286D0_PERSP_SAMPLE_ENA(1) |
						     S_0286D0_PERSP_CENTER_ENA(1) |
						     S_0286D0_PERSP_CENTROID_ENA(1) |
						     S_0286D0_LINEAR_SAMPLE_ENA(1) |
						     S_0286D0_LINEAR_CENTER_ENA(1) |
						     S_0286D0_LINEAR_CENTROID_ENA(1) |
						     S_0286D0_FRONT_FACE_ENA(1) |
						     S_0286D0_ANCILLARY_ENA(1) |
						     S_0286D0_POS_FIXED_PT_ENA(1));
	}

	shader->info.num_input_sgprs = ctx->args.num_sgprs_used;
	shader->info.num_input_vgprs = ctx->args.num_vgprs_used;

	/* Prolog-provided VGPRs are not API inputs; exclude them. */
	assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
	shader->info.num_input_vgprs -= num_prolog_vgprs;

	if (shader->key.as_ls || ctx->type == PIPE_SHADER_TESS_CTRL) {
		if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
			/* The LSHS size is not known until draw time, so we append it
			 * at the end of whatever LDS use there may be in the rest of
			 * the shader (currently none, unless LLVM decides to do its
			 * own LDS-based lowering).
			 */
			ctx->ac.lds = LLVMAddGlobalInAddressSpace(
				ctx->ac.module, LLVMArrayType(ctx->i32, 0),
				"__lds_end", AC_ADDR_SPACE_LDS);
			LLVMSetAlignment(ctx->ac.lds, 256);
		} else {
			ac_declare_lds_as_pointer(&ctx->ac);
		}
	}

	/* Unlike radv, we override these arguments in the prolog, so to the
	 * API shader they appear as normal arguments.
	 */
	if (ctx->type == PIPE_SHADER_VERTEX) {
		ctx->abi.vertex_id = ac_get_arg(&ctx->ac, ctx->args.vertex_id);
		ctx->abi.instance_id = ac_get_arg(&ctx->ac, ctx->args.instance_id);
	} else if (ctx->type == PIPE_SHADER_FRAGMENT) {
		ctx->abi.persp_centroid = ac_get_arg(&ctx->ac, ctx->args.persp_centroid);
		ctx->abi.linear_centroid = ac_get_arg(&ctx->ac, ctx->args.linear_centroid);
	}
}
1676
/* For the UMR disassembler. Presumably appended after the shader code so
 * the disassembler can find where the code ends — the marker opcode is an
 * intentionally invalid instruction. */
#define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
#define DEBUGGER_NUM_MARKERS 5
1680
/* Open the combined ELF of all present shader parts (prolog(s), previous
 * stage, main part, epilog) with ac_rtld so it can be sized, disassembled,
 * or uploaded. Registers shared LDS symbols (esgs_ring, ngg_emit) and
 * derives shader->config.lds_size from the linked LDS usage.
 *
 * \return the result of ac_rtld_open (caller must ac_rtld_close on success)
 */
static bool si_shader_binary_open(struct si_screen *screen,
				  struct si_shader *shader,
				  struct ac_rtld_binary *rtld)
{
	const struct si_shader_selector *sel = shader->selector;
	const char *part_elfs[5];
	size_t part_sizes[5];
	unsigned num_parts = 0;

	/* Collect each present part's ELF buffer, in execution order. */
#define add_part(shader_or_part) \
	if (shader_or_part) { \
		part_elfs[num_parts] = (shader_or_part)->binary.elf_buffer; \
		part_sizes[num_parts] = (shader_or_part)->binary.elf_size; \
		num_parts++; \
	}

	add_part(shader->prolog);
	add_part(shader->previous_stage);
	add_part(shader->prolog2);
	add_part(shader);
	add_part(shader->epilog);

#undef add_part

	struct ac_rtld_symbol lds_symbols[2];
	unsigned num_lds_symbols = 0;

	/* NOTE(review): sel is NULL-checked here but dereferenced
	 * unconditionally below (sel->type) — presumably sel can never
	 * actually be NULL; verify before relying on the check. */
	if (sel && screen->info.chip_class >= GFX9 && !shader->is_gs_copy_shader &&
	    (sel->type == PIPE_SHADER_GEOMETRY || shader->key.as_ngg)) {
		/* We add this symbol even on LLVM <= 8 to ensure that
		 * shader->config.lds_size is set correctly below.
		 */
		struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
		sym->name = "esgs_ring";
		sym->size = shader->gs_info.esgs_ring_size;
		sym->align = 64 * 1024;
	}

	if (shader->key.as_ngg && sel->type == PIPE_SHADER_GEOMETRY) {
		struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
		sym->name = "ngg_emit";
		sym->size = shader->ngg.ngg_emit_size * 4;
		sym->align = 4;
	}

	bool ok = ac_rtld_open(rtld, (struct ac_rtld_open_info){
			.info = &screen->info,
			.options = {
				.halt_at_entry = screen->options.halt_shaders,
			},
			.shader_type = tgsi_processor_to_shader_stage(sel->type),
			.wave_size = si_get_shader_wave_size(shader),
			.num_parts = num_parts,
			.elf_ptrs = part_elfs,
			.elf_sizes = part_sizes,
			.num_shared_lds_symbols = num_lds_symbols,
			.shared_lds_symbols = lds_symbols });

	if (rtld->lds_size > 0) {
		/* Round up to the hw allocation granularity (512 on GFX7+,
		 * 256 before) and store lds_size in granule units. */
		unsigned alloc_granularity = screen->info.chip_class >= GFX7 ? 512 : 256;
		shader->config.lds_size =
			align(rtld->lds_size, alloc_granularity) / alloc_granularity;
	}

	return ok;
}
1747
1748 static unsigned si_get_shader_binary_size(struct si_screen *screen, struct si_shader *shader)
1749 {
1750 struct ac_rtld_binary rtld;
1751 si_shader_binary_open(screen, shader, &rtld);
1752 return rtld.exec_size;
1753 }
1754
1755 static bool si_get_external_symbol(void *data, const char *name, uint64_t *value)
1756 {
1757 uint64_t *scratch_va = data;
1758
1759 if (!strcmp(scratch_rsrc_dword0_symbol, name)) {
1760 *value = (uint32_t)*scratch_va;
1761 return true;
1762 }
1763 if (!strcmp(scratch_rsrc_dword1_symbol, name)) {
1764 /* Enable scratch coalescing. */
1765 *value = S_008F04_BASE_ADDRESS_HI(*scratch_va >> 32) |
1766 S_008F04_SWIZZLE_ENABLE(1);
1767 return true;
1768 }
1769
1770 return false;
1771 }
1772
1773 bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader,
1774 uint64_t scratch_va)
1775 {
1776 struct ac_rtld_binary binary;
1777 if (!si_shader_binary_open(sscreen, shader, &binary))
1778 return false;
1779
1780 si_resource_reference(&shader->bo, NULL);
1781 shader->bo = si_aligned_buffer_create(&sscreen->b,
1782 sscreen->info.cpdma_prefetch_writes_memory ?
1783 0 : SI_RESOURCE_FLAG_READ_ONLY,
1784 PIPE_USAGE_IMMUTABLE,
1785 align(binary.rx_size, SI_CPDMA_ALIGNMENT),
1786 256);
1787 if (!shader->bo)
1788 return false;
1789
1790 /* Upload. */
1791 struct ac_rtld_upload_info u = {};
1792 u.binary = &binary;
1793 u.get_external_symbol = si_get_external_symbol;
1794 u.cb_data = &scratch_va;
1795 u.rx_va = shader->bo->gpu_address;
1796 u.rx_ptr = sscreen->ws->buffer_map(shader->bo->buf, NULL,
1797 PIPE_TRANSFER_READ_WRITE |
1798 PIPE_TRANSFER_UNSYNCHRONIZED |
1799 RADEON_TRANSFER_TEMPORARY);
1800 if (!u.rx_ptr)
1801 return false;
1802
1803 bool ok = ac_rtld_upload(&u);
1804
1805 sscreen->ws->buffer_unmap(shader->bo->buf);
1806 ac_rtld_close(&binary);
1807
1808 return ok;
1809 }
1810
1811 static void si_shader_dump_disassembly(struct si_screen *screen,
1812 const struct si_shader_binary *binary,
1813 enum pipe_shader_type shader_type,
1814 unsigned wave_size,
1815 struct pipe_debug_callback *debug,
1816 const char *name, FILE *file)
1817 {
1818 struct ac_rtld_binary rtld_binary;
1819
1820 if (!ac_rtld_open(&rtld_binary, (struct ac_rtld_open_info){
1821 .info = &screen->info,
1822 .shader_type = tgsi_processor_to_shader_stage(shader_type),
1823 .wave_size = wave_size,
1824 .num_parts = 1,
1825 .elf_ptrs = &binary->elf_buffer,
1826 .elf_sizes = &binary->elf_size }))
1827 return;
1828
1829 const char *disasm;
1830 size_t nbytes;
1831
1832 if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm, &nbytes))
1833 goto out;
1834
1835 if (nbytes > INT_MAX)
1836 goto out;
1837
1838 if (debug && debug->debug_message) {
1839 /* Very long debug messages are cut off, so send the
1840 * disassembly one line at a time. This causes more
1841 * overhead, but on the plus side it simplifies
1842 * parsing of resulting logs.
1843 */
1844 pipe_debug_message(debug, SHADER_INFO,
1845 "Shader Disassembly Begin");
1846
1847 uint64_t line = 0;
1848 while (line < nbytes) {
1849 int count = nbytes - line;
1850 const char *nl = memchr(disasm + line, '\n', nbytes - line);
1851 if (nl)
1852 count = nl - (disasm + line);
1853
1854 if (count) {
1855 pipe_debug_message(debug, SHADER_INFO,
1856 "%.*s", count, disasm + line);
1857 }
1858
1859 line += count + 1;
1860 }
1861
1862 pipe_debug_message(debug, SHADER_INFO,
1863 "Shader Disassembly End");
1864 }
1865
1866 if (file) {
1867 fprintf(file, "Shader %s disassembly:\n", name);
1868 fprintf(file, "%*s", (int)nbytes, disasm);
1869 }
1870
1871 out:
1872 ac_rtld_close(&rtld_binary);
1873 }
1874
/* Estimate the maximum number of waves per SIMD this shader can achieve
 * (occupancy), limited by SGPR, VGPR and LDS usage, and store it in
 * shader->info.max_simd_waves. Used for stats reporting.
 */
static void si_calculate_max_simd_waves(struct si_shader *shader)
{
	struct si_screen *sscreen = shader->selector->screen;
	struct ac_shader_config *conf = &shader->config;
	unsigned num_inputs = shader->selector->info.num_inputs;
	/* LDS allocation granularity in bytes: 512 on GFX7+, 256 before. */
	unsigned lds_increment = sscreen->info.chip_class >= GFX7 ? 512 : 256;
	unsigned lds_per_wave = 0;
	unsigned max_simd_waves;

	/* Start from the hardware maximum and clamp down below. */
	max_simd_waves = sscreen->info.max_wave64_per_simd;

	/* Compute LDS usage for PS. */
	switch (shader->selector->type) {
	case PIPE_SHADER_FRAGMENT:
		/* The minimum usage per wave is (num_inputs * 48). The maximum
		 * usage is (num_inputs * 48 * 16).
		 * We can get anything in between and it varies between waves.
		 *
		 * The 48 bytes per input for a single primitive is equal to
		 * 4 bytes/component * 4 components/input * 3 points.
		 *
		 * Other stages don't know the size at compile time or don't
		 * allocate LDS per wave, but instead they do it per thread group.
		 */
		lds_per_wave = conf->lds_size * lds_increment +
			       align(num_inputs * 48, lds_increment);
		break;
	case PIPE_SHADER_COMPUTE:
		if (shader->selector) {
			/* CS allocates LDS per thread group; divide by the
			 * number of waves in a full group to get per-wave usage. */
			unsigned max_workgroup_size =
				si_get_max_workgroup_size(shader);
			lds_per_wave = (conf->lds_size * lds_increment) /
				       DIV_ROUND_UP(max_workgroup_size,
						    sscreen->compute_wave_size);
		}
		break;
	default:;
	}

	/* Compute the per-SIMD wave counts. */
	if (conf->num_sgprs) {
		max_simd_waves =
			MIN2(max_simd_waves,
			     sscreen->info.num_physical_sgprs_per_simd / conf->num_sgprs);
	}

	if (conf->num_vgprs) {
		/* Always print wave limits as Wave64, so that we can compare
		 * Wave32 and Wave64 with shader-db fairly. */
		unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd;
		max_simd_waves = MIN2(max_simd_waves, max_vgprs / conf->num_vgprs);
	}

	/* LDS is 64KB per CU (4 SIMDs) on GFX6-9, which is 16KB per SIMD (usage above
	 * 16KB makes some SIMDs unoccupied).
	 *
	 * LDS is 128KB in WGP mode and 64KB in CU mode. Assume the WGP mode is used.
	 */
	unsigned max_lds_size = sscreen->info.chip_class >= GFX10 ? 128*1024 : 64*1024;
	unsigned max_lds_per_simd = max_lds_size / 4;
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);

	shader->info.max_simd_waves = max_simd_waves;
}
1940
1941 void si_shader_dump_stats_for_shader_db(struct si_screen *screen,
1942 struct si_shader *shader,
1943 struct pipe_debug_callback *debug)
1944 {
1945 const struct ac_shader_config *conf = &shader->config;
1946
1947 if (screen->options.debug_disassembly)
1948 si_shader_dump_disassembly(screen, &shader->binary,
1949 shader->selector->type,
1950 si_get_shader_wave_size(shader),
1951 debug, "main", NULL);
1952
1953 pipe_debug_message(debug, SHADER_INFO,
1954 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
1955 "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
1956 "Spilled VGPRs: %d PrivMem VGPRs: %d",
1957 conf->num_sgprs, conf->num_vgprs,
1958 si_get_shader_binary_size(screen, shader),
1959 conf->lds_size, conf->scratch_bytes_per_wave,
1960 shader->info.max_simd_waves, conf->spilled_sgprs,
1961 conf->spilled_vgprs, shader->info.private_mem_vgprs);
1962 }
1963
1964 static void si_shader_dump_stats(struct si_screen *sscreen,
1965 struct si_shader *shader,
1966 FILE *file,
1967 bool check_debug_option)
1968 {
1969 const struct ac_shader_config *conf = &shader->config;
1970
1971 if (!check_debug_option ||
1972 si_can_dump_shader(sscreen, shader->selector->type)) {
1973 if (shader->selector->type == PIPE_SHADER_FRAGMENT) {
1974 fprintf(file, "*** SHADER CONFIG ***\n"
1975 "SPI_PS_INPUT_ADDR = 0x%04x\n"
1976 "SPI_PS_INPUT_ENA = 0x%04x\n",
1977 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
1978 }
1979
1980 fprintf(file, "*** SHADER STATS ***\n"
1981 "SGPRS: %d\n"
1982 "VGPRS: %d\n"
1983 "Spilled SGPRs: %d\n"
1984 "Spilled VGPRs: %d\n"
1985 "Private memory VGPRs: %d\n"
1986 "Code Size: %d bytes\n"
1987 "LDS: %d blocks\n"
1988 "Scratch: %d bytes per wave\n"
1989 "Max Waves: %d\n"
1990 "********************\n\n\n",
1991 conf->num_sgprs, conf->num_vgprs,
1992 conf->spilled_sgprs, conf->spilled_vgprs,
1993 shader->info.private_mem_vgprs,
1994 si_get_shader_binary_size(sscreen, shader),
1995 conf->lds_size, conf->scratch_bytes_per_wave,
1996 shader->info.max_simd_waves);
1997 }
1998 }
1999
2000 const char *si_get_shader_name(const struct si_shader *shader)
2001 {
2002 switch (shader->selector->type) {
2003 case PIPE_SHADER_VERTEX:
2004 if (shader->key.as_es)
2005 return "Vertex Shader as ES";
2006 else if (shader->key.as_ls)
2007 return "Vertex Shader as LS";
2008 else if (shader->key.opt.vs_as_prim_discard_cs)
2009 return "Vertex Shader as Primitive Discard CS";
2010 else if (shader->key.as_ngg)
2011 return "Vertex Shader as ESGS";
2012 else
2013 return "Vertex Shader as VS";
2014 case PIPE_SHADER_TESS_CTRL:
2015 return "Tessellation Control Shader";
2016 case PIPE_SHADER_TESS_EVAL:
2017 if (shader->key.as_es)
2018 return "Tessellation Evaluation Shader as ES";
2019 else if (shader->key.as_ngg)
2020 return "Tessellation Evaluation Shader as ESGS";
2021 else
2022 return "Tessellation Evaluation Shader as VS";
2023 case PIPE_SHADER_GEOMETRY:
2024 if (shader->is_gs_copy_shader)
2025 return "GS Copy Shader as VS";
2026 else
2027 return "Geometry Shader";
2028 case PIPE_SHADER_FRAGMENT:
2029 return "Pixel Shader";
2030 case PIPE_SHADER_COMPUTE:
2031 return "Compute Shader";
2032 default:
2033 return "Unknown Shader";
2034 }
2035 }
2036
2037 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
2038 struct pipe_debug_callback *debug,
2039 FILE *file, bool check_debug_option)
2040 {
2041 enum pipe_shader_type shader_type = shader->selector->type;
2042
2043 if (!check_debug_option ||
2044 si_can_dump_shader(sscreen, shader_type))
2045 si_dump_shader_key(shader, file);
2046
2047 if (!check_debug_option && shader->binary.llvm_ir_string) {
2048 if (shader->previous_stage &&
2049 shader->previous_stage->binary.llvm_ir_string) {
2050 fprintf(file, "\n%s - previous stage - LLVM IR:\n\n",
2051 si_get_shader_name(shader));
2052 fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
2053 }
2054
2055 fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
2056 si_get_shader_name(shader));
2057 fprintf(file, "%s\n", shader->binary.llvm_ir_string);
2058 }
2059
2060 if (!check_debug_option ||
2061 (si_can_dump_shader(sscreen, shader_type) &&
2062 !(sscreen->debug_flags & DBG(NO_ASM)))) {
2063 unsigned wave_size = si_get_shader_wave_size(shader);
2064
2065 fprintf(file, "\n%s:\n", si_get_shader_name(shader));
2066
2067 if (shader->prolog)
2068 si_shader_dump_disassembly(sscreen, &shader->prolog->binary,
2069 shader_type, wave_size, debug, "prolog", file);
2070 if (shader->previous_stage)
2071 si_shader_dump_disassembly(sscreen, &shader->previous_stage->binary,
2072 shader_type, wave_size, debug, "previous stage", file);
2073 if (shader->prolog2)
2074 si_shader_dump_disassembly(sscreen, &shader->prolog2->binary,
2075 shader_type, wave_size, debug, "prolog2", file);
2076
2077 si_shader_dump_disassembly(sscreen, &shader->binary, shader_type,
2078 wave_size, debug, "main", file);
2079
2080 if (shader->epilog)
2081 si_shader_dump_disassembly(sscreen, &shader->epilog->binary,
2082 shader_type, wave_size, debug, "epilog", file);
2083 fprintf(file, "\n");
2084 }
2085
2086 si_shader_dump_stats(sscreen, shader, file, check_debug_option);
2087 }
2088
2089 static void si_dump_shader_key_vs(const struct si_shader_key *key,
2090 const struct si_vs_prolog_bits *prolog,
2091 const char *prefix, FILE *f)
2092 {
2093 fprintf(f, " %s.instance_divisor_is_one = %u\n",
2094 prefix, prolog->instance_divisor_is_one);
2095 fprintf(f, " %s.instance_divisor_is_fetched = %u\n",
2096 prefix, prolog->instance_divisor_is_fetched);
2097 fprintf(f, " %s.unpack_instance_id_from_vertex_id = %u\n",
2098 prefix, prolog->unpack_instance_id_from_vertex_id);
2099 fprintf(f, " %s.ls_vgpr_fix = %u\n",
2100 prefix, prolog->ls_vgpr_fix);
2101
2102 fprintf(f, " mono.vs.fetch_opencode = %x\n", key->mono.vs_fetch_opencode);
2103 fprintf(f, " mono.vs.fix_fetch = {");
2104 for (int i = 0; i < SI_MAX_ATTRIBS; i++) {
2105 union si_vs_fix_fetch fix = key->mono.vs_fix_fetch[i];
2106 if (i)
2107 fprintf(f, ", ");
2108 if (!fix.bits)
2109 fprintf(f, "0");
2110 else
2111 fprintf(f, "%u.%u.%u.%u", fix.u.reverse, fix.u.log_size,
2112 fix.u.num_channels_m1, fix.u.format);
2113 }
2114 fprintf(f, "}\n");
2115 }
2116
/* Print the entire shader key for this shader's stage to \p f.
 * The key contains all state that distinguishes shader variants, so this
 * output is essential for diagnosing variant-compilation issues.
 */
static void si_dump_shader_key(const struct si_shader *shader, FILE *f)
{
	const struct si_shader_key *key = &shader->key;
	enum pipe_shader_type shader_type = shader->selector->type;

	fprintf(f, "SHADER KEY\n");

	switch (shader_type) {
	case PIPE_SHADER_VERTEX:
		si_dump_shader_key_vs(key, &key->part.vs.prolog,
				      "part.vs.prolog", f);
		fprintf(f, "  as_es = %u\n", key->as_es);
		fprintf(f, "  as_ls = %u\n", key->as_ls);
		fprintf(f, "  as_ngg = %u\n", key->as_ngg);
		fprintf(f, "  mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		/* The opt.cs_* fields only matter for the primitive-discard
		 * compute shader variant. */
		fprintf(f, "  opt.vs_as_prim_discard_cs = %u\n",
			key->opt.vs_as_prim_discard_cs);
		fprintf(f, "  opt.cs_prim_type = %s\n",
			tgsi_primitive_names[key->opt.cs_prim_type]);
		fprintf(f, "  opt.cs_indexed = %u\n",
			key->opt.cs_indexed);
		fprintf(f, "  opt.cs_instancing = %u\n",
			key->opt.cs_instancing);
		fprintf(f, "  opt.cs_primitive_restart = %u\n",
			key->opt.cs_primitive_restart);
		fprintf(f, "  opt.cs_provoking_vertex_first = %u\n",
			key->opt.cs_provoking_vertex_first);
		fprintf(f, "  opt.cs_need_correct_orientation = %u\n",
			key->opt.cs_need_correct_orientation);
		fprintf(f, "  opt.cs_cull_front = %u\n",
			key->opt.cs_cull_front);
		fprintf(f, "  opt.cs_cull_back = %u\n",
			key->opt.cs_cull_back);
		fprintf(f, "  opt.cs_cull_z = %u\n",
			key->opt.cs_cull_z);
		fprintf(f, "  opt.cs_halfz_clip_space = %u\n",
			key->opt.cs_halfz_clip_space);
		break;

	case PIPE_SHADER_TESS_CTRL:
		/* On GFX9+ the LS stage is merged into HS, so its prolog key
		 * lives inside the TCS key. */
		if (shader->selector->screen->info.chip_class >= GFX9) {
			si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
					      "part.tcs.ls_prolog", f);
		}
		fprintf(f, "  part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
		fprintf(f, "  mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
		break;

	case PIPE_SHADER_TESS_EVAL:
		fprintf(f, "  as_es = %u\n", key->as_es);
		fprintf(f, "  as_ngg = %u\n", key->as_ngg);
		fprintf(f, "  mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_GEOMETRY:
		/* The GS copy shader has no key of its own. */
		if (shader->is_gs_copy_shader)
			break;

		/* On GFX9+ the ES stage is merged into GS; dump the merged
		 * VS prolog key when the previous stage is a VS. */
		if (shader->selector->screen->info.chip_class >= GFX9 &&
		    key->part.gs.es->type == PIPE_SHADER_VERTEX) {
			si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
					      "part.gs.vs_prolog", f);
		}
		fprintf(f, "  part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
		fprintf(f, "  part.gs.prolog.gfx9_prev_is_vs = %u\n", key->part.gs.prolog.gfx9_prev_is_vs);
		fprintf(f, "  as_ngg = %u\n", key->as_ngg);
		break;

	case PIPE_SHADER_COMPUTE:
		break;

	case PIPE_SHADER_FRAGMENT:
		fprintf(f, "  part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
		fprintf(f, "  part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
		fprintf(f, "  part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
		fprintf(f, "  part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
		fprintf(f, "  part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
		fprintf(f, "  part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
		fprintf(f, "  part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
		fprintf(f, "  part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
		fprintf(f, "  part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
		fprintf(f, "  part.ps.prolog.samplemask_log_ps_iter = %u\n", key->part.ps.prolog.samplemask_log_ps_iter);
		fprintf(f, "  part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
		fprintf(f, "  part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
		fprintf(f, "  part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
		fprintf(f, "  part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
		fprintf(f, "  part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
		fprintf(f, "  part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
		fprintf(f, "  part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
		fprintf(f, "  part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
		fprintf(f, "  mono.u.ps.interpolate_at_sample_force_center = %u\n", key->mono.u.ps.interpolate_at_sample_force_center);
		fprintf(f, "  mono.u.ps.fbfetch_msaa = %u\n", key->mono.u.ps.fbfetch_msaa);
		fprintf(f, "  mono.u.ps.fbfetch_is_1D = %u\n", key->mono.u.ps.fbfetch_is_1D);
		fprintf(f, "  mono.u.ps.fbfetch_layered = %u\n", key->mono.u.ps.fbfetch_layered);
		break;

	default:
		assert(0);
	}

	/* These opt fields only apply to the hardware-VS variants of
	 * VS/TES/GS (not ES or LS). */
	if ((shader_type == PIPE_SHADER_GEOMETRY ||
	     shader_type == PIPE_SHADER_TESS_EVAL ||
	     shader_type == PIPE_SHADER_VERTEX) &&
	    !key->as_es && !key->as_ls) {
		fprintf(f, "  opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
		fprintf(f, "  opt.clip_disable = %u\n", key->opt.clip_disable);
	}
}
2227
2228 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
2229 {
2230 struct si_shader *shader = ctx->shader;
2231 struct si_shader_info *info = &shader->selector->info;
2232
2233 if ((ctx->type != PIPE_SHADER_VERTEX &&
2234 ctx->type != PIPE_SHADER_TESS_EVAL) ||
2235 shader->key.as_ls ||
2236 shader->key.as_es)
2237 return;
2238
2239 ac_optimize_vs_outputs(&ctx->ac,
2240 ctx->main_fn,
2241 shader->info.vs_output_param_offset,
2242 info->num_outputs,
2243 &shader->info.nr_param_exports);
2244 }
2245
2246 static void si_init_exec_from_input(struct si_shader_context *ctx,
2247 struct ac_arg param, unsigned bitoffset)
2248 {
2249 LLVMValueRef args[] = {
2250 ac_get_arg(&ctx->ac, param),
2251 LLVMConstInt(ctx->i32, bitoffset, 0),
2252 };
2253 ac_build_intrinsic(&ctx->ac,
2254 "llvm.amdgcn.init.exec.from.input",
2255 ctx->voidt, args, 2, AC_FUNC_ATTR_CONVERGENT);
2256 }
2257
2258 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
2259 const struct si_vs_prolog_bits *key)
2260 {
2261 /* VGPR initialization fixup for Vega10 and Raven is always done in the
2262 * VS prolog. */
2263 return sel->vs_needs_prolog ||
2264 key->ls_vgpr_fix ||
2265 key->unpack_instance_id_from_vertex_id;
2266 }
2267
/* Build the LLVM IR for the main part of a shader: install the per-stage
 * ABI callbacks, create the LLVM function, declare LDS/ring resources,
 * emit the GFX9 merged-shader EXEC/barrier preamble, then translate the
 * NIR body.
 *
 * \param nir       the shader body to translate
 * \param free_nir  whether to ralloc_free \p nir after translation
 * \return false on unsupported stage or NIR translation failure
 */
static bool si_build_main_function(struct si_shader_context *ctx,
				   struct nir_shader *nir, bool free_nir)
{
	struct si_shader *shader = ctx->shader;
	struct si_shader_selector *sel = shader->selector;

	si_llvm_init_resource_callbacks(ctx);

	/* Pick the epilogue/callback set matching the hardware stage this
	 * shader variant is compiled as. */
	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		if (shader->key.as_ls)
			ctx->abi.emit_outputs = si_llvm_emit_ls_epilogue;
		else if (shader->key.as_es)
			ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
		else if (shader->key.opt.vs_as_prim_discard_cs)
			ctx->abi.emit_outputs = si_llvm_emit_prim_discard_cs_epilogue;
		else if (shader->key.as_ngg)
			ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
		else
			ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
		ctx->abi.load_base_vertex = get_base_vertex;
		break;
	case PIPE_SHADER_TESS_CTRL:
		si_llvm_init_tcs_callbacks(ctx);
		break;
	case PIPE_SHADER_TESS_EVAL:
		si_llvm_init_tes_callbacks(ctx);

		if (shader->key.as_es)
			ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
		else if (shader->key.as_ngg)
			ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
		else
			ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
		break;
	case PIPE_SHADER_GEOMETRY:
		si_llvm_init_gs_callbacks(ctx);
		break;
	case PIPE_SHADER_FRAGMENT:
		si_llvm_init_ps_callbacks(ctx);
		break;
	case PIPE_SHADER_COMPUTE:
		ctx->abi.load_local_group_size = get_block_size;
		break;
	default:
		assert(!"Unsupported shader type");
		return false;
	}

	si_create_function(ctx);

	/* Preload ring buffer descriptors used by ES/GS stages. */
	if (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)
		si_preload_esgs_ring(ctx);

	if (ctx->type == PIPE_SHADER_GEOMETRY)
		si_preload_gs_rings(ctx);
	else if (ctx->type == PIPE_SHADER_TESS_EVAL)
		si_llvm_preload_tes_rings(ctx);

	/* Allocas for the 6 tess factors written by invocation 0 when all
	 * invocations define them. */
	if (ctx->type == PIPE_SHADER_TESS_CTRL &&
	    sel->info.tessfactors_are_def_in_all_invocs) {
		for (unsigned i = 0; i < 6; i++) {
			ctx->invoc0_tess_factors[i] =
				ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
		}
	}

	if (ctx->type == PIPE_SHADER_GEOMETRY) {
		/* One vertex counter per GS stream (up to 4 streams). */
		for (unsigned i = 0; i < 4; i++) {
			ctx->gs_next_vertex[i] =
				ac_build_alloca(&ctx->ac, ctx->i32, "");
		}
		if (shader->key.as_ngg) {
			for (unsigned i = 0; i < 4; ++i) {
				ctx->gs_curprim_verts[i] =
					ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
				ctx->gs_generated_prims[i] =
					ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
			}

			/* NGG scratch LDS: larger when streamout needs it
			 * (8 vs 44 dwords — presumably sized by gfx10 NGG
			 * streamout requirements; see gfx10 NGG code). */
			unsigned scratch_size = 8;
			if (sel->so.num_outputs)
				scratch_size = 44;

			LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, scratch_size);
			ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
				ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
			LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(ai32));
			LLVMSetAlignment(ctx->gs_ngg_scratch, 4);

			/* Unsized LDS array for emitted GS vertices; actual
			 * size is resolved at link time (external linkage). */
			ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(ctx->ac.module,
				LLVMArrayType(ctx->i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
			LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
			LLVMSetAlignment(ctx->gs_ngg_emit, 4);
		}
	}

	if (ctx->type != PIPE_SHADER_GEOMETRY &&
	    (shader->key.as_ngg && !shader->key.as_es)) {
		/* Unconditionally declare scratch space base for streamout and
		 * vertex compaction. Whether space is actually allocated is
		 * determined during linking / PM4 creation.
		 *
		 * Add an extra dword per vertex to ensure an odd stride, which
		 * avoids bank conflicts for SoA accesses.
		 */
		if (!gfx10_is_ngg_passthrough(shader))
			si_llvm_declare_esgs_ring(ctx);

		/* This is really only needed when streamout and / or vertex
		 * compaction is enabled.
		 */
		if (sel->so.num_outputs && !ctx->gs_ngg_scratch) {
			LLVMTypeRef asi32 = LLVMArrayType(ctx->i32, 8);
			ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
				asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
			LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(asi32));
			LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
		}
	}

	/* For GFX9 merged shaders:
	 * - Set EXEC for the first shader. If the prolog is present, set
	 *   EXEC there instead.
	 * - Add a barrier before the second shader.
	 * - In the second shader, reset EXEC to ~0 and wrap the main part in
	 *   an if-statement. This is required for correctness in geometry
	 *   shaders, to ensure that empty GS waves do not send GS_EMIT and
	 *   GS_CUT messages.
	 *
	 * For monolithic merged shaders, the first shader is wrapped in an
	 * if-block together with its prolog in si_build_wrapper_function.
	 *
	 * NGG vertex and tess eval shaders running as the last
	 * vertex/geometry stage handle execution explicitly using
	 * if-statements.
	 */
	if (ctx->screen->info.chip_class >= GFX9) {
		if (!shader->is_monolithic &&
		    (shader->key.as_es || shader->key.as_ls) &&
		    (ctx->type == PIPE_SHADER_TESS_EVAL ||
		     (ctx->type == PIPE_SHADER_VERTEX &&
		      !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog)))) {
			si_init_exec_from_input(ctx,
						ctx->merged_wave_info, 0);
		} else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
			   ctx->type == PIPE_SHADER_GEOMETRY ||
			   (shader->key.as_ngg && !shader->key.as_es)) {
			LLVMValueRef thread_enabled;
			bool nested_barrier;

			if (!shader->is_monolithic ||
			    (ctx->type == PIPE_SHADER_TESS_EVAL &&
			     (shader->key.as_ngg && !shader->key.as_es)))
				ac_init_exec_full_mask(&ctx->ac);

			if ((ctx->type == PIPE_SHADER_VERTEX ||
			     ctx->type == PIPE_SHADER_TESS_EVAL) &&
			    shader->key.as_ngg && !shader->key.as_es) {
				gfx10_ngg_build_sendmsg_gs_alloc_req(ctx);

				/* Build the primitive export at the beginning
				 * of the shader if possible.
				 */
				if (gfx10_ngg_export_prim_early(shader))
					gfx10_ngg_build_export_prim(ctx, NULL);
			}

			if (ctx->type == PIPE_SHADER_TESS_CTRL ||
			    ctx->type == PIPE_SHADER_GEOMETRY) {
				if (ctx->type == PIPE_SHADER_GEOMETRY && shader->key.as_ngg) {
					gfx10_ngg_gs_emit_prologue(ctx);
					nested_barrier = false;
				} else {
					nested_barrier = true;
				}

				thread_enabled = si_is_gs_thread(ctx);
			} else {
				thread_enabled = si_is_es_thread(ctx);
				nested_barrier = false;
			}

			/* Wrap the main part in "if (thread_enabled)"; the
			 * matching endif is emitted by the epilogue code. */
			ctx->merged_wrap_if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
			ctx->merged_wrap_if_label = 11500;
			ac_build_ifcc(&ctx->ac, thread_enabled, ctx->merged_wrap_if_label);

			if (nested_barrier) {
				/* Execute a barrier before the second shader in
				 * a merged shader.
				 *
				 * Execute the barrier inside the conditional block,
				 * so that empty waves can jump directly to s_endpgm,
				 * which will also signal the barrier.
				 *
				 * This is possible in gfx9, because an empty wave
				 * for the second shader does not participate in
				 * the epilogue. With NGG, empty waves may still
				 * be required to export data (e.g. GS output vertices),
				 * so we cannot let them exit early.
				 *
				 * If the shader is TCS and the TCS epilog is present
				 * and contains a barrier, it will wait there and then
				 * reach s_endpgm.
				 */
				si_llvm_emit_barrier(ctx);
			}
		}
	}

	if (sel->force_correct_derivs_after_kill) {
		ctx->postponed_kill = ac_build_alloca_undef(&ctx->ac, ctx->i1, "");
		/* true = don't kill. */
		LLVMBuildStore(ctx->ac.builder, ctx->i1true,
			       ctx->postponed_kill);
	}

	bool success = si_nir_build_llvm(ctx, nir);
	if (free_nir)
		ralloc_free(nir);
	if (!success) {
		fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
		return false;
	}

	si_llvm_build_ret(ctx, ctx->return_value);
	return true;
}
2496
2497 /**
2498 * Compute the VS prolog key, which contains all the information needed to
2499 * build the VS prolog function, and set shader->info bits where needed.
2500 *
2501 * \param info Shader info of the vertex shader.
2502 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
2503 * \param prolog_key Key of the VS prolog
2504 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
2505 * \param key Output shader part key.
2506 */
2507 static void si_get_vs_prolog_key(const struct si_shader_info *info,
2508 unsigned num_input_sgprs,
2509 const struct si_vs_prolog_bits *prolog_key,
2510 struct si_shader *shader_out,
2511 union si_shader_part_key *key)
2512 {
2513 memset(key, 0, sizeof(*key));
2514 key->vs_prolog.states = *prolog_key;
2515 key->vs_prolog.num_input_sgprs = num_input_sgprs;
2516 key->vs_prolog.num_inputs = info->num_inputs;
2517 key->vs_prolog.as_ls = shader_out->key.as_ls;
2518 key->vs_prolog.as_es = shader_out->key.as_es;
2519 key->vs_prolog.as_ngg = shader_out->key.as_ngg;
2520
2521 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
2522 key->vs_prolog.as_ls = 1;
2523 key->vs_prolog.num_merged_next_stage_vgprs = 2;
2524 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
2525 key->vs_prolog.as_es = 1;
2526 key->vs_prolog.num_merged_next_stage_vgprs = 5;
2527 } else if (shader_out->key.as_ngg) {
2528 key->vs_prolog.num_merged_next_stage_vgprs = 5;
2529 }
2530
2531 /* Enable loading the InstanceID VGPR. */
2532 uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);
2533
2534 if ((key->vs_prolog.states.instance_divisor_is_one |
2535 key->vs_prolog.states.instance_divisor_is_fetched) & input_mask)
2536 shader_out->info.uses_instanceid = true;
2537 }
2538
2539 /**
2540 * Given a list of shader part functions, build a wrapper function that
2541 * runs them in sequence to form a monolithic shader.
2542 */
2543 void si_build_wrapper_function(struct si_shader_context *ctx, LLVMValueRef *parts,
2544 unsigned num_parts, unsigned main_part,
2545 unsigned next_shader_first_part)
2546 {
2547 LLVMBuilderRef builder = ctx->ac.builder;
2548 /* PS epilog has one arg per color component; gfx9 merged shader
2549 * prologs need to forward 40 SGPRs.
2550 */
2551 LLVMValueRef initial[AC_MAX_ARGS], out[AC_MAX_ARGS];
2552 LLVMTypeRef function_type;
2553 unsigned num_first_params;
2554 unsigned num_out, initial_num_out;
2555 ASSERTED unsigned num_out_sgpr; /* used in debug checks */
2556 ASSERTED unsigned initial_num_out_sgpr; /* used in debug checks */
2557 unsigned num_sgprs, num_vgprs;
2558 unsigned gprs;
2559
2560 memset(&ctx->args, 0, sizeof(ctx->args));
2561
2562 for (unsigned i = 0; i < num_parts; ++i) {
2563 ac_add_function_attr(ctx->ac.context, parts[i], -1,
2564 AC_FUNC_ATTR_ALWAYSINLINE);
2565 LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
2566 }
2567
2568 /* The parameters of the wrapper function correspond to those of the
2569 * first part in terms of SGPRs and VGPRs, but we use the types of the
2570 * main part to get the right types. This is relevant for the
2571 * dereferenceable attribute on descriptor table pointers.
2572 */
2573 num_sgprs = 0;
2574 num_vgprs = 0;
2575
2576 function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
2577 num_first_params = LLVMCountParamTypes(function_type);
2578
2579 for (unsigned i = 0; i < num_first_params; ++i) {
2580 LLVMValueRef param = LLVMGetParam(parts[0], i);
2581
2582 if (ac_is_sgpr_param(param)) {
2583 assert(num_vgprs == 0);
2584 num_sgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
2585 } else {
2586 num_vgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
2587 }
2588 }
2589
2590 gprs = 0;
2591 while (gprs < num_sgprs + num_vgprs) {
2592 LLVMValueRef param = LLVMGetParam(parts[main_part], ctx->args.arg_count);
2593 LLVMTypeRef type = LLVMTypeOf(param);
2594 unsigned size = ac_get_type_size(type) / 4;
2595
2596 /* This is going to get casted anyways, so we don't have to
2597 * have the exact same type. But we do have to preserve the
2598 * pointer-ness so that LLVM knows about it.
2599 */
2600 enum ac_arg_type arg_type = AC_ARG_INT;
2601 if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) {
2602 type = LLVMGetElementType(type);
2603
2604 if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
2605 if (LLVMGetVectorSize(type) == 4)
2606 arg_type = AC_ARG_CONST_DESC_PTR;
2607 else if (LLVMGetVectorSize(type) == 8)
2608 arg_type = AC_ARG_CONST_IMAGE_PTR;
2609 else
2610 assert(0);
2611 } else if (type == ctx->f32) {
2612 arg_type = AC_ARG_CONST_FLOAT_PTR;
2613 } else {
2614 assert(0);
2615 }
2616 }
2617
2618 ac_add_arg(&ctx->args, gprs < num_sgprs ? AC_ARG_SGPR : AC_ARG_VGPR,
2619 size, arg_type, NULL);
2620
2621 assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
2622 assert(gprs + size <= num_sgprs + num_vgprs &&
2623 (gprs >= num_sgprs || gprs + size <= num_sgprs));
2624
2625 gprs += size;
2626 }
2627
2628 /* Prepare the return type. */
2629 unsigned num_returns = 0;
2630 LLVMTypeRef returns[AC_MAX_ARGS], last_func_type, return_type;
2631
2632 last_func_type = LLVMGetElementType(LLVMTypeOf(parts[num_parts - 1]));
2633 return_type = LLVMGetReturnType(last_func_type);
2634
2635 switch (LLVMGetTypeKind(return_type)) {
2636 case LLVMStructTypeKind:
2637 num_returns = LLVMCountStructElementTypes(return_type);
2638 assert(num_returns <= ARRAY_SIZE(returns));
2639 LLVMGetStructElementTypes(return_type, returns);
2640 break;
2641 case LLVMVoidTypeKind:
2642 break;
2643 default:
2644 unreachable("unexpected type");
2645 }
2646
2647 si_llvm_create_func(ctx, "wrapper", returns, num_returns,
2648 si_get_max_workgroup_size(ctx->shader));
2649
2650 if (si_is_merged_shader(ctx))
2651 ac_init_exec_full_mask(&ctx->ac);
2652
2653 /* Record the arguments of the function as if they were an output of
2654 * a previous part.
2655 */
2656 num_out = 0;
2657 num_out_sgpr = 0;
2658
2659 for (unsigned i = 0; i < ctx->args.arg_count; ++i) {
2660 LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
2661 LLVMTypeRef param_type = LLVMTypeOf(param);
2662 LLVMTypeRef out_type = ctx->args.args[i].file == AC_ARG_SGPR ? ctx->i32 : ctx->f32;
2663 unsigned size = ac_get_type_size(param_type) / 4;
2664
2665 if (size == 1) {
2666 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
2667 param = LLVMBuildPtrToInt(builder, param, ctx->i32, "");
2668 param_type = ctx->i32;
2669 }
2670
2671 if (param_type != out_type)
2672 param = LLVMBuildBitCast(builder, param, out_type, "");
2673 out[num_out++] = param;
2674 } else {
2675 LLVMTypeRef vector_type = LLVMVectorType(out_type, size);
2676
2677 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
2678 param = LLVMBuildPtrToInt(builder, param, ctx->i64, "");
2679 param_type = ctx->i64;
2680 }
2681
2682 if (param_type != vector_type)
2683 param = LLVMBuildBitCast(builder, param, vector_type, "");
2684
2685 for (unsigned j = 0; j < size; ++j)
2686 out[num_out++] = LLVMBuildExtractElement(
2687 builder, param, LLVMConstInt(ctx->i32, j, 0), "");
2688 }
2689
2690 if (ctx->args.args[i].file == AC_ARG_SGPR)
2691 num_out_sgpr = num_out;
2692 }
2693
2694 memcpy(initial, out, sizeof(out));
2695 initial_num_out = num_out;
2696 initial_num_out_sgpr = num_out_sgpr;
2697
2698 /* Now chain the parts. */
2699 LLVMValueRef ret = NULL;
2700 for (unsigned part = 0; part < num_parts; ++part) {
2701 LLVMValueRef in[AC_MAX_ARGS];
2702 LLVMTypeRef ret_type;
2703 unsigned out_idx = 0;
2704 unsigned num_params = LLVMCountParams(parts[part]);
2705
2706 /* Merged shaders are executed conditionally depending
2707 * on the number of enabled threads passed in the input SGPRs. */
2708 if (is_multi_part_shader(ctx) && part == 0) {
2709 LLVMValueRef ena, count = initial[3];
2710
2711 count = LLVMBuildAnd(builder, count,
2712 LLVMConstInt(ctx->i32, 0x7f, 0), "");
2713 ena = LLVMBuildICmp(builder, LLVMIntULT,
2714 ac_get_thread_id(&ctx->ac), count, "");
2715 ac_build_ifcc(&ctx->ac, ena, 6506);
2716 }
2717
2718 /* Derive arguments for the next part from outputs of the
2719 * previous one.
2720 */
2721 for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
2722 LLVMValueRef param;
2723 LLVMTypeRef param_type;
2724 bool is_sgpr;
2725 unsigned param_size;
2726 LLVMValueRef arg = NULL;
2727
2728 param = LLVMGetParam(parts[part], param_idx);
2729 param_type = LLVMTypeOf(param);
2730 param_size = ac_get_type_size(param_type) / 4;
2731 is_sgpr = ac_is_sgpr_param(param);
2732
2733 if (is_sgpr) {
2734 ac_add_function_attr(ctx->ac.context, parts[part],
2735 param_idx + 1, AC_FUNC_ATTR_INREG);
2736 } else if (out_idx < num_out_sgpr) {
2737 /* Skip returned SGPRs the current part doesn't
2738 * declare on the input. */
2739 out_idx = num_out_sgpr;
2740 }
2741
2742 assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));
2743
2744 if (param_size == 1)
2745 arg = out[out_idx];
2746 else
2747 arg = ac_build_gather_values(&ctx->ac, &out[out_idx], param_size);
2748
2749 if (LLVMTypeOf(arg) != param_type) {
2750 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
2751 if (LLVMGetPointerAddressSpace(param_type) ==
2752 AC_ADDR_SPACE_CONST_32BIT) {
2753 arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");
2754 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
2755 } else {
2756 arg = LLVMBuildBitCast(builder, arg, ctx->i64, "");
2757 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
2758 }
2759 } else {
2760 arg = LLVMBuildBitCast(builder, arg, param_type, "");
2761 }
2762 }
2763
2764 in[param_idx] = arg;
2765 out_idx += param_size;
2766 }
2767
2768 ret = ac_build_call(&ctx->ac, parts[part], in, num_params);
2769
2770 if (is_multi_part_shader(ctx) &&
2771 part + 1 == next_shader_first_part) {
2772 ac_build_endif(&ctx->ac, 6506);
2773
2774 /* The second half of the merged shader should use
2775 * the inputs from the toplevel (wrapper) function,
2776 * not the return value from the last call.
2777 *
2778 * That's because the last call was executed condi-
2779 * tionally, so we can't consume it in the main
2780 * block.
2781 */
2782 memcpy(out, initial, sizeof(initial));
2783 num_out = initial_num_out;
2784 num_out_sgpr = initial_num_out_sgpr;
2785 continue;
2786 }
2787
2788 /* Extract the returned GPRs. */
2789 ret_type = LLVMTypeOf(ret);
2790 num_out = 0;
2791 num_out_sgpr = 0;
2792
2793 if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
2794 assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);
2795
2796 unsigned ret_size = LLVMCountStructElementTypes(ret_type);
2797
2798 for (unsigned i = 0; i < ret_size; ++i) {
2799 LLVMValueRef val =
2800 LLVMBuildExtractValue(builder, ret, i, "");
2801
2802 assert(num_out < ARRAY_SIZE(out));
2803 out[num_out++] = val;
2804
2805 if (LLVMTypeOf(val) == ctx->i32) {
2806 assert(num_out_sgpr + 1 == num_out);
2807 num_out_sgpr = num_out;
2808 }
2809 }
2810 }
2811 }
2812
2813 /* Return the value from the last part. */
2814 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
2815 LLVMBuildRetVoid(builder);
2816 else
2817 LLVMBuildRet(builder, ret);
2818 }
2819
2820 static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
2821 struct si_shader_selector *sel)
2822 {
2823 if (!compiler->low_opt_passes)
2824 return false;
2825
2826 /* Assume a slow CPU. */
2827 assert(!sel->screen->info.has_dedicated_vram &&
2828 sel->screen->info.chip_class <= GFX8);
2829
2830 /* For a crazy dEQP test containing 2597 memory opcodes, mostly
2831 * buffer stores. */
2832 return sel->type == PIPE_SHADER_COMPUTE &&
2833 sel->info.num_memory_instructions > 1000;
2834 }
2835
2836 static struct nir_shader *get_nir_shader(struct si_shader_selector *sel,
2837 bool *free_nir)
2838 {
2839 *free_nir = false;
2840
2841 if (sel->nir) {
2842 return sel->nir;
2843 } else if (sel->nir_binary) {
2844 struct pipe_screen *screen = &sel->screen->b;
2845 const void *options =
2846 screen->get_compiler_options(screen, PIPE_SHADER_IR_NIR,
2847 sel->type);
2848
2849 struct blob_reader blob_reader;
2850 blob_reader_init(&blob_reader, sel->nir_binary, sel->nir_size);
2851 *free_nir = true;
2852 return nir_deserialize(NULL, options, &blob_reader);
2853 }
2854 return NULL;
2855 }
2856
/**
 * Compile a shader into its final hardware binary.
 *
 * Builds LLVM IR for the main part; for monolithic shaders it additionally
 * builds the needed prolog/epilog parts (and, on GFX9+, the previous merged
 * stage's main part) and concatenates everything with a wrapper function.
 * The module is then optimized and compiled to bytecode.
 *
 * \param sscreen   screen
 * \param compiler  LLVM compiler instance
 * \param shader    shader to compile; binary/config/info are filled in
 * \param debug     debug callback
 * \return 0 on success, negative on failure
 */
int si_compile_shader(struct si_screen *sscreen,
		      struct ac_llvm_compiler *compiler,
		      struct si_shader *shader,
		      struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader_context ctx;
	bool free_nir;
	struct nir_shader *nir = get_nir_shader(sel, &free_nir);
	int r = -1;

	/* Dump NIR before doing NIR->LLVM conversion in case the
	 * conversion fails. */
	/* NOTE(review): assumes the selector always provides NIR (live or
	 * serialized) by this point — verify against callers. */
	if (si_can_dump_shader(sscreen, sel->type) &&
	    !(sscreen->debug_flags & DBG(NO_NIR))) {
		nir_print_shader(nir, stderr);
		si_dump_streamout(&sel->so);
	}

	si_llvm_context_init(&ctx, sscreen, compiler, si_get_shader_wave_size(shader));
	si_llvm_context_set_ir(&ctx, shader);

	/* Mark all VS output params as unused until the main part fills them in. */
	memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(shader->info.vs_output_param_offset));

	shader->info.uses_instanceid = sel->info.uses_instanceid;

	if (!si_build_main_function(&ctx, nir, free_nir)) {
		si_llvm_dispose(&ctx);
		return -1;
	}

	/* For monolithic shaders, build the missing parts (prologs/epilogs and
	 * the previous stage's main part on GFX9+ merged shaders) and glue them
	 * together with a wrapper function. */
	if (shader->is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
		LLVMValueRef parts[2];
		bool need_prolog = si_vs_needs_prolog(sel, &shader->key.part.vs.prolog);

		parts[1] = ctx.main_fn;

		if (need_prolog) {
			union si_shader_part_key prolog_key;
			si_get_vs_prolog_key(&sel->info,
					     shader->info.num_input_sgprs,
					     &shader->key.part.vs.prolog,
					     shader, &prolog_key);
			prolog_key.vs_prolog.is_monolithic = true;
			si_build_vs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_build_wrapper_function(&ctx, parts + !need_prolog,
					  1 + need_prolog, need_prolog, 0);

		if (ctx.shader->key.opt.vs_as_prim_discard_cs)
			si_build_prim_discard_compute_shader(&ctx);
	} else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
		if (sscreen->info.chip_class >= GFX9) {
			/* GFX9 merges LS into HS: the final function is
			 * [LS prolog?] + LS main + TCS main + TCS epilog. */
			struct si_shader_selector *ls = shader->key.part.tcs.ls;
			LLVMValueRef parts[4];
			bool vs_needs_prolog =
				si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog);

			/* TCS main part */
			parts[2] = ctx.main_fn;

			/* TCS epilog */
			union si_shader_part_key tcs_epilog_key;
			memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
			tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_llvm_build_tcs_epilog(&ctx, &tcs_epilog_key);
			parts[3] = ctx.main_fn;

			/* VS as LS main part */
			nir = get_nir_shader(ls, &free_nir);
			struct si_shader shader_ls = {};
			shader_ls.selector = ls;
			shader_ls.key.as_ls = 1;
			shader_ls.key.mono = shader->key.mono;
			shader_ls.key.opt = shader->key.opt;
			shader_ls.is_monolithic = true;
			si_llvm_context_set_ir(&ctx, &shader_ls);

			if (!si_build_main_function(&ctx, nir, free_nir)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= ls->info.uses_instanceid;
			parts[1] = ctx.main_fn;

			/* LS prolog */
			if (vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&ls->info,
						     shader_ls.info.num_input_sgprs,
						     &shader->key.part.tcs.ls_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				parts[0] = ctx.main_fn;
			}

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_TESS_CTRL;

			si_build_wrapper_function(&ctx,
						  parts + !vs_needs_prolog,
						  4 - !vs_needs_prolog, vs_needs_prolog,
						  vs_needs_prolog ? 2 : 1);
		} else {
			/* Pre-GFX9: TCS is just main part + epilog. */
			LLVMValueRef parts[2];
			union si_shader_part_key epilog_key;

			parts[0] = ctx.main_fn;

			memset(&epilog_key, 0, sizeof(epilog_key));
			epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_llvm_build_tcs_epilog(&ctx, &epilog_key);
			parts[1] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 0, 0);
		}
	} else if (shader->is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
		if (ctx.screen->info.chip_class >= GFX9) {
			/* GFX9 merges ES into GS: the final function is
			 * [ES prolog?] + ES main + GS prolog + GS main. */
			struct si_shader_selector *es = shader->key.part.gs.es;
			LLVMValueRef es_prolog = NULL;
			LLVMValueRef es_main = NULL;
			LLVMValueRef gs_prolog = NULL;
			LLVMValueRef gs_main = ctx.main_fn;

			/* GS prolog */
			union si_shader_part_key gs_prolog_key;
			memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
			gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			gs_prolog_key.gs_prolog.is_monolithic = true;
			gs_prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
			si_llvm_build_gs_prolog(&ctx, &gs_prolog_key);
			gs_prolog = ctx.main_fn;

			/* ES main part */
			nir = get_nir_shader(es, &free_nir);
			struct si_shader shader_es = {};
			shader_es.selector = es;
			shader_es.key.as_es = 1;
			shader_es.key.as_ngg = shader->key.as_ngg;
			shader_es.key.mono = shader->key.mono;
			shader_es.key.opt = shader->key.opt;
			shader_es.is_monolithic = true;
			si_llvm_context_set_ir(&ctx, &shader_es);

			if (!si_build_main_function(&ctx, nir, free_nir)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= es->info.uses_instanceid;
			es_main = ctx.main_fn;

			/* ES prolog */
			if (es->type == PIPE_SHADER_VERTEX &&
			    si_vs_needs_prolog(es, &shader->key.part.gs.vs_prolog)) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&es->info,
						     shader_es.info.num_input_sgprs,
						     &shader->key.part.gs.vs_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				es_prolog = ctx.main_fn;
			}

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_GEOMETRY;

			/* Prepare the array of shader parts. */
			LLVMValueRef parts[4];
			unsigned num_parts = 0, main_part, next_first_part;

			if (es_prolog)
				parts[num_parts++] = es_prolog;

			parts[main_part = num_parts++] = es_main;
			parts[next_first_part = num_parts++] = gs_prolog;
			parts[num_parts++] = gs_main;

			si_build_wrapper_function(&ctx, parts, num_parts,
						  main_part, next_first_part);
		} else {
			/* Pre-GFX9: GS is just prolog + main part. */
			LLVMValueRef parts[2];
			union si_shader_part_key prolog_key;

			parts[1] = ctx.main_fn;

			memset(&prolog_key, 0, sizeof(prolog_key));
			prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			si_llvm_build_gs_prolog(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 1, 0);
		}
	} else if (shader->is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
		si_llvm_build_monolithic_ps(&ctx, shader);
	}

	si_llvm_optimize_module(&ctx);

	/* Post-optimization transformations and analysis. */
	si_optimize_vs_outputs(&ctx);

	if ((debug && debug->debug_message) ||
	    si_can_dump_shader(sscreen, ctx.type)) {
		ctx.shader->info.private_mem_vgprs =
			ac_count_scratch_private_memory(ctx.main_fn);
	}

	/* Make sure the input is a pointer and not integer followed by inttoptr. */
	assert(LLVMGetTypeKind(LLVMTypeOf(LLVMGetParam(ctx.main_fn, 0))) ==
	       LLVMPointerTypeKind);

	/* Compile to bytecode. */
	r = si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler,
			    &ctx.ac, debug, ctx.type, si_get_shader_name(shader),
			    si_should_optimize_less(compiler, shader->selector));
	si_llvm_dispose(&ctx);
	if (r) {
		fprintf(stderr, "LLVM failed to compile shader\n");
		return r;
	}

	/* Validate SGPR and VGPR usage for compute to detect compiler bugs.
	 * LLVM 3.9svn has this bug.
	 */
	if (sel->type == PIPE_SHADER_COMPUTE) {
		unsigned wave_size = sscreen->compute_wave_size;
		unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd *
				     (wave_size == 32 ? 2 : 1);
		unsigned max_sgprs = sscreen->info.num_physical_sgprs_per_simd;
		unsigned max_sgprs_per_wave = 128;
		unsigned simds_per_tg = 4; /* assuming WGP mode on gfx10 */
		unsigned threads_per_tg = si_get_max_workgroup_size(shader);
		unsigned waves_per_tg = DIV_ROUND_UP(threads_per_tg, wave_size);
		unsigned waves_per_simd = DIV_ROUND_UP(waves_per_tg, simds_per_tg);

		/* Per-wave limits shrink as more waves of the workgroup must
		 * be resident on the same SIMD simultaneously. */
		max_vgprs = max_vgprs / waves_per_simd;
		max_sgprs = MIN2(max_sgprs / waves_per_simd, max_sgprs_per_wave);

		if (shader->config.num_sgprs > max_sgprs ||
		    shader->config.num_vgprs > max_vgprs) {
			fprintf(stderr, "LLVM failed to compile a shader correctly: "
				"SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
				shader->config.num_sgprs, shader->config.num_vgprs,
				max_sgprs, max_vgprs);

			/* Just terminate the process, because dependent
			 * shaders can hang due to bad input data, but use
			 * the env var to allow shader-db to work.
			 */
			if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
				abort();
		}
	}

	/* Add the scratch offset to input SGPRs. */
	if (shader->config.scratch_bytes_per_wave && !si_is_merged_shader(&ctx))
		shader->info.num_input_sgprs += 1; /* scratch byte offset */

	/* Calculate the number of fragment input VGPRs. */
	if (ctx.type == PIPE_SHADER_FRAGMENT) {
		shader->info.num_input_vgprs = ac_get_fs_input_vgpr_cnt(&shader->config,
							&shader->info.face_vgpr_index,
							&shader->info.ancillary_vgpr_index);
	}

	si_calculate_max_simd_waves(shader);
	si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
	return 0;
}
3133
3134 /**
3135 * Create, compile and return a shader part (prolog or epilog).
3136 *
3137 * \param sscreen screen
3138 * \param list list of shader parts of the same category
3139 * \param type shader type
3140 * \param key shader part key
3141 * \param prolog whether the part being requested is a prolog
 * \param compiler   LLVM compiler instance
3143 * \param debug debug callback
3144 * \param build the callback responsible for building the main function
3145 * \return non-NULL on success
3146 */
3147 static struct si_shader_part *
3148 si_get_shader_part(struct si_screen *sscreen,
3149 struct si_shader_part **list,
3150 enum pipe_shader_type type,
3151 bool prolog,
3152 union si_shader_part_key *key,
3153 struct ac_llvm_compiler *compiler,
3154 struct pipe_debug_callback *debug,
3155 void (*build)(struct si_shader_context *,
3156 union si_shader_part_key *),
3157 const char *name)
3158 {
3159 struct si_shader_part *result;
3160
3161 simple_mtx_lock(&sscreen->shader_parts_mutex);
3162
3163 /* Find existing. */
3164 for (result = *list; result; result = result->next) {
3165 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
3166 simple_mtx_unlock(&sscreen->shader_parts_mutex);
3167 return result;
3168 }
3169 }
3170
3171 /* Compile a new one. */
3172 result = CALLOC_STRUCT(si_shader_part);
3173 result->key = *key;
3174
3175 struct si_shader shader = {};
3176
3177 switch (type) {
3178 case PIPE_SHADER_VERTEX:
3179 shader.key.as_ls = key->vs_prolog.as_ls;
3180 shader.key.as_es = key->vs_prolog.as_es;
3181 shader.key.as_ngg = key->vs_prolog.as_ngg;
3182 break;
3183 case PIPE_SHADER_TESS_CTRL:
3184 assert(!prolog);
3185 shader.key.part.tcs.epilog = key->tcs_epilog.states;
3186 break;
3187 case PIPE_SHADER_GEOMETRY:
3188 assert(prolog);
3189 shader.key.as_ngg = key->gs_prolog.as_ngg;
3190 break;
3191 case PIPE_SHADER_FRAGMENT:
3192 if (prolog)
3193 shader.key.part.ps.prolog = key->ps_prolog.states;
3194 else
3195 shader.key.part.ps.epilog = key->ps_epilog.states;
3196 break;
3197 default:
3198 unreachable("bad shader part");
3199 }
3200
3201 struct si_shader_context ctx;
3202 si_llvm_context_init(&ctx, sscreen, compiler,
3203 si_get_wave_size(sscreen, type, shader.key.as_ngg,
3204 shader.key.as_es));
3205 ctx.shader = &shader;
3206 ctx.type = type;
3207
3208 build(&ctx, key);
3209
3210 /* Compile. */
3211 si_llvm_optimize_module(&ctx);
3212
3213 if (si_compile_llvm(sscreen, &result->binary, &result->config, compiler,
3214 &ctx.ac, debug, ctx.type, name, false)) {
3215 FREE(result);
3216 result = NULL;
3217 goto out;
3218 }
3219
3220 result->next = *list;
3221 *list = result;
3222
3223 out:
3224 si_llvm_dispose(&ctx);
3225 simple_mtx_unlock(&sscreen->shader_parts_mutex);
3226 return result;
3227 }
3228
3229 /**
3230 * Build the vertex shader prolog function.
3231 *
3232 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
3233 * All inputs are returned unmodified. The vertex load indices are
3234 * stored after them, which will be used by the API VS for fetching inputs.
3235 *
3236 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
3237 * input_v0,
3238 * input_v1,
3239 * input_v2,
3240 * input_v3,
3241 * (VertexID + BaseVertex),
3242 * (InstanceID + StartInstance),
3243 * (InstanceID / 2 + StartInstance)
3244 */
static void si_build_vs_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	LLVMTypeRef *returns;
	LLVMValueRef ret, func;
	int num_returns, i;
	/* In merged shaders the VS VGPRs come after the next stage's VGPRs. */
	unsigned first_vs_vgpr = key->vs_prolog.num_merged_next_stage_vgprs;
	unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4;
	struct ac_arg input_sgpr_param[key->vs_prolog.num_input_sgprs];
	struct ac_arg input_vgpr_param[9];
	LLVMValueRef input_vgprs[9];
	unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs +
				      num_input_vgprs;
	/* Merged shaders have 8 system SGPRs before the user SGPRs —
	 * presumably; TODO confirm against the merged-shader ABI. */
	unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;

	memset(&ctx->args, 0, sizeof(ctx->args));

	/* 4 preloaded VGPRs + vertex load indices as prolog outputs */
	returns = alloca((num_all_input_regs + key->vs_prolog.num_inputs) *
			 sizeof(LLVMTypeRef));
	num_returns = 0;

	/* Declare input and output SGPRs. */
	for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
			   &input_sgpr_param[i]);
		returns[num_returns++] = ctx->i32;
	}

	/* NOTE(review): assumes SGPR 3 holds the merged wave info — verify
	 * against the merged-shader input SGPR layout. */
	struct ac_arg merged_wave_info = input_sgpr_param[3];

	/* Preloaded VGPRs (outputs must be floats) */
	for (i = 0; i < num_input_vgprs; i++) {
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &input_vgpr_param[i]);
		returns[num_returns++] = ctx->f32;
	}

	/* Vertex load indices. */
	for (i = 0; i < key->vs_prolog.num_inputs; i++)
		returns[num_returns++] = ctx->f32;

	/* Create the function. */
	si_llvm_create_func(ctx, "vs_prolog", returns, num_returns, 0);
	func = ctx->main_fn;

	for (i = 0; i < num_input_vgprs; i++) {
		input_vgprs[i] = ac_get_arg(&ctx->ac, input_vgpr_param[i]);
	}

	if (key->vs_prolog.num_merged_next_stage_vgprs) {
		/* Separate (non-monolithic) prologs must set EXEC themselves
		 * from the thread count packed into the input SGPR. */
		if (!key->vs_prolog.is_monolithic)
			si_init_exec_from_input(ctx, merged_wave_info, 0);

		if (key->vs_prolog.as_ls &&
		    ctx->screen->info.has_ls_vgpr_init_bug) {
			/* If there are no HS threads, SPI loads the LS VGPRs
			 * starting at VGPR 0. Shift them back to where they
			 * belong.
			 */
			LLVMValueRef has_hs_threads =
				LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
				    si_unpack_param(ctx, input_sgpr_param[3], 8, 8),
				    ctx->i32_0, "");

			/* Move VGPRs [0..3] up to [2..5]; iterate downward so
			 * sources aren't overwritten before being read. */
			for (i = 4; i > 0; --i) {
				input_vgprs[i + 1] =
					LLVMBuildSelect(ctx->ac.builder, has_hs_threads,
							input_vgprs[i + 1],
							input_vgprs[i - 1], "");
			}
		}
	}

	/* Locate VertexID and InstanceID among the preloaded VS VGPRs; the
	 * layout differs between GFX10+ and older chips (and LS mode). */
	unsigned vertex_id_vgpr = first_vs_vgpr;
	unsigned instance_id_vgpr =
		ctx->screen->info.chip_class >= GFX10 ?
			first_vs_vgpr + 3 :
			first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1);

	ctx->abi.vertex_id = input_vgprs[vertex_id_vgpr];
	ctx->abi.instance_id = input_vgprs[instance_id_vgpr];

	/* InstanceID = VertexID >> 16;
	 * VertexID = VertexID & 0xffff;
	 */
	if (key->vs_prolog.states.unpack_instance_id_from_vertex_id) {
		ctx->abi.instance_id = LLVMBuildLShr(ctx->ac.builder, ctx->abi.vertex_id,
						     LLVMConstInt(ctx->i32, 16, 0), "");
		ctx->abi.vertex_id = LLVMBuildAnd(ctx->ac.builder, ctx->abi.vertex_id,
						  LLVMConstInt(ctx->i32, 0xffff, 0), "");
	}

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
	}
	for (i = 0; i < num_input_vgprs; i++) {
		LLVMValueRef p = input_vgprs[i];

		/* Forward the possibly-rewritten system-value VGPRs. */
		if (i == vertex_id_vgpr)
			p = ctx->abi.vertex_id;
		else if (i == instance_id_vgpr)
			p = ctx->abi.instance_id;

		p = ac_to_float(&ctx->ac, p);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p,
					   key->vs_prolog.num_input_sgprs + i, "");
	}

	/* Compute vertex load indices from instance divisors. */
	LLVMValueRef instance_divisor_constbuf = NULL;

	if (key->vs_prolog.states.instance_divisor_is_fetched) {
		/* Load the descriptor of the constant buffer holding the
		 * precomputed fast-division factors. */
		LLVMValueRef list = si_prolog_get_rw_buffers(ctx);
		LLVMValueRef buf_index =
			LLVMConstInt(ctx->i32, SI_VS_CONST_INSTANCE_DIVISORS, 0);
		instance_divisor_constbuf =
			ac_build_load_to_sgpr(&ctx->ac, list, buf_index);
	}

	for (i = 0; i < key->vs_prolog.num_inputs; i++) {
		bool divisor_is_one =
			key->vs_prolog.states.instance_divisor_is_one & (1u << i);
		bool divisor_is_fetched =
			key->vs_prolog.states.instance_divisor_is_fetched & (1u << i);
		LLVMValueRef index = NULL;

		if (divisor_is_one) {
			index = ctx->abi.instance_id;
		} else if (divisor_is_fetched) {
			/* 16 bytes (4 dwords) of division factors per input. */
			LLVMValueRef udiv_factors[4];

			for (unsigned j = 0; j < 4; j++) {
				udiv_factors[j] =
					si_buffer_load_const(ctx, instance_divisor_constbuf,
							     LLVMConstInt(ctx->i32, i*16 + j*4, 0));
				udiv_factors[j] = ac_to_integer(&ctx->ac, udiv_factors[j]);
			}
			/* The faster NUW version doesn't work when InstanceID == UINT_MAX.
			 * Such InstanceID might not be achievable in a reasonable time though.
			 */
			index = ac_build_fast_udiv_nuw(&ctx->ac, ctx->abi.instance_id,
						       udiv_factors[0], udiv_factors[1],
						       udiv_factors[2], udiv_factors[3]);
		}

		if (divisor_is_one || divisor_is_fetched) {
			/* Add StartInstance. */
			index = LLVMBuildAdd(ctx->ac.builder, index,
					     LLVMGetParam(ctx->main_fn, user_sgpr_base +
							  SI_SGPR_START_INSTANCE), "");
		} else {
			/* VertexID + BaseVertex */
			index = LLVMBuildAdd(ctx->ac.builder,
					     ctx->abi.vertex_id,
					     LLVMGetParam(func, user_sgpr_base +
							  SI_SGPR_BASE_VERTEX), "");
		}

		/* Load indices are returned as floats after all input regs. */
		index = ac_to_float(&ctx->ac, index);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, index,
					   ctx->args.arg_count + i, "");
	}

	si_llvm_build_ret(ctx, ret);
}
3415
3416 static bool si_get_vs_prolog(struct si_screen *sscreen,
3417 struct ac_llvm_compiler *compiler,
3418 struct si_shader *shader,
3419 struct pipe_debug_callback *debug,
3420 struct si_shader *main_part,
3421 const struct si_vs_prolog_bits *key)
3422 {
3423 struct si_shader_selector *vs = main_part->selector;
3424
3425 if (!si_vs_needs_prolog(vs, key))
3426 return true;
3427
3428 /* Get the prolog. */
3429 union si_shader_part_key prolog_key;
3430 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
3431 key, shader, &prolog_key);
3432
3433 shader->prolog =
3434 si_get_shader_part(sscreen, &sscreen->vs_prologs,
3435 PIPE_SHADER_VERTEX, true, &prolog_key, compiler,
3436 debug, si_build_vs_prolog_function,
3437 "Vertex Shader Prolog");
3438 return shader->prolog != NULL;
3439 }
3440
3441 /**
3442 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
3443 */
3444 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
3445 struct ac_llvm_compiler *compiler,
3446 struct si_shader *shader,
3447 struct pipe_debug_callback *debug)
3448 {
3449 return si_get_vs_prolog(sscreen, compiler, shader, debug, shader,
3450 &shader->key.part.vs.prolog);
3451 }
3452
3453 /**
3454 * Select and compile (or reuse) TCS parts (epilog).
3455 */
3456 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
3457 struct ac_llvm_compiler *compiler,
3458 struct si_shader *shader,
3459 struct pipe_debug_callback *debug)
3460 {
3461 if (sscreen->info.chip_class >= GFX9) {
3462 struct si_shader *ls_main_part =
3463 shader->key.part.tcs.ls->main_shader_part_ls;
3464
3465 if (!si_get_vs_prolog(sscreen, compiler, shader, debug, ls_main_part,
3466 &shader->key.part.tcs.ls_prolog))
3467 return false;
3468
3469 shader->previous_stage = ls_main_part;
3470 }
3471
3472 /* Get the epilog. */
3473 union si_shader_part_key epilog_key;
3474 memset(&epilog_key, 0, sizeof(epilog_key));
3475 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
3476
3477 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
3478 PIPE_SHADER_TESS_CTRL, false,
3479 &epilog_key, compiler, debug,
3480 si_llvm_build_tcs_epilog,
3481 "Tessellation Control Shader Epilog");
3482 return shader->epilog != NULL;
3483 }
3484
3485 /**
3486 * Select and compile (or reuse) GS parts (prolog).
3487 */
3488 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
3489 struct ac_llvm_compiler *compiler,
3490 struct si_shader *shader,
3491 struct pipe_debug_callback *debug)
3492 {
3493 if (sscreen->info.chip_class >= GFX9) {
3494 struct si_shader *es_main_part;
3495 enum pipe_shader_type es_type = shader->key.part.gs.es->type;
3496
3497 if (shader->key.as_ngg)
3498 es_main_part = shader->key.part.gs.es->main_shader_part_ngg_es;
3499 else
3500 es_main_part = shader->key.part.gs.es->main_shader_part_es;
3501
3502 if (es_type == PIPE_SHADER_VERTEX &&
3503 !si_get_vs_prolog(sscreen, compiler, shader, debug, es_main_part,
3504 &shader->key.part.gs.vs_prolog))
3505 return false;
3506
3507 shader->previous_stage = es_main_part;
3508 }
3509
3510 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
3511 return true;
3512
3513 union si_shader_part_key prolog_key;
3514 memset(&prolog_key, 0, sizeof(prolog_key));
3515 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
3516 prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
3517
3518 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
3519 PIPE_SHADER_GEOMETRY, true,
3520 &prolog_key, compiler, debug,
3521 si_llvm_build_gs_prolog,
3522 "Geometry Shader Prolog");
3523 return shader->prolog2 != NULL;
3524 }
3525
3526 /**
3527 * Compute the PS prolog key, which contains all the information needed to
3528 * build the PS prolog function, and set related bits in shader->config.
3529 */
void si_get_ps_prolog_key(struct si_shader *shader,
			  union si_shader_part_key *key,
			  bool separate_prolog)
{
	struct si_shader_info *info = &shader->selector->info;

	memset(key, 0, sizeof(*key));
	key->ps_prolog.states = shader->key.part.ps.prolog;
	key->ps_prolog.colors_read = info->colors_read;
	key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
	key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
	/* WQM is required when the shader uses derivatives and the prolog
	 * touches interpolation (color reads or forced interp modes). */
	key->ps_prolog.wqm = info->uses_derivatives &&
		(key->ps_prolog.colors_read ||
		 key->ps_prolog.states.force_persp_sample_interp ||
		 key->ps_prolog.states.force_linear_sample_interp ||
		 key->ps_prolog.states.force_persp_center_interp ||
		 key->ps_prolog.states.force_linear_center_interp ||
		 key->ps_prolog.states.bc_optimize_for_persp ||
		 key->ps_prolog.states.bc_optimize_for_linear);
	key->ps_prolog.ancillary_vgpr_index = shader->info.ancillary_vgpr_index;

	if (info->colors_read) {
		unsigned *color = shader->selector->color_attr_index;

		if (shader->key.part.ps.prolog.color_two_side) {
			/* BCOLORs are stored after the last input. */
			key->ps_prolog.num_interp_inputs = info->num_inputs;
			key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
			if (separate_prolog)
				shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
		}

		/* Record, per color input (COLOR0/COLOR1), which interpolation
		 * VGPR pair the prolog must use, and enable the matching SPI
		 * inputs when this is a separate (non-monolithic) prolog. */
		for (unsigned i = 0; i < 2; i++) {
			unsigned interp = info->input_interpolate[color[i]];
			unsigned location = info->input_interpolate_loc[color[i]];

			/* Skip color channels this shader doesn't read. */
			if (!(info->colors_read & (0xf << i*4)))
				continue;

			key->ps_prolog.color_attr_index[i] = color[i];

			if (shader->key.part.ps.prolog.flatshade_colors &&
			    interp == TGSI_INTERPOLATE_COLOR)
				interp = TGSI_INTERPOLATE_CONSTANT;

			switch (interp) {
			case TGSI_INTERPOLATE_CONSTANT:
				/* -1 == flat: no interpolation VGPRs needed. */
				key->ps_prolog.color_interp_vgpr_index[i] = -1;
				break;
			case TGSI_INTERPOLATE_PERSPECTIVE:
			case TGSI_INTERPOLATE_COLOR:
				/* Force the interpolation location for colors here. */
				if (shader->key.part.ps.prolog.force_persp_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.part.ps.prolog.force_persp_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				/* Indices 0/2/4 pick the persp sample/center/
				 * centroid (i,j) VGPR pairs — presumably per
				 * the PS input VGPR order; TODO confirm. */
				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					key->ps_prolog.color_interp_vgpr_index[i] = 0;
					if (separate_prolog) {
						shader->config.spi_ps_input_ena |=
							S_0286CC_PERSP_SAMPLE_ENA(1);
					}
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					key->ps_prolog.color_interp_vgpr_index[i] = 2;
					if (separate_prolog) {
						shader->config.spi_ps_input_ena |=
							S_0286CC_PERSP_CENTER_ENA(1);
					}
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					key->ps_prolog.color_interp_vgpr_index[i] = 4;
					if (separate_prolog) {
						shader->config.spi_ps_input_ena |=
							S_0286CC_PERSP_CENTROID_ENA(1);
					}
					break;
				default:
					assert(0);
				}
				break;
			case TGSI_INTERPOLATE_LINEAR:
				/* Force the interpolation location for colors here. */
				if (shader->key.part.ps.prolog.force_linear_sample_interp)
					location = TGSI_INTERPOLATE_LOC_SAMPLE;
				if (shader->key.part.ps.prolog.force_linear_center_interp)
					location = TGSI_INTERPOLATE_LOC_CENTER;

				/* The VGPR assignment for non-monolithic shaders
				 * works because InitialPSInputAddr is set on the
				 * main shader and PERSP_PULL_MODEL is never used.
				 */
				switch (location) {
				case TGSI_INTERPOLATE_LOC_SAMPLE:
					key->ps_prolog.color_interp_vgpr_index[i] =
						separate_prolog ? 6 : 9;
					if (separate_prolog) {
						shader->config.spi_ps_input_ena |=
							S_0286CC_LINEAR_SAMPLE_ENA(1);
					}
					break;
				case TGSI_INTERPOLATE_LOC_CENTER:
					key->ps_prolog.color_interp_vgpr_index[i] =
						separate_prolog ? 8 : 11;
					if (separate_prolog) {
						shader->config.spi_ps_input_ena |=
							S_0286CC_LINEAR_CENTER_ENA(1);
					}
					break;
				case TGSI_INTERPOLATE_LOC_CENTROID:
					key->ps_prolog.color_interp_vgpr_index[i] =
						separate_prolog ? 10 : 13;
					if (separate_prolog) {
						shader->config.spi_ps_input_ena |=
							S_0286CC_LINEAR_CENTROID_ENA(1);
					}
					break;
				default:
					assert(0);
				}
				break;
			default:
				assert(0);
			}
		}
	}
}
3659
3660 /**
3661 * Check whether a PS prolog is required based on the key.
3662 */
3663 bool si_need_ps_prolog(const union si_shader_part_key *key)
3664 {
3665 return key->ps_prolog.colors_read ||
3666 key->ps_prolog.states.force_persp_sample_interp ||
3667 key->ps_prolog.states.force_linear_sample_interp ||
3668 key->ps_prolog.states.force_persp_center_interp ||
3669 key->ps_prolog.states.force_linear_center_interp ||
3670 key->ps_prolog.states.bc_optimize_for_persp ||
3671 key->ps_prolog.states.bc_optimize_for_linear ||
3672 key->ps_prolog.states.poly_stipple ||
3673 key->ps_prolog.states.samplemask_log_ps_iter;
3674 }
3675
3676 /**
3677 * Compute the PS epilog key, which contains all the information needed to
3678 * build the PS epilog function.
3679 */
3680 void si_get_ps_epilog_key(struct si_shader *shader,
3681 union si_shader_part_key *key)
3682 {
3683 struct si_shader_info *info = &shader->selector->info;
3684 memset(key, 0, sizeof(*key));
3685 key->ps_epilog.colors_written = info->colors_written;
3686 key->ps_epilog.writes_z = info->writes_z;
3687 key->ps_epilog.writes_stencil = info->writes_stencil;
3688 key->ps_epilog.writes_samplemask = info->writes_samplemask;
3689 key->ps_epilog.states = shader->key.part.ps.epilog;
3690 }
3691
/**
 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
 *
 * Also patches shader->config.spi_ps_input_ena so the enabled interpolation
 * weights are consistent with the prolog state and with hardware
 * requirements (at least one weight pair enabled, W requires a perspective
 * weight, etc.).
 *
 * \return false if building a required prolog/epilog part failed.
 */
static bool si_shader_select_ps_parts(struct si_screen *sscreen,
				      struct ac_llvm_compiler *compiler,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	union si_shader_part_key prolog_key;
	union si_shader_part_key epilog_key;

	/* Get the prolog. */
	si_get_ps_prolog_key(shader, &prolog_key, true);

	/* The prolog is a no-op if these aren't set. */
	if (si_need_ps_prolog(&prolog_key)) {
		shader->prolog =
			si_get_shader_part(sscreen, &sscreen->ps_prologs,
					   PIPE_SHADER_FRAGMENT, true,
					   &prolog_key, compiler, debug,
					   si_llvm_build_ps_prolog,
					   "Fragment Shader Prolog");
		if (!shader->prolog)
			return false;
	}

	/* Get the epilog. Unlike the prolog, it is always required. */
	si_get_ps_epilog_key(shader, &epilog_key);

	shader->epilog =
		si_get_shader_part(sscreen, &sscreen->ps_epilogs,
				   PIPE_SHADER_FRAGMENT, false,
				   &epilog_key, compiler, debug,
				   si_llvm_build_ps_epilog,
				   "Fragment Shader Epilog");
	if (!shader->epilog)
		return false;

	/* Enable POS_FIXED_PT if polygon stippling is enabled. */
	if (shader->key.part.ps.prolog.poly_stipple) {
		shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
		assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
	}

	/* Set up the enable bits for per-sample shading if needed.
	 * When an interpolation location is forced, any enabled weight pair
	 * at a different location is cleared and replaced by the forced one,
	 * so only the forced weights are requested from the hardware.
	 */
	if (shader->key.part.ps.prolog.force_persp_sample_interp &&
	    (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_sample_interp &&
	    (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_persp_center_interp &&
	    (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_center_interp &&
	    (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
	}

	/* POW_W_FLOAT requires that one of the perspective weights is enabled.
	 * The low 4 bits of spi_ps_input_ena are the perspective weight pairs.
	 */
	if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
	    !(shader->config.spi_ps_input_ena & 0xf)) {
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
		assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* At least one pair of interpolation weights must be enabled.
	 * 0x7f covers all perspective and linear weight enable bits.
	 */
	if (!(shader->config.spi_ps_input_ena & 0x7f)) {
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
		assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* Samplemask fixup requires the sample ID (in the ANCILLARY VGPR). */
	if (shader->key.part.ps.prolog.samplemask_log_ps_iter) {
		shader->config.spi_ps_input_ena |= S_0286CC_ANCILLARY_ENA(1);
		assert(G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr));
	}

	/* The sample mask input is always enabled, because the API shader always
	 * passes it through to the epilog. Disable it here if it's unused.
	 */
	if (!shader->key.part.ps.epilog.poly_line_smoothing &&
	    !shader->selector->info.reads_samplemask)
		shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;

	return true;
}
3794
/* Historical workaround for an SPI barrier management bug on CHIP_BONAIRE
 * and CHIP_KABINI. Currently a no-op: the early return below intentionally
 * disables the fixup (see the comment), and the original code is kept
 * unreachable as a reference.
 */
void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
				      unsigned *lds_size)
{
	/* If tessellation is all offchip and on-chip GS isn't used, this
	 * workaround is not needed.
	 */
	return;

	/* NOTE(review): everything below is intentionally unreachable because
	 * of the early return above; compilers may warn about dead code.
	 */

	/* SPI barrier management bug:
	 * Make sure we have at least 4k of LDS in use to avoid the bug.
	 * It applies to workgroup sizes of more than one wavefront.
	 */
	if (sscreen->info.family == CHIP_BONAIRE ||
	    sscreen->info.family == CHIP_KABINI)
		*lds_size = MAX2(*lds_size, 8);
}
3811
3812 void si_fix_resource_usage(struct si_screen *sscreen, struct si_shader *shader)
3813 {
3814 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
3815
3816 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
3817
3818 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
3819 si_get_max_workgroup_size(shader) > sscreen->compute_wave_size) {
3820 si_multiwave_lds_size_workaround(sscreen,
3821 &shader->config.lds_size);
3822 }
3823 }
3824
/**
 * Create one shader variant: either compile it monolithically or assemble
 * it from the precompiled main part plus selected prologs/epilogs, then
 * fix up resource usage, dump it, and upload the binary.
 *
 * \return false on compilation, part-selection, or upload failure.
 */
bool si_create_shader_variant(struct si_screen *sscreen,
			      struct ac_llvm_compiler *compiler,
			      struct si_shader *shader,
			      struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	/* The main part matching this key's stage/NGG-ness, if already built. */
	struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
	int r;

	/* LS, ES, VS are compiled on demand if the main part hasn't been
	 * compiled for that stage.
	 *
	 * GS are compiled on demand if the main part hasn't been compiled
	 * for the chosen NGG-ness.
	 *
	 * Vertex shaders are compiled on demand when a vertex fetch
	 * workaround must be applied.
	 */
	if (shader->is_monolithic) {
		/* Monolithic shader (compiled as a whole, has many variants,
		 * may take a long time to compile).
		 */
		r = si_compile_shader(sscreen, compiler, shader, debug);
		if (r)
			return false;
	} else {
		/* The shader consists of several parts:
		 *
		 * - the middle part is the user shader, it has 1 variant only
		 *   and it was compiled during the creation of the shader
		 *   selector
		 * - the prolog part is inserted at the beginning
		 * - the epilog part is inserted at the end
		 *
		 * The prolog and epilog have many (but simple) variants.
		 *
		 * Starting with gfx9, geometry and tessellation control
		 * shaders also contain the prolog and user shader parts of
		 * the previous shader stage.
		 */

		if (!mainp)
			return false;

		/* Copy the compiled shader data over. The binary is shared
		 * (not owned), so si_shader_destroy must not free it.
		 */
		shader->is_binary_shared = true;
		shader->binary = mainp->binary;
		shader->config = mainp->config;
		shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
		shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
		shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
		shader->info.ancillary_vgpr_index = mainp->info.ancillary_vgpr_index;
		memcpy(shader->info.vs_output_param_offset,
		       mainp->info.vs_output_param_offset,
		       sizeof(mainp->info.vs_output_param_offset));
		shader->info.uses_instanceid = mainp->info.uses_instanceid;
		shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
		shader->info.nr_param_exports = mainp->info.nr_param_exports;

		/* Select prologs and/or epilogs. TES needs no extra parts. */
		switch (sel->type) {
		case PIPE_SHADER_VERTEX:
			if (!si_shader_select_vs_parts(sscreen, compiler, shader, debug))
				return false;
			break;
		case PIPE_SHADER_TESS_CTRL:
			if (!si_shader_select_tcs_parts(sscreen, compiler, shader, debug))
				return false;
			break;
		case PIPE_SHADER_TESS_EVAL:
			break;
		case PIPE_SHADER_GEOMETRY:
			if (!si_shader_select_gs_parts(sscreen, compiler, shader, debug))
				return false;
			break;
		case PIPE_SHADER_FRAGMENT:
			if (!si_shader_select_ps_parts(sscreen, compiler, shader, debug))
				return false;

			/* Make sure we have at least as many VGPRs as there
			 * are allocated inputs.
			 */
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->info.num_input_vgprs);
			break;
		default:;
		}

		/* Update SGPR and VGPR counts: the whole shader must fit the
		 * maximum requirement of any of its parts.
		 */
		if (shader->prolog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog->config.num_vgprs);
		}
		if (shader->previous_stage) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->previous_stage->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->previous_stage->config.num_vgprs);
			shader->config.spilled_sgprs =
				MAX2(shader->config.spilled_sgprs,
				     shader->previous_stage->config.spilled_sgprs);
			shader->config.spilled_vgprs =
				MAX2(shader->config.spilled_vgprs,
				     shader->previous_stage->config.spilled_vgprs);
			shader->info.private_mem_vgprs =
				MAX2(shader->info.private_mem_vgprs,
				     shader->previous_stage->info.private_mem_vgprs);
			shader->config.scratch_bytes_per_wave =
				MAX2(shader->config.scratch_bytes_per_wave,
				     shader->previous_stage->config.scratch_bytes_per_wave);
			shader->info.uses_instanceid |=
				shader->previous_stage->info.uses_instanceid;
		}
		if (shader->prolog2) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog2->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog2->config.num_vgprs);
		}
		if (shader->epilog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->epilog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->epilog->config.num_vgprs);
		}
		si_calculate_max_simd_waves(shader);
	}

	/* NGG excludes the legacy ES/LS paths. */
	if (shader->key.as_ngg) {
		assert(!shader->key.as_es && !shader->key.as_ls);
		gfx10_ngg_calculate_subgroup_info(shader);
	} else if (sscreen->info.chip_class >= GFX9 && sel->type == PIPE_SHADER_GEOMETRY) {
		gfx9_get_gs_info(shader->previous_stage_sel, sel, &shader->gs_info);
	}

	si_fix_resource_usage(sscreen, shader);
	si_shader_dump(sscreen, shader, debug, stderr, true);

	/* Upload. */
	if (!si_shader_binary_upload(sscreen, shader, 0)) {
		fprintf(stderr, "LLVM failed to upload shader\n");
		return false;
	}

	return true;
}
3973
3974 void si_shader_destroy(struct si_shader *shader)
3975 {
3976 if (shader->scratch_bo)
3977 si_resource_reference(&shader->scratch_bo, NULL);
3978
3979 si_resource_reference(&shader->bo, NULL);
3980
3981 if (!shader->is_binary_shared)
3982 si_shader_binary_clean(&shader->binary);
3983
3984 free(shader->shader_log);
3985 }