radeonsi: remove useless #includes
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "util/u_memory.h"
26 #include "tgsi/tgsi_strings.h"
27 #include "tgsi/tgsi_from_mesa.h"
28
29 #include "ac_exp_param.h"
30 #include "ac_rtld.h"
31 #include "si_shader_internal.h"
32 #include "si_pipe.h"
33 #include "sid.h"
34
35 #include "compiler/nir/nir.h"
36 #include "compiler/nir/nir_serialize.h"
37
38 static const char scratch_rsrc_dword0_symbol[] =
39 "SCRATCH_RSRC_DWORD0";
40
41 static const char scratch_rsrc_dword1_symbol[] =
42 "SCRATCH_RSRC_DWORD1";
43
44 static void si_dump_shader_key(const struct si_shader *shader, FILE *f);
45
46 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
47 union si_shader_part_key *key);
48
49 /** Whether the shader runs as a combination of multiple API shaders */
50 static bool is_multi_part_shader(struct si_shader_context *ctx)
51 {
52 if (ctx->screen->info.chip_class <= GFX8)
53 return false;
54
55 return ctx->shader->key.as_ls ||
56 ctx->shader->key.as_es ||
57 ctx->type == PIPE_SHADER_TESS_CTRL ||
58 ctx->type == PIPE_SHADER_GEOMETRY;
59 }
60
61 /** Whether the shader runs on a merged HW stage (LSHS or ESGS) */
62 bool si_is_merged_shader(struct si_shader_context *ctx)
63 {
64 return ctx->shader->key.as_ngg || is_multi_part_shader(ctx);
65 }
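/* Illustrative note (not part of the original file): on GFX9+ the hardware
 * merges API stages into single HW shaders, e.g.
 *
 *   VS + TCS -> HS (key.as_ls set on the VS part)
 *   VS + GS  -> GS (key.as_es set on the VS part)
 *   TES + GS -> GS (key.as_es set on the TES part)
 *
 * which is why the checks above look at as_ls/as_es and the TCS/GS types.
 */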
66
67 /**
68 * Returns a unique index for a per-patch semantic name and index. The index
69 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
70 * can be calculated.
71 */
72 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
73 {
74 switch (semantic_name) {
75 case TGSI_SEMANTIC_TESSOUTER:
76 return 0;
77 case TGSI_SEMANTIC_TESSINNER:
78 return 1;
79 case TGSI_SEMANTIC_PATCH:
80 assert(index < 30);
81 return 2 + index;
82
83 default:
84 assert(!"invalid semantic name");
85 return 0;
86 }
87 }
88
89 /**
90 * Returns a unique index for a semantic name and index. The index must be
91 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
92 * calculated.
93 */
94 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index,
95 unsigned is_varying)
96 {
97 switch (semantic_name) {
98 case TGSI_SEMANTIC_POSITION:
99 return 0;
100 case TGSI_SEMANTIC_GENERIC:
101 /* Since some shader stages use the highest used IO index
102 * to determine the size to allocate for inputs/outputs
103 * (in LDS, tess and GS rings), GENERIC should be placed right
104 * after POSITION to make that size as small as possible.
105 */
106 if (index < SI_MAX_IO_GENERIC)
107 return 1 + index;
108
109 assert(!"invalid generic index");
110 return 0;
111 case TGSI_SEMANTIC_FOG:
112 return SI_MAX_IO_GENERIC + 1;
113 case TGSI_SEMANTIC_COLOR:
114 assert(index < 2);
115 return SI_MAX_IO_GENERIC + 2 + index;
116 case TGSI_SEMANTIC_BCOLOR:
117 assert(index < 2);
118 /* If it's a varying, COLOR and BCOLOR alias. */
119 if (is_varying)
120 return SI_MAX_IO_GENERIC + 2 + index;
121 else
122 return SI_MAX_IO_GENERIC + 4 + index;
123 case TGSI_SEMANTIC_TEXCOORD:
124 assert(index < 8);
125 return SI_MAX_IO_GENERIC + 6 + index;
126
127 /* These are rarely used between LS and HS or ES and GS. */
128 case TGSI_SEMANTIC_CLIPDIST:
129 assert(index < 2);
130 return SI_MAX_IO_GENERIC + 6 + 8 + index;
131 case TGSI_SEMANTIC_CLIPVERTEX:
132 return SI_MAX_IO_GENERIC + 6 + 8 + 2;
133 case TGSI_SEMANTIC_PSIZE:
134 return SI_MAX_IO_GENERIC + 6 + 8 + 3;
135
136 /* These can't be written by LS, HS, and ES. */
137 case TGSI_SEMANTIC_LAYER:
138 return SI_MAX_IO_GENERIC + 6 + 8 + 4;
139 case TGSI_SEMANTIC_VIEWPORT_INDEX:
140 return SI_MAX_IO_GENERIC + 6 + 8 + 5;
141 case TGSI_SEMANTIC_PRIMID:
142 STATIC_ASSERT(SI_MAX_IO_GENERIC + 6 + 8 + 6 <= 63);
143 return SI_MAX_IO_GENERIC + 6 + 8 + 6;
144 default:
145 fprintf(stderr, "invalid semantic name = %u\n", semantic_name);
146 assert(!"invalid semantic name");
147 return 0;
148 }
149 }
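/* Usage sketch (illustrative, assuming SI_MAX_IO_GENERIC == 32): callers
 * accumulate these indices into a 64-bit mask, e.g.
 *
 *   uint64_t outputs_written = 0;
 *   outputs_written |= 1ull << si_shader_io_get_unique_index(
 *           TGSI_SEMANTIC_GENERIC, 3, true);   // sets bit 4
 *
 * POSITION always maps to bit 0, so GENERIC n maps to bit 1 + n.
 */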
150
151 /**
152 * Get the value of a shader input parameter and extract a bitfield.
153 */
154 static LLVMValueRef unpack_llvm_param(struct si_shader_context *ctx,
155 LLVMValueRef value, unsigned rshift,
156 unsigned bitwidth)
157 {
158 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
159 value = ac_to_integer(&ctx->ac, value);
160
161 if (rshift)
162 value = LLVMBuildLShr(ctx->ac.builder, value,
163 LLVMConstInt(ctx->i32, rshift, 0), "");
164
165 if (rshift + bitwidth < 32) {
166 unsigned mask = (1 << bitwidth) - 1;
167 value = LLVMBuildAnd(ctx->ac.builder, value,
168 LLVMConstInt(ctx->i32, mask, 0), "");
169 }
170
171 return value;
172 }
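/* For example (illustrative): unpack_llvm_param(ctx, v, 16, 7) builds IR
 * equivalent to (v >> 16) & 0x7f; si_llvm_emit_streamout below uses exactly
 * that to read the vertex count out of streamout_config. */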
173
174 LLVMValueRef si_unpack_param(struct si_shader_context *ctx,
175 struct ac_arg param, unsigned rshift,
176 unsigned bitwidth)
177 {
178 LLVMValueRef value = ac_get_arg(&ctx->ac, param);
179
180 return unpack_llvm_param(ctx, value, rshift, bitwidth);
181 }
182
183 static LLVMValueRef unpack_sint16(struct si_shader_context *ctx,
184 LLVMValueRef i32, unsigned index)
185 {
186 assert(index <= 1);
187
188 if (index == 1)
189 return LLVMBuildAShr(ctx->ac.builder, i32,
190 LLVMConstInt(ctx->i32, 16, 0), "");
191
192 return LLVMBuildSExt(ctx->ac.builder,
193 LLVMBuildTrunc(ctx->ac.builder, i32,
194 ctx->ac.i16, ""),
195 ctx->i32, "");
196 }
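/* Illustrative equivalents of the IR built above:
 *   unpack_sint16(ctx, v, 0) ~ (int32_t)(int16_t)(v & 0xffff)
 *   unpack_sint16(ctx, v, 1) ~ (int32_t)v >> 16
 * i.e. the low and high signed 16-bit halves of a packed dword. */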
197
198 void si_llvm_load_input_vs(
199 struct si_shader_context *ctx,
200 unsigned input_index,
201 LLVMValueRef out[4])
202 {
203 const struct si_shader_info *info = &ctx->shader->selector->info;
204 unsigned vs_blit_property = info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
205
206 if (vs_blit_property) {
207 LLVMValueRef vertex_id = ctx->abi.vertex_id;
208 LLVMValueRef sel_x1 = LLVMBuildICmp(ctx->ac.builder,
209 LLVMIntULE, vertex_id,
210 ctx->i32_1, "");
211 /* Use LLVMIntNE, because we have 3 vertices and only
212 * the middle one should use y2.
213 */
214 LLVMValueRef sel_y1 = LLVMBuildICmp(ctx->ac.builder,
215 LLVMIntNE, vertex_id,
216 ctx->i32_1, "");
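/* Sketch of the assumed blit geometry (illustrative): the three vertices
 * of the large blit triangle are
 *   id 0 -> (x1, y1),  id 1 -> (x1, y2),  id 2 -> (x2, y1)
 * so sel_x1 (id <= 1) picks x1 for ids 0-1, and sel_y1 (id != 1) picks y1
 * for ids 0 and 2. */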
217
218 unsigned param_vs_blit_inputs = ctx->vs_blit_inputs.arg_index;
219 if (input_index == 0) {
220 /* Position: */
221 LLVMValueRef x1y1 = LLVMGetParam(ctx->main_fn,
222 param_vs_blit_inputs);
223 LLVMValueRef x2y2 = LLVMGetParam(ctx->main_fn,
224 param_vs_blit_inputs + 1);
225
226 LLVMValueRef x1 = unpack_sint16(ctx, x1y1, 0);
227 LLVMValueRef y1 = unpack_sint16(ctx, x1y1, 1);
228 LLVMValueRef x2 = unpack_sint16(ctx, x2y2, 0);
229 LLVMValueRef y2 = unpack_sint16(ctx, x2y2, 1);
230
231 LLVMValueRef x = LLVMBuildSelect(ctx->ac.builder, sel_x1,
232 x1, x2, "");
233 LLVMValueRef y = LLVMBuildSelect(ctx->ac.builder, sel_y1,
234 y1, y2, "");
235
236 out[0] = LLVMBuildSIToFP(ctx->ac.builder, x, ctx->f32, "");
237 out[1] = LLVMBuildSIToFP(ctx->ac.builder, y, ctx->f32, "");
238 out[2] = LLVMGetParam(ctx->main_fn,
239 param_vs_blit_inputs + 2);
240 out[3] = ctx->ac.f32_1;
241 return;
242 }
243
244 /* Color or texture coordinates: */
245 assert(input_index == 1);
246
247 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
248 for (int i = 0; i < 4; i++) {
249 out[i] = LLVMGetParam(ctx->main_fn,
250 param_vs_blit_inputs + 3 + i);
251 }
252 } else {
253 assert(vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD);
254 LLVMValueRef x1 = LLVMGetParam(ctx->main_fn,
255 param_vs_blit_inputs + 3);
256 LLVMValueRef y1 = LLVMGetParam(ctx->main_fn,
257 param_vs_blit_inputs + 4);
258 LLVMValueRef x2 = LLVMGetParam(ctx->main_fn,
259 param_vs_blit_inputs + 5);
260 LLVMValueRef y2 = LLVMGetParam(ctx->main_fn,
261 param_vs_blit_inputs + 6);
262
263 out[0] = LLVMBuildSelect(ctx->ac.builder, sel_x1,
264 x1, x2, "");
265 out[1] = LLVMBuildSelect(ctx->ac.builder, sel_y1,
266 y1, y2, "");
267 out[2] = LLVMGetParam(ctx->main_fn,
268 param_vs_blit_inputs + 7);
269 out[3] = LLVMGetParam(ctx->main_fn,
270 param_vs_blit_inputs + 8);
271 }
272 return;
273 }
274
275 unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
276 union si_vs_fix_fetch fix_fetch;
277 LLVMValueRef vb_desc;
278 LLVMValueRef vertex_index;
279 LLVMValueRef tmp;
280
281 if (input_index < num_vbos_in_user_sgprs) {
282 vb_desc = ac_get_arg(&ctx->ac, ctx->vb_descriptors[input_index]);
283 } else {
284 unsigned index = input_index - num_vbos_in_user_sgprs;
285 vb_desc = ac_build_load_to_sgpr(&ctx->ac,
286 ac_get_arg(&ctx->ac, ctx->vertex_buffers),
287 LLVMConstInt(ctx->i32, index, 0));
288 }
289
290 vertex_index = LLVMGetParam(ctx->main_fn,
291 ctx->vertex_index0.arg_index +
292 input_index);
293
294 /* Use the open-coded implementation for all loads of doubles and
295 * of dword-sized data that needs fixups. We need to insert conversion
296 * code anyway, and the amd/common code does it for us.
297 *
298 * Note: On LLVM <= 8, we can only open-code formats with
299 * channel size >= 4 bytes.
300 */
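/* Example (illustrative): a 64-bit float attribute hits the
 * log_size == 3 && format == FLOAT case below, and any dword-sized
 * channel format needing fixups (log_size == 2) is open-coded as well,
 * since those loads require conversion code regardless. */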
301 bool opencode = ctx->shader->key.mono.vs_fetch_opencode & (1 << input_index);
302 fix_fetch.bits = ctx->shader->key.mono.vs_fix_fetch[input_index].bits;
303 if (opencode ||
304 (fix_fetch.u.log_size == 3 && fix_fetch.u.format == AC_FETCH_FORMAT_FLOAT) ||
305 (fix_fetch.u.log_size == 2)) {
306 tmp = ac_build_opencoded_load_format(
307 &ctx->ac, fix_fetch.u.log_size, fix_fetch.u.num_channels_m1 + 1,
308 fix_fetch.u.format, fix_fetch.u.reverse, !opencode,
309 vb_desc, vertex_index, ctx->ac.i32_0, ctx->ac.i32_0, 0, true);
310 for (unsigned i = 0; i < 4; ++i)
311 out[i] = LLVMBuildExtractElement(ctx->ac.builder, tmp, LLVMConstInt(ctx->i32, i, false), "");
312 return;
313 }
314
315 /* Do multiple loads for special formats. */
316 unsigned required_channels = util_last_bit(info->input_usage_mask[input_index]);
317 LLVMValueRef fetches[4];
318 unsigned num_fetches;
319 unsigned fetch_stride;
320 unsigned channels_per_fetch;
321
322 if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2) {
323 num_fetches = MIN2(required_channels, 3);
324 fetch_stride = 1 << fix_fetch.u.log_size;
325 channels_per_fetch = 1;
326 } else {
327 num_fetches = 1;
328 fetch_stride = 0;
329 channels_per_fetch = required_channels;
330 }
331
332 for (unsigned i = 0; i < num_fetches; ++i) {
333 LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);
334 fetches[i] = ac_build_buffer_load_format(&ctx->ac, vb_desc, vertex_index, voffset,
335 channels_per_fetch, 0, true);
336 }
337
338 if (num_fetches == 1 && channels_per_fetch > 1) {
339 LLVMValueRef fetch = fetches[0];
340 for (unsigned i = 0; i < channels_per_fetch; ++i) {
341 tmp = LLVMConstInt(ctx->i32, i, false);
342 fetches[i] = LLVMBuildExtractElement(
343 ctx->ac.builder, fetch, tmp, "");
344 }
345 num_fetches = channels_per_fetch;
346 channels_per_fetch = 1;
347 }
348
349 for (unsigned i = num_fetches; i < 4; ++i)
350 fetches[i] = LLVMGetUndef(ctx->f32);
351
352 if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2 &&
353 required_channels == 4) {
354 if (fix_fetch.u.format == AC_FETCH_FORMAT_UINT || fix_fetch.u.format == AC_FETCH_FORMAT_SINT)
355 fetches[3] = ctx->ac.i32_1;
356 else
357 fetches[3] = ctx->ac.f32_1;
358 } else if (fix_fetch.u.log_size == 3 &&
359 (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ||
360 fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED ||
361 fix_fetch.u.format == AC_FETCH_FORMAT_SINT) &&
362 required_channels == 4) {
363 /* For 2_10_10_10, the hardware returns an unsigned value;
364 * convert it to a signed one.
365 */
366 LLVMValueRef tmp = fetches[3];
367 LLVMValueRef c30 = LLVMConstInt(ctx->i32, 30, 0);
368
369 /* First, recover the sign-extended signed integer value. */
370 if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED)
371 tmp = LLVMBuildFPToUI(ctx->ac.builder, tmp, ctx->i32, "");
372 else
373 tmp = ac_to_integer(&ctx->ac, tmp);
374
375 /* For the integer-like cases, do a natural sign extension.
376 *
377 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
378 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
379 * exponent.
380 */
381 tmp = LLVMBuildShl(ctx->ac.builder, tmp,
382 fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ?
383 LLVMConstInt(ctx->i32, 7, 0) : c30, "");
384 tmp = LLVMBuildAShr(ctx->ac.builder, tmp, c30, "");
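/* Worked example (illustrative): an SINT alpha of 0b11 becomes
 * 0xC0000000 after the shl by 30 and 0xFFFFFFFF (-1) after the
 * ashr, i.e. a natural sign extension of the 2-bit field. */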
385
386 /* Convert back to the right type. */
387 if (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM) {
388 LLVMValueRef clamp;
389 LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
390 tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
391 clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, tmp, neg_one, "");
392 tmp = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, tmp, "");
393 } else if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED) {
394 tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
395 }
396
397 fetches[3] = tmp;
398 }
399
400 for (unsigned i = 0; i < 4; ++i)
401 out[i] = ac_to_float(&ctx->ac, fetches[i]);
402 }
403
404 LLVMValueRef si_get_primitive_id(struct si_shader_context *ctx,
405 unsigned swizzle)
406 {
407 if (swizzle > 0)
408 return ctx->i32_0;
409
410 switch (ctx->type) {
411 case PIPE_SHADER_VERTEX:
412 return ac_get_arg(&ctx->ac, ctx->vs_prim_id);
413 case PIPE_SHADER_TESS_CTRL:
414 return ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id);
415 case PIPE_SHADER_TESS_EVAL:
416 return ac_get_arg(&ctx->ac, ctx->args.tes_patch_id);
417 case PIPE_SHADER_GEOMETRY:
418 return ac_get_arg(&ctx->ac, ctx->args.gs_prim_id);
419 default:
420 assert(0);
421 return ctx->i32_0;
422 }
423 }
424
425 static LLVMValueRef get_base_vertex(struct ac_shader_abi *abi)
426 {
427 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
428
429 /* For non-indexed draws, the base vertex set by the driver
430 * (for direct draws) or the CP (for indirect draws) is the
431 * first vertex ID, but GLSL expects 0 to be returned.
432 */
433 LLVMValueRef vs_state = ac_get_arg(&ctx->ac,
434 ctx->vs_state_bits);
435 LLVMValueRef indexed;
436
437 indexed = LLVMBuildLShr(ctx->ac.builder, vs_state, ctx->i32_1, "");
438 indexed = LLVMBuildTrunc(ctx->ac.builder, indexed, ctx->i1, "");
439
440 return LLVMBuildSelect(ctx->ac.builder, indexed,
441 ac_get_arg(&ctx->ac, ctx->args.base_vertex),
442 ctx->i32_0, "");
443 }
444
445 static LLVMValueRef get_block_size(struct ac_shader_abi *abi)
446 {
447 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
448
449 LLVMValueRef values[3];
450 LLVMValueRef result;
451 unsigned i;
452 unsigned *properties = ctx->shader->selector->info.properties;
453
454 if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
455 unsigned sizes[3] = {
456 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
457 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
458 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
459 };
460
461 for (i = 0; i < 3; ++i)
462 values[i] = LLVMConstInt(ctx->i32, sizes[i], 0);
463
464 result = ac_build_gather_values(&ctx->ac, values, 3);
465 } else {
466 result = ac_get_arg(&ctx->ac, ctx->block_size);
467 }
468
469 return result;
470 }
471
472 void si_declare_compute_memory(struct si_shader_context *ctx)
473 {
474 struct si_shader_selector *sel = ctx->shader->selector;
475 unsigned lds_size = sel->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE];
476
477 LLVMTypeRef i8p = LLVMPointerType(ctx->i8, AC_ADDR_SPACE_LDS);
478 LLVMValueRef var;
479
480 assert(!ctx->ac.lds);
481
482 var = LLVMAddGlobalInAddressSpace(ctx->ac.module,
483 LLVMArrayType(ctx->i8, lds_size),
484 "compute_lds",
485 AC_ADDR_SPACE_LDS);
486 LLVMSetAlignment(var, 64 * 1024);
487
488 ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, var, i8p, "");
489 }
490
491 /* Initialize arguments for the shader export intrinsic */
492 static void si_llvm_init_vs_export_args(struct si_shader_context *ctx,
493 LLVMValueRef *values,
494 unsigned target,
495 struct ac_export_args *args)
496 {
497 args->enabled_channels = 0xf; /* writemask - default is 0xf */
498 args->valid_mask = 0; /* Specify whether the EXEC mask represents the valid mask */
499 args->done = 0; /* Specify whether this is the last export */
500 args->target = target; /* Specify the target we are exporting */
501 args->compr = false;
502
503 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
504 }
505
506 static void si_llvm_emit_clipvertex(struct si_shader_context *ctx,
507 struct ac_export_args *pos, LLVMValueRef *out_elts)
508 {
509 unsigned reg_index;
510 unsigned chan;
511 unsigned const_chan;
512 LLVMValueRef base_elt;
513 LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
514 LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
515 SI_VS_CONST_CLIP_PLANES, 0);
516 LLVMValueRef const_resource = ac_build_load_to_sgpr(&ctx->ac, ptr, constbuf_index);
517
518 for (reg_index = 0; reg_index < 2; reg_index ++) {
519 struct ac_export_args *args = &pos[2 + reg_index];
520
521 args->out[0] =
522 args->out[1] =
523 args->out[2] =
524 args->out[3] = LLVMConstReal(ctx->f32, 0.0f);
525
526 /* Compute dot products of position and user clip plane vectors */
527 for (chan = 0; chan < 4; chan++) {
528 for (const_chan = 0; const_chan < 4; const_chan++) {
529 LLVMValueRef addr =
530 LLVMConstInt(ctx->i32, ((reg_index * 4 + chan) * 4 +
531 const_chan) * 4, 0);
532 base_elt = si_buffer_load_const(ctx, const_resource,
533 addr);
534 args->out[chan] = ac_build_fmad(&ctx->ac, base_elt,
535 out_elts[const_chan], args->out[chan]);
536 }
537 }
538
539 args->enabled_channels = 0xf;
540 args->valid_mask = 0;
541 args->done = 0;
542 args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
543 args->compr = 0;
544 }
545 }
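/* Net effect (illustrative): each channel of pos[2] and pos[3] ends up
 * holding dot(position, user_clip_plane[4 * reg_index + chan]), the signed
 * distances the hardware uses for user clipping (8 planes across the two
 * position exports). */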
546
547 static void si_dump_streamout(struct pipe_stream_output_info *so)
548 {
549 unsigned i;
550
551 if (so->num_outputs)
552 fprintf(stderr, "STREAMOUT\n");
553
554 for (i = 0; i < so->num_outputs; i++) {
555 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
556 so->output[i].start_component;
557 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
558 i, so->output[i].output_buffer,
559 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
560 so->output[i].register_index,
561 mask & 1 ? "x" : "",
562 mask & 2 ? "y" : "",
563 mask & 4 ? "z" : "",
564 mask & 8 ? "w" : "");
565 }
566 }
567
568 void si_emit_streamout_output(struct si_shader_context *ctx,
569 LLVMValueRef const *so_buffers,
570 LLVMValueRef const *so_write_offsets,
571 struct pipe_stream_output *stream_out,
572 struct si_shader_output_values *shader_out)
573 {
574 unsigned buf_idx = stream_out->output_buffer;
575 unsigned start = stream_out->start_component;
576 unsigned num_comps = stream_out->num_components;
577 LLVMValueRef out[4];
578
579 assert(num_comps && num_comps <= 4);
580 if (!num_comps || num_comps > 4)
581 return;
582
583 /* Load the output as int. */
584 for (int j = 0; j < num_comps; j++) {
585 assert(stream_out->stream == shader_out->vertex_stream[start + j]);
586
587 out[j] = ac_to_integer(&ctx->ac, shader_out->values[start + j]);
588 }
589
590 /* Pack the output. */
591 LLVMValueRef vdata = NULL;
592
593 switch (num_comps) {
594 case 1: /* as i32 */
595 vdata = out[0];
596 break;
597 case 2: /* as v2i32 */
598 case 3: /* as v3i32 */
599 if (ac_has_vec3_support(ctx->screen->info.chip_class, false)) {
600 vdata = ac_build_gather_values(&ctx->ac, out, num_comps);
601 break;
602 }
603 /* as v4i32 (aligned to 4) */
604 out[3] = LLVMGetUndef(ctx->i32);
605 /* fall through */
606 case 4: /* as v4i32 */
607 vdata = ac_build_gather_values(&ctx->ac, out, util_next_power_of_two(num_comps));
608 break;
609 }
610
611 ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
612 vdata, num_comps,
613 so_write_offsets[buf_idx],
614 ctx->i32_0,
615 stream_out->dst_offset * 4, ac_glc | ac_slc);
616 }
617
618 /**
619 * Write streamout data to buffers for vertex stream @p stream (different
620 * vertex streams can occur for GS copy shaders).
621 */
622 void si_llvm_emit_streamout(struct si_shader_context *ctx,
623 struct si_shader_output_values *outputs,
624 unsigned noutput, unsigned stream)
625 {
626 struct si_shader_selector *sel = ctx->shader->selector;
627 struct pipe_stream_output_info *so = &sel->so;
628 LLVMBuilderRef builder = ctx->ac.builder;
629 int i;
630
631 /* Get bits [22:16], i.e. (so_param >> 16) & 127; */
632 LLVMValueRef so_vtx_count =
633 si_unpack_param(ctx, ctx->streamout_config, 16, 7);
634
635 LLVMValueRef tid = ac_get_thread_id(&ctx->ac);
636
637 /* can_emit = tid < so_vtx_count; */
638 LLVMValueRef can_emit =
639 LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");
640
641 /* Emit the streamout code conditionally. This actually avoids
642 * out-of-bounds buffer access. The hw tells us via the SGPR
643 * (so_vtx_count) which threads are allowed to emit streamout data. */
644 ac_build_ifcc(&ctx->ac, can_emit, 6501);
645 {
646 /* The buffer offset is computed as follows:
647 * ByteOffset = streamout_offset[buffer_id]*4 +
648 * (streamout_write_index + thread_id)*stride[buffer_id] +
649 * attrib_offset
650 */
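/* Worked example (illustrative): with stride[i] = 4 dwords (16 bytes),
 * streamout_offset[i] = 8 dwords and this thread writing vertex 5, the
 * base byte offset is 8*4 + 5*16 = 112, with dst_offset*4 added per
 * output at store time. */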
651
652 LLVMValueRef so_write_index =
653 ac_get_arg(&ctx->ac,
654 ctx->streamout_write_index);
655
656 /* Compute (streamout_write_index + thread_id). */
657 so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");
658
659 /* Load the descriptor and compute the write offset for each
660 * enabled buffer. */
661 LLVMValueRef so_write_offset[4] = {};
662 LLVMValueRef so_buffers[4];
663 LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac,
664 ctx->rw_buffers);
665
666 for (i = 0; i < 4; i++) {
667 if (!so->stride[i])
668 continue;
669
670 LLVMValueRef offset = LLVMConstInt(ctx->i32,
671 SI_VS_STREAMOUT_BUF0 + i, 0);
672
673 so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
674
675 LLVMValueRef so_offset = ac_get_arg(&ctx->ac,
676 ctx->streamout_offset[i]);
677 so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");
678
679 so_write_offset[i] = ac_build_imad(&ctx->ac, so_write_index,
680 LLVMConstInt(ctx->i32, so->stride[i]*4, 0),
681 so_offset);
682 }
683
684 /* Write streamout data. */
685 for (i = 0; i < so->num_outputs; i++) {
686 unsigned reg = so->output[i].register_index;
687
688 if (reg >= noutput)
689 continue;
690
691 if (stream != so->output[i].stream)
692 continue;
693
694 si_emit_streamout_output(ctx, so_buffers, so_write_offset,
695 &so->output[i], &outputs[reg]);
696 }
697 }
698 ac_build_endif(&ctx->ac, 6501);
699 }
700
701 static void si_export_param(struct si_shader_context *ctx, unsigned index,
702 LLVMValueRef *values)
703 {
704 struct ac_export_args args;
705
706 si_llvm_init_vs_export_args(ctx, values,
707 V_008DFC_SQ_EXP_PARAM + index, &args);
708 ac_build_export(&ctx->ac, &args);
709 }
710
711 static void si_build_param_exports(struct si_shader_context *ctx,
712 struct si_shader_output_values *outputs,
713 unsigned noutput)
714 {
715 struct si_shader *shader = ctx->shader;
716 unsigned param_count = 0;
717
718 for (unsigned i = 0; i < noutput; i++) {
719 unsigned semantic_name = outputs[i].semantic_name;
720 unsigned semantic_index = outputs[i].semantic_index;
721
722 if (outputs[i].vertex_stream[0] != 0 &&
723 outputs[i].vertex_stream[1] != 0 &&
724 outputs[i].vertex_stream[2] != 0 &&
725 outputs[i].vertex_stream[3] != 0)
726 continue;
727
728 switch (semantic_name) {
729 case TGSI_SEMANTIC_LAYER:
730 case TGSI_SEMANTIC_VIEWPORT_INDEX:
731 case TGSI_SEMANTIC_CLIPDIST:
732 case TGSI_SEMANTIC_COLOR:
733 case TGSI_SEMANTIC_BCOLOR:
734 case TGSI_SEMANTIC_PRIMID:
735 case TGSI_SEMANTIC_FOG:
736 case TGSI_SEMANTIC_TEXCOORD:
737 case TGSI_SEMANTIC_GENERIC:
738 break;
739 default:
740 continue;
741 }
742
743 if ((semantic_name != TGSI_SEMANTIC_GENERIC ||
744 semantic_index < SI_MAX_IO_GENERIC) &&
745 shader->key.opt.kill_outputs &
746 (1ull << si_shader_io_get_unique_index(semantic_name,
747 semantic_index, true)))
748 continue;
749
750 si_export_param(ctx, param_count, outputs[i].values);
751
752 assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
753 shader->info.vs_output_param_offset[i] = param_count++;
754 }
755
756 shader->info.nr_param_exports = param_count;
757 }
758
759 /**
760 * Vertex color clamping.
761 *
762 * This loads a state constant from a user data SGPR and adds
763 * an IF statement that clamps all colors if the constant
764 * is true.
765 */
766 static void si_vertex_color_clamping(struct si_shader_context *ctx,
767 struct si_shader_output_values *outputs,
768 unsigned noutput)
769 {
770 LLVMValueRef addr[SI_MAX_VS_OUTPUTS][4];
771 bool has_colors = false;
772
773 /* Store original colors to alloca variables. */
774 for (unsigned i = 0; i < noutput; i++) {
775 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
776 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
777 continue;
778
779 for (unsigned j = 0; j < 4; j++) {
780 addr[i][j] = ac_build_alloca_undef(&ctx->ac, ctx->f32, "");
781 LLVMBuildStore(ctx->ac.builder, outputs[i].values[j], addr[i][j]);
782 }
783 has_colors = true;
784 }
785
786 if (!has_colors)
787 return;
788
789 /* The state is in the first bit of the user SGPR. */
790 LLVMValueRef cond = ac_get_arg(&ctx->ac, ctx->vs_state_bits);
791 cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->i1, "");
792
793 ac_build_ifcc(&ctx->ac, cond, 6502);
794
795 /* Store clamped colors to alloca variables within the conditional block. */
796 for (unsigned i = 0; i < noutput; i++) {
797 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
798 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
799 continue;
800
801 for (unsigned j = 0; j < 4; j++) {
802 LLVMBuildStore(ctx->ac.builder,
803 ac_build_clamp(&ctx->ac, outputs[i].values[j]),
804 addr[i][j]);
805 }
806 }
807 ac_build_endif(&ctx->ac, 6502);
808
809 /* Load clamped colors */
810 for (unsigned i = 0; i < noutput; i++) {
811 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
812 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
813 continue;
814
815 for (unsigned j = 0; j < 4; j++) {
816 outputs[i].values[j] =
817 LLVMBuildLoad(ctx->ac.builder, addr[i][j], "");
818 }
819 }
820 }
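/* Design note (illustrative): the alloca/store/load pattern keeps the
 * clamped values usable after ac_build_endif without hand-building phi
 * nodes; LLVM's mem2reg pass promotes the allocas back to SSA form. */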
821
822 /* Generate export instructions for hardware VS shader stage or NGG GS stage
823 * (position and parameter data only).
824 */
825 void si_llvm_export_vs(struct si_shader_context *ctx,
826 struct si_shader_output_values *outputs,
827 unsigned noutput)
828 {
829 struct si_shader *shader = ctx->shader;
830 struct ac_export_args pos_args[4] = {};
831 LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
832 unsigned pos_idx;
833 int i;
834
835 si_vertex_color_clamping(ctx, outputs, noutput);
836
837 /* Build position exports. */
838 for (i = 0; i < noutput; i++) {
839 switch (outputs[i].semantic_name) {
840 case TGSI_SEMANTIC_POSITION:
841 si_llvm_init_vs_export_args(ctx, outputs[i].values,
842 V_008DFC_SQ_EXP_POS, &pos_args[0]);
843 break;
844 case TGSI_SEMANTIC_PSIZE:
845 psize_value = outputs[i].values[0];
846 break;
847 case TGSI_SEMANTIC_LAYER:
848 layer_value = outputs[i].values[0];
849 break;
850 case TGSI_SEMANTIC_VIEWPORT_INDEX:
851 viewport_index_value = outputs[i].values[0];
852 break;
853 case TGSI_SEMANTIC_EDGEFLAG:
854 edgeflag_value = outputs[i].values[0];
855 break;
856 case TGSI_SEMANTIC_CLIPDIST:
857 if (!shader->key.opt.clip_disable) {
858 unsigned index = 2 + outputs[i].semantic_index;
859 si_llvm_init_vs_export_args(ctx, outputs[i].values,
860 V_008DFC_SQ_EXP_POS + index,
861 &pos_args[index]);
862 }
863 break;
864 case TGSI_SEMANTIC_CLIPVERTEX:
865 if (!shader->key.opt.clip_disable) {
866 si_llvm_emit_clipvertex(ctx, pos_args,
867 outputs[i].values);
868 }
869 break;
870 }
871 }
872
873 /* We need to add the position output manually if it's missing. */
874 if (!pos_args[0].out[0]) {
875 pos_args[0].enabled_channels = 0xf; /* writemask */
876 pos_args[0].valid_mask = 0; /* EXEC mask */
877 pos_args[0].done = 0; /* last export? */
878 pos_args[0].target = V_008DFC_SQ_EXP_POS;
879 pos_args[0].compr = 0; /* COMPR flag */
880 pos_args[0].out[0] = ctx->ac.f32_0; /* X */
881 pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
882 pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
883 pos_args[0].out[3] = ctx->ac.f32_1; /* W */
884 }
885
886 bool pos_writes_edgeflag = shader->selector->info.writes_edgeflag &&
887 !shader->key.as_ngg;
888
889 /* Write the misc vector (point size, edgeflag, layer, viewport). */
890 if (shader->selector->info.writes_psize ||
891 pos_writes_edgeflag ||
892 shader->selector->info.writes_viewport_index ||
893 shader->selector->info.writes_layer) {
894 pos_args[1].enabled_channels = shader->selector->info.writes_psize |
895 (pos_writes_edgeflag << 1) |
896 (shader->selector->info.writes_layer << 2);
897
898 pos_args[1].valid_mask = 0; /* EXEC mask */
899 pos_args[1].done = 0; /* last export? */
900 pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
901 pos_args[1].compr = 0; /* COMPR flag */
902 pos_args[1].out[0] = ctx->ac.f32_0; /* X */
903 pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
904 pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
905 pos_args[1].out[3] = ctx->ac.f32_0; /* W */
906
907 if (shader->selector->info.writes_psize)
908 pos_args[1].out[0] = psize_value;
909
910 if (pos_writes_edgeflag) {
911 /* The output is a float, but the hw expects an integer
912 * with the first bit containing the edge flag. */
913 edgeflag_value = LLVMBuildFPToUI(ctx->ac.builder,
914 edgeflag_value,
915 ctx->i32, "");
916 edgeflag_value = ac_build_umin(&ctx->ac,
917 edgeflag_value,
918 ctx->i32_1);
919
920 /* The LLVM intrinsic expects a float. */
921 pos_args[1].out[1] = ac_to_float(&ctx->ac, edgeflag_value);
922 }
923
924 if (ctx->screen->info.chip_class >= GFX9) {
925 /* GFX9 has the layer in out.z[10:0] and the viewport
926 * index in out.z[19:16].
927 */
928 if (shader->selector->info.writes_layer)
929 pos_args[1].out[2] = layer_value;
930
931 if (shader->selector->info.writes_viewport_index) {
932 LLVMValueRef v = viewport_index_value;
933
934 v = ac_to_integer(&ctx->ac, v);
935 v = LLVMBuildShl(ctx->ac.builder, v,
936 LLVMConstInt(ctx->i32, 16, 0), "");
937 v = LLVMBuildOr(ctx->ac.builder, v,
938 ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
939 pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
940 pos_args[1].enabled_channels |= 1 << 2;
941 }
942 } else {
943 if (shader->selector->info.writes_layer)
944 pos_args[1].out[2] = layer_value;
945
946 if (shader->selector->info.writes_viewport_index) {
947 pos_args[1].out[3] = viewport_index_value;
948 pos_args[1].enabled_channels |= 1 << 3;
949 }
950 }
951 }
952
953 for (i = 0; i < 4; i++)
954 if (pos_args[i].out[0])
955 shader->info.nr_pos_exports++;
956
957 /* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
958 * Setting valid_mask=1 prevents it and has no other effect.
959 */
960 if (ctx->screen->info.family == CHIP_NAVI10 ||
961 ctx->screen->info.family == CHIP_NAVI12 ||
962 ctx->screen->info.family == CHIP_NAVI14)
963 pos_args[0].valid_mask = 1;
964
965 pos_idx = 0;
966 for (i = 0; i < 4; i++) {
967 if (!pos_args[i].out[0])
968 continue;
969
970 /* Specify the target we are exporting */
971 pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
972
973 if (pos_idx == shader->info.nr_pos_exports)
974 /* Specify that this is the last export */
975 pos_args[i].done = 1;
976
977 ac_build_export(&ctx->ac, &pos_args[i]);
978 }
979
980 /* Build parameter exports. */
981 si_build_param_exports(ctx, outputs, noutput);
982 }
983
984 static void si_llvm_emit_vs_epilogue(struct ac_shader_abi *abi,
985 unsigned max_outputs,
986 LLVMValueRef *addrs)
987 {
988 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
989 struct si_shader_info *info = &ctx->shader->selector->info;
990 struct si_shader_output_values *outputs = NULL;
991 int i,j;
992
993 assert(!ctx->shader->is_gs_copy_shader);
994 assert(info->num_outputs <= max_outputs);
995
996 outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));
997
998 for (i = 0; i < info->num_outputs; i++) {
999 outputs[i].semantic_name = info->output_semantic_name[i];
1000 outputs[i].semantic_index = info->output_semantic_index[i];
1001
1002 for (j = 0; j < 4; j++) {
1003 outputs[i].values[j] =
1004 LLVMBuildLoad(ctx->ac.builder,
1005 addrs[4 * i + j],
1006 "");
1007 outputs[i].vertex_stream[j] =
1008 (info->output_streams[i] >> (2 * j)) & 3;
1009 }
1010 }
1011
1012 if (!ctx->screen->use_ngg_streamout &&
1013 ctx->shader->selector->so.num_outputs)
1014 si_llvm_emit_streamout(ctx, outputs, i, 0);
1015
1016 /* Export PrimitiveID. */
1017 if (ctx->shader->key.mono.u.vs_export_prim_id) {
1018 outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
1019 outputs[i].semantic_index = 0;
1020 outputs[i].values[0] = ac_to_float(&ctx->ac, si_get_primitive_id(ctx, 0));
1021 for (j = 1; j < 4; j++)
1022 outputs[i].values[j] = LLVMConstReal(ctx->f32, 0);
1023
1024 memset(outputs[i].vertex_stream, 0,
1025 sizeof(outputs[i].vertex_stream));
1026 i++;
1027 }
1028
1029 si_llvm_export_vs(ctx, outputs, i);
1030 FREE(outputs);
1031 }
1032
1033 static void si_llvm_emit_prim_discard_cs_epilogue(struct ac_shader_abi *abi,
1034 unsigned max_outputs,
1035 LLVMValueRef *addrs)
1036 {
1037 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1038 struct si_shader_info *info = &ctx->shader->selector->info;
1039 LLVMValueRef pos[4] = {};
1040
1041 assert(info->num_outputs <= max_outputs);
1042
1043 for (unsigned i = 0; i < info->num_outputs; i++) {
1044 if (info->output_semantic_name[i] != TGSI_SEMANTIC_POSITION)
1045 continue;
1046
1047 for (unsigned chan = 0; chan < 4; chan++)
1048 pos[chan] = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
1049 break;
1050 }
1051 assert(pos[0] != NULL);
1052
1053 /* Return the position output. */
1054 LLVMValueRef ret = ctx->return_value;
1055 for (unsigned chan = 0; chan < 4; chan++)
1056 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, pos[chan], chan, "");
1057 ctx->return_value = ret;
1058 }
1059
1060 static void declare_streamout_params(struct si_shader_context *ctx,
1061 struct pipe_stream_output_info *so)
1062 {
1063 if (ctx->screen->use_ngg_streamout) {
1064 if (ctx->type == PIPE_SHADER_TESS_EVAL)
1065 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1066 return;
1067 }
1068
1069 /* Streamout SGPRs. */
1070 if (so->num_outputs) {
1071 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_config);
1072 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_write_index);
1073 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
1074 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1075 }
1076
1077 /* A streamout buffer offset is loaded if the stride is non-zero. */
1078 for (int i = 0; i < 4; i++) {
1079 if (!so->stride[i])
1080 continue;
1081
1082 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_offset[i]);
1083 }
1084 }
1085
1086 static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
1087 {
1088 switch (shader->selector->type) {
1089 case PIPE_SHADER_VERTEX:
1090 case PIPE_SHADER_TESS_EVAL:
1091 return shader->key.as_ngg ? 128 : 0;
1092
1093 case PIPE_SHADER_TESS_CTRL:
1094 /* Return this so that LLVM doesn't remove s_barrier
1095 * instructions on chips where we use s_barrier. */
1096 return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 0;
1097
1098 case PIPE_SHADER_GEOMETRY:
1099 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 0;
1100
1101 case PIPE_SHADER_COMPUTE:
1102 break; /* see below */
1103
1104 default:
1105 return 0;
1106 }
1107
1108 const unsigned *properties = shader->selector->info.properties;
1109 unsigned max_work_group_size =
1110 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
1111 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
1112 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
1113
1114 if (!max_work_group_size) {
1115 /* This is a variable group size compute shader;
1116 * compile it for the maximum possible group size.
1117 */
1118 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
1119 }
1120 return max_work_group_size;
1121 }
1122
1123 static void declare_const_and_shader_buffers(struct si_shader_context *ctx,
1124 bool assign_params)
1125 {
1126 enum ac_arg_type const_shader_buf_type;
1127
1128 if (ctx->shader->selector->info.const_buffers_declared == 1 &&
1129 ctx->shader->selector->info.shader_buffers_declared == 0)
1130 const_shader_buf_type = AC_ARG_CONST_FLOAT_PTR;
1131 else
1132 const_shader_buf_type = AC_ARG_CONST_DESC_PTR;
1133
1134 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, const_shader_buf_type,
1135 assign_params ? &ctx->const_and_shader_buffers :
1136 &ctx->other_const_and_shader_buffers);
1137 }
1138
1139 static void declare_samplers_and_images(struct si_shader_context *ctx,
1140 bool assign_params)
1141 {
1142 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
1143 assign_params ? &ctx->samplers_and_images :
1144 &ctx->other_samplers_and_images);
1145 }
1146
1147 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
1148 bool assign_params)
1149 {
1150 declare_const_and_shader_buffers(ctx, assign_params);
1151 declare_samplers_and_images(ctx, assign_params);
1152 }
1153
1154 static void declare_global_desc_pointers(struct si_shader_context *ctx)
1155 {
1156 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
1157 &ctx->rw_buffers);
1158 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
1159 &ctx->bindless_samplers_and_images);
1160 }
1161
1162 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx)
1163 {
1164 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
1165 if (!ctx->shader->is_gs_copy_shader) {
1166 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.base_vertex);
1167 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.start_instance);
1168 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.draw_id);
1169 }
1170 }
1171
1172 static void declare_vb_descriptor_input_sgprs(struct si_shader_context *ctx)
1173 {
1174 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->vertex_buffers);
1175
1176 unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
1177 if (num_vbos_in_user_sgprs) {
1178 unsigned user_sgprs = ctx->args.num_sgprs_used;
1179
1180 if (si_is_merged_shader(ctx))
1181 user_sgprs -= 8;
1182 assert(user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
1183
1184 /* Declare unused SGPRs to align VB descriptors to 4 SGPRs (hw requirement). */
1185 for (unsigned i = user_sgprs; i < SI_SGPR_VS_VB_DESCRIPTOR_FIRST; i++)
1186 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
1187
1188 assert(num_vbos_in_user_sgprs <= ARRAY_SIZE(ctx->vb_descriptors));
1189 for (unsigned i = 0; i < num_vbos_in_user_sgprs; i++)
1190 ac_add_arg(&ctx->args, AC_ARG_SGPR, 4, AC_ARG_INT, &ctx->vb_descriptors[i]);
1191 }
1192 }
1193
1194 static void declare_vs_input_vgprs(struct si_shader_context *ctx,
1195 unsigned *num_prolog_vgprs)
1196 {
1197 struct si_shader *shader = ctx->shader;
1198
1199 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.vertex_id);
1200 if (shader->key.as_ls) {
1201 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->rel_auto_id);
1202 if (ctx->screen->info.chip_class >= GFX10) {
1203 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
1204 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
1205 } else {
1206 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
1207 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
1208 }
1209 } else if (ctx->screen->info.chip_class >= GFX10) {
1210 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
1211 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
1212 &ctx->vs_prim_id); /* user vgpr or PrimID (legacy) */
1213 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
1214 } else {
1215 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
1216 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->vs_prim_id);
1217 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
1218 }
1219
1220 if (!shader->is_gs_copy_shader) {
1221 /* Vertex load indices. */
1222 if (shader->selector->info.num_inputs) {
1223 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
1224 &ctx->vertex_index0);
1225 for (unsigned i = 1; i < shader->selector->info.num_inputs; i++)
1226 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
1227 }
1228 *num_prolog_vgprs += shader->selector->info.num_inputs;
1229 }
1230 }
1231
1232 static void declare_vs_blit_inputs(struct si_shader_context *ctx,
1233 unsigned vs_blit_property)
1234 {
1235 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
1236 &ctx->vs_blit_inputs); /* i16 x1, y1 */
1237 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* i16 x1, y1 */
1238 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* depth */
1239
1240 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
1241 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color0 */
1242 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color1 */
1243 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color2 */
1244 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color3 */
1245 } else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
1246 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x1 */
1247 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y1 */
1248 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x2 */
1249 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y2 */
1250 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.z */
1251 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.w */
1252 }
1253 }
1254
1255 static void declare_tes_input_vgprs(struct si_shader_context *ctx)
1256 {
1257 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_u);
1258 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_v);
1259 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->tes_rel_patch_id);
1260 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tes_patch_id);
1261 }
1262
1263 enum {
1264 /* Convenient merged shader definitions. */
1265 SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
1266 SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
1267 };
1268
1269 void si_add_arg_checked(struct ac_shader_args *args,
1270 enum ac_arg_regfile file,
1271 unsigned registers, enum ac_arg_type type,
1272 struct ac_arg *arg,
1273 unsigned idx)
1274 {
1275 assert(args->arg_count == idx);
1276 ac_add_arg(args, file, registers, type, arg);
1277 }
1278
1279 void si_create_function(struct si_shader_context *ctx)
1280 {
1281 struct si_shader *shader = ctx->shader;
1282 LLVMTypeRef returns[AC_MAX_ARGS];
1283 unsigned i, num_return_sgprs;
1284 unsigned num_returns = 0;
1285 unsigned num_prolog_vgprs = 0;
1286 unsigned type = ctx->type;
1287 unsigned vs_blit_property =
1288 shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
1289
1290 memset(&ctx->args, 0, sizeof(ctx->args));
1291
1292 /* Set MERGED shaders. */
1293 if (ctx->screen->info.chip_class >= GFX9) {
1294 if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
1295 type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
1296 else if (shader->key.as_es || shader->key.as_ngg || type == PIPE_SHADER_GEOMETRY)
1297 type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
1298 }
1299
1300 switch (type) {
1301 case PIPE_SHADER_VERTEX:
1302 declare_global_desc_pointers(ctx);
1303
1304 if (vs_blit_property) {
1305 declare_vs_blit_inputs(ctx, vs_blit_property);
1306
1307 /* VGPRs */
1308 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
1309 break;
1310 }
1311
1312 declare_per_stage_desc_pointers(ctx, true);
1313 declare_vs_specific_input_sgprs(ctx);
1314 if (!shader->is_gs_copy_shader)
1315 declare_vb_descriptor_input_sgprs(ctx);
1316
1317 if (shader->key.as_es) {
1318 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
1319 &ctx->es2gs_offset);
1320 } else if (shader->key.as_ls) {
1321 /* no extra parameters */
1322 } else {
1323 /* The locations of the other parameters are assigned dynamically. */
1324 declare_streamout_params(ctx, &shader->selector->so);
1325 }
1326
1327 /* VGPRs */
1328 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
1329
1330 /* Return values */
1331 if (shader->key.opt.vs_as_prim_discard_cs) {
1332 for (i = 0; i < 4; i++)
1333 returns[num_returns++] = ctx->f32; /* VGPRs */
1334 }
1335 break;
1336
1337 case PIPE_SHADER_TESS_CTRL: /* GFX6-GFX8 */
1338 declare_global_desc_pointers(ctx);
1339 declare_per_stage_desc_pointers(ctx, true);
1340 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
1341 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
1342 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
1343 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
1344 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
1345 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
1346
1347 /* VGPRs */
1348 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
1349 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
1350
1351 /* param_tcs_offchip_offset and param_tcs_factor_offset are
1352 * placed after the user SGPRs.
1353 */
1354 for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
1355 returns[num_returns++] = ctx->i32; /* SGPRs */
1356 for (i = 0; i < 11; i++)
1357 returns[num_returns++] = ctx->f32; /* VGPRs */
1358 break;
1359
1360 case SI_SHADER_MERGED_VERTEX_TESSCTRL:
1361 /* Merged stages have 8 system SGPRs at the beginning. */
1362 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_HS */
1363 declare_per_stage_desc_pointers(ctx,
1364 ctx->type == PIPE_SHADER_TESS_CTRL);
1365 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
1366 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
1367 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
1368 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
1369 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
1370 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
1371
1372 declare_global_desc_pointers(ctx);
1373 declare_per_stage_desc_pointers(ctx,
1374 ctx->type == PIPE_SHADER_VERTEX);
1375 declare_vs_specific_input_sgprs(ctx);
1376
1377 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
1378 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
1379 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
1380 declare_vb_descriptor_input_sgprs(ctx);
1381
1382 /* VGPRs (first TCS, then VS) */
1383 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
1384 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
1385
1386 if (ctx->type == PIPE_SHADER_VERTEX) {
1387 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
1388
1389 /* LS return values are inputs to the TCS main shader part. */
1390 for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
1391 returns[num_returns++] = ctx->i32; /* SGPRs */
1392 for (i = 0; i < 2; i++)
1393 returns[num_returns++] = ctx->f32; /* VGPRs */
1394 } else {
1395 /* TCS return values are inputs to the TCS epilog.
1396 *
1397 * param_tcs_offchip_offset, param_tcs_factor_offset,
1398 * param_tcs_offchip_layout, and param_rw_buffers
1399 * should be passed to the epilog.
1400 */
1401 for (i = 0; i <= 8 + GFX9_SGPR_TCS_OUT_LAYOUT; i++)
1402 returns[num_returns++] = ctx->i32; /* SGPRs */
1403 for (i = 0; i < 11; i++)
1404 returns[num_returns++] = ctx->f32; /* VGPRs */
1405 }
1406 break;
1407
1408 case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
1409 /* Merged stages have 8 system SGPRs at the beginning. */
1410 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_GS */
1411 declare_per_stage_desc_pointers(ctx,
1412 ctx->type == PIPE_SHADER_GEOMETRY);
1413
1414 if (ctx->shader->key.as_ngg)
1415 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_tg_info);
1416 else
1417 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
1418
1419 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
1420 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
1421 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
1422 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */
1423 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
1424
1425 declare_global_desc_pointers(ctx);
1426 if (ctx->type != PIPE_SHADER_VERTEX || !vs_blit_property) {
1427 declare_per_stage_desc_pointers(ctx,
1428 (ctx->type == PIPE_SHADER_VERTEX ||
1429 ctx->type == PIPE_SHADER_TESS_EVAL));
1430 }
1431
1432 if (ctx->type == PIPE_SHADER_VERTEX) {
1433 if (vs_blit_property)
1434 declare_vs_blit_inputs(ctx, vs_blit_property);
1435 else
1436 declare_vs_specific_input_sgprs(ctx);
1437 } else {
1438 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
1439 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
1440 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
1441 /* Declare as many input SGPRs as the VS has. */
1442 }
1443
1444 if (ctx->type == PIPE_SHADER_VERTEX)
1445 declare_vb_descriptor_input_sgprs(ctx);
1446
1447 /* VGPRs (first GS, then VS/TES) */
1448 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx01_offset);
1449 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx23_offset);
1450 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
1451 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
1452 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx45_offset);
1453
1454 if (ctx->type == PIPE_SHADER_VERTEX) {
1455 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
1456 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
1457 declare_tes_input_vgprs(ctx);
1458 }
1459
1460 if (ctx->shader->key.as_es &&
1461 (ctx->type == PIPE_SHADER_VERTEX ||
1462 ctx->type == PIPE_SHADER_TESS_EVAL)) {
1463 unsigned num_user_sgprs;
1464
1465 if (ctx->type == PIPE_SHADER_VERTEX)
1466 num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR;
1467 else
1468 num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
1469
1470 /* ES return values are inputs to GS. */
1471 for (i = 0; i < 8 + num_user_sgprs; i++)
1472 returns[num_returns++] = ctx->i32; /* SGPRs */
1473 for (i = 0; i < 5; i++)
1474 returns[num_returns++] = ctx->f32; /* VGPRs */
1475 }
1476 break;
1477
1478 case PIPE_SHADER_TESS_EVAL:
1479 declare_global_desc_pointers(ctx);
1480 declare_per_stage_desc_pointers(ctx, true);
1481 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
1482 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
1483 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
1484
1485 if (shader->key.as_es) {
1486 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
1487 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
1488 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->es2gs_offset);
1489 } else {
1490 declare_streamout_params(ctx, &shader->selector->so);
1491 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
1492 }
1493
1494 /* VGPRs */
1495 declare_tes_input_vgprs(ctx);
1496 break;
1497
1498 case PIPE_SHADER_GEOMETRY:
1499 declare_global_desc_pointers(ctx);
1500 declare_per_stage_desc_pointers(ctx, true);
1501 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
1502 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_wave_id);
1503
1504 /* VGPRs */
1505 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[0]);
1506 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[1]);
1507 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
1508 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[2]);
1509 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[3]);
1510 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[4]);
1511 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[5]);
1512 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
1513 break;
1514
1515 case PIPE_SHADER_FRAGMENT:
1516 declare_global_desc_pointers(ctx);
1517 declare_per_stage_desc_pointers(ctx, true);
1518 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL,
1519 SI_PARAM_ALPHA_REF);
1520 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
1521 &ctx->args.prim_mask, SI_PARAM_PRIM_MASK);
1522
1523 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_sample,
1524 SI_PARAM_PERSP_SAMPLE);
1525 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
1526 &ctx->args.persp_center, SI_PARAM_PERSP_CENTER);
1527 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
1528 &ctx->args.persp_centroid, SI_PARAM_PERSP_CENTROID);
1529 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
1530 NULL, SI_PARAM_PERSP_PULL_MODEL);
1531 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
1532 &ctx->args.linear_sample, SI_PARAM_LINEAR_SAMPLE);
1533 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
1534 &ctx->args.linear_center, SI_PARAM_LINEAR_CENTER);
1535 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
1536 &ctx->args.linear_centroid, SI_PARAM_LINEAR_CENTROID);
1537 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_FLOAT,
1538 NULL, SI_PARAM_LINE_STIPPLE_TEX);
1539 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
1540 &ctx->args.frag_pos[0], SI_PARAM_POS_X_FLOAT);
1541 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
1542 &ctx->args.frag_pos[1], SI_PARAM_POS_Y_FLOAT);
1543 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
1544 &ctx->args.frag_pos[2], SI_PARAM_POS_Z_FLOAT);
1545 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
1546 &ctx->args.frag_pos[3], SI_PARAM_POS_W_FLOAT);
1547 shader->info.face_vgpr_index = ctx->args.num_vgprs_used;
1548 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
1549 &ctx->args.front_face, SI_PARAM_FRONT_FACE);
1550 shader->info.ancillary_vgpr_index = ctx->args.num_vgprs_used;
1551 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
1552 &ctx->args.ancillary, SI_PARAM_ANCILLARY);
1553 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
1554 &ctx->args.sample_coverage, SI_PARAM_SAMPLE_COVERAGE);
1555 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
1556 &ctx->pos_fixed_pt, SI_PARAM_POS_FIXED_PT);
1557
1558 /* Color inputs from the prolog. */
1559 if (shader->selector->info.colors_read) {
1560 unsigned num_color_elements =
1561 util_bitcount(shader->selector->info.colors_read);
1562
1563 for (i = 0; i < num_color_elements; i++)
1564 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, NULL);
1565
1566 num_prolog_vgprs += num_color_elements;
1567 }
1568
1569 /* Outputs for the epilog. */
1570 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
1571 num_returns =
1572 num_return_sgprs +
1573 util_bitcount(shader->selector->info.colors_written) * 4 +
1574 shader->selector->info.writes_z +
1575 shader->selector->info.writes_stencil +
1576 shader->selector->info.writes_samplemask +
1577 1 /* SampleMaskIn */;
1578
1579 num_returns = MAX2(num_returns,
1580 num_return_sgprs +
1581 PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
1582
1583 for (i = 0; i < num_return_sgprs; i++)
1584 returns[i] = ctx->i32;
1585 for (; i < num_returns; i++)
1586 returns[i] = ctx->f32;
1587 break;
1588
1589 case PIPE_SHADER_COMPUTE:
1590 declare_global_desc_pointers(ctx);
1591 declare_per_stage_desc_pointers(ctx, true);
1592 if (shader->selector->info.uses_grid_size)
1593 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT,
1594 &ctx->args.num_work_groups);
1595 if (shader->selector->info.uses_block_size &&
1596 shader->selector->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
1597 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT, &ctx->block_size);
1598
1599 unsigned cs_user_data_dwords =
1600 shader->selector->info.properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD];
1601 if (cs_user_data_dwords) {
1602 ac_add_arg(&ctx->args, AC_ARG_SGPR, cs_user_data_dwords, AC_ARG_INT,
1603 &ctx->cs_user_data);
1604 }
1605
1606 /* Hardware SGPRs. */
1607 for (i = 0; i < 3; i++) {
1608 if (shader->selector->info.uses_block_id[i]) {
1609 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
1610 &ctx->args.workgroup_ids[i]);
1611 }
1612 }
1613 if (shader->selector->info.uses_subgroup_info)
1614 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tg_size);
1615
1616 /* Hardware VGPRs. */
1617 ac_add_arg(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
1618 &ctx->args.local_invocation_ids);
1619 break;
1620 default:
1621 assert(0 && "unimplemented shader");
1622 return;
1623 }
1624
1625 si_llvm_create_func(ctx, "main", returns, num_returns,
1626 si_get_max_workgroup_size(shader));
1627
1628 /* Reserve register locations for VGPR inputs the PS prolog may need. */
1629 if (ctx->type == PIPE_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
1630 ac_llvm_add_target_dep_function_attr(ctx->main_fn,
1631 "InitialPSInputAddr",
1632 S_0286D0_PERSP_SAMPLE_ENA(1) |
1633 S_0286D0_PERSP_CENTER_ENA(1) |
1634 S_0286D0_PERSP_CENTROID_ENA(1) |
1635 S_0286D0_LINEAR_SAMPLE_ENA(1) |
1636 S_0286D0_LINEAR_CENTER_ENA(1) |
1637 S_0286D0_LINEAR_CENTROID_ENA(1) |
1638 S_0286D0_FRONT_FACE_ENA(1) |
1639 S_0286D0_ANCILLARY_ENA(1) |
1640 S_0286D0_POS_FIXED_PT_ENA(1));
1641 }
1642
1643 shader->info.num_input_sgprs = ctx->args.num_sgprs_used;
1644 shader->info.num_input_vgprs = ctx->args.num_vgprs_used;
1645
1646 assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
1647 shader->info.num_input_vgprs -= num_prolog_vgprs;
1648
1649 if (shader->key.as_ls || ctx->type == PIPE_SHADER_TESS_CTRL) {
1650 if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
1651 /* The LSHS size is not known until draw time, so we append it
1652 * at the end of whatever LDS use there may be in the rest of
1653 * the shader (currently none, unless LLVM decides to do its
1654 * own LDS-based lowering).
1655 */
1656 ctx->ac.lds = LLVMAddGlobalInAddressSpace(
1657 ctx->ac.module, LLVMArrayType(ctx->i32, 0),
1658 "__lds_end", AC_ADDR_SPACE_LDS);
1659 LLVMSetAlignment(ctx->ac.lds, 256);
1660 } else {
1661 ac_declare_lds_as_pointer(&ctx->ac);
1662 }
1663 }
1664
1665 /* Unlike radv, we override these arguments in the prolog, so they
1666 * appear to the API shader as normal arguments.
1667 */
1668 if (ctx->type == PIPE_SHADER_VERTEX) {
1669 ctx->abi.vertex_id = ac_get_arg(&ctx->ac, ctx->args.vertex_id);
1670 ctx->abi.instance_id = ac_get_arg(&ctx->ac, ctx->args.instance_id);
1671 } else if (ctx->type == PIPE_SHADER_FRAGMENT) {
1672 ctx->abi.persp_centroid = ac_get_arg(&ctx->ac, ctx->args.persp_centroid);
1673 ctx->abi.linear_centroid = ac_get_arg(&ctx->ac, ctx->args.linear_centroid);
1674 }
1675 }
1676
1677 /* For the UMR disassembler. */
1678 #define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
1679 #define DEBUGGER_NUM_MARKERS 5
1680
1681 static bool si_shader_binary_open(struct si_screen *screen,
1682 struct si_shader *shader,
1683 struct ac_rtld_binary *rtld)
1684 {
1685 const struct si_shader_selector *sel = shader->selector;
1686 const char *part_elfs[5];
1687 size_t part_sizes[5];
1688 unsigned num_parts = 0;
1689
1690 #define add_part(shader_or_part) \
1691 if (shader_or_part) { \
1692 part_elfs[num_parts] = (shader_or_part)->binary.elf_buffer; \
1693 part_sizes[num_parts] = (shader_or_part)->binary.elf_size; \
1694 num_parts++; \
1695 }
1696
1697 add_part(shader->prolog);
1698 add_part(shader->previous_stage);
1699 add_part(shader->prolog2);
1700 add_part(shader);
1701 add_part(shader->epilog);
1702
1703 #undef add_part
1704
1705 struct ac_rtld_symbol lds_symbols[2];
1706 unsigned num_lds_symbols = 0;
1707
1708 if (sel && screen->info.chip_class >= GFX9 && !shader->is_gs_copy_shader &&
1709 (sel->type == PIPE_SHADER_GEOMETRY || shader->key.as_ngg)) {
1710 /* We add this symbol even on LLVM <= 8 to ensure that
1711 * shader->config.lds_size is set correctly below.
1712 */
1713 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
1714 sym->name = "esgs_ring";
1715 sym->size = shader->gs_info.esgs_ring_size;
1716 sym->align = 64 * 1024;
1717 }
1718
1719 if (shader->key.as_ngg && sel->type == PIPE_SHADER_GEOMETRY) {
1720 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
1721 sym->name = "ngg_emit";
1722 sym->size = shader->ngg.ngg_emit_size * 4;
1723 sym->align = 4;
1724 }
1725
1726 bool ok = ac_rtld_open(rtld, (struct ac_rtld_open_info){
1727 .info = &screen->info,
1728 .options = {
1729 .halt_at_entry = screen->options.halt_shaders,
1730 },
1731 .shader_type = tgsi_processor_to_shader_stage(sel->type),
1732 .wave_size = si_get_shader_wave_size(shader),
1733 .num_parts = num_parts,
1734 .elf_ptrs = part_elfs,
1735 .elf_sizes = part_sizes,
1736 .num_shared_lds_symbols = num_lds_symbols,
1737 .shared_lds_symbols = lds_symbols });
1738
1739 if (rtld->lds_size > 0) {
1740 unsigned alloc_granularity = screen->info.chip_class >= GFX7 ? 512 : 256;
1741 shader->config.lds_size =
1742 align(rtld->lds_size, alloc_granularity) / alloc_granularity;
1743 }
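/* Worked example (illustrative numbers): on GFX9 the granularity is
 * 512 bytes, so rtld->lds_size = 5000 gives
 *   align(5000, 512) / 512 = 5120 / 512 = 10,
 * i.e. ten 512-byte allocation blocks, which is the unit that
 * shader->config.lds_size is expressed in.
 */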
1744
1745 return ok;
1746 }
1747
1748 static unsigned si_get_shader_binary_size(struct si_screen *screen, struct si_shader *shader)
1749 {
1750 struct ac_rtld_binary rtld;
1751 si_shader_binary_open(screen, shader, &rtld);
1752 unsigned size = rtld.exec_size;
ac_rtld_close(&rtld); /* close the rtld binary so its allocations aren't leaked */
return size;
1753 }
1754
1755 static bool si_get_external_symbol(void *data, const char *name, uint64_t *value)
1756 {
1757 uint64_t *scratch_va = data;
1758
1759 if (!strcmp(scratch_rsrc_dword0_symbol, name)) {
1760 *value = (uint32_t)*scratch_va;
1761 return true;
1762 }
1763 if (!strcmp(scratch_rsrc_dword1_symbol, name)) {
1764 /* Enable scratch coalescing. */
1765 *value = S_008F04_BASE_ADDRESS_HI(*scratch_va >> 32) |
1766 S_008F04_SWIZZLE_ENABLE(1);
1767 return true;
1768 }
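/* Worked example (hypothetical address): scratch_va = 0x12345600000
 * resolves the two relocations to
 *   SCRATCH_RSRC_DWORD0 = 0x45600000   (low 32 bits of the base)
 *   SCRATCH_RSRC_DWORD1 = S_008F04_BASE_ADDRESS_HI(0x123) |
 *                         S_008F04_SWIZZLE_ENABLE(1)
 * i.e. the buffer-resource base split across two dwords, with
 * swizzling enabled so per-thread scratch accesses coalesce.
 */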
1769
1770 return false;
1771 }
1772
1773 bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader,
1774 uint64_t scratch_va)
1775 {
1776 struct ac_rtld_binary binary;
1777 if (!si_shader_binary_open(sscreen, shader, &binary))
1778 return false;
1779
1780 si_resource_reference(&shader->bo, NULL);
1781 shader->bo = si_aligned_buffer_create(&sscreen->b,
1782 sscreen->info.cpdma_prefetch_writes_memory ?
1783 0 : SI_RESOURCE_FLAG_READ_ONLY,
1784 PIPE_USAGE_IMMUTABLE,
1785 align(binary.rx_size, SI_CPDMA_ALIGNMENT),
1786 256);
1787 if (!shader->bo)
1788 return false;
1789
1790 /* Upload. */
1791 struct ac_rtld_upload_info u = {};
1792 u.binary = &binary;
1793 u.get_external_symbol = si_get_external_symbol;
1794 u.cb_data = &scratch_va;
1795 u.rx_va = shader->bo->gpu_address;
1796 u.rx_ptr = sscreen->ws->buffer_map(shader->bo->buf, NULL,
1797 PIPE_TRANSFER_READ_WRITE |
1798 PIPE_TRANSFER_UNSYNCHRONIZED |
1799 RADEON_TRANSFER_TEMPORARY);
1800 if (!u.rx_ptr)
1801 return false;
1802
1803 bool ok = ac_rtld_upload(&u);
1804
1805 sscreen->ws->buffer_unmap(shader->bo->buf);
1806 ac_rtld_close(&binary);
1807
1808 return ok;
1809 }
1810
1811 static void si_shader_dump_disassembly(struct si_screen *screen,
1812 const struct si_shader_binary *binary,
1813 enum pipe_shader_type shader_type,
1814 unsigned wave_size,
1815 struct pipe_debug_callback *debug,
1816 const char *name, FILE *file)
1817 {
1818 struct ac_rtld_binary rtld_binary;
1819
1820 if (!ac_rtld_open(&rtld_binary, (struct ac_rtld_open_info){
1821 .info = &screen->info,
1822 .shader_type = tgsi_processor_to_shader_stage(shader_type),
1823 .wave_size = wave_size,
1824 .num_parts = 1,
1825 .elf_ptrs = &binary->elf_buffer,
1826 .elf_sizes = &binary->elf_size }))
1827 return;
1828
1829 const char *disasm;
1830 size_t nbytes;
1831
1832 if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm, &nbytes))
1833 goto out;
1834
1835 if (nbytes > INT_MAX)
1836 goto out;
1837
1838 if (debug && debug->debug_message) {
1839 /* Very long debug messages are cut off, so send the
1840 * disassembly one line at a time. This causes more
1841 * overhead, but on the plus side it simplifies
1842 * parsing of the resulting logs.
1843 */
1844 pipe_debug_message(debug, SHADER_INFO,
1845 "Shader Disassembly Begin");
1846
1847 uint64_t line = 0;
1848 while (line < nbytes) {
1849 int count = nbytes - line;
1850 const char *nl = memchr(disasm + line, '\n', nbytes - line);
1851 if (nl)
1852 count = nl - (disasm + line);
1853
1854 if (count) {
1855 pipe_debug_message(debug, SHADER_INFO,
1856 "%.*s", count, disasm + line);
1857 }
1858
1859 line += count + 1;
1860 }
1861
1862 pipe_debug_message(debug, SHADER_INFO,
1863 "Shader Disassembly End");
1864 }
1865
1866 if (file) {
1867 fprintf(file, "Shader %s disassembly:\n", name);
1868 fprintf(file, "%.*s", (int)nbytes, disasm); /* bound by nbytes: the section isn't null-terminated */
1869 }
1870
1871 out:
1872 ac_rtld_close(&rtld_binary);
1873 }
1874
1875 static void si_calculate_max_simd_waves(struct si_shader *shader)
1876 {
1877 struct si_screen *sscreen = shader->selector->screen;
1878 struct ac_shader_config *conf = &shader->config;
1879 unsigned num_inputs = shader->selector->info.num_inputs;
1880 unsigned lds_increment = sscreen->info.chip_class >= GFX7 ? 512 : 256;
1881 unsigned lds_per_wave = 0;
1882 unsigned max_simd_waves;
1883
1884 max_simd_waves = sscreen->info.max_wave64_per_simd;
1885
1886 /* Compute LDS usage for PS. */
1887 switch (shader->selector->type) {
1888 case PIPE_SHADER_FRAGMENT:
1889 /* The minimum usage per wave is (num_inputs * 48). The maximum
1890 * usage is (num_inputs * 48 * 16).
1891 * We can get anything in between and it varies between waves.
1892 *
1893 * The 48 bytes per input for a single primitive is equal to
1894 * 4 bytes/component * 4 components/input * 3 points.
1895 *
1896 * Other stages don't know the size at compile time or don't
1897 * allocate LDS per wave, but instead they do it per thread group.
1898 */
1899 lds_per_wave = conf->lds_size * lds_increment +
1900 align(num_inputs * 48, lds_increment);
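/* Example (illustrative): 8 PS inputs with lds_size = 0 give
 * lds_per_wave = 0 + align(8 * 48, 512) = 512 bytes on GFX7+.
 */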
1901 break;
1902 case PIPE_SHADER_COMPUTE:
1903 if (shader->selector) {
1904 unsigned max_workgroup_size =
1905 si_get_max_workgroup_size(shader);
1906 lds_per_wave = (conf->lds_size * lds_increment) /
1907 DIV_ROUND_UP(max_workgroup_size,
1908 sscreen->compute_wave_size);
1909 }
1910 break;
1911 default:;
1912 }
1913
1914 /* Compute the per-SIMD wave counts. */
1915 if (conf->num_sgprs) {
1916 max_simd_waves =
1917 MIN2(max_simd_waves,
1918 sscreen->info.num_physical_sgprs_per_simd / conf->num_sgprs);
1919 }
1920
1921 if (conf->num_vgprs) {
1922 /* Always print wave limits as Wave64, so that we can compare
1923 * Wave32 and Wave64 with shader-db fairly. */
1924 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd;
1925 max_simd_waves = MIN2(max_simd_waves, max_vgprs / conf->num_vgprs);
1926 }
1927
1928 /* LDS is 64KB per CU (4 SIMDs) on GFX6-9, which is 16KB per SIMD (usage above
1929 * 16KB makes some SIMDs unoccupied).
1930 *
1931 * On GFX10, LDS is 128KB in WGP mode and 64KB in CU mode. Assume WGP mode is used.
1932 */
1933 unsigned max_lds_size = sscreen->info.chip_class >= GFX10 ? 128*1024 : 64*1024;
1934 unsigned max_lds_per_simd = max_lds_size / 4;
1935 if (lds_per_wave)
1936 max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);
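/* Worked example (illustrative, GFX9-like limits): with num_sgprs = 40,
 * num_vgprs = 64, lds_per_wave = 1024, max_wave64_per_simd = 10,
 * 800 physical SGPRs and 256 Wave64 VGPRs per SIMD, and 16KB of LDS
 * per SIMD:
 *   SGPR limit: 800 / 40     = 20 waves
 *   VGPR limit: 256 / 64     = 4 waves
 *   LDS limit:  16384 / 1024 = 16 waves
 * so max_simd_waves = MIN2(10, 20, 4, 16) = 4.
 */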
1937
1938 shader->info.max_simd_waves = max_simd_waves;
1939 }
1940
1941 void si_shader_dump_stats_for_shader_db(struct si_screen *screen,
1942 struct si_shader *shader,
1943 struct pipe_debug_callback *debug)
1944 {
1945 const struct ac_shader_config *conf = &shader->config;
1946
1947 if (screen->options.debug_disassembly)
1948 si_shader_dump_disassembly(screen, &shader->binary,
1949 shader->selector->type,
1950 si_get_shader_wave_size(shader),
1951 debug, "main", NULL);
1952
1953 pipe_debug_message(debug, SHADER_INFO,
1954 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
1955 "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
1956 "Spilled VGPRs: %d PrivMem VGPRs: %d",
1957 conf->num_sgprs, conf->num_vgprs,
1958 si_get_shader_binary_size(screen, shader),
1959 conf->lds_size, conf->scratch_bytes_per_wave,
1960 shader->info.max_simd_waves, conf->spilled_sgprs,
1961 conf->spilled_vgprs, shader->info.private_mem_vgprs);
1962 }
1963
1964 static void si_shader_dump_stats(struct si_screen *sscreen,
1965 struct si_shader *shader,
1966 FILE *file,
1967 bool check_debug_option)
1968 {
1969 const struct ac_shader_config *conf = &shader->config;
1970
1971 if (!check_debug_option ||
1972 si_can_dump_shader(sscreen, shader->selector->type)) {
1973 if (shader->selector->type == PIPE_SHADER_FRAGMENT) {
1974 fprintf(file, "*** SHADER CONFIG ***\n"
1975 "SPI_PS_INPUT_ADDR = 0x%04x\n"
1976 "SPI_PS_INPUT_ENA = 0x%04x\n",
1977 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
1978 }
1979
1980 fprintf(file, "*** SHADER STATS ***\n"
1981 "SGPRS: %d\n"
1982 "VGPRS: %d\n"
1983 "Spilled SGPRs: %d\n"
1984 "Spilled VGPRs: %d\n"
1985 "Private memory VGPRs: %d\n"
1986 "Code Size: %d bytes\n"
1987 "LDS: %d blocks\n"
1988 "Scratch: %d bytes per wave\n"
1989 "Max Waves: %d\n"
1990 "********************\n\n\n",
1991 conf->num_sgprs, conf->num_vgprs,
1992 conf->spilled_sgprs, conf->spilled_vgprs,
1993 shader->info.private_mem_vgprs,
1994 si_get_shader_binary_size(sscreen, shader),
1995 conf->lds_size, conf->scratch_bytes_per_wave,
1996 shader->info.max_simd_waves);
1997 }
1998 }
1999
2000 const char *si_get_shader_name(const struct si_shader *shader)
2001 {
2002 switch (shader->selector->type) {
2003 case PIPE_SHADER_VERTEX:
2004 if (shader->key.as_es)
2005 return "Vertex Shader as ES";
2006 else if (shader->key.as_ls)
2007 return "Vertex Shader as LS";
2008 else if (shader->key.opt.vs_as_prim_discard_cs)
2009 return "Vertex Shader as Primitive Discard CS";
2010 else if (shader->key.as_ngg)
2011 return "Vertex Shader as ESGS";
2012 else
2013 return "Vertex Shader as VS";
2014 case PIPE_SHADER_TESS_CTRL:
2015 return "Tessellation Control Shader";
2016 case PIPE_SHADER_TESS_EVAL:
2017 if (shader->key.as_es)
2018 return "Tessellation Evaluation Shader as ES";
2019 else if (shader->key.as_ngg)
2020 return "Tessellation Evaluation Shader as ESGS";
2021 else
2022 return "Tessellation Evaluation Shader as VS";
2023 case PIPE_SHADER_GEOMETRY:
2024 if (shader->is_gs_copy_shader)
2025 return "GS Copy Shader as VS";
2026 else
2027 return "Geometry Shader";
2028 case PIPE_SHADER_FRAGMENT:
2029 return "Pixel Shader";
2030 case PIPE_SHADER_COMPUTE:
2031 return "Compute Shader";
2032 default:
2033 return "Unknown Shader";
2034 }
2035 }
2036
2037 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
2038 struct pipe_debug_callback *debug,
2039 FILE *file, bool check_debug_option)
2040 {
2041 enum pipe_shader_type shader_type = shader->selector->type;
2042
2043 if (!check_debug_option ||
2044 si_can_dump_shader(sscreen, shader_type))
2045 si_dump_shader_key(shader, file);
2046
2047 if (!check_debug_option && shader->binary.llvm_ir_string) {
2048 if (shader->previous_stage &&
2049 shader->previous_stage->binary.llvm_ir_string) {
2050 fprintf(file, "\n%s - previous stage - LLVM IR:\n\n",
2051 si_get_shader_name(shader));
2052 fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
2053 }
2054
2055 fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
2056 si_get_shader_name(shader));
2057 fprintf(file, "%s\n", shader->binary.llvm_ir_string);
2058 }
2059
2060 if (!check_debug_option ||
2061 (si_can_dump_shader(sscreen, shader_type) &&
2062 !(sscreen->debug_flags & DBG(NO_ASM)))) {
2063 unsigned wave_size = si_get_shader_wave_size(shader);
2064
2065 fprintf(file, "\n%s:\n", si_get_shader_name(shader));
2066
2067 if (shader->prolog)
2068 si_shader_dump_disassembly(sscreen, &shader->prolog->binary,
2069 shader_type, wave_size, debug, "prolog", file);
2070 if (shader->previous_stage)
2071 si_shader_dump_disassembly(sscreen, &shader->previous_stage->binary,
2072 shader_type, wave_size, debug, "previous stage", file);
2073 if (shader->prolog2)
2074 si_shader_dump_disassembly(sscreen, &shader->prolog2->binary,
2075 shader_type, wave_size, debug, "prolog2", file);
2076
2077 si_shader_dump_disassembly(sscreen, &shader->binary, shader_type,
2078 wave_size, debug, "main", file);
2079
2080 if (shader->epilog)
2081 si_shader_dump_disassembly(sscreen, &shader->epilog->binary,
2082 shader_type, wave_size, debug, "epilog", file);
2083 fprintf(file, "\n");
2084 }
2085
2086 si_shader_dump_stats(sscreen, shader, file, check_debug_option);
2087 }
2088
2089 int si_compile_llvm(struct si_screen *sscreen,
2090 struct si_shader_binary *binary,
2091 struct ac_shader_config *conf,
2092 struct ac_llvm_compiler *compiler,
2093 LLVMModuleRef mod,
2094 struct pipe_debug_callback *debug,
2095 enum pipe_shader_type shader_type,
2096 unsigned wave_size,
2097 const char *name,
2098 bool less_optimized)
2099 {
2100 unsigned count = p_atomic_inc_return(&sscreen->num_compilations);
2101
2102 if (si_can_dump_shader(sscreen, shader_type)) {
2103 fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
2104
2105 if (!(sscreen->debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
2106 fprintf(stderr, "%s LLVM IR:\n\n", name);
2107 ac_dump_module(mod);
2108 fprintf(stderr, "\n");
2109 }
2110 }
2111
2112 if (sscreen->record_llvm_ir) {
2113 char *ir = LLVMPrintModuleToString(mod);
2114 binary->llvm_ir_string = strdup(ir);
2115 LLVMDisposeMessage(ir);
2116 }
2117
2118 if (!si_replace_shader(count, binary)) {
2119 unsigned r = si_llvm_compile(mod, binary, compiler, debug,
2120 less_optimized, wave_size);
2121 if (r)
2122 return r;
2123 }
2124
2125 struct ac_rtld_binary rtld;
2126 if (!ac_rtld_open(&rtld, (struct ac_rtld_open_info){
2127 .info = &sscreen->info,
2128 .shader_type = tgsi_processor_to_shader_stage(shader_type),
2129 .wave_size = wave_size,
2130 .num_parts = 1,
2131 .elf_ptrs = &binary->elf_buffer,
2132 .elf_sizes = &binary->elf_size }))
2133 return -1;
2134
2135 bool ok = ac_rtld_read_config(&rtld, conf);
2136 ac_rtld_close(&rtld);
2137 if (!ok)
2138 return -1;
2139
2140 /* Enable 64-bit and 16-bit denormals, because there is no performance
2141 * cost.
2142 *
2143 * If denormals are enabled, all floating-point output modifiers are
2144 * ignored.
2145 *
2146 * Don't enable denormals for 32-bit floats, because:
2147 * - Floating-point output modifiers would be ignored by the hw.
2148 * - Some opcodes don't support denormals, such as v_mad_f32. We would
2149 * have to stop using those.
2150 * - GFX6 & GFX7 would be very slow.
2151 */
2152 conf->float_mode |= V_00B028_FP_64_DENORMS;
2153
2154 return 0;
2155 }
2156
2157 static void si_dump_shader_key_vs(const struct si_shader_key *key,
2158 const struct si_vs_prolog_bits *prolog,
2159 const char *prefix, FILE *f)
2160 {
2161 fprintf(f, " %s.instance_divisor_is_one = %u\n",
2162 prefix, prolog->instance_divisor_is_one);
2163 fprintf(f, " %s.instance_divisor_is_fetched = %u\n",
2164 prefix, prolog->instance_divisor_is_fetched);
2165 fprintf(f, " %s.unpack_instance_id_from_vertex_id = %u\n",
2166 prefix, prolog->unpack_instance_id_from_vertex_id);
2167 fprintf(f, " %s.ls_vgpr_fix = %u\n",
2168 prefix, prolog->ls_vgpr_fix);
2169
2170 fprintf(f, " mono.vs.fetch_opencode = %x\n", key->mono.vs_fetch_opencode);
2171 fprintf(f, " mono.vs.fix_fetch = {");
2172 for (int i = 0; i < SI_MAX_ATTRIBS; i++) {
2173 union si_vs_fix_fetch fix = key->mono.vs_fix_fetch[i];
2174 if (i)
2175 fprintf(f, ", ");
2176 if (!fix.bits)
2177 fprintf(f, "0");
2178 else
2179 fprintf(f, "%u.%u.%u.%u", fix.u.reverse, fix.u.log_size,
2180 fix.u.num_channels_m1, fix.u.format);
2181 }
2182 fprintf(f, "}\n");
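/* Example output (hypothetical key): "mono.vs.fix_fetch = {0, 1.2.3.0}"
 * decodes entry 1 as reverse=1, log_size=2 (4-byte components),
 * num_channels_m1=3 (4 channels), format=0.
 */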
2183 }
2184
2185 static void si_dump_shader_key(const struct si_shader *shader, FILE *f)
2186 {
2187 const struct si_shader_key *key = &shader->key;
2188 enum pipe_shader_type shader_type = shader->selector->type;
2189
2190 fprintf(f, "SHADER KEY\n");
2191
2192 switch (shader_type) {
2193 case PIPE_SHADER_VERTEX:
2194 si_dump_shader_key_vs(key, &key->part.vs.prolog,
2195 "part.vs.prolog", f);
2196 fprintf(f, " as_es = %u\n", key->as_es);
2197 fprintf(f, " as_ls = %u\n", key->as_ls);
2198 fprintf(f, " as_ngg = %u\n", key->as_ngg);
2199 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
2200 key->mono.u.vs_export_prim_id);
2201 fprintf(f, " opt.vs_as_prim_discard_cs = %u\n",
2202 key->opt.vs_as_prim_discard_cs);
2203 fprintf(f, " opt.cs_prim_type = %s\n",
2204 tgsi_primitive_names[key->opt.cs_prim_type]);
2205 fprintf(f, " opt.cs_indexed = %u\n",
2206 key->opt.cs_indexed);
2207 fprintf(f, " opt.cs_instancing = %u\n",
2208 key->opt.cs_instancing);
2209 fprintf(f, " opt.cs_primitive_restart = %u\n",
2210 key->opt.cs_primitive_restart);
2211 fprintf(f, " opt.cs_provoking_vertex_first = %u\n",
2212 key->opt.cs_provoking_vertex_first);
2213 fprintf(f, " opt.cs_need_correct_orientation = %u\n",
2214 key->opt.cs_need_correct_orientation);
2215 fprintf(f, " opt.cs_cull_front = %u\n",
2216 key->opt.cs_cull_front);
2217 fprintf(f, " opt.cs_cull_back = %u\n",
2218 key->opt.cs_cull_back);
2219 fprintf(f, " opt.cs_cull_z = %u\n",
2220 key->opt.cs_cull_z);
2221 fprintf(f, " opt.cs_halfz_clip_space = %u\n",
2222 key->opt.cs_halfz_clip_space);
2223 break;
2224
2225 case PIPE_SHADER_TESS_CTRL:
2226 if (shader->selector->screen->info.chip_class >= GFX9) {
2227 si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
2228 "part.tcs.ls_prolog", f);
2229 }
2230 fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
2231 fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
2232 break;
2233
2234 case PIPE_SHADER_TESS_EVAL:
2235 fprintf(f, " as_es = %u\n", key->as_es);
2236 fprintf(f, " as_ngg = %u\n", key->as_ngg);
2237 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
2238 key->mono.u.vs_export_prim_id);
2239 break;
2240
2241 case PIPE_SHADER_GEOMETRY:
2242 if (shader->is_gs_copy_shader)
2243 break;
2244
2245 if (shader->selector->screen->info.chip_class >= GFX9 &&
2246 key->part.gs.es->type == PIPE_SHADER_VERTEX) {
2247 si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
2248 "part.gs.vs_prolog", f);
2249 }
2250 fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
2251 fprintf(f, " part.gs.prolog.gfx9_prev_is_vs = %u\n", key->part.gs.prolog.gfx9_prev_is_vs);
2252 fprintf(f, " as_ngg = %u\n", key->as_ngg);
2253 break;
2254
2255 case PIPE_SHADER_COMPUTE:
2256 break;
2257
2258 case PIPE_SHADER_FRAGMENT:
2259 fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
2260 fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
2261 fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
2262 fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
2263 fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
2264 fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
2265 fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
2266 fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
2267 fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
2268 fprintf(f, " part.ps.prolog.samplemask_log_ps_iter = %u\n", key->part.ps.prolog.samplemask_log_ps_iter);
2269 fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
2270 fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
2271 fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
2272 fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
2273 fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
2274 fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
2275 fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
2276 fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
2277 fprintf(f, " mono.u.ps.interpolate_at_sample_force_center = %u\n", key->mono.u.ps.interpolate_at_sample_force_center);
2278 fprintf(f, " mono.u.ps.fbfetch_msaa = %u\n", key->mono.u.ps.fbfetch_msaa);
2279 fprintf(f, " mono.u.ps.fbfetch_is_1D = %u\n", key->mono.u.ps.fbfetch_is_1D);
2280 fprintf(f, " mono.u.ps.fbfetch_layered = %u\n", key->mono.u.ps.fbfetch_layered);
2281 break;
2282
2283 default:
2284 assert(0);
2285 }
2286
2287 if ((shader_type == PIPE_SHADER_GEOMETRY ||
2288 shader_type == PIPE_SHADER_TESS_EVAL ||
2289 shader_type == PIPE_SHADER_VERTEX) &&
2290 !key->as_es && !key->as_ls) {
2291 fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
2292 fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
2293 }
2294 }
2295
2296 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
2297 {
2298 struct si_shader *shader = ctx->shader;
2299 struct si_shader_info *info = &shader->selector->info;
2300
2301 if ((ctx->type != PIPE_SHADER_VERTEX &&
2302 ctx->type != PIPE_SHADER_TESS_EVAL) ||
2303 shader->key.as_ls ||
2304 shader->key.as_es)
2305 return;
2306
2307 ac_optimize_vs_outputs(&ctx->ac,
2308 ctx->main_fn,
2309 shader->info.vs_output_param_offset,
2310 info->num_outputs,
2311 &shader->info.nr_param_exports);
2312 }
2313
2314 static void si_init_exec_from_input(struct si_shader_context *ctx,
2315 struct ac_arg param, unsigned bitoffset)
2316 {
2317 LLVMValueRef args[] = {
2318 ac_get_arg(&ctx->ac, param),
2319 LLVMConstInt(ctx->i32, bitoffset, 0),
2320 };
2321 ac_build_intrinsic(&ctx->ac,
2322 "llvm.amdgcn.init.exec.from.input",
2323 ctx->voidt, args, 2, AC_FUNC_ATTR_CONVERGENT);
2324 }
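/* Illustrative usage (the exact SGPR layout is an assumption here): for
 * GFX9 merged shaders, merged_wave_info packs the live-thread counts of
 * both halves, e.g. bits [7:0] for the first stage and bits [15:8] for
 * the second, so the first shader enables exactly its own threads at
 * entry with:
 *
 *   si_init_exec_from_input(ctx, ctx->merged_wave_info, 0);
 */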
2325
2326 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
2327 const struct si_vs_prolog_bits *key)
2328 {
2329 /* VGPR initialization fixup for Vega10 and Raven is always done in the
2330 * VS prolog. */
2331 return sel->vs_needs_prolog ||
2332 key->ls_vgpr_fix ||
2333 key->unpack_instance_id_from_vertex_id;
2334 }
2335
2336 static bool si_build_main_function(struct si_shader_context *ctx,
2337 struct nir_shader *nir, bool free_nir)
2338 {
2339 struct si_shader *shader = ctx->shader;
2340 struct si_shader_selector *sel = shader->selector;
2341
2342 si_llvm_init_resource_callbacks(ctx);
2343
2344 switch (ctx->type) {
2345 case PIPE_SHADER_VERTEX:
2346 if (shader->key.as_ls)
2347 ctx->abi.emit_outputs = si_llvm_emit_ls_epilogue;
2348 else if (shader->key.as_es)
2349 ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
2350 else if (shader->key.opt.vs_as_prim_discard_cs)
2351 ctx->abi.emit_outputs = si_llvm_emit_prim_discard_cs_epilogue;
2352 else if (shader->key.as_ngg)
2353 ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
2354 else
2355 ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
2356 ctx->abi.load_base_vertex = get_base_vertex;
2357 break;
2358 case PIPE_SHADER_TESS_CTRL:
2359 si_llvm_init_tcs_callbacks(ctx);
2360 break;
2361 case PIPE_SHADER_TESS_EVAL:
2362 si_llvm_init_tes_callbacks(ctx);
2363
2364 if (shader->key.as_es)
2365 ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
2366 else if (shader->key.as_ngg)
2367 ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
2368 else
2369 ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
2370 break;
2371 case PIPE_SHADER_GEOMETRY:
2372 si_llvm_init_gs_callbacks(ctx);
2373 break;
2374 case PIPE_SHADER_FRAGMENT:
2375 si_llvm_init_ps_callbacks(ctx);
2376 break;
2377 case PIPE_SHADER_COMPUTE:
2378 ctx->abi.load_local_group_size = get_block_size;
2379 break;
2380 default:
2381 assert(!"Unsupported shader type");
2382 return false;
2383 }
2384
2385 si_create_function(ctx);
2386
2387 if (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY)
2388 si_preload_esgs_ring(ctx);
2389
2390 if (ctx->type == PIPE_SHADER_GEOMETRY)
2391 si_preload_gs_rings(ctx);
2392 else if (ctx->type == PIPE_SHADER_TESS_EVAL)
2393 si_llvm_preload_tes_rings(ctx);
2394
2395 if (ctx->type == PIPE_SHADER_TESS_CTRL &&
2396 sel->info.tessfactors_are_def_in_all_invocs) {
2397 for (unsigned i = 0; i < 6; i++) {
2398 ctx->invoc0_tess_factors[i] =
2399 ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
2400 }
2401 }
2402
2403 if (ctx->type == PIPE_SHADER_GEOMETRY) {
2404 for (unsigned i = 0; i < 4; i++) {
2405 ctx->gs_next_vertex[i] =
2406 ac_build_alloca(&ctx->ac, ctx->i32, "");
2407 }
2408 if (shader->key.as_ngg) {
2409 for (unsigned i = 0; i < 4; ++i) {
2410 ctx->gs_curprim_verts[i] =
2411 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
2412 ctx->gs_generated_prims[i] =
2413 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
2414 }
2415
2416 unsigned scratch_size = 8;
2417 if (sel->so.num_outputs)
2418 scratch_size = 44;
2419
2420 LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, scratch_size);
2421 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
2422 ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
2423 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(ai32));
2424 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
2425
2426 ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(ctx->ac.module,
2427 LLVMArrayType(ctx->i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
2428 LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
2429 LLVMSetAlignment(ctx->gs_ngg_emit, 4);
2430 }
2431 }
2432
2433 if (ctx->type != PIPE_SHADER_GEOMETRY &&
2434 (shader->key.as_ngg && !shader->key.as_es)) {
2435 /* Unconditionally declare scratch space base for streamout and
2436 * vertex compaction. Whether space is actually allocated is
2437 * determined during linking / PM4 creation.
2438 *
2439 * Add an extra dword per vertex to ensure an odd stride, which
2440 * avoids bank conflicts for SoA accesses.
2441 */
2442 if (!gfx10_is_ngg_passthrough(shader))
2443 si_llvm_declare_esgs_ring(ctx);
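/* Example (illustrative): with 4 vec4 outputs the natural SoA stride
 * would be 16 dwords; since LDS has 32 dword-wide banks, threads two
 * vertices apart would hit the same bank. Padding to an odd 17-dword
 * stride spreads consecutive vertices across all banks.
 */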
2444
2445 /* This is really only needed when streamout and/or vertex
2446 * compaction is enabled.
2447 */
2448 if (sel->so.num_outputs && !ctx->gs_ngg_scratch) {
2449 LLVMTypeRef asi32 = LLVMArrayType(ctx->i32, 8);
2450 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
2451 asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
2452 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(asi32));
2453 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
2454 }
2455 }
2456
2457 /* For GFX9 merged shaders:
2458 * - Set EXEC for the first shader. If the prolog is present, set
2459 * EXEC there instead.
2460 * - Add a barrier before the second shader.
2461 * - In the second shader, reset EXEC to ~0 and wrap the main part in
2462 * an if-statement. This is required for correctness in geometry
2463 * shaders, to ensure that empty GS waves do not send GS_EMIT and
2464 * GS_CUT messages.
2465 *
2466 * For monolithic merged shaders, the first shader is wrapped in an
2467 * if-block together with its prolog in si_build_wrapper_function.
2468 *
2469 * NGG vertex and tess eval shaders running as the last
2470 * vertex/geometry stage handle execution explicitly using
2471 * if-statements.
2472 */
2473 if (ctx->screen->info.chip_class >= GFX9) {
2474 if (!shader->is_monolithic &&
2475 (shader->key.as_es || shader->key.as_ls) &&
2476 (ctx->type == PIPE_SHADER_TESS_EVAL ||
2477 (ctx->type == PIPE_SHADER_VERTEX &&
2478 !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog)))) {
2479 si_init_exec_from_input(ctx,
2480 ctx->merged_wave_info, 0);
2481 } else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
2482 ctx->type == PIPE_SHADER_GEOMETRY ||
2483 (shader->key.as_ngg && !shader->key.as_es)) {
2484 LLVMValueRef thread_enabled;
2485 bool nested_barrier;
2486
2487 if (!shader->is_monolithic ||
2488 (ctx->type == PIPE_SHADER_TESS_EVAL &&
2489 (shader->key.as_ngg && !shader->key.as_es)))
2490 ac_init_exec_full_mask(&ctx->ac);
2491
2492 if (ctx->type == PIPE_SHADER_TESS_CTRL ||
2493 ctx->type == PIPE_SHADER_GEOMETRY) {
2494 if (ctx->type == PIPE_SHADER_GEOMETRY && shader->key.as_ngg) {
2495 gfx10_ngg_gs_emit_prologue(ctx);
2496 nested_barrier = false;
2497 } else {
2498 nested_barrier = true;
2499 }
2500
2501 thread_enabled = si_is_gs_thread(ctx);
2502 } else {
2503 thread_enabled = si_is_es_thread(ctx);
2504 nested_barrier = false;
2505 }
2506
2507 ctx->merged_wrap_if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
2508 ctx->merged_wrap_if_label = 11500;
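/* Note (assumption about the helper's contract): the label is an
 * arbitrary unique id that ac_build_ifcc()/ac_build_endif() use to
 * pair up control-flow blocks; the value 11500 has no hardware
 * meaning.
 */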
2509 ac_build_ifcc(&ctx->ac, thread_enabled, ctx->merged_wrap_if_label);
2510
2511 if (nested_barrier) {
2512 /* Execute a barrier before the second shader in
2513 * a merged shader.
2514 *
2515 * Execute the barrier inside the conditional block,
2516 * so that empty waves can jump directly to s_endpgm,
2517 * which will also signal the barrier.
2518 *
2519 * This is possible on GFX9, because an empty wave
2520 * for the second shader does not participate in
2521 * the epilogue. With NGG, empty waves may still
2522 * be required to export data (e.g. GS output vertices),
2523 * so we cannot let them exit early.
2524 *
2525 * If the shader is TCS and the TCS epilog is present
2526 * and contains a barrier, it will wait there and then
2527 * reach s_endpgm.
2528 */
2529 si_llvm_emit_barrier(ctx);
2530 }
2531 }
2532 }
2533
2534 if (sel->force_correct_derivs_after_kill) {
2535 ctx->postponed_kill = ac_build_alloca_undef(&ctx->ac, ctx->i1, "");
2536 /* true = don't kill. */
2537 LLVMBuildStore(ctx->ac.builder, ctx->i1true,
2538 ctx->postponed_kill);
2539 }
2540
2541 bool success = si_nir_build_llvm(ctx, nir);
2542 if (free_nir)
2543 ralloc_free(nir);
2544 if (!success) {
2545 fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
2546 return false;
2547 }
2548
2549 si_llvm_build_ret(ctx, ctx->return_value);
2550 return true;
2551 }
2552
2553 /**
2554 * Compute the VS prolog key, which contains all the information needed to
2555 * build the VS prolog function, and set shader->info bits where needed.
2556 *
2557 * \param info Shader info of the vertex shader.
2558 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
2559 * \param prolog_key Key of the VS prolog.
2560 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
2561 * \param key Output shader part key.
2562 */
2563 static void si_get_vs_prolog_key(const struct si_shader_info *info,
2564 unsigned num_input_sgprs,
2565 const struct si_vs_prolog_bits *prolog_key,
2566 struct si_shader *shader_out,
2567 union si_shader_part_key *key)
2568 {
2569 memset(key, 0, sizeof(*key));
2570 key->vs_prolog.states = *prolog_key;
2571 key->vs_prolog.num_input_sgprs = num_input_sgprs;
2572 key->vs_prolog.num_inputs = info->num_inputs;
2573 key->vs_prolog.as_ls = shader_out->key.as_ls;
2574 key->vs_prolog.as_es = shader_out->key.as_es;
2575 key->vs_prolog.as_ngg = shader_out->key.as_ngg;
2576
2577 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
2578 key->vs_prolog.as_ls = 1;
2579 key->vs_prolog.num_merged_next_stage_vgprs = 2;
2580 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
2581 key->vs_prolog.as_es = 1;
2582 key->vs_prolog.num_merged_next_stage_vgprs = 5;
2583 } else if (shader_out->key.as_ngg) {
2584 key->vs_prolog.num_merged_next_stage_vgprs = 5;
2585 }
2586
2587 /* Enable loading the InstanceID VGPR. */
2588 uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);
2589
2590 if ((key->vs_prolog.states.instance_divisor_is_one |
2591 key->vs_prolog.states.instance_divisor_is_fetched) & input_mask)
2592 shader_out->info.uses_instanceid = true;
2593 }
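/* Worked example (hypothetical key): with num_inputs = 3, input_mask is
 * 0b111; if instance_divisor_is_one = 0b010, attribute 1 is fetched
 * with InstanceID, so (0b010 & 0b111) != 0 marks the shader as using
 * InstanceID.
 */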
2594
2595 /**
2596 * Given a list of shader part functions, build a wrapper function that
2597 * runs them in sequence to form a monolithic shader.
2598 */
2599 void si_build_wrapper_function(struct si_shader_context *ctx, LLVMValueRef *parts,
2600 unsigned num_parts, unsigned main_part,
2601 unsigned next_shader_first_part)
2602 {
2603 LLVMBuilderRef builder = ctx->ac.builder;
2604 /* PS epilog has one arg per color component; GFX9 merged shader
2605 * prologs need to forward 40 SGPRs.
2606 */
2607 LLVMValueRef initial[AC_MAX_ARGS], out[AC_MAX_ARGS];
2608 LLVMTypeRef function_type;
2609 unsigned num_first_params;
2610 unsigned num_out, initial_num_out;
2611 ASSERTED unsigned num_out_sgpr; /* used in debug checks */
2612 ASSERTED unsigned initial_num_out_sgpr; /* used in debug checks */
2613 unsigned num_sgprs, num_vgprs;
2614 unsigned gprs;
2615
2616 memset(&ctx->args, 0, sizeof(ctx->args));
2617
2618 for (unsigned i = 0; i < num_parts; ++i) {
2619 ac_add_function_attr(ctx->ac.context, parts[i], -1,
2620 AC_FUNC_ATTR_ALWAYSINLINE);
2621 LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
2622 }
2623
2624 /* The parameters of the wrapper function correspond to those of the
2625 * first part in terms of SGPRs and VGPRs, but we use the types of the
2626 * main part to get the right types. This is relevant for the
2627 * dereferenceable attribute on descriptor table pointers.
2628 */
2629 num_sgprs = 0;
2630 num_vgprs = 0;
2631
2632 function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
2633 num_first_params = LLVMCountParamTypes(function_type);
2634
2635 for (unsigned i = 0; i < num_first_params; ++i) {
2636 LLVMValueRef param = LLVMGetParam(parts[0], i);
2637
2638 if (ac_is_sgpr_param(param)) {
2639 assert(num_vgprs == 0);
2640 num_sgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
2641 } else {
2642 num_vgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
2643 }
2644 }
2645
2646 gprs = 0;
2647 while (gprs < num_sgprs + num_vgprs) {
2648 LLVMValueRef param = LLVMGetParam(parts[main_part], ctx->args.arg_count);
2649 LLVMTypeRef type = LLVMTypeOf(param);
2650 unsigned size = ac_get_type_size(type) / 4;
2651
2652 /* This is going to get cast anyway, so we don't have to
2653 * use exactly the same type. But we do have to preserve the
2654 * pointer-ness so that LLVM knows about it.
2655 */
2656 enum ac_arg_type arg_type = AC_ARG_INT;
2657 if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) {
2658 type = LLVMGetElementType(type);
2659
2660 if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
2661 if (LLVMGetVectorSize(type) == 4)
2662 arg_type = AC_ARG_CONST_DESC_PTR;
2663 else if (LLVMGetVectorSize(type) == 8)
2664 arg_type = AC_ARG_CONST_IMAGE_PTR;
2665 else
2666 assert(0);
2667 } else if (type == ctx->f32) {
2668 arg_type = AC_ARG_CONST_FLOAT_PTR;
2669 } else {
2670 assert(0);
2671 }
2672 }
2673
2674 ac_add_arg(&ctx->args, gprs < num_sgprs ? AC_ARG_SGPR : AC_ARG_VGPR,
2675 size, arg_type, NULL);
2676
2677 assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
2678 assert(gprs + size <= num_sgprs + num_vgprs &&
2679 (gprs >= num_sgprs || gprs + size <= num_sgprs));
2680
2681 gprs += size;
2682 }
2683
2684 /* Prepare the return type. */
2685 unsigned num_returns = 0;
2686 LLVMTypeRef returns[AC_MAX_ARGS], last_func_type, return_type;
2687
2688 last_func_type = LLVMGetElementType(LLVMTypeOf(parts[num_parts - 1]));
2689 return_type = LLVMGetReturnType(last_func_type);
2690
2691 switch (LLVMGetTypeKind(return_type)) {
2692 case LLVMStructTypeKind:
2693 num_returns = LLVMCountStructElementTypes(return_type);
2694 assert(num_returns <= ARRAY_SIZE(returns));
2695 LLVMGetStructElementTypes(return_type, returns);
2696 break;
2697 case LLVMVoidTypeKind:
2698 break;
2699 default:
2700 unreachable("unexpected type");
2701 }
2702
2703 si_llvm_create_func(ctx, "wrapper", returns, num_returns,
2704 si_get_max_workgroup_size(ctx->shader));
2705
2706 if (si_is_merged_shader(ctx))
2707 ac_init_exec_full_mask(&ctx->ac);
2708
2709 /* Record the arguments of the function as if they were an output of
2710 * a previous part.
2711 */
2712 num_out = 0;
2713 num_out_sgpr = 0;
2714
2715 for (unsigned i = 0; i < ctx->args.arg_count; ++i) {
2716 LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
2717 LLVMTypeRef param_type = LLVMTypeOf(param);
2718 LLVMTypeRef out_type = ctx->args.args[i].file == AC_ARG_SGPR ? ctx->i32 : ctx->f32;
2719 unsigned size = ac_get_type_size(param_type) / 4;
2720
2721 if (size == 1) {
2722 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
2723 param = LLVMBuildPtrToInt(builder, param, ctx->i32, "");
2724 param_type = ctx->i32;
2725 }
2726
2727 if (param_type != out_type)
2728 param = LLVMBuildBitCast(builder, param, out_type, "");
2729 out[num_out++] = param;
2730 } else {
2731 LLVMTypeRef vector_type = LLVMVectorType(out_type, size);
2732
2733 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
2734 param = LLVMBuildPtrToInt(builder, param, ctx->i64, "");
2735 param_type = ctx->i64;
2736 }
2737
2738 if (param_type != vector_type)
2739 param = LLVMBuildBitCast(builder, param, vector_type, "");
2740
2741 for (unsigned j = 0; j < size; ++j)
2742 out[num_out++] = LLVMBuildExtractElement(
2743 builder, param, LLVMConstInt(ctx->i32, j, 0), "");
2744 }
2745
2746 if (ctx->args.args[i].file == AC_ARG_SGPR)
2747 num_out_sgpr = num_out;
2748 }
2749
2750 memcpy(initial, out, sizeof(out));
2751 initial_num_out = num_out;
2752 initial_num_out_sgpr = num_out_sgpr;
2753
2754 /* Now chain the parts. */
2755 LLVMValueRef ret = NULL;
2756 for (unsigned part = 0; part < num_parts; ++part) {
2757 LLVMValueRef in[AC_MAX_ARGS];
2758 LLVMTypeRef ret_type;
2759 unsigned out_idx = 0;
2760 unsigned num_params = LLVMCountParams(parts[part]);
2761
2762 /* Merged shaders are executed conditionally depending
2763 * on the number of enabled threads passed in the input SGPRs. */
2764 if (is_multi_part_shader(ctx) && part == 0) {
2765 LLVMValueRef ena, count = initial[3];
2766
2767 count = LLVMBuildAnd(builder, count,
2768 LLVMConstInt(ctx->i32, 0x7f, 0), "");
2769 ena = LLVMBuildICmp(builder, LLVMIntULT,
2770 ac_get_thread_id(&ctx->ac), count, "");
2771 ac_build_ifcc(&ctx->ac, ena, 6506);
2772 }
2773
2774 /* Derive arguments for the next part from outputs of the
2775 * previous one.
2776 */
2777 for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
2778 LLVMValueRef param;
2779 LLVMTypeRef param_type;
2780 bool is_sgpr;
2781 unsigned param_size;
2782 LLVMValueRef arg = NULL;
2783
2784 param = LLVMGetParam(parts[part], param_idx);
2785 param_type = LLVMTypeOf(param);
2786 param_size = ac_get_type_size(param_type) / 4;
2787 is_sgpr = ac_is_sgpr_param(param);
2788
2789 if (is_sgpr) {
2790 ac_add_function_attr(ctx->ac.context, parts[part],
2791 param_idx + 1, AC_FUNC_ATTR_INREG);
2792 } else if (out_idx < num_out_sgpr) {
2793 /* Skip returned SGPRs that the current part
2794 * doesn't declare as inputs. */
2795 out_idx = num_out_sgpr;
2796 }
2797
2798 assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));
2799
2800 if (param_size == 1)
2801 arg = out[out_idx];
2802 else
2803 arg = ac_build_gather_values(&ctx->ac, &out[out_idx], param_size);
2804
2805 if (LLVMTypeOf(arg) != param_type) {
2806 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
2807 if (LLVMGetPointerAddressSpace(param_type) ==
2808 AC_ADDR_SPACE_CONST_32BIT) {
2809 arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");
2810 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
2811 } else {
2812 arg = LLVMBuildBitCast(builder, arg, ctx->i64, "");
2813 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
2814 }
2815 } else {
2816 arg = LLVMBuildBitCast(builder, arg, param_type, "");
2817 }
2818 }
2819
2820 in[param_idx] = arg;
2821 out_idx += param_size;
2822 }
2823
2824 ret = ac_build_call(&ctx->ac, parts[part], in, num_params);
2825
2826 if (is_multi_part_shader(ctx) &&
2827 part + 1 == next_shader_first_part) {
2828 ac_build_endif(&ctx->ac, 6506);
2829
2830 /* The second half of the merged shader should use
2831 * the inputs from the toplevel (wrapper) function,
2832 * not the return value from the last call.
2833 *
2834 * That's because the last call was executed
2835 * conditionally, so we can't consume it in the
2836 * main block.
2837 */
2838 memcpy(out, initial, sizeof(initial));
2839 num_out = initial_num_out;
2840 num_out_sgpr = initial_num_out_sgpr;
2841 continue;
2842 }
2843
2844 /* Extract the returned GPRs. */
2845 ret_type = LLVMTypeOf(ret);
2846 num_out = 0;
2847 num_out_sgpr = 0;
2848
2849 if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
2850 assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);
2851
2852 unsigned ret_size = LLVMCountStructElementTypes(ret_type);
2853
2854 for (unsigned i = 0; i < ret_size; ++i) {
2855 LLVMValueRef val =
2856 LLVMBuildExtractValue(builder, ret, i, "");
2857
2858 assert(num_out < ARRAY_SIZE(out));
2859 out[num_out++] = val;
2860
2861 if (LLVMTypeOf(val) == ctx->i32) {
2862 assert(num_out_sgpr + 1 == num_out);
2863 num_out_sgpr = num_out;
2864 }
2865 }
2866 }
2867 }
2868
2869 /* Return the value from the last part. */
2870 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
2871 LLVMBuildRetVoid(builder);
2872 else
2873 LLVMBuildRet(builder, ret);
2874 }
2875
2876 static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
2877 struct si_shader_selector *sel)
2878 {
2879 if (!compiler->low_opt_passes)
2880 return false;
2881
2882 /* Assume a slow CPU. */
2883 assert(!sel->screen->info.has_dedicated_vram &&
2884 sel->screen->info.chip_class <= GFX8);
2885
2886 /* For a crazy dEQP test containing 2597 memory opcodes, mostly
2887 * buffer stores. */
2888 return sel->type == PIPE_SHADER_COMPUTE &&
2889 sel->info.num_memory_instructions > 1000;
2890 }
2891
2892 static struct nir_shader *get_nir_shader(struct si_shader_selector *sel,
2893 bool *free_nir)
2894 {
2895 *free_nir = false;
2896
2897 if (sel->nir) {
2898 return sel->nir;
2899 } else if (sel->nir_binary) {
2900 struct pipe_screen *screen = &sel->screen->b;
2901 const void *options =
2902 screen->get_compiler_options(screen, PIPE_SHADER_IR_NIR,
2903 sel->type);
2904
2905 struct blob_reader blob_reader;
2906 blob_reader_init(&blob_reader, sel->nir_binary, sel->nir_size);
2907 *free_nir = true;
2908 return nir_deserialize(NULL, options, &blob_reader);
2909 }
2910 return NULL;
2911 }
2912
2913 int si_compile_shader(struct si_screen *sscreen,
2914 struct ac_llvm_compiler *compiler,
2915 struct si_shader *shader,
2916 struct pipe_debug_callback *debug)
2917 {
2918 struct si_shader_selector *sel = shader->selector;
2919 struct si_shader_context ctx;
2920 bool free_nir;
2921 struct nir_shader *nir = get_nir_shader(sel, &free_nir);
2922 int r = -1;
2923
2924 /* Dump NIR before doing NIR->LLVM conversion in case the
2925 * conversion fails. */
2926 if (si_can_dump_shader(sscreen, sel->type) &&
2927 !(sscreen->debug_flags & DBG(NO_NIR))) {
2928 nir_print_shader(nir, stderr);
2929 si_dump_streamout(&sel->so);
2930 }
2931
2932 si_llvm_context_init(&ctx, sscreen, compiler, si_get_shader_wave_size(shader));
2933 si_llvm_context_set_ir(&ctx, shader);
2934
2935 memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
2936 sizeof(shader->info.vs_output_param_offset));
2937
2938 shader->info.uses_instanceid = sel->info.uses_instanceid;
2939
2940 if (!si_build_main_function(&ctx, nir, free_nir)) {
2941 si_llvm_dispose(&ctx);
2942 return -1;
2943 }
2944
2945 if (shader->is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
2946 LLVMValueRef parts[2];
2947 bool need_prolog = si_vs_needs_prolog(sel, &shader->key.part.vs.prolog);
2948
2949 parts[1] = ctx.main_fn;
2950
2951 if (need_prolog) {
2952 union si_shader_part_key prolog_key;
2953 si_get_vs_prolog_key(&sel->info,
2954 shader->info.num_input_sgprs,
2955 &shader->key.part.vs.prolog,
2956 shader, &prolog_key);
2957 prolog_key.vs_prolog.is_monolithic = true;
2958 si_build_vs_prolog_function(&ctx, &prolog_key);
2959 parts[0] = ctx.main_fn;
2960 }
2961
2962 si_build_wrapper_function(&ctx, parts + !need_prolog,
2963 1 + need_prolog, need_prolog, 0);
2964
2965 if (ctx.shader->key.opt.vs_as_prim_discard_cs)
2966 si_build_prim_discard_compute_shader(&ctx);
2967 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
2968 if (sscreen->info.chip_class >= GFX9) {
2969 struct si_shader_selector *ls = shader->key.part.tcs.ls;
2970 LLVMValueRef parts[4];
2971 bool vs_needs_prolog =
2972 si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog);
2973
2974 /* TCS main part */
2975 parts[2] = ctx.main_fn;
2976
2977 /* TCS epilog */
2978 union si_shader_part_key tcs_epilog_key;
2979 memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
2980 tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
2981 si_llvm_build_tcs_epilog(&ctx, &tcs_epilog_key);
2982 parts[3] = ctx.main_fn;
2983
2984 /* VS as LS main part */
2985 nir = get_nir_shader(ls, &free_nir);
2986 struct si_shader shader_ls = {};
2987 shader_ls.selector = ls;
2988 shader_ls.key.as_ls = 1;
2989 shader_ls.key.mono = shader->key.mono;
2990 shader_ls.key.opt = shader->key.opt;
2991 shader_ls.is_monolithic = true;
2992 si_llvm_context_set_ir(&ctx, &shader_ls);
2993
2994 if (!si_build_main_function(&ctx, nir, free_nir)) {
2995 si_llvm_dispose(&ctx);
2996 return -1;
2997 }
2998 shader->info.uses_instanceid |= ls->info.uses_instanceid;
2999 parts[1] = ctx.main_fn;
3000
3001 /* LS prolog */
3002 if (vs_needs_prolog) {
3003 union si_shader_part_key vs_prolog_key;
3004 si_get_vs_prolog_key(&ls->info,
3005 shader_ls.info.num_input_sgprs,
3006 &shader->key.part.tcs.ls_prolog,
3007 shader, &vs_prolog_key);
3008 vs_prolog_key.vs_prolog.is_monolithic = true;
3009 si_build_vs_prolog_function(&ctx, &vs_prolog_key);
3010 parts[0] = ctx.main_fn;
3011 }
3012
3013 /* Reset the shader context. */
3014 ctx.shader = shader;
3015 ctx.type = PIPE_SHADER_TESS_CTRL;
3016
3017 si_build_wrapper_function(&ctx,
3018 parts + !vs_needs_prolog,
3019 4 - !vs_needs_prolog, vs_needs_prolog,
3020 vs_needs_prolog ? 2 : 1);
3021 } else {
3022 LLVMValueRef parts[2];
3023 union si_shader_part_key epilog_key;
3024
3025 parts[0] = ctx.main_fn;
3026
3027 memset(&epilog_key, 0, sizeof(epilog_key));
3028 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
3029 si_llvm_build_tcs_epilog(&ctx, &epilog_key);
3030 parts[1] = ctx.main_fn;
3031
3032 si_build_wrapper_function(&ctx, parts, 2, 0, 0);
3033 }
3034 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
3035 if (ctx.screen->info.chip_class >= GFX9) {
3036 struct si_shader_selector *es = shader->key.part.gs.es;
3037 LLVMValueRef es_prolog = NULL;
3038 LLVMValueRef es_main = NULL;
3039 LLVMValueRef gs_prolog = NULL;
3040 LLVMValueRef gs_main = ctx.main_fn;
3041
3042 /* GS prolog */
3043 union si_shader_part_key gs_prolog_key;
3044 memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
3045 gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
3046 gs_prolog_key.gs_prolog.is_monolithic = true;
3047 gs_prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
3048 si_llvm_build_gs_prolog(&ctx, &gs_prolog_key);
3049 gs_prolog = ctx.main_fn;
3050
3051 /* ES main part */
3052 nir = get_nir_shader(es, &free_nir);
3053 struct si_shader shader_es = {};
3054 shader_es.selector = es;
3055 shader_es.key.as_es = 1;
3056 shader_es.key.as_ngg = shader->key.as_ngg;
3057 shader_es.key.mono = shader->key.mono;
3058 shader_es.key.opt = shader->key.opt;
3059 shader_es.is_monolithic = true;
3060 si_llvm_context_set_ir(&ctx, &shader_es);
3061
3062 if (!si_build_main_function(&ctx, nir, free_nir)) {
3063 si_llvm_dispose(&ctx);
3064 return -1;
3065 }
3066 shader->info.uses_instanceid |= es->info.uses_instanceid;
3067 es_main = ctx.main_fn;
3068
3069 /* ES prolog */
3070 if (es->type == PIPE_SHADER_VERTEX &&
3071 si_vs_needs_prolog(es, &shader->key.part.gs.vs_prolog)) {
3072 union si_shader_part_key vs_prolog_key;
3073 si_get_vs_prolog_key(&es->info,
3074 shader_es.info.num_input_sgprs,
3075 &shader->key.part.gs.vs_prolog,
3076 shader, &vs_prolog_key);
3077 vs_prolog_key.vs_prolog.is_monolithic = true;
3078 si_build_vs_prolog_function(&ctx, &vs_prolog_key);
3079 es_prolog = ctx.main_fn;
3080 }
3081
3082 /* Reset the shader context. */
3083 ctx.shader = shader;
3084 ctx.type = PIPE_SHADER_GEOMETRY;
3085
3086 /* Prepare the array of shader parts. */
3087 LLVMValueRef parts[4];
3088 unsigned num_parts = 0, main_part, next_first_part;
3089
3090 if (es_prolog)
3091 parts[num_parts++] = es_prolog;
3092
3093 parts[main_part = num_parts++] = es_main;
3094 parts[next_first_part = num_parts++] = gs_prolog;
3095 parts[num_parts++] = gs_main;
3096
3097 si_build_wrapper_function(&ctx, parts, num_parts,
3098 main_part, next_first_part);
3099 } else {
3100 LLVMValueRef parts[2];
3101 union si_shader_part_key prolog_key;
3102
3103 parts[1] = ctx.main_fn;
3104
3105 memset(&prolog_key, 0, sizeof(prolog_key));
3106 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
3107 si_llvm_build_gs_prolog(&ctx, &prolog_key);
3108 parts[0] = ctx.main_fn;
3109
3110 si_build_wrapper_function(&ctx, parts, 2, 1, 0);
3111 }
3112 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
3113 si_llvm_build_monolithic_ps(&ctx, shader);
3114 }
3115
3116 si_llvm_optimize_module(&ctx);
3117
3118 /* Post-optimization transformations and analysis. */
3119 si_optimize_vs_outputs(&ctx);
3120
3121 if ((debug && debug->debug_message) ||
3122 si_can_dump_shader(sscreen, ctx.type)) {
3123 ctx.shader->info.private_mem_vgprs =
3124 ac_count_scratch_private_memory(ctx.main_fn);
3125 }
3126
3127 /* Make sure the first input is a pointer, not an integer followed by inttoptr. */
3128 assert(LLVMGetTypeKind(LLVMTypeOf(LLVMGetParam(ctx.main_fn, 0))) ==
3129 LLVMPointerTypeKind);
3130
3131 /* Compile to bytecode. */
3132 r = si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler,
3133 ctx.ac.module, debug, ctx.type, ctx.ac.wave_size,
3134 si_get_shader_name(shader),
3135 si_should_optimize_less(compiler, shader->selector));
3136 si_llvm_dispose(&ctx);
3137 if (r) {
3138 fprintf(stderr, "LLVM failed to compile shader\n");
3139 return r;
3140 }
3141
3142 /* Validate SGPR and VGPR usage for compute shaders to detect compiler
3143 * bugs; LLVM 3.9svn was observed to exceed these limits.
3144 */
3145 if (sel->type == PIPE_SHADER_COMPUTE) {
3146 unsigned wave_size = sscreen->compute_wave_size;
3147 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd *
3148 (wave_size == 32 ? 2 : 1);
3149 unsigned max_sgprs = sscreen->info.num_physical_sgprs_per_simd;
3150 unsigned max_sgprs_per_wave = 128;
3151 unsigned simds_per_tg = 4; /* assuming WGP mode on gfx10 */
3152 unsigned threads_per_tg = si_get_max_workgroup_size(shader);
3153 unsigned waves_per_tg = DIV_ROUND_UP(threads_per_tg, wave_size);
3154 unsigned waves_per_simd = DIV_ROUND_UP(waves_per_tg, simds_per_tg);
3155
3156 max_vgprs = max_vgprs / waves_per_simd;
3157 max_sgprs = MIN2(max_sgprs / waves_per_simd, max_sgprs_per_wave);
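/* Worked example (illustrative numbers): Wave64 with
 * threads_per_tg = 1024 gives
 *   waves_per_tg   = DIV_ROUND_UP(1024, 64) = 16
 *   waves_per_simd = DIV_ROUND_UP(16, 4)    = 4
 * so with 256 physical Wave64 VGPRs and e.g. 800 SGPRs per SIMD, the
 * budget is 256 / 4 = 64 VGPRs and MIN2(800 / 4, 128) = 128 SGPRs
 * per wave.
 */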
3158
3159 if (shader->config.num_sgprs > max_sgprs ||
3160 shader->config.num_vgprs > max_vgprs) {
3161 fprintf(stderr, "LLVM failed to compile a shader correctly: "
3162 "SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
3163 shader->config.num_sgprs, shader->config.num_vgprs,
3164 max_sgprs, max_vgprs);
3165
3166 /* Just terminate the process, because dependent
3167 * shaders can hang due to bad input data, but use
3168 * the env var to allow shader-db to work.
3169 */
3170 if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
3171 abort();
3172 }
3173 }
3174
3175 /* Add the scratch offset to input SGPRs. */
3176 if (shader->config.scratch_bytes_per_wave && !si_is_merged_shader(&ctx))
3177 shader->info.num_input_sgprs += 1; /* scratch byte offset */
3178
3179 /* Calculate the number of fragment input VGPRs. */
3180 if (ctx.type == PIPE_SHADER_FRAGMENT) {
3181 shader->info.num_input_vgprs = ac_get_fs_input_vgpr_cnt(&shader->config,
3182 &shader->info.face_vgpr_index,
3183 &shader->info.ancillary_vgpr_index);
3184 }
3185
3186 si_calculate_max_simd_waves(shader);
3187 si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
3188 return 0;
3189 }
3190
3191 /**
3192 * Create, compile and return a shader part (prolog or epilog).
3193 *
3194 * \param sscreen screen
3195 * \param list list of shader parts of the same category
3196 * \param type shader type
3197 * \param prolog whether the part being requested is a prolog
3198 * \param key shader part key
3199 * \param compiler LLVM compiler
3200 * \param debug debug callback
3201 * \param build the callback responsible for building the main function
* \param name human-readable part name used for dumps
3202 * \return non-NULL on success
3203 */
3204 static struct si_shader_part *
3205 si_get_shader_part(struct si_screen *sscreen,
3206 struct si_shader_part **list,
3207 enum pipe_shader_type type,
3208 bool prolog,
3209 union si_shader_part_key *key,
3210 struct ac_llvm_compiler *compiler,
3211 struct pipe_debug_callback *debug,
3212 void (*build)(struct si_shader_context *,
3213 union si_shader_part_key *),
3214 const char *name)
3215 {
3216 struct si_shader_part *result;
3217
3218 simple_mtx_lock(&sscreen->shader_parts_mutex);
3219
3220 /* Find existing. */
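/* (A linear list walk under the mutex is adequate here: each category
 * only accumulates a handful of part variants in practice.)
 */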
3221 for (result = *list; result; result = result->next) {
3222 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
3223 simple_mtx_unlock(&sscreen->shader_parts_mutex);
3224 return result;
3225 }
3226 }
3227
3228 /* Compile a new one. */
3229 result = CALLOC_STRUCT(si_shader_part);
if (!result) {
simple_mtx_unlock(&sscreen->shader_parts_mutex);
return NULL;
}
3230 result->key = *key;
3231
3232 struct si_shader shader = {};
3233
3234 switch (type) {
3235 case PIPE_SHADER_VERTEX:
3236 shader.key.as_ls = key->vs_prolog.as_ls;
3237 shader.key.as_es = key->vs_prolog.as_es;
3238 shader.key.as_ngg = key->vs_prolog.as_ngg;
3239 break;
3240 case PIPE_SHADER_TESS_CTRL:
3241 assert(!prolog);
3242 shader.key.part.tcs.epilog = key->tcs_epilog.states;
3243 break;
3244 case PIPE_SHADER_GEOMETRY:
3245 assert(prolog);
3246 shader.key.as_ngg = key->gs_prolog.as_ngg;
3247 break;
3248 case PIPE_SHADER_FRAGMENT:
3249 if (prolog)
3250 shader.key.part.ps.prolog = key->ps_prolog.states;
3251 else
3252 shader.key.part.ps.epilog = key->ps_epilog.states;
3253 break;
3254 default:
3255 unreachable("bad shader part");
3256 }
3257
3258 struct si_shader_context ctx;
3259 si_llvm_context_init(&ctx, sscreen, compiler,
3260 si_get_wave_size(sscreen, type, shader.key.as_ngg,
3261 shader.key.as_es));
3262 ctx.shader = &shader;
3263 ctx.type = type;
3264
3265 build(&ctx, key);
3266
3267 /* Compile. */
3268 si_llvm_optimize_module(&ctx);
3269
3270 if (si_compile_llvm(sscreen, &result->binary, &result->config, compiler,
3271 ctx.ac.module, debug, ctx.type, ctx.ac.wave_size,
3272 name, false)) {
3273 FREE(result);
3274 result = NULL;
3275 goto out;
3276 }
3277
3278 result->next = *list;
3279 *list = result;
3280
3281 out:
3282 si_llvm_dispose(&ctx);
3283 simple_mtx_unlock(&sscreen->shader_parts_mutex);
3284 return result;
3285 }
3286
3287 /**
3288 * Build the vertex shader prolog function.
3289 *
3290 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
3291 * All inputs are returned unmodified. The vertex load indices are
3292 * stored after them; the API VS uses them to fetch its inputs.
3293 *
3294 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
3295 * input_v0,
3296 * input_v1,
3297 * input_v2,
3298 * input_v3,
3299 * (VertexID + BaseVertex),
3300 * (InstanceID + StartInstance),
3301 * (InstanceID / 2 + StartInstance)
3302 */
3303 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
3304 union si_shader_part_key *key)
3305 {
3306 LLVMTypeRef *returns;
3307 LLVMValueRef ret, func;
3308 int num_returns, i;
3309 unsigned first_vs_vgpr = key->vs_prolog.num_merged_next_stage_vgprs;
3310 unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4;
3311 struct ac_arg input_sgpr_param[key->vs_prolog.num_input_sgprs];
3312 struct ac_arg input_vgpr_param[9];
3313 LLVMValueRef input_vgprs[9];
3314 unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs +
3315 num_input_vgprs;
3316 unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;
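/* (In merged shaders the first 8 SGPRs hold system values, so user
 * SGPRs start at offset 8; hence the "? 8 : 0" above.)
 */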
3317
3318 memset(&ctx->args, 0, sizeof(ctx->args));
3319
3320 /* 4 preloaded VGPRs + vertex load indices as prolog outputs */
3321 returns = alloca((num_all_input_regs + key->vs_prolog.num_inputs) *
3322 sizeof(LLVMTypeRef));
3323 num_returns = 0;
3324
3325 /* Declare input and output SGPRs. */
3326 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
3327 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
3328 &input_sgpr_param[i]);
3329 returns[num_returns++] = ctx->i32;
3330 }
3331
3332 struct ac_arg merged_wave_info = input_sgpr_param[3];
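/* SGPR 3 carries the merged wave info; bits [8:15] (the second-stage
 * thread count) are checked below by the LS VGPR init bug workaround.
 */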
3333
3334 /* Preloaded VGPRs (outputs must be floats) */
3335 for (i = 0; i < num_input_vgprs; i++) {
3336 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &input_vgpr_param[i]);
3337 returns[num_returns++] = ctx->f32;
3338 }
3339
3340 /* Vertex load indices. */
3341 for (i = 0; i < key->vs_prolog.num_inputs; i++)
3342 returns[num_returns++] = ctx->f32;
3343
3344 /* Create the function. */
3345 si_llvm_create_func(ctx, "vs_prolog", returns, num_returns, 0);
3346 func = ctx->main_fn;
3347
3348 for (i = 0; i < num_input_vgprs; i++) {
3349 input_vgprs[i] = ac_get_arg(&ctx->ac, input_vgpr_param[i]);
3350 }
3351
3352 if (key->vs_prolog.num_merged_next_stage_vgprs) {
3353 if (!key->vs_prolog.is_monolithic)
3354 si_init_exec_from_input(ctx, merged_wave_info, 0);
3355
3356 if (key->vs_prolog.as_ls &&
3357 ctx->screen->info.has_ls_vgpr_init_bug) {
3358 /* If there are no HS threads, SPI loads the LS VGPRs
3359 * starting at VGPR 0. Shift them back to where they
3360 * belong.
3361 */
3362 LLVMValueRef has_hs_threads =
3363 LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
3364 si_unpack_param(ctx, input_sgpr_param[3], 8, 8),
3365 ctx->i32_0, "");
3366
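/* Walk downward so every source is read before it is overwritten:
 * v5 <- v3, v4 <- v2, v3 <- v1, v2 <- v0.
 */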
3367 for (i = 4; i > 0; --i) {
3368 input_vgprs[i + 1] =
3369 LLVMBuildSelect(ctx->ac.builder, has_hs_threads,
3370 input_vgprs[i + 1],
3371 input_vgprs[i - 1], "");
3372 }
3373 }
3374 }
3375
3376 unsigned vertex_id_vgpr = first_vs_vgpr;
3377 unsigned instance_id_vgpr =
3378 ctx->screen->info.chip_class >= GFX10 ?
3379 first_vs_vgpr + 3 :
3380 first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1);
3381
3382 ctx->abi.vertex_id = input_vgprs[vertex_id_vgpr];
3383 ctx->abi.instance_id = input_vgprs[instance_id_vgpr];
3384
3385 /* InstanceID = VertexID >> 16;
3386 * VertexID = VertexID & 0xffff;
3387 */
3388 if (key->vs_prolog.states.unpack_instance_id_from_vertex_id) {
3389 ctx->abi.instance_id = LLVMBuildLShr(ctx->ac.builder, ctx->abi.vertex_id,
3390 LLVMConstInt(ctx->i32, 16, 0), "");
3391 ctx->abi.vertex_id = LLVMBuildAnd(ctx->ac.builder, ctx->abi.vertex_id,
3392 LLVMConstInt(ctx->i32, 0xffff, 0), "");
3393 }
3394
3395 /* Copy inputs to outputs. This should be a no-op, as the registers match,
3396 * but it will prevent the compiler from overwriting them unintentionally.
3397 */
3398 ret = ctx->return_value;
3399 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
3400 LLVMValueRef p = LLVMGetParam(func, i);
3401 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
3402 }
3403 for (i = 0; i < num_input_vgprs; i++) {
3404 LLVMValueRef p = input_vgprs[i];
3405
3406 if (i == vertex_id_vgpr)
3407 p = ctx->abi.vertex_id;
3408 else if (i == instance_id_vgpr)
3409 p = ctx->abi.instance_id;
3410
3411 p = ac_to_float(&ctx->ac, p);
3412 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p,
3413 key->vs_prolog.num_input_sgprs + i, "");
3414 }
3415
3416 /* Compute vertex load indices from instance divisors. */
3417 LLVMValueRef instance_divisor_constbuf = NULL;
3418
3419 if (key->vs_prolog.states.instance_divisor_is_fetched) {
3420 LLVMValueRef list = si_prolog_get_rw_buffers(ctx);
3421 LLVMValueRef buf_index =
3422 LLVMConstInt(ctx->i32, SI_VS_CONST_INSTANCE_DIVISORS, 0);
3423 instance_divisor_constbuf =
3424 ac_build_load_to_sgpr(&ctx->ac, list, buf_index);
3425 }
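/* Assumed layout, matching the i*16 + j*4 offsets used below:
 *
 * struct {
 * uint32_t udiv_factors[4]; // precomputed fast-division factors
 * } divisors[num_inputs]; // one 16-byte record per vertex attribute
 */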
3426
3427 for (i = 0; i < key->vs_prolog.num_inputs; i++) {
3428 bool divisor_is_one =
3429 key->vs_prolog.states.instance_divisor_is_one & (1u << i);
3430 bool divisor_is_fetched =
3431 key->vs_prolog.states.instance_divisor_is_fetched & (1u << i);
3432 LLVMValueRef index = NULL;
3433
3434 if (divisor_is_one) {
3435 index = ctx->abi.instance_id;
3436 } else if (divisor_is_fetched) {
3437 LLVMValueRef udiv_factors[4];
3438
3439 for (unsigned j = 0; j < 4; j++) {
3440 udiv_factors[j] =
3441 si_buffer_load_const(ctx, instance_divisor_constbuf,
3442 LLVMConstInt(ctx->i32, i*16 + j*4, 0));
3443 udiv_factors[j] = ac_to_integer(&ctx->ac, udiv_factors[j]);
3444 }
3445 /* The faster NUW version doesn't work when InstanceID == UINT_MAX,
3446 * but such an InstanceID is unlikely to be reached in practice.
3447 */
3448 index = ac_build_fast_udiv_nuw(&ctx->ac, ctx->abi.instance_id,
3449 udiv_factors[0], udiv_factors[1],
3450 udiv_factors[2], udiv_factors[3]);
3451 }
3452
3453 if (divisor_is_one || divisor_is_fetched) {
3454 /* Add StartInstance. */
3455 index = LLVMBuildAdd(ctx->ac.builder, index,
3456 LLVMGetParam(ctx->main_fn, user_sgpr_base +
3457 SI_SGPR_START_INSTANCE), "");
3458 } else {
3459 /* VertexID + BaseVertex */
3460 index = LLVMBuildAdd(ctx->ac.builder,
3461 ctx->abi.vertex_id,
3462 LLVMGetParam(func, user_sgpr_base +
3463 SI_SGPR_BASE_VERTEX), "");
3464 }
3465
3466 index = ac_to_float(&ctx->ac, index);
3467 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, index,
3468 ctx->args.arg_count + i, "");
3469 }
3470
3471 si_llvm_build_ret(ctx, ret);
3472 }
3473
3474 static bool si_get_vs_prolog(struct si_screen *sscreen,
3475 struct ac_llvm_compiler *compiler,
3476 struct si_shader *shader,
3477 struct pipe_debug_callback *debug,
3478 struct si_shader *main_part,
3479 const struct si_vs_prolog_bits *key)
3480 {
3481 struct si_shader_selector *vs = main_part->selector;
3482
3483 if (!si_vs_needs_prolog(vs, key))
3484 return true;
3485
3486 /* Get the prolog. */
3487 union si_shader_part_key prolog_key;
3488 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
3489 key, shader, &prolog_key);
3490
3491 shader->prolog =
3492 si_get_shader_part(sscreen, &sscreen->vs_prologs,
3493 PIPE_SHADER_VERTEX, true, &prolog_key, compiler,
3494 debug, si_build_vs_prolog_function,
3495 "Vertex Shader Prolog");
3496 return shader->prolog != NULL;
3497 }
3498
3499 /**
3500 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
3501 */
3502 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
3503 struct ac_llvm_compiler *compiler,
3504 struct si_shader *shader,
3505 struct pipe_debug_callback *debug)
3506 {
3507 return si_get_vs_prolog(sscreen, compiler, shader, debug, shader,
3508 &shader->key.part.vs.prolog);
3509 }
3510
3511 /**
3512 * Select and compile (or reuse) TCS parts (epilog).
3513 */
3514 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
3515 struct ac_llvm_compiler *compiler,
3516 struct si_shader *shader,
3517 struct pipe_debug_callback *debug)
3518 {
3519 if (sscreen->info.chip_class >= GFX9) {
3520 struct si_shader *ls_main_part =
3521 shader->key.part.tcs.ls->main_shader_part_ls;
3522
3523 if (!si_get_vs_prolog(sscreen, compiler, shader, debug, ls_main_part,
3524 &shader->key.part.tcs.ls_prolog))
3525 return false;
3526
3527 shader->previous_stage = ls_main_part;
3528 }
3529
3530 /* Get the epilog. */
3531 union si_shader_part_key epilog_key;
3532 memset(&epilog_key, 0, sizeof(epilog_key));
3533 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
3534
3535 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
3536 PIPE_SHADER_TESS_CTRL, false,
3537 &epilog_key, compiler, debug,
3538 si_llvm_build_tcs_epilog,
3539 "Tessellation Control Shader Epilog");
3540 return shader->epilog != NULL;
3541 }
3542
3543 /**
3544 * Select and compile (or reuse) GS parts (prolog).
3545 */
3546 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
3547 struct ac_llvm_compiler *compiler,
3548 struct si_shader *shader,
3549 struct pipe_debug_callback *debug)
3550 {
3551 if (sscreen->info.chip_class >= GFX9) {
3552 struct si_shader *es_main_part;
3553 enum pipe_shader_type es_type = shader->key.part.gs.es->type;
3554
3555 if (shader->key.as_ngg)
3556 es_main_part = shader->key.part.gs.es->main_shader_part_ngg_es;
3557 else
3558 es_main_part = shader->key.part.gs.es->main_shader_part_es;
3559
3560 if (es_type == PIPE_SHADER_VERTEX &&
3561 !si_get_vs_prolog(sscreen, compiler, shader, debug, es_main_part,
3562 &shader->key.part.gs.vs_prolog))
3563 return false;
3564
3565 shader->previous_stage = es_main_part;
3566 }
3567
3568 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
3569 return true;
3570
3571 union si_shader_part_key prolog_key;
3572 memset(&prolog_key, 0, sizeof(prolog_key));
3573 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
3574 prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
3575
3576 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
3577 PIPE_SHADER_GEOMETRY, true,
3578 &prolog_key, compiler, debug,
3579 si_llvm_build_gs_prolog,
3580 "Geometry Shader Prolog");
3581 return shader->prolog2 != NULL;
3582 }
3583
3584 /**
3585 * Compute the PS prolog key, which contains all the information needed to
3586 * build the PS prolog function, and set related bits in shader->config.
3587 */
3588 void si_get_ps_prolog_key(struct si_shader *shader,
3589 union si_shader_part_key *key,
3590 bool separate_prolog)
3591 {
3592 struct si_shader_info *info = &shader->selector->info;
3593
3594 memset(key, 0, sizeof(*key));
3595 key->ps_prolog.states = shader->key.part.ps.prolog;
3596 key->ps_prolog.colors_read = info->colors_read;
3597 key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
3598 key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
3599 key->ps_prolog.wqm = info->uses_derivatives &&
3600 (key->ps_prolog.colors_read ||
3601 key->ps_prolog.states.force_persp_sample_interp ||
3602 key->ps_prolog.states.force_linear_sample_interp ||
3603 key->ps_prolog.states.force_persp_center_interp ||
3604 key->ps_prolog.states.force_linear_center_interp ||
3605 key->ps_prolog.states.bc_optimize_for_persp ||
3606 key->ps_prolog.states.bc_optimize_for_linear);
3607 key->ps_prolog.ancillary_vgpr_index = shader->info.ancillary_vgpr_index;
3608
3609 if (info->colors_read) {
3610 unsigned *color = shader->selector->color_attr_index;
3611
3612 if (shader->key.part.ps.prolog.color_two_side) {
3613 /* BCOLORs are stored after the last input. */
3614 key->ps_prolog.num_interp_inputs = info->num_inputs;
3615 key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
3616 if (separate_prolog)
3617 shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
3618 }
3619
3620 for (unsigned i = 0; i < 2; i++) {
3621 unsigned interp = info->input_interpolate[color[i]];
3622 unsigned location = info->input_interpolate_loc[color[i]];
3623
3624 if (!(info->colors_read & (0xf << i*4)))
3625 continue;
3626
3627 key->ps_prolog.color_attr_index[i] = color[i];
3628
3629 if (shader->key.part.ps.prolog.flatshade_colors &&
3630 interp == TGSI_INTERPOLATE_COLOR)
3631 interp = TGSI_INTERPOLATE_CONSTANT;
3632
3633 switch (interp) {
3634 case TGSI_INTERPOLATE_CONSTANT:
3635 key->ps_prolog.color_interp_vgpr_index[i] = -1;
3636 break;
3637 case TGSI_INTERPOLATE_PERSPECTIVE:
3638 case TGSI_INTERPOLATE_COLOR:
3639 /* Force the interpolation location for colors here. */
3640 if (shader->key.part.ps.prolog.force_persp_sample_interp)
3641 location = TGSI_INTERPOLATE_LOC_SAMPLE;
3642 if (shader->key.part.ps.prolog.force_persp_center_interp)
3643 location = TGSI_INTERPOLATE_LOC_CENTER;
3644
3645 switch (location) {
3646 case TGSI_INTERPOLATE_LOC_SAMPLE:
3647 key->ps_prolog.color_interp_vgpr_index[i] = 0;
3648 if (separate_prolog) {
3649 shader->config.spi_ps_input_ena |=
3650 S_0286CC_PERSP_SAMPLE_ENA(1);
3651 }
3652 break;
3653 case TGSI_INTERPOLATE_LOC_CENTER:
3654 key->ps_prolog.color_interp_vgpr_index[i] = 2;
3655 if (separate_prolog) {
3656 shader->config.spi_ps_input_ena |=
3657 S_0286CC_PERSP_CENTER_ENA(1);
3658 }
3659 break;
3660 case TGSI_INTERPOLATE_LOC_CENTROID:
3661 key->ps_prolog.color_interp_vgpr_index[i] = 4;
3662 if (separate_prolog) {
3663 shader->config.spi_ps_input_ena |=
3664 S_0286CC_PERSP_CENTROID_ENA(1);
3665 }
3666 break;
3667 default:
3668 assert(0);
3669 }
3670 break;
3671 case TGSI_INTERPOLATE_LINEAR:
3672 /* Force the interpolation location for colors here. */
3673 if (shader->key.part.ps.prolog.force_linear_sample_interp)
3674 location = TGSI_INTERPOLATE_LOC_SAMPLE;
3675 if (shader->key.part.ps.prolog.force_linear_center_interp)
3676 location = TGSI_INTERPOLATE_LOC_CENTER;
3677
3678 /* The VGPR assignment for non-monolithic shaders
3679 * works because InitialPSInputAddr is set on the
3680 * main shader and PERSP_PULL_MODEL is never used.
3681 */
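/* Assumed PS input VGPR order behind these indices: 0-1 PERSP_SAMPLE ij,
 * 2-3 PERSP_CENTER ij, 4-5 PERSP_CENTROID ij, then the LINEAR_SAMPLE,
 * LINEAR_CENTER and LINEAR_CENTROID ij pairs. When the three
 * PERSP_PULL_MODEL VGPRs are allocated (the monolithic case), the
 * linear pairs move from 6/8/10 up to 9/11/13.
 */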
3682 switch (location) {
3683 case TGSI_INTERPOLATE_LOC_SAMPLE:
3684 key->ps_prolog.color_interp_vgpr_index[i] =
3685 separate_prolog ? 6 : 9;
3686 if (separate_prolog) {
3687 shader->config.spi_ps_input_ena |=
3688 S_0286CC_LINEAR_SAMPLE_ENA(1);
3689 }
3690 break;
3691 case TGSI_INTERPOLATE_LOC_CENTER:
3692 key->ps_prolog.color_interp_vgpr_index[i] =
3693 separate_prolog ? 8 : 11;
3694 if (separate_prolog) {
3695 shader->config.spi_ps_input_ena |=
3696 S_0286CC_LINEAR_CENTER_ENA(1);
3697 }
3698 break;
3699 case TGSI_INTERPOLATE_LOC_CENTROID:
3700 key->ps_prolog.color_interp_vgpr_index[i] =
3701 separate_prolog ? 10 : 13;
3702 if (separate_prolog) {
3703 shader->config.spi_ps_input_ena |=
3704 S_0286CC_LINEAR_CENTROID_ENA(1);
3705 }
3706 break;
3707 default:
3708 assert(0);
3709 }
3710 break;
3711 default:
3712 assert(0);
3713 }
3714 }
3715 }
3716 }
3717
3718 /**
3719 * Check whether a PS prolog is required based on the key.
3720 */
3721 bool si_need_ps_prolog(const union si_shader_part_key *key)
3722 {
3723 return key->ps_prolog.colors_read ||
3724 key->ps_prolog.states.force_persp_sample_interp ||
3725 key->ps_prolog.states.force_linear_sample_interp ||
3726 key->ps_prolog.states.force_persp_center_interp ||
3727 key->ps_prolog.states.force_linear_center_interp ||
3728 key->ps_prolog.states.bc_optimize_for_persp ||
3729 key->ps_prolog.states.bc_optimize_for_linear ||
3730 key->ps_prolog.states.poly_stipple ||
3731 key->ps_prolog.states.samplemask_log_ps_iter;
3732 }
3733
3734 /**
3735 * Compute the PS epilog key, which contains all the information needed to
3736 * build the PS epilog function.
3737 */
3738 void si_get_ps_epilog_key(struct si_shader *shader,
3739 union si_shader_part_key *key)
3740 {
3741 struct si_shader_info *info = &shader->selector->info;
3742 memset(key, 0, sizeof(*key));
3743 key->ps_epilog.colors_written = info->colors_written;
3744 key->ps_epilog.writes_z = info->writes_z;
3745 key->ps_epilog.writes_stencil = info->writes_stencil;
3746 key->ps_epilog.writes_samplemask = info->writes_samplemask;
3747 key->ps_epilog.states = shader->key.part.ps.epilog;
3748 }
3749
3750 /**
3751 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
3752 */
3753 static bool si_shader_select_ps_parts(struct si_screen *sscreen,
3754 struct ac_llvm_compiler *compiler,
3755 struct si_shader *shader,
3756 struct pipe_debug_callback *debug)
3757 {
3758 union si_shader_part_key prolog_key;
3759 union si_shader_part_key epilog_key;
3760
3761 /* Get the prolog. */
3762 si_get_ps_prolog_key(shader, &prolog_key, true);
3763
3764 /* The prolog is a no-op if none of the key bits are set. */
3765 if (si_need_ps_prolog(&prolog_key)) {
3766 shader->prolog =
3767 si_get_shader_part(sscreen, &sscreen->ps_prologs,
3768 PIPE_SHADER_FRAGMENT, true,
3769 &prolog_key, compiler, debug,
3770 si_llvm_build_ps_prolog,
3771 "Fragment Shader Prolog");
3772 if (!shader->prolog)
3773 return false;
3774 }
3775
3776 /* Get the epilog. */
3777 si_get_ps_epilog_key(shader, &epilog_key);
3778
3779 shader->epilog =
3780 si_get_shader_part(sscreen, &sscreen->ps_epilogs,
3781 PIPE_SHADER_FRAGMENT, false,
3782 &epilog_key, compiler, debug,
3783 si_llvm_build_ps_epilog,
3784 "Fragment Shader Epilog");
3785 if (!shader->epilog)
3786 return false;
3787
3788 /* Enable POS_FIXED_PT if polygon stippling is enabled. */
3789 if (shader->key.part.ps.prolog.poly_stipple) {
3790 shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
3791 assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
3792 }
3793
3794 /* Set up the enable bits for per-sample shading if needed. */
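/* (The prolog writes the forced sample weights into the center/centroid
 * input slots, so only the *_SAMPLE enable needs to stay set; the
 * center/centroid enables are cleared below.)
 */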
3795 if (shader->key.part.ps.prolog.force_persp_sample_interp &&
3796 (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
3797 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
3798 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
3799 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
3800 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
3801 }
3802 if (shader->key.part.ps.prolog.force_linear_sample_interp &&
3803 (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
3804 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
3805 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
3806 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
3807 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
3808 }
3809 if (shader->key.part.ps.prolog.force_persp_center_interp &&
3810 (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
3811 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
3812 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
3813 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
3814 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
3815 }
3816 if (shader->key.part.ps.prolog.force_linear_center_interp &&
3817 (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
3818 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
3819 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
3820 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
3821 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
3822 }
3823
3824 /* POS_W_FLOAT requires that one of the perspective weights is enabled. */
3825 if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
3826 !(shader->config.spi_ps_input_ena & 0xf)) {
3827 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
3828 assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
3829 }
3830
3831 /* At least one pair of interpolation weights must be enabled. */
3832 if (!(shader->config.spi_ps_input_ena & 0x7f)) {
3833 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
3834 assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
3835 }
3836
3837 /* Samplemask fixup requires the sample ID. */
3838 if (shader->key.part.ps.prolog.samplemask_log_ps_iter) {
3839 shader->config.spi_ps_input_ena |= S_0286CC_ANCILLARY_ENA(1);
3840 assert(G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr));
3841 }
3842
3843 /* The sample mask input is always enabled, because the API shader always
3844 * passes it through to the epilog. Disable it here if it's unused.
3845 */
3846 if (!shader->key.part.ps.epilog.poly_line_smoothing &&
3847 !shader->selector->info.reads_samplemask)
3848 shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;
3849
3850 return true;
3851 }
3852
3853 void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
3854 unsigned *lds_size)
3855 {
3856 /* If tessellation is all offchip and on-chip GS isn't used, this
3857 * workaround is not needed; return early (code below kept for reference).
3858 */
3859 return;
3860
3861 /* SPI barrier management bug:
3862 * Make sure we have at least 4k of LDS in use to avoid the bug.
3863 * It applies to workgroup sizes of more than one wavefront.
3864 */
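/* (lds_size appears to be in 512-byte allocation units here, so the
 * clamp to 8 below corresponds to the 4 KiB minimum mentioned above.)
 */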
3865 if (sscreen->info.family == CHIP_BONAIRE ||
3866 sscreen->info.family == CHIP_KABINI)
3867 *lds_size = MAX2(*lds_size, 8);
3868 }
3869
3870 void si_fix_resource_usage(struct si_screen *sscreen, struct si_shader *shader)
3871 {
3872 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
3873
3874 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
3875
3876 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
3877 si_get_max_workgroup_size(shader) > sscreen->compute_wave_size) {
3878 si_multiwave_lds_size_workaround(sscreen,
3879 &shader->config.lds_size);
3880 }
3881 }
3882
3883 bool si_create_shader_variant(struct si_screen *sscreen,
3884 struct ac_llvm_compiler *compiler,
3885 struct si_shader *shader,
3886 struct pipe_debug_callback *debug)
3887 {
3888 struct si_shader_selector *sel = shader->selector;
3889 struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
3890 int r;
3891
3892 /* LS, ES, VS are compiled on demand if the main part hasn't been
3893 * compiled for that stage.
3894 *
3895 * GS is compiled on demand if the main part hasn't been compiled
3896 * for the chosen NGG-ness.
3897 *
3898 * Vertex shaders are compiled on demand when a vertex fetch
3899 * workaround must be applied.
3900 */
3901 if (shader->is_monolithic) {
3902 /* Monolithic shader (compiled as a whole, has many variants,
3903 * may take a long time to compile).
3904 */
3905 r = si_compile_shader(sscreen, compiler, shader, debug);
3906 if (r)
3907 return false;
3908 } else {
3909 /* The shader consists of several parts:
3910 *
3911 * - the middle part is the user shader; it has only one variant,
3912 * which was compiled when the shader selector was created
3914 * - the prolog part is inserted at the beginning
3915 * - the epilog part is inserted at the end
3916 *
3917 * The prolog and epilog have many (but simple) variants.
3918 *
3919 * Starting with gfx9, geometry and tessellation control
3920 * shaders also contain the prolog and user shader parts of
3921 * the previous shader stage.
3922 */
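/* Rough sketch of the composition (gfx9+ merged parts in brackets):
 *
 * [prev-stage prolog -> prev-stage main ->] prolog -> main -> epilog
 */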
3923
3924 if (!mainp)
3925 return false;
3926
3927 /* Copy the compiled shader data over. */
3928 shader->is_binary_shared = true;
3929 shader->binary = mainp->binary;
3930 shader->config = mainp->config;
3931 shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
3932 shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
3933 shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
3934 shader->info.ancillary_vgpr_index = mainp->info.ancillary_vgpr_index;
3935 memcpy(shader->info.vs_output_param_offset,
3936 mainp->info.vs_output_param_offset,
3937 sizeof(mainp->info.vs_output_param_offset));
3938 shader->info.uses_instanceid = mainp->info.uses_instanceid;
3939 shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
3940 shader->info.nr_param_exports = mainp->info.nr_param_exports;
3941
3942 /* Select prologs and/or epilogs. */
3943 switch (sel->type) {
3944 case PIPE_SHADER_VERTEX:
3945 if (!si_shader_select_vs_parts(sscreen, compiler, shader, debug))
3946 return false;
3947 break;
3948 case PIPE_SHADER_TESS_CTRL:
3949 if (!si_shader_select_tcs_parts(sscreen, compiler, shader, debug))
3950 return false;
3951 break;
3952 case PIPE_SHADER_TESS_EVAL:
3953 break;
3954 case PIPE_SHADER_GEOMETRY:
3955 if (!si_shader_select_gs_parts(sscreen, compiler, shader, debug))
3956 return false;
3957 break;
3958 case PIPE_SHADER_FRAGMENT:
3959 if (!si_shader_select_ps_parts(sscreen, compiler, shader, debug))
3960 return false;
3961
3962 /* Make sure we have at least as many VGPRs as there
3963 * are allocated inputs.
3964 */
3965 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
3966 shader->info.num_input_vgprs);
3967 break;
3968 default: break;
3969 }
3970
3971 /* Update SGPR and VGPR counts. */
3972 if (shader->prolog) {
3973 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
3974 shader->prolog->config.num_sgprs);
3975 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
3976 shader->prolog->config.num_vgprs);
3977 }
3978 if (shader->previous_stage) {
3979 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
3980 shader->previous_stage->config.num_sgprs);
3981 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
3982 shader->previous_stage->config.num_vgprs);
3983 shader->config.spilled_sgprs =
3984 MAX2(shader->config.spilled_sgprs,
3985 shader->previous_stage->config.spilled_sgprs);
3986 shader->config.spilled_vgprs =
3987 MAX2(shader->config.spilled_vgprs,
3988 shader->previous_stage->config.spilled_vgprs);
3989 shader->info.private_mem_vgprs =
3990 MAX2(shader->info.private_mem_vgprs,
3991 shader->previous_stage->info.private_mem_vgprs);
3992 shader->config.scratch_bytes_per_wave =
3993 MAX2(shader->config.scratch_bytes_per_wave,
3994 shader->previous_stage->config.scratch_bytes_per_wave);
3995 shader->info.uses_instanceid |=
3996 shader->previous_stage->info.uses_instanceid;
3997 }
3998 if (shader->prolog2) {
3999 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
4000 shader->prolog2->config.num_sgprs);
4001 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
4002 shader->prolog2->config.num_vgprs);
4003 }
4004 if (shader->epilog) {
4005 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
4006 shader->epilog->config.num_sgprs);
4007 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
4008 shader->epilog->config.num_vgprs);
4009 }
4010 si_calculate_max_simd_waves(shader);
4011 }
4012
4013 if (shader->key.as_ngg) {
4014 assert(!shader->key.as_es && !shader->key.as_ls);
4015 gfx10_ngg_calculate_subgroup_info(shader);
4016 } else if (sscreen->info.chip_class >= GFX9 && sel->type == PIPE_SHADER_GEOMETRY) {
4017 gfx9_get_gs_info(shader->previous_stage_sel, sel, &shader->gs_info);
4018 }
4019
4020 si_fix_resource_usage(sscreen, shader);
4021 si_shader_dump(sscreen, shader, debug, stderr, true);
4022
4023 /* Upload. */
4024 if (!si_shader_binary_upload(sscreen, shader, 0)) {
4025 fprintf(stderr, "Failed to upload shader binary\n");
4026 return false;
4027 }
4028
4029 return true;
4030 }
4031
4032 void si_shader_destroy(struct si_shader *shader)
4033 {
4034 if (shader->scratch_bo)
4035 si_resource_reference(&shader->scratch_bo, NULL);
4036
4037 si_resource_reference(&shader->bo, NULL);
4038
4039 if (!shader->is_binary_shared)
4040 si_shader_binary_clean(&shader->binary);
4041
4042 free(shader->shader_log);
4043 }