radeonsi: fix monolithic pixel shaders with two-sided colors and SampleMaskIn
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include <llvm/Config/llvm-config.h>
26
27 #include "util/u_memory.h"
28 #include "tgsi/tgsi_strings.h"
29 #include "tgsi/tgsi_from_mesa.h"
30
31 #include "ac_exp_param.h"
32 #include "ac_shader_util.h"
33 #include "ac_rtld.h"
34 #include "ac_llvm_util.h"
35 #include "si_shader_internal.h"
36 #include "si_pipe.h"
37 #include "sid.h"
38
39 #include "compiler/nir/nir.h"
40 #include "compiler/nir/nir_serialize.h"
41
/* Linker symbol names standing in for dwords 0/1 of the scratch buffer
 * resource descriptor; presumably patched at binary upload time by the
 * ac_rtld linker (see the ac_rtld.h include) — NOTE(review): confirm. */
static const char scratch_rsrc_dword0_symbol[] =
	"SCRATCH_RSRC_DWORD0";

static const char scratch_rsrc_dword1_symbol[] =
	"SCRATCH_RSRC_DWORD1";
47
48 static void si_llvm_emit_barrier(struct si_shader_context *ctx);
49
50 static void si_dump_shader_key(const struct si_shader *shader, FILE *f);
51
52 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
53 union si_shader_part_key *key);
54 static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
55 union si_shader_part_key *key);
56 static void si_build_ps_prolog_function(struct si_shader_context *ctx,
57 union si_shader_part_key *key);
58 static void si_build_ps_epilog_function(struct si_shader_context *ctx,
59 union si_shader_part_key *key);
60 static void si_fix_resource_usage(struct si_screen *sscreen,
61 struct si_shader *shader);
62
63 /* Ideally pass the sample mask input to the PS epilog as v14, which
64 * is its usual location, so that the shader doesn't have to add v_mov.
65 */
66 #define PS_EPILOG_SAMPLEMASK_MIN_LOC 14
67
68 static bool llvm_type_is_64bit(struct si_shader_context *ctx,
69 LLVMTypeRef type)
70 {
71 if (type == ctx->ac.i64 || type == ctx->ac.f64)
72 return true;
73
74 return false;
75 }
76
77 /** Whether the shader runs as a combination of multiple API shaders */
78 static bool is_multi_part_shader(struct si_shader_context *ctx)
79 {
80 if (ctx->screen->info.chip_class <= GFX8)
81 return false;
82
83 return ctx->shader->key.as_ls ||
84 ctx->shader->key.as_es ||
85 ctx->type == PIPE_SHADER_TESS_CTRL ||
86 ctx->type == PIPE_SHADER_GEOMETRY;
87 }
88
89 /** Whether the shader runs on a merged HW stage (LSHS or ESGS) */
90 static bool is_merged_shader(struct si_shader_context *ctx)
91 {
92 return ctx->shader->key.as_ngg || is_multi_part_shader(ctx);
93 }
94
95 /**
96 * Returns a unique index for a per-patch semantic name and index. The index
97 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
98 * can be calculated.
99 */
100 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
101 {
102 switch (semantic_name) {
103 case TGSI_SEMANTIC_TESSOUTER:
104 return 0;
105 case TGSI_SEMANTIC_TESSINNER:
106 return 1;
107 case TGSI_SEMANTIC_PATCH:
108 assert(index < 30);
109 return 2 + index;
110
111 default:
112 assert(!"invalid semantic name");
113 return 0;
114 }
115 }
116
/**
 * Returns a unique index for a semantic name and index. The index must be
 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
 * calculated.
 *
 * \param is_varying  if true, COLOR and BCOLOR alias to the same slots
 */
unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index,
				       unsigned is_varying)
{
	switch (semantic_name) {
	case TGSI_SEMANTIC_POSITION:
		return 0;
	case TGSI_SEMANTIC_GENERIC:
		/* Since some shader stages use the highest used IO index
		 * to determine the size to allocate for inputs/outputs
		 * (in LDS, tess and GS rings), GENERIC should be placed right
		 * after POSITION to make that size as small as possible.
		 */
		if (index < SI_MAX_IO_GENERIC)
			return 1 + index;

		assert(!"invalid generic index");
		return 0;
	case TGSI_SEMANTIC_FOG:
		return SI_MAX_IO_GENERIC + 1;
	case TGSI_SEMANTIC_COLOR:
		assert(index < 2);
		return SI_MAX_IO_GENERIC + 2 + index;
	case TGSI_SEMANTIC_BCOLOR:
		assert(index < 2);
		/* If it's a varying, COLOR and BCOLOR alias. */
		if (is_varying)
			return SI_MAX_IO_GENERIC + 2 + index;
		else
			return SI_MAX_IO_GENERIC + 4 + index;
	case TGSI_SEMANTIC_TEXCOORD:
		assert(index < 8);
		return SI_MAX_IO_GENERIC + 6 + index;

	/* These are rarely used between LS and HS or ES and GS. */
	case TGSI_SEMANTIC_CLIPDIST:
		assert(index < 2);
		return SI_MAX_IO_GENERIC + 6 + 8 + index;
	case TGSI_SEMANTIC_CLIPVERTEX:
		return SI_MAX_IO_GENERIC + 6 + 8 + 2;
	case TGSI_SEMANTIC_PSIZE:
		return SI_MAX_IO_GENERIC + 6 + 8 + 3;

	/* These can't be written by LS, HS, and ES. */
	case TGSI_SEMANTIC_LAYER:
		return SI_MAX_IO_GENERIC + 6 + 8 + 4;
	case TGSI_SEMANTIC_VIEWPORT_INDEX:
		return SI_MAX_IO_GENERIC + 6 + 8 + 5;
	case TGSI_SEMANTIC_PRIMID:
		/* Make sure the highest slot still fits in 6 bits. */
		STATIC_ASSERT(SI_MAX_IO_GENERIC + 6 + 8 + 6 <= 63);
		return SI_MAX_IO_GENERIC + 6 + 8 + 6;
	default:
		fprintf(stderr, "invalid semantic name = %u\n", semantic_name);
		assert(!"invalid semantic name");
		return 0;
	}
}
178
179 /**
180 * Get the value of a shader input parameter and extract a bitfield.
181 */
182 static LLVMValueRef unpack_llvm_param(struct si_shader_context *ctx,
183 LLVMValueRef value, unsigned rshift,
184 unsigned bitwidth)
185 {
186 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
187 value = ac_to_integer(&ctx->ac, value);
188
189 if (rshift)
190 value = LLVMBuildLShr(ctx->ac.builder, value,
191 LLVMConstInt(ctx->i32, rshift, 0), "");
192
193 if (rshift + bitwidth < 32) {
194 unsigned mask = (1 << bitwidth) - 1;
195 value = LLVMBuildAnd(ctx->ac.builder, value,
196 LLVMConstInt(ctx->i32, mask, 0), "");
197 }
198
199 return value;
200 }
201
202 LLVMValueRef si_unpack_param(struct si_shader_context *ctx,
203 struct ac_arg param, unsigned rshift,
204 unsigned bitwidth)
205 {
206 LLVMValueRef value = ac_get_arg(&ctx->ac, param);
207
208 return unpack_llvm_param(ctx, value, rshift, bitwidth);
209 }
210
211 static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
212 {
213 switch (ctx->type) {
214 case PIPE_SHADER_TESS_CTRL:
215 return si_unpack_param(ctx, ctx->args.tcs_rel_ids, 0, 8);
216
217 case PIPE_SHADER_TESS_EVAL:
218 return ac_get_arg(&ctx->ac, ctx->tes_rel_patch_id);
219
220 default:
221 assert(0);
222 return NULL;
223 }
224 }
225
226 /* Tessellation shaders pass outputs to the next shader using LDS.
227 *
228 * LS outputs = TCS inputs
229 * TCS outputs = TES inputs
230 *
231 * The LDS layout is:
232 * - TCS inputs for patch 0
233 * - TCS inputs for patch 1
234 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
235 * - ...
236 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
237 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
238 * - TCS outputs for patch 1
239 * - Per-patch TCS outputs for patch 1
240 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
241 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
242 * - ...
243 *
244 * All three shaders VS(LS), TCS, TES share the same LDS space.
245 */
246
/* LS->HS input patch stride in dwords, packed in vs_state_bits[20:8]. */
static LLVMValueRef
get_tcs_in_patch_stride(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->vs_state_bits, 8, 13);
}
252
253 static unsigned get_tcs_out_vertex_dw_stride_constant(struct si_shader_context *ctx)
254 {
255 assert(ctx->type == PIPE_SHADER_TESS_CTRL);
256
257 if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
258 return util_last_bit64(ctx->shader->key.mono.u.ff_tcs_inputs_to_copy) * 4;
259
260 return util_last_bit64(ctx->shader->selector->outputs_written) * 4;
261 }
262
263 static LLVMValueRef get_tcs_out_vertex_dw_stride(struct si_shader_context *ctx)
264 {
265 unsigned stride = get_tcs_out_vertex_dw_stride_constant(ctx);
266
267 return LLVMConstInt(ctx->i32, stride, 0);
268 }
269
270 static LLVMValueRef get_tcs_out_patch_stride(struct si_shader_context *ctx)
271 {
272 if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
273 return si_unpack_param(ctx, ctx->tcs_out_lds_layout, 0, 13);
274
275 const struct tgsi_shader_info *info = &ctx->shader->selector->info;
276 unsigned tcs_out_vertices = info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
277 unsigned vertex_dw_stride = get_tcs_out_vertex_dw_stride_constant(ctx);
278 unsigned num_patch_outputs = util_last_bit64(ctx->shader->selector->patch_outputs_written);
279 unsigned patch_dw_stride = tcs_out_vertices * vertex_dw_stride +
280 num_patch_outputs * 4;
281 return LLVMConstInt(ctx->i32, patch_dw_stride, 0);
282 }
283
284 static LLVMValueRef
285 get_tcs_out_patch0_offset(struct si_shader_context *ctx)
286 {
287 return LLVMBuildMul(ctx->ac.builder,
288 si_unpack_param(ctx, ctx->tcs_out_lds_offsets, 0, 16),
289 LLVMConstInt(ctx->i32, 4, 0), "");
290 }
291
292 static LLVMValueRef
293 get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
294 {
295 return LLVMBuildMul(ctx->ac.builder,
296 si_unpack_param(ctx, ctx->tcs_out_lds_offsets, 16, 16),
297 LLVMConstInt(ctx->i32, 4, 0), "");
298 }
299
300 static LLVMValueRef
301 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
302 {
303 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
304 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
305
306 return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
307 }
308
309 static LLVMValueRef
310 get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
311 {
312 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
313 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
314 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
315
316 return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id, patch0_offset);
317 }
318
319 static LLVMValueRef
320 get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
321 {
322 LLVMValueRef patch0_patch_data_offset =
323 get_tcs_out_patch0_patch_data_offset(ctx);
324 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
325 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
326
327 return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id, patch0_patch_data_offset);
328 }
329
330 static LLVMValueRef get_num_tcs_out_vertices(struct si_shader_context *ctx)
331 {
332 unsigned tcs_out_vertices =
333 ctx->shader->selector ?
334 ctx->shader->selector->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] : 0;
335
336 /* If !tcs_out_vertices, it's either the fixed-func TCS or the TCS epilog. */
337 if (ctx->type == PIPE_SHADER_TESS_CTRL && tcs_out_vertices)
338 return LLVMConstInt(ctx->i32, tcs_out_vertices, 0);
339
340 return si_unpack_param(ctx, ctx->tcs_offchip_layout, 6, 6);
341 }
342
343 static LLVMValueRef get_tcs_in_vertex_dw_stride(struct si_shader_context *ctx)
344 {
345 unsigned stride;
346
347 switch (ctx->type) {
348 case PIPE_SHADER_VERTEX:
349 stride = ctx->shader->selector->lshs_vertex_stride / 4;
350 return LLVMConstInt(ctx->i32, stride, 0);
351
352 case PIPE_SHADER_TESS_CTRL:
353 if (ctx->screen->info.chip_class >= GFX9 &&
354 ctx->shader->is_monolithic) {
355 stride = ctx->shader->key.part.tcs.ls->lshs_vertex_stride / 4;
356 return LLVMConstInt(ctx->i32, stride, 0);
357 }
358 return si_unpack_param(ctx, ctx->vs_state_bits, 24, 8);
359
360 default:
361 assert(0);
362 return NULL;
363 }
364 }
365
366 static LLVMValueRef unpack_sint16(struct si_shader_context *ctx,
367 LLVMValueRef i32, unsigned index)
368 {
369 assert(index <= 1);
370
371 if (index == 1)
372 return LLVMBuildAShr(ctx->ac.builder, i32,
373 LLVMConstInt(ctx->i32, 16, 0), "");
374
375 return LLVMBuildSExt(ctx->ac.builder,
376 LLVMBuildTrunc(ctx->ac.builder, i32,
377 ctx->ac.i16, ""),
378 ctx->i32, "");
379 }
380
/**
 * Load one vertex shader input into out[0..3].
 *
 * Two paths:
 *  - vs_blit shaders (TGSI_PROPERTY_VS_BLIT_SGPRS_AMD set) receive their
 *    "inputs" packed in SGPRs and select per-vertex values with the
 *    vertex ID instead of fetching from memory;
 *  - regular shaders fetch from the vertex buffer descriptor list, with
 *    optional per-attribute format fixups taken from the shader key.
 *
 * \param input_index  vertex attribute slot
 * \param out          receives the four components (floats after the final
 *                     ac_to_float / conversion)
 */
void si_llvm_load_input_vs(
	struct si_shader_context *ctx,
	unsigned input_index,
	LLVMValueRef out[4])
{
	const struct tgsi_shader_info *info = &ctx->shader->selector->info;
	unsigned vs_blit_property = info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];

	if (vs_blit_property) {
		LLVMValueRef vertex_id = ctx->abi.vertex_id;
		/* sel_x1: vertices 0 and 1 take x1, vertex 2 takes x2. */
		LLVMValueRef sel_x1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntULE, vertex_id,
						    ctx->i32_1, "");
		/* Use LLVMIntNE, because we have 3 vertices and only
		 * the middle one should use y2.
		 */
		LLVMValueRef sel_y1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntNE, vertex_id,
						    ctx->i32_1, "");

		unsigned param_vs_blit_inputs = ctx->vs_blit_inputs.arg_index;
		if (input_index == 0) {
			/* Position: two SGPRs hold (x1,y1) and (x2,y2) as
			 * packed sint16 pairs. */
			LLVMValueRef x1y1 = LLVMGetParam(ctx->main_fn,
							 param_vs_blit_inputs);
			LLVMValueRef x2y2 = LLVMGetParam(ctx->main_fn,
							 param_vs_blit_inputs + 1);

			LLVMValueRef x1 = unpack_sint16(ctx, x1y1, 0);
			LLVMValueRef y1 = unpack_sint16(ctx, x1y1, 1);
			LLVMValueRef x2 = unpack_sint16(ctx, x2y2, 0);
			LLVMValueRef y2 = unpack_sint16(ctx, x2y2, 1);

			LLVMValueRef x = LLVMBuildSelect(ctx->ac.builder, sel_x1,
							 x1, x2, "");
			LLVMValueRef y = LLVMBuildSelect(ctx->ac.builder, sel_y1,
							 y1, y2, "");

			out[0] = LLVMBuildSIToFP(ctx->ac.builder, x, ctx->f32, "");
			out[1] = LLVMBuildSIToFP(ctx->ac.builder, y, ctx->f32, "");
			/* Depth comes pre-converted in the next SGPR. */
			out[2] = LLVMGetParam(ctx->main_fn,
					      param_vs_blit_inputs + 2);
			out[3] = ctx->ac.f32_1;
			return;
		}

		/* Color or texture coordinates: */
		assert(input_index == 1);

		if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
			/* A constant color: same value for all vertices. */
			for (int i = 0; i < 4; i++) {
				out[i] = LLVMGetParam(ctx->main_fn,
						      param_vs_blit_inputs + 3 + i);
			}
		} else {
			assert(vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD);
			/* Texcoords are selected per vertex like the position. */
			LLVMValueRef x1 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 3);
			LLVMValueRef y1 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 4);
			LLVMValueRef x2 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 5);
			LLVMValueRef y2 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 6);

			out[0] = LLVMBuildSelect(ctx->ac.builder, sel_x1,
						 x1, x2, "");
			out[1] = LLVMBuildSelect(ctx->ac.builder, sel_y1,
						 y1, y2, "");
			out[2] = LLVMGetParam(ctx->main_fn,
					      param_vs_blit_inputs + 7);
			out[3] = LLVMGetParam(ctx->main_fn,
					      param_vs_blit_inputs + 8);
		}
		return;
	}

	union si_vs_fix_fetch fix_fetch;
	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef vertex_index;
	LLVMValueRef tmp;

	/* Load the T list */
	t_list_ptr = ac_get_arg(&ctx->ac, ctx->vertex_buffers);

	t_offset = LLVMConstInt(ctx->i32, input_index, 0);

	t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);

	vertex_index = LLVMGetParam(ctx->main_fn,
				    ctx->vertex_index0.arg_index +
				    input_index);

	/* Use the open-coded implementation for all loads of doubles and
	 * of dword-sized data that needs fixups. We need to insert conversion
	 * code anyway, and the amd/common code does it for us.
	 *
	 * Note: On LLVM <= 8, we can only open-code formats with
	 * channel size >= 4 bytes.
	 */
	bool opencode = ctx->shader->key.mono.vs_fetch_opencode & (1 << input_index);
	fix_fetch.bits = ctx->shader->key.mono.vs_fix_fetch[input_index].bits;
	if (opencode ||
	    (fix_fetch.u.log_size == 3 && fix_fetch.u.format == AC_FETCH_FORMAT_FLOAT) ||
	    (fix_fetch.u.log_size == 2)) {
		tmp = ac_build_opencoded_load_format(
				&ctx->ac, fix_fetch.u.log_size, fix_fetch.u.num_channels_m1 + 1,
				fix_fetch.u.format, fix_fetch.u.reverse, !opencode,
				t_list, vertex_index, ctx->ac.i32_0, ctx->ac.i32_0, 0, true);
		for (unsigned i = 0; i < 4; ++i)
			out[i] = LLVMBuildExtractElement(ctx->ac.builder, tmp, LLVMConstInt(ctx->i32, i, false), "");
		return;
	}

	/* Do multiple loads for special formats. */
	unsigned required_channels = util_last_bit(info->input_usage_mask[input_index]);
	LLVMValueRef fetches[4];
	unsigned num_fetches;
	unsigned fetch_stride;
	unsigned channels_per_fetch;

	if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2) {
		/* 3-channel formats with 1- or 2-byte channels: fetch each
		 * channel with its own load. */
		num_fetches = MIN2(required_channels, 3);
		fetch_stride = 1 << fix_fetch.u.log_size;
		channels_per_fetch = 1;
	} else {
		num_fetches = 1;
		fetch_stride = 0;
		channels_per_fetch = required_channels;
	}

	for (unsigned i = 0; i < num_fetches; ++i) {
		LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);
		fetches[i] = ac_build_buffer_load_format(&ctx->ac, t_list, vertex_index, voffset,
							 channels_per_fetch, 0, true);
	}

	if (num_fetches == 1 && channels_per_fetch > 1) {
		/* Scalarize the single vector fetch into fetches[0..n-1]. */
		LLVMValueRef fetch = fetches[0];
		for (unsigned i = 0; i < channels_per_fetch; ++i) {
			tmp = LLVMConstInt(ctx->i32, i, false);
			fetches[i] = LLVMBuildExtractElement(
				ctx->ac.builder, fetch, tmp, "");
		}
		num_fetches = channels_per_fetch;
		channels_per_fetch = 1;
	}

	for (unsigned i = num_fetches; i < 4; ++i)
		fetches[i] = LLVMGetUndef(ctx->f32);

	if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2 &&
	    required_channels == 4) {
		/* The format has only 3 channels but W is used:
		 * synthesize W = 1 of the matching type. */
		if (fix_fetch.u.format == AC_FETCH_FORMAT_UINT || fix_fetch.u.format == AC_FETCH_FORMAT_SINT)
			fetches[3] = ctx->ac.i32_1;
		else
			fetches[3] = ctx->ac.f32_1;
	} else if (fix_fetch.u.log_size == 3 &&
		   (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ||
		    fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED ||
		    fix_fetch.u.format == AC_FETCH_FORMAT_SINT) &&
		   required_channels == 4) {
		/* For 2_10_10_10, the hardware returns an unsigned value;
		 * convert it to a signed one.
		 */
		LLVMValueRef tmp = fetches[3];
		LLVMValueRef c30 = LLVMConstInt(ctx->i32, 30, 0);

		/* First, recover the sign-extended signed integer value. */
		if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED)
			tmp = LLVMBuildFPToUI(ctx->ac.builder, tmp, ctx->i32, "");
		else
			tmp = ac_to_integer(&ctx->ac, tmp);

		/* For the integer-like cases, do a natural sign extension.
		 *
		 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
		 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
		 * exponent.
		 */
		tmp = LLVMBuildShl(ctx->ac.builder, tmp,
				   fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ?
				   LLVMConstInt(ctx->i32, 7, 0) : c30, "");
		tmp = LLVMBuildAShr(ctx->ac.builder, tmp, c30, "");

		/* Convert back to the right type. */
		if (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM) {
			LLVMValueRef clamp;
			LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
			/* Clamp to -1.0: the two's-complement minimum would
			 * otherwise map slightly below -1. */
			clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, tmp, neg_one, "");
			tmp = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, tmp, "");
		} else if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED) {
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
		}

		fetches[3] = tmp;
	}

	for (unsigned i = 0; i < 4; ++i)
		out[i] = ac_to_float(&ctx->ac, fetches[i]);
}
585
586 LLVMValueRef si_get_primitive_id(struct si_shader_context *ctx,
587 unsigned swizzle)
588 {
589 if (swizzle > 0)
590 return ctx->i32_0;
591
592 switch (ctx->type) {
593 case PIPE_SHADER_VERTEX:
594 return ac_get_arg(&ctx->ac, ctx->vs_prim_id);
595 case PIPE_SHADER_TESS_CTRL:
596 return ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id);
597 case PIPE_SHADER_TESS_EVAL:
598 return ac_get_arg(&ctx->ac, ctx->args.tes_patch_id);
599 case PIPE_SHADER_GEOMETRY:
600 return ac_get_arg(&ctx->ac, ctx->args.gs_prim_id);
601 default:
602 assert(0);
603 return ctx->i32_0;
604 }
605 }
606
607 static LLVMValueRef get_dw_address_from_generic_indices(struct si_shader_context *ctx,
608 LLVMValueRef vertex_dw_stride,
609 LLVMValueRef base_addr,
610 LLVMValueRef vertex_index,
611 LLVMValueRef param_index,
612 ubyte name, ubyte index)
613 {
614 if (vertex_dw_stride) {
615 base_addr = ac_build_imad(&ctx->ac, vertex_index,
616 vertex_dw_stride, base_addr);
617 }
618
619 if (param_index) {
620 base_addr = ac_build_imad(&ctx->ac, param_index,
621 LLVMConstInt(ctx->i32, 4, 0), base_addr);
622 }
623
624 int param = name == TGSI_SEMANTIC_PATCH ||
625 name == TGSI_SEMANTIC_TESSINNER ||
626 name == TGSI_SEMANTIC_TESSOUTER ?
627 si_shader_io_get_unique_index_patch(name, index) :
628 si_shader_io_get_unique_index(name, index, false);
629
630 /* Add the base address of the element. */
631 return LLVMBuildAdd(ctx->ac.builder, base_addr,
632 LLVMConstInt(ctx->i32, param * 4, 0), "");
633 }
634
/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 * ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 * ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 * ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 * ...
 *
 * Note that every attribute has 4 components.
 */
/* Compute the byte address of one attribute in the layout above.
 * If vertex_index is NULL, a per-patch attribute is addressed (starting
 * at the patch-data offset packed in tcs_offchip_layout). */
static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
					       LLVMValueRef rel_patch_id,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
	LLVMValueRef param_stride, constant16;

	vertices_per_patch = get_num_tcs_out_vertices(ctx);
	num_patches = si_unpack_param(ctx, ctx->tcs_offchip_layout, 0, 6);
	total_vertices = LLVMBuildMul(ctx->ac.builder, vertices_per_patch,
				      num_patches, "");

	/* 16 bytes = one vec4 attribute. */
	constant16 = LLVMConstInt(ctx->i32, 16, 0);
	if (vertex_index) {
		/* Per-vertex: index by (patch * verts_per_patch + vertex);
		 * consecutive attributes are total_vertices apart. */
		base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
					  vertices_per_patch, vertex_index);
		param_stride = total_vertices;
	} else {
		/* Per-patch: index by patch; attributes are num_patches apart. */
		base_addr = rel_patch_id;
		param_stride = num_patches;
	}

	base_addr = ac_build_imad(&ctx->ac, param_index, param_stride, base_addr);
	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		/* Per-patch data starts after all per-vertex attributes. */
		LLVMValueRef patch_data_offset =
			si_unpack_param(ctx, ctx->tcs_offchip_layout, 12, 20);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
688
689 /* This is a generic helper that can be shared by the NIR and TGSI backends */
690 static LLVMValueRef get_tcs_tes_buffer_address_from_generic_indices(
691 struct si_shader_context *ctx,
692 LLVMValueRef vertex_index,
693 LLVMValueRef param_index,
694 ubyte name, ubyte index)
695 {
696 unsigned param_index_base;
697
698 param_index_base = name == TGSI_SEMANTIC_PATCH ||
699 name == TGSI_SEMANTIC_TESSINNER ||
700 name == TGSI_SEMANTIC_TESSOUTER ?
701 si_shader_io_get_unique_index_patch(name, index) :
702 si_shader_io_get_unique_index(name, index, false);
703
704 if (param_index) {
705 param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
706 LLVMConstInt(ctx->i32, param_index_base, 0),
707 "");
708 } else {
709 param_index = LLVMConstInt(ctx->i32, param_index_base, 0);
710 }
711
712 return get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx),
713 vertex_index, param_index);
714 }
715
716 static LLVMValueRef si_build_gather_64bit(struct si_shader_context *ctx,
717 LLVMTypeRef type,
718 LLVMValueRef val1,
719 LLVMValueRef val2)
720 {
721 LLVMValueRef values[2] = {
722 ac_to_integer(&ctx->ac, val1),
723 ac_to_integer(&ctx->ac, val2),
724 };
725 LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, 2);
726 return LLVMBuildBitCast(ctx->ac.builder, result, type, "");
727 }
728
/**
 * Load from a buffer (used for the TCS->TES off-chip ring).
 *
 * \param type     scalar element type (may be 64-bit)
 * \param swizzle  component to load (0..3), or ~0 to load a whole vec4
 * \param buffer   buffer resource descriptor
 * \param offset   buffer offset
 * \param base     base offset added by the load intrinsic
 */
static LLVMValueRef buffer_load(struct si_shader_context *ctx,
				LLVMTypeRef type, unsigned swizzle,
				LLVMValueRef buffer, LLVMValueRef offset,
				LLVMValueRef base, bool can_speculate)
{
	LLVMValueRef value, value2;
	LLVMTypeRef vec_type = LLVMVectorType(type, 4);

	if (swizzle == ~0) {
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, ac_glc, can_speculate, false);

		return LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
	}

	if (!llvm_type_is_64bit(ctx, type)) {
		/* 32-bit: load the whole vec4 and extract one component. */
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, ac_glc, can_speculate, false);

		value = LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
		return LLVMBuildExtractElement(ctx->ac.builder, value,
					       LLVMConstInt(ctx->i32, swizzle, 0), "");
	}

	/* 64-bit: load the two halves separately and recombine. */
	value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				     swizzle * 4, ac_glc, can_speculate, false);

	value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				      swizzle * 4 + 4, ac_glc, can_speculate, false);

	return si_build_gather_64bit(ctx, type, value, value2);
}
761
/**
 * Load from LSHS LDS storage.
 *
 * \param type output value type (recursion loads i32 halves for 64-bit types)
 * \param swizzle offset (typically 0..3); it can be ~0, which loads a vec4
 * \param dw_addr address in dwords
 */
static LLVMValueRef lshs_lds_load(struct si_shader_context *ctx,
				  LLVMTypeRef type, unsigned swizzle,
				  LLVMValueRef dw_addr)
{
	LLVMValueRef value;

	if (swizzle == ~0) {
		/* Recurse per channel and gather into a vec4. */
		LLVMValueRef values[4];

		for (unsigned chan = 0; chan < 4; chan++)
			values[chan] = lshs_lds_load(ctx, type, chan, dw_addr);

		return ac_build_gather_values(&ctx->ac, values, 4);
	}

	/* Split 64-bit loads. */
	if (llvm_type_is_64bit(ctx, type)) {
		LLVMValueRef lo, hi;

		lo = lshs_lds_load(ctx, ctx->i32, swizzle, dw_addr);
		hi = lshs_lds_load(ctx, ctx->i32, swizzle + 1, dw_addr);
		return si_build_gather_64bit(ctx, type, lo, hi);
	}

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->i32, swizzle, 0), "");

	value = ac_lds_load(&ctx->ac, dw_addr);

	return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
}
800
801 /**
802 * Store to LSHS LDS storage.
803 *
804 * \param swizzle offset (typically 0..3)
805 * \param dw_addr address in dwords
806 * \param value value to store
807 */
808 static void lshs_lds_store(struct si_shader_context *ctx,
809 unsigned dw_offset_imm, LLVMValueRef dw_addr,
810 LLVMValueRef value)
811 {
812 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
813 LLVMConstInt(ctx->i32, dw_offset_imm, 0), "");
814
815 ac_lds_store(&ctx->ac, dw_addr, value);
816 }
817
/* Which tessellation ring descriptor to build (see get_tess_ring_descriptor). */
enum si_tess_ring {
	TCS_FACTOR_RING,	/* tess factor ring, placed tess_offchip_ring_size after the base */
	TESS_OFFCHIP_RING_TCS,	/* off-chip TCS->TES ring, address from tcs_out_lds_layout */
	TESS_OFFCHIP_RING_TES,	/* off-chip TCS->TES ring, address from tes_offchip_addr */
};
823
/**
 * Build a 128-bit buffer resource descriptor for one of the tess rings.
 *
 * The 32-bit base address comes from a shader argument: tes_offchip_addr
 * for the TES view, tcs_out_lds_layout (high bits only) for the TCS views.
 */
static LLVMValueRef get_tess_ring_descriptor(struct si_shader_context *ctx,
					     enum si_tess_ring ring)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef addr = ac_get_arg(&ctx->ac,
				       ring == TESS_OFFCHIP_RING_TES ?
				       ctx->tes_offchip_addr :
				       ctx->tcs_out_lds_layout);

	/* TCS only receives high 13 bits of the address. */
	if (ring == TESS_OFFCHIP_RING_TCS || ring == TCS_FACTOR_RING) {
		addr = LLVMBuildAnd(builder, addr,
				    LLVMConstInt(ctx->i32, 0xfff80000, 0), "");
	}

	if (ring == TCS_FACTOR_RING) {
		/* The tess factor ring sits right after the off-chip ring. */
		unsigned tf_offset = ctx->screen->tess_offchip_ring_size;
		addr = LLVMBuildAdd(builder, addr,
				    LLVMConstInt(ctx->i32, tf_offset, 0), "");
	}

	/* Descriptor dword 3: identity swizzle plus a 32-bit float format
	 * (register layout differs between GFX10+ and older chips). */
	uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

	if (ctx->screen->info.chip_class >= GFX10)
		rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
			 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
			 S_008F0C_RESOURCE_LEVEL(1);
	else
		rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

	LLVMValueRef desc[4];
	desc[0] = addr;	/* base address, low 32 bits */
	desc[1] = LLVMConstInt(ctx->i32,
			       S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);
	desc[2] = LLVMConstInt(ctx->i32, 0xffffffff, 0);	/* num_records: unbounded */
	desc[3] = LLVMConstInt(ctx->i32, rsrc3, false);

	return ac_build_gather_values(&ctx->ac, desc, 4);
}
867
/**
 * ac_shader_abi callback: load a TCS input or output from LSHS LDS.
 *
 * \param vertex_index  dynamic vertex index, or NULL for per-patch access
 * \param param_index   dynamic attribute index, or NULL to use const_index
 * \param load_input    true to read a TCS input, false to read back an output
 */
static LLVMValueRef si_nir_load_tcs_varyings(struct ac_shader_abi *abi,
					     LLVMTypeRef type,
					     LLVMValueRef vertex_index,
					     LLVMValueRef param_index,
					     unsigned const_index,
					     unsigned location,
					     unsigned driver_location,
					     unsigned component,
					     unsigned num_components,
					     bool is_patch,
					     bool is_compact,
					     bool load_input)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef dw_addr, stride;
	ubyte name, index;

	/* driver_location counts in units of 4 (one vec4 slot). */
	driver_location = driver_location / 4;

	if (load_input) {
		name = info->input_semantic_name[driver_location];
		index = info->input_semantic_index[driver_location];
	} else {
		name = info->output_semantic_name[driver_location];
		index = info->output_semantic_index[driver_location];
	}

	assert((name == TGSI_SEMANTIC_PATCH ||
		name == TGSI_SEMANTIC_TESSINNER ||
		name == TGSI_SEMANTIC_TESSOUTER) == is_patch);

	if (load_input) {
		stride = get_tcs_in_vertex_dw_stride(ctx);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (is_patch) {
			/* Per-patch outputs have no per-vertex stride. */
			stride = NULL;
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		} else {
			stride = get_tcs_out_vertex_dw_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		}
	}

	if (!param_index) {
		param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
						      vertex_index, param_index,
						      name, index);

	LLVMValueRef value[4];
	for (unsigned i = 0; i < num_components; i++) {
		unsigned offset = i;
		if (llvm_type_is_64bit(ctx, type))
			offset *= 2;	/* 64-bit components occupy two dwords */

		offset += component;
		value[i + component] = lshs_lds_load(ctx, type, offset, dw_addr);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}
933
/**
 * ac_shader_abi callback: load a TES input from the off-chip TCS->TES
 * buffer (see the layout comment above get_tcs_tes_buffer_address).
 */
LLVMValueRef si_nir_load_input_tes(struct ac_shader_abi *abi,
				   LLVMTypeRef type,
				   LLVMValueRef vertex_index,
				   LLVMValueRef param_index,
				   unsigned const_index,
				   unsigned location,
				   unsigned driver_location,
				   unsigned component,
				   unsigned num_components,
				   bool is_patch,
				   bool is_compact,
				   bool load_input)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef base, addr;

	/* driver_location counts in units of 4 (one vec4 slot). */
	driver_location = driver_location / 4;
	ubyte name = info->input_semantic_name[driver_location];
	ubyte index = info->input_semantic_index[driver_location];

	assert((name == TGSI_SEMANTIC_PATCH ||
		name == TGSI_SEMANTIC_TESSINNER ||
		name == TGSI_SEMANTIC_TESSOUTER) == is_patch);

	base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);

	if (!param_index) {
		param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index,
							       name, index);

	/* TODO: This will generate rather ordinary llvm code, although it
	 * should be easy for the optimiser to fix up. In future we might want
	 * to refactor buffer_load(), but for now this maximises code sharing
	 * between the NIR and TGSI backends.
	 */
	LLVMValueRef value[4];
	for (unsigned i = 0; i < num_components; i++) {
		unsigned offset = i;
		if (llvm_type_is_64bit(ctx, type)) {
			offset *= 2;
			if (offset == 4) {
				/* A 64-bit value crossing the vec4 boundary
				 * continues in the next driver location. */
				ubyte name = info->input_semantic_name[driver_location + 1];
				ubyte index = info->input_semantic_index[driver_location + 1];
				addr = get_tcs_tes_buffer_address_from_generic_indices(ctx,
										       vertex_index,
										       param_index,
										       name, index);
			}

			offset = offset % 4;
		}

		offset += component;
		value[i + component] = buffer_load(ctx, type, offset,
						   ctx->tess_offchip_ring, base, addr, true);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}
998
/* Store a TCS output to LDS (for reads by other TCS invocations) and to the
 * off-chip ring buffer (for reads by the TES), skipping whichever store is
 * provably unneeded.  Tess factors are additionally copied into invoc0
 * allocas so the TCS epilog can source them from VGPRs.
 */
static void si_nir_store_output_tcs(struct ac_shader_abi *abi,
				    const struct nir_variable *var,
				    LLVMValueRef vertex_index,
				    LLVMValueRef param_index,
				    unsigned const_index,
				    LLVMValueRef src,
				    unsigned writemask)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	const unsigned component = var->data.location_frac;
	unsigned driver_location = var->data.driver_location;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef buffer, base, addr;
	LLVMValueRef values[8];
	bool skip_lds_store;
	bool is_tess_factor = false, is_tess_inner = false;

	/* driver_location is in dwords; convert to vec4 slots. */
	driver_location = driver_location / 4;
	ubyte name = info->output_semantic_name[driver_location];
	ubyte index = info->output_semantic_index[driver_location];

	/* A NULL param_index means the slot index is a compile-time constant. */
	bool is_const = !param_index;
	if (!param_index)
		param_index = LLVMConstInt(ctx->i32, const_index, 0);

	const bool is_patch = var->data.patch ||
			      var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
			      var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;

	assert((name == TGSI_SEMANTIC_PATCH ||
		name == TGSI_SEMANTIC_TESSINNER ||
		name == TGSI_SEMANTIC_TESSOUTER) == is_patch);

	/* Compute the LDS address and decide whether the LDS store can be
	 * skipped because no invocation reads this class of output.
	 */
	if (!is_patch) {
		stride = get_tcs_out_vertex_dw_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
							      vertex_index, param_index,
							      name, index);

		skip_lds_store = !info->reads_pervertex_outputs;
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address_from_generic_indices(ctx, NULL, dw_addr,
							      vertex_index, param_index,
							      name, index);

		skip_lds_store = !info->reads_perpatch_outputs;

		if (is_const && const_index == 0) {
			int name = info->output_semantic_name[driver_location];

			/* Always write tess factors into LDS for the TCS epilog. */
			if (name == TGSI_SEMANTIC_TESSINNER ||
			    name == TGSI_SEMANTIC_TESSOUTER) {
				/* The epilog doesn't read LDS if invocation 0 defines tess factors. */
				skip_lds_store = !info->reads_tessfactor_outputs &&
						 ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs;
				is_tess_factor = true;
				is_tess_inner = name == TGSI_SEMANTIC_TESSINNER;
			}
		}
	}

	buffer = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);

	base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);

	addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index, name, index);

	for (unsigned chan = component; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);

		unsigned buffer_store_offset = chan % 4;
		/* Channels 4..7 spill into the next vec4 slot; recompute the
		 * buffer address from that slot's semantics.
		 */
		if (chan == 4) {
			ubyte name = info->output_semantic_name[driver_location + 1];
			ubyte index = info->output_semantic_index[driver_location + 1];
			addr = get_tcs_tes_buffer_address_from_generic_indices(ctx,
									       vertex_index,
									       param_index,
									       name, index);
		}

		/* Skip LDS stores if there is no LDS read of this output. */
		if (!skip_lds_store)
			lshs_lds_store(ctx, chan, dw_addr, value);

		value = ac_to_integer(&ctx->ac, value);
		values[chan] = value;

		/* Partial writemasks store dword by dword; a full vec4 write
		 * is batched into one store after the loop.
		 */
		if (writemask != 0xF && !is_tess_factor) {
			ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
						    addr, base,
						    4 * buffer_store_offset,
						    ac_glc);
		}

		/* Write tess factors into VGPRs for the epilog. */
		if (is_tess_factor &&
		    ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs) {
			if (!is_tess_inner) {
				LLVMBuildStore(ctx->ac.builder, value, /* outer */
					       ctx->invoc0_tess_factors[chan]);
			} else if (chan < 2) {
				LLVMBuildStore(ctx->ac.builder, value, /* inner */
					       ctx->invoc0_tess_factors[4 + chan]);
			}
		}
	}

	if (writemask == 0xF && !is_tess_factor) {
		LLVMValueRef value = ac_build_gather_values(&ctx->ac,
							    values, 4);
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, addr,
					    base, 0, ac_glc);
	}
}
1120
/* Load one component of a GS input (written by the preceding ES stage).
 *
 * On GFX9+ the ESGS ring lives in LDS; on GFX6-8 it is a memory ring
 * buffer.  swizzle == ~0 loads all four components at once (GFX6-8 path
 * only, via recursion).
 */
static LLVMValueRef si_llvm_load_input_gs(struct ac_shader_abi *abi,
					  unsigned input_index,
					  unsigned vtx_offset_param,
					  LLVMTypeRef type,
					  unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *shader = ctx->shader;
	LLVMValueRef vtx_offset, soffset;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[input_index];
	unsigned semantic_index = info->input_semantic_index[input_index];
	unsigned param;
	LLVMValueRef value;

	param = si_shader_io_get_unique_index(semantic_name, semantic_index, false);

	/* GFX9 has the ESGS ring in LDS. */
	if (ctx->screen->info.chip_class >= GFX9) {
		unsigned index = vtx_offset_param;

		/* The six vertex offsets are packed two per SGPR, 16 bits each. */
		switch (index / 2) {
		case 0:
			vtx_offset = si_unpack_param(ctx, ctx->gs_vtx01_offset,
						     index % 2 ? 16 : 0, 16);
			break;
		case 1:
			vtx_offset = si_unpack_param(ctx, ctx->gs_vtx23_offset,
						     index % 2 ? 16 : 0, 16);
			break;
		case 2:
			vtx_offset = si_unpack_param(ctx, ctx->gs_vtx45_offset,
						     index % 2 ? 16 : 0, 16);
			break;
		default:
			assert(0);
			return NULL;
		}

		unsigned offset = param * 4 + swizzle;
		vtx_offset = LLVMBuildAdd(ctx->ac.builder, vtx_offset,
					  LLVMConstInt(ctx->i32, offset, false), "");

		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->esgs_ring, vtx_offset);
		LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, ptr, "");
		/* 64-bit inputs need a second dword load from the next slot. */
		if (llvm_type_is_64bit(ctx, type)) {
			ptr = LLVMBuildGEP(ctx->ac.builder, ptr,
					   &ctx->ac.i32_1, 1, "");
			LLVMValueRef values[2] = {
				value,
				LLVMBuildLoad(ctx->ac.builder, ptr, "")
			};
			value = ac_build_gather_values(&ctx->ac, values, 2);
		}
		return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
	}

	/* GFX6: input load from the ESGS ring in memory. */
	if (swizzle == ~0) {
		/* Load the whole vec4 component by component. */
		LLVMValueRef values[4];
		unsigned chan;
		for (chan = 0; chan < 4; chan++) {
			values[chan] = si_llvm_load_input_gs(abi, input_index, vtx_offset_param,
							     type, chan);
		}
		return ac_build_gather_values(&ctx->ac, values, 4);
	}

	/* Get the vertex offset parameter on GFX6. */
	LLVMValueRef gs_vtx_offset = ac_get_arg(&ctx->ac,
						ctx->gs_vtx_offset[vtx_offset_param]);

	vtx_offset = LLVMBuildMul(ctx->ac.builder, gs_vtx_offset,
				  LLVMConstInt(ctx->i32, 4, 0), "");

	/* Each attribute dword occupies a 256-byte stride in the ring. */
	soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle) * 256, 0);

	value = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1, ctx->i32_0,
				     vtx_offset, soffset, 0, ac_glc, true, false);
	if (llvm_type_is_64bit(ctx, type)) {
		LLVMValueRef value2;
		soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle + 1) * 256, 0);

		value2 = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1,
					      ctx->i32_0, vtx_offset, soffset,
					      0, ac_glc, true, false);
		return si_build_gather_64bit(ctx, type, value, value2);
	}
	return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
}
1211
1212 static LLVMValueRef si_nir_load_input_gs(struct ac_shader_abi *abi,
1213 unsigned location,
1214 unsigned driver_location,
1215 unsigned component,
1216 unsigned num_components,
1217 unsigned vertex_index,
1218 unsigned const_index,
1219 LLVMTypeRef type)
1220 {
1221 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1222
1223 LLVMValueRef value[4];
1224 for (unsigned i = 0; i < num_components; i++) {
1225 unsigned offset = i;
1226 if (llvm_type_is_64bit(ctx, type))
1227 offset *= 2;
1228
1229 offset += component;
1230 value[i + component] = si_llvm_load_input_gs(&ctx->abi, driver_location / 4 + const_index,
1231 vertex_index, type, offset);
1232 }
1233
1234 return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
1235 }
1236
1237 static LLVMValueRef si_build_fs_interp(struct si_shader_context *ctx,
1238 unsigned attr_index, unsigned chan,
1239 LLVMValueRef prim_mask,
1240 LLVMValueRef i, LLVMValueRef j)
1241 {
1242 if (i || j) {
1243 return ac_build_fs_interp(&ctx->ac,
1244 LLVMConstInt(ctx->i32, chan, 0),
1245 LLVMConstInt(ctx->i32, attr_index, 0),
1246 prim_mask, i, j);
1247 }
1248 return ac_build_fs_interp_mov(&ctx->ac,
1249 LLVMConstInt(ctx->i32, 2, 0), /* P0 */
1250 LLVMConstInt(ctx->i32, chan, 0),
1251 LLVMConstInt(ctx->i32, attr_index, 0),
1252 prim_mask);
1253 }
1254
/**
 * Interpolate a fragment shader input, selecting between front and back
 * colors when two-sided lighting is enabled in the PS prolog key.
 *
 * @param ctx		context
 * @param input_index		index of the input in hardware
 * @param semantic_index	semantic index
 * @param num_interp_inputs	number of all interpolated inputs (= BCOLOR offset)
 * @param colors_read_mask	color components read (4 bits for each color, 8 bits in total)
 * @param interp_param		interpolation weights (i,j)
 * @param prim_mask		SI_PARAM_PRIM_MASK
 * @param face			SI_PARAM_FRONT_FACE
 * @param result		the return value (4 components)
 */
static void interp_fs_color(struct si_shader_context *ctx,
			    unsigned input_index,
			    unsigned semantic_index,
			    unsigned num_interp_inputs,
			    unsigned colors_read_mask,
			    LLVMValueRef interp_param,
			    LLVMValueRef prim_mask,
			    LLVMValueRef face,
			    LLVMValueRef result[4])
{
	LLVMValueRef i = NULL, j = NULL;
	unsigned chan;

	/* fs.constant returns the param from the middle vertex, so it's not
	 * really useful for flat shading. It's meant to be used for custom
	 * interpolation (but the intrinsic can't fetch from the other two
	 * vertices).
	 *
	 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
	 * to do the right thing. The only reason we use fs.constant is that
	 * fs.interp cannot be used on integers, because they can be equal
	 * to NaN.
	 *
	 * When interp is false we will use fs.constant or for newer llvm,
	 * amdgcn.interp.mov.
	 */
	bool interp = interp_param != NULL;

	if (interp) {
		/* Split the packed (i, j) barycentrics. */
		interp_param = LLVMBuildBitCast(ctx->ac.builder, interp_param,
						LLVMVectorType(ctx->f32, 2), "");

		i = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
					    ctx->i32_0, "");
		j = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
					    ctx->i32_1, "");
	}

	if (ctx->shader->key.part.ps.prolog.color_two_side) {
		LLVMValueRef is_face_positive;

		/* If BCOLOR0 is used, BCOLOR1 is at offset "num_inputs + 1",
		 * otherwise it's at offset "num_inputs".
		 */
		unsigned back_attr_offset = num_interp_inputs;
		if (semantic_index == 1 && colors_read_mask & 0xf)
			back_attr_offset += 1;

		/* face is compared as an i32: 0 = back face, nonzero = front. */
		is_face_positive = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
						 face, ctx->i32_0, "");

		for (chan = 0; chan < 4; chan++) {
			LLVMValueRef front, back;

			front = si_build_fs_interp(ctx,
						   input_index, chan,
						   prim_mask, i, j);
			back = si_build_fs_interp(ctx,
						  back_attr_offset, chan,
						  prim_mask, i, j);

			result[chan] = LLVMBuildSelect(ctx->ac.builder,
						       is_face_positive,
						       front,
						       back,
						       "");
		}
	} else {
		for (chan = 0; chan < 4; chan++) {
			result[chan] = si_build_fs_interp(ctx,
							  input_index, chan,
							  prim_mask, i, j);
		}
	}
}
1344
1345 LLVMValueRef si_get_sample_id(struct si_shader_context *ctx)
1346 {
1347 return si_unpack_param(ctx, ctx->args.ancillary, 8, 4);
1348 }
1349
1350 static LLVMValueRef get_base_vertex(struct ac_shader_abi *abi)
1351 {
1352 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1353
1354 /* For non-indexed draws, the base vertex set by the driver
1355 * (for direct draws) or the CP (for indirect draws) is the
1356 * first vertex ID, but GLSL expects 0 to be returned.
1357 */
1358 LLVMValueRef vs_state = ac_get_arg(&ctx->ac,
1359 ctx->vs_state_bits);
1360 LLVMValueRef indexed;
1361
1362 indexed = LLVMBuildLShr(ctx->ac.builder, vs_state, ctx->i32_1, "");
1363 indexed = LLVMBuildTrunc(ctx->ac.builder, indexed, ctx->i1, "");
1364
1365 return LLVMBuildSelect(ctx->ac.builder, indexed,
1366 ac_get_arg(&ctx->ac, ctx->args.base_vertex),
1367 ctx->i32_0, "");
1368 }
1369
1370 static LLVMValueRef get_block_size(struct ac_shader_abi *abi)
1371 {
1372 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1373
1374 LLVMValueRef values[3];
1375 LLVMValueRef result;
1376 unsigned i;
1377 unsigned *properties = ctx->shader->selector->info.properties;
1378
1379 if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
1380 unsigned sizes[3] = {
1381 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
1382 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
1383 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
1384 };
1385
1386 for (i = 0; i < 3; ++i)
1387 values[i] = LLVMConstInt(ctx->i32, sizes[i], 0);
1388
1389 result = ac_build_gather_values(&ctx->ac, values, 3);
1390 } else {
1391 result = ac_get_arg(&ctx->ac, ctx->block_size);
1392 }
1393
1394 return result;
1395 }
1396
1397 /**
1398 * Load a dword from a constant buffer.
1399 */
1400 static LLVMValueRef buffer_load_const(struct si_shader_context *ctx,
1401 LLVMValueRef resource,
1402 LLVMValueRef offset)
1403 {
1404 return ac_build_buffer_load(&ctx->ac, resource, 1, NULL, offset, NULL,
1405 0, 0, true, true);
1406 }
1407
1408 static LLVMValueRef load_sample_position(struct ac_shader_abi *abi, LLVMValueRef sample_id)
1409 {
1410 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1411 LLVMValueRef desc = ac_get_arg(&ctx->ac, ctx->rw_buffers);
1412 LLVMValueRef buf_index = LLVMConstInt(ctx->i32, SI_PS_CONST_SAMPLE_POSITIONS, 0);
1413 LLVMValueRef resource = ac_build_load_to_sgpr(&ctx->ac, desc, buf_index);
1414
1415 /* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
1416 LLVMValueRef offset0 = LLVMBuildMul(ctx->ac.builder, sample_id, LLVMConstInt(ctx->i32, 8, 0), "");
1417 LLVMValueRef offset1 = LLVMBuildAdd(ctx->ac.builder, offset0, LLVMConstInt(ctx->i32, 4, 0), "");
1418
1419 LLVMValueRef pos[4] = {
1420 buffer_load_const(ctx, resource, offset0),
1421 buffer_load_const(ctx, resource, offset1),
1422 LLVMConstReal(ctx->f32, 0),
1423 LLVMConstReal(ctx->f32, 0)
1424 };
1425
1426 return ac_build_gather_values(&ctx->ac, pos, 4);
1427 }
1428
1429 static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
1430 {
1431 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1432 return ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args.sample_coverage));
1433 }
1434
1435 static LLVMValueRef si_load_tess_coord(struct ac_shader_abi *abi)
1436 {
1437 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1438 LLVMValueRef coord[4] = {
1439 ac_get_arg(&ctx->ac, ctx->tes_u),
1440 ac_get_arg(&ctx->ac, ctx->tes_v),
1441 ctx->ac.f32_0,
1442 ctx->ac.f32_0
1443 };
1444
1445 /* For triangles, the vector should be (u, v, 1-u-v). */
1446 if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
1447 PIPE_PRIM_TRIANGLES) {
1448 coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
1449 LLVMBuildFAdd(ctx->ac.builder,
1450 coord[0], coord[1], ""), "");
1451 }
1452 return ac_build_gather_values(&ctx->ac, coord, 4);
1453 }
1454
1455 static LLVMValueRef load_tess_level(struct si_shader_context *ctx,
1456 unsigned semantic_name)
1457 {
1458 LLVMValueRef base, addr;
1459
1460 int param = si_shader_io_get_unique_index_patch(semantic_name, 0);
1461
1462 base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
1463 addr = get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), NULL,
1464 LLVMConstInt(ctx->i32, param, 0));
1465
1466 return buffer_load(ctx, ctx->f32,
1467 ~0, ctx->tess_offchip_ring, base, addr, true);
1468
1469 }
1470
1471 static LLVMValueRef load_tess_level_default(struct si_shader_context *ctx,
1472 unsigned semantic_name)
1473 {
1474 LLVMValueRef buf, slot, val[4];
1475 int i, offset;
1476
1477 slot = LLVMConstInt(ctx->i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
1478 buf = ac_get_arg(&ctx->ac, ctx->rw_buffers);
1479 buf = ac_build_load_to_sgpr(&ctx->ac, buf, slot);
1480 offset = semantic_name == TGSI_SEMANTIC_TESS_DEFAULT_INNER_LEVEL ? 4 : 0;
1481
1482 for (i = 0; i < 4; i++)
1483 val[i] = buffer_load_const(ctx, buf,
1484 LLVMConstInt(ctx->i32, (offset + i) * 4, 0));
1485 return ac_build_gather_values(&ctx->ac, val, 4);
1486 }
1487
1488 static LLVMValueRef si_load_tess_level(struct ac_shader_abi *abi,
1489 unsigned varying_id,
1490 bool load_default_state)
1491 {
1492 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1493 unsigned semantic_name;
1494
1495 if (load_default_state) {
1496 switch (varying_id) {
1497 case VARYING_SLOT_TESS_LEVEL_INNER:
1498 semantic_name = TGSI_SEMANTIC_TESS_DEFAULT_INNER_LEVEL;
1499 break;
1500 case VARYING_SLOT_TESS_LEVEL_OUTER:
1501 semantic_name = TGSI_SEMANTIC_TESS_DEFAULT_OUTER_LEVEL;
1502 break;
1503 default:
1504 unreachable("unknown tess level");
1505 }
1506 return load_tess_level_default(ctx, semantic_name);
1507 }
1508
1509 switch (varying_id) {
1510 case VARYING_SLOT_TESS_LEVEL_INNER:
1511 semantic_name = TGSI_SEMANTIC_TESSINNER;
1512 break;
1513 case VARYING_SLOT_TESS_LEVEL_OUTER:
1514 semantic_name = TGSI_SEMANTIC_TESSOUTER;
1515 break;
1516 default:
1517 unreachable("unknown tess level");
1518 }
1519
1520 return load_tess_level(ctx, semantic_name);
1521
1522 }
1523
1524 static LLVMValueRef si_load_patch_vertices_in(struct ac_shader_abi *abi)
1525 {
1526 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1527 if (ctx->type == PIPE_SHADER_TESS_CTRL)
1528 return si_unpack_param(ctx, ctx->tcs_out_lds_layout, 13, 6);
1529 else if (ctx->type == PIPE_SHADER_TESS_EVAL)
1530 return get_num_tcs_out_vertices(ctx);
1531 else
1532 unreachable("invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
1533 }
1534
1535 void si_declare_compute_memory(struct si_shader_context *ctx)
1536 {
1537 struct si_shader_selector *sel = ctx->shader->selector;
1538 unsigned lds_size = sel->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE];
1539
1540 LLVMTypeRef i8p = LLVMPointerType(ctx->i8, AC_ADDR_SPACE_LDS);
1541 LLVMValueRef var;
1542
1543 assert(!ctx->ac.lds);
1544
1545 var = LLVMAddGlobalInAddressSpace(ctx->ac.module,
1546 LLVMArrayType(ctx->i8, lds_size),
1547 "compute_lds",
1548 AC_ADDR_SPACE_LDS);
1549 LLVMSetAlignment(var, 64 * 1024);
1550
1551 ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, var, i8p, "");
1552 }
1553
/* Synthesize a buffer descriptor for const buffer 0 from its raw pointer.
 *
 * Only valid when the shader declares const buffer 0 and nothing else, so
 * the pointer itself arrives in SGPRs and we can build the descriptor
 * inline instead of loading it from memory.
 */
static LLVMValueRef load_const_buffer_desc_fast_path(struct si_shader_context *ctx)
{
	LLVMValueRef ptr =
		ac_get_arg(&ctx->ac, ctx->const_and_shader_buffers);
	struct si_shader_selector *sel = ctx->shader->selector;

	/* Do the bounds checking with a descriptor, because
	 * doing computation and manual bounds checking of 64-bit
	 * addresses generates horrible VALU code with very high
	 * VGPR usage and very low SIMD occupancy.
	 */
	ptr = LLVMBuildPtrToInt(ctx->ac.builder, ptr, ctx->ac.intptr, "");

	LLVMValueRef desc0, desc1;
	desc0 = ptr;
	desc1 = LLVMConstInt(ctx->i32,
			     S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);

	uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

	/* The format fields of dword 3 changed layout on GFX10. */
	if (ctx->screen->info.chip_class >= GFX10)
		rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
			 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
			 S_008F0C_RESOURCE_LEVEL(1);
	else
		rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

	LLVMValueRef desc_elems[] = {
		desc0,
		desc1,
		/* num_records = buffer size in bytes (16 bytes per vec4). */
		LLVMConstInt(ctx->i32, (sel->info.const_file_max[0] + 1) * 16, 0),
		LLVMConstInt(ctx->i32, rsrc3, false)
	};

	return ac_build_gather_values(&ctx->ac, desc_elems, 4);
}
1594
1595 static LLVMValueRef load_ubo(struct ac_shader_abi *abi, LLVMValueRef index)
1596 {
1597 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1598 struct si_shader_selector *sel = ctx->shader->selector;
1599
1600 LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->const_and_shader_buffers);
1601
1602 if (sel->info.const_buffers_declared == 1 &&
1603 sel->info.shader_buffers_declared == 0) {
1604 return load_const_buffer_desc_fast_path(ctx);
1605 }
1606
1607 index = si_llvm_bound_index(ctx, index, ctx->num_const_buffers);
1608 index = LLVMBuildAdd(ctx->ac.builder, index,
1609 LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");
1610
1611 return ac_build_load_to_sgpr(&ctx->ac, ptr, index);
1612 }
1613
1614 static LLVMValueRef
1615 load_ssbo(struct ac_shader_abi *abi, LLVMValueRef index, bool write)
1616 {
1617 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1618 LLVMValueRef rsrc_ptr = ac_get_arg(&ctx->ac,
1619 ctx->const_and_shader_buffers);
1620
1621 index = si_llvm_bound_index(ctx, index, ctx->num_shader_buffers);
1622 index = LLVMBuildSub(ctx->ac.builder,
1623 LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS - 1, 0),
1624 index, "");
1625
1626 return ac_build_load_to_sgpr(&ctx->ac, rsrc_ptr, index);
1627 }
1628
/* Initialize arguments for the shader export intrinsic.
 *
 * For fragment shaders the export format comes from the shader key
 * (per color buffer); all other stages export full 32_ABGR.
 */
static void si_llvm_init_export_args(struct si_shader_context *ctx,
				     LLVMValueRef *values,
				     unsigned target,
				     struct ac_export_args *args)
{
	LLVMValueRef f32undef = LLVMGetUndef(ctx->ac.f32);
	unsigned spi_shader_col_format = V_028714_SPI_SHADER_32_ABGR;
	unsigned chan;
	/* Only assigned on the fragment path below, and only read by packi,
	 * which is only selected by fragment-only formats.
	 */
	bool is_int8, is_int10;

	/* Default is 0xf. Adjusted below depending on the format. */
	args->enabled_channels = 0xf; /* writemask */

	/* Specify whether the EXEC mask represents the valid mask */
	args->valid_mask = 0;

	/* Specify whether this is the last export */
	args->done = 0;

	/* Specify the target we are exporting */
	args->target = target;

	if (ctx->type == PIPE_SHADER_FRAGMENT) {
		const struct si_shader_key *key = &ctx->shader->key;
		unsigned col_formats = key->part.ps.epilog.spi_shader_col_format;
		int cbuf = target - V_008DFC_SQ_EXP_MRT;

		assert(cbuf >= 0 && cbuf < 8);
		/* 4 format bits per color buffer. */
		spi_shader_col_format = (col_formats >> (cbuf * 4)) & 0xf;
		is_int8 = (key->part.ps.epilog.color_is_int8 >> cbuf) & 0x1;
		is_int10 = (key->part.ps.epilog.color_is_int10 >> cbuf) & 0x1;
	}

	args->compr = false;
	args->out[0] = f32undef;
	args->out[1] = f32undef;
	args->out[2] = f32undef;
	args->out[3] = f32undef;

	/* Packing callbacks used by the 16-bit formats below. */
	LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
	LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
			      unsigned bits, bool hi) = NULL;

	switch (spi_shader_col_format) {
	case V_028714_SPI_SHADER_ZERO:
		args->enabled_channels = 0; /* writemask */
		args->target = V_008DFC_SQ_EXP_NULL;
		break;

	case V_028714_SPI_SHADER_32_R:
		args->enabled_channels = 1; /* writemask */
		args->out[0] = values[0];
		break;

	case V_028714_SPI_SHADER_32_GR:
		args->enabled_channels = 0x3; /* writemask */
		args->out[0] = values[0];
		args->out[1] = values[1];
		break;

	case V_028714_SPI_SHADER_32_AR:
		/* GFX10 packs R and A into the first two channels. */
		if (ctx->screen->info.chip_class >= GFX10) {
			args->enabled_channels = 0x3; /* writemask */
			args->out[0] = values[0];
			args->out[1] = values[3];
		} else {
			args->enabled_channels = 0x9; /* writemask */
			args->out[0] = values[0];
			args->out[3] = values[3];
		}
		break;

	case V_028714_SPI_SHADER_FP16_ABGR:
		packf = ac_build_cvt_pkrtz_f16;
		break;

	case V_028714_SPI_SHADER_UNORM16_ABGR:
		packf = ac_build_cvt_pknorm_u16;
		break;

	case V_028714_SPI_SHADER_SNORM16_ABGR:
		packf = ac_build_cvt_pknorm_i16;
		break;

	case V_028714_SPI_SHADER_UINT16_ABGR:
		packi = ac_build_cvt_pk_u16;
		break;

	case V_028714_SPI_SHADER_SINT16_ABGR:
		packi = ac_build_cvt_pk_i16;
		break;

	case V_028714_SPI_SHADER_32_ABGR:
		memcpy(&args->out[0], values, sizeof(values[0]) * 4);
		break;
	}

	/* Pack f16 or norm_i16/u16. */
	if (packf) {
		for (chan = 0; chan < 2; chan++) {
			LLVMValueRef pack_args[2] = {
				values[2 * chan],
				values[2 * chan + 1]
			};
			LLVMValueRef packed;

			packed = packf(&ctx->ac, pack_args);
			args->out[chan] = ac_to_float(&ctx->ac, packed);
		}
		args->compr = 1; /* COMPR flag */
	}
	/* Pack i16/u16. */
	if (packi) {
		for (chan = 0; chan < 2; chan++) {
			LLVMValueRef pack_args[2] = {
				ac_to_integer(&ctx->ac, values[2 * chan]),
				ac_to_integer(&ctx->ac, values[2 * chan + 1])
			};
			LLVMValueRef packed;

			packed = packi(&ctx->ac, pack_args,
				       is_int8 ? 8 : is_int10 ? 10 : 16,
				       chan == 1);
			args->out[chan] = ac_to_float(&ctx->ac, packed);
		}
		args->compr = 1; /* COMPR flag */
	}
}
1758
/* Emit the alpha test: kill the fragment if it fails.
 * PIPE_FUNC_NEVER kills unconditionally.  PIPE_FUNC_ALWAYS has no entry in
 * cond_map (cond would be 0 and the assert would fire), so it is presumably
 * filtered out before this is reached -- NOTE(review): confirm in callers.
 */
static void si_alpha_test(struct si_shader_context *ctx, LLVMValueRef alpha)
{
	if (ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_NEVER) {
		/* Ordered predicates: a NaN alpha fails the test. */
		static LLVMRealPredicate cond_map[PIPE_FUNC_ALWAYS + 1] = {
			[PIPE_FUNC_LESS] = LLVMRealOLT,
			[PIPE_FUNC_EQUAL] = LLVMRealOEQ,
			[PIPE_FUNC_LEQUAL] = LLVMRealOLE,
			[PIPE_FUNC_GREATER] = LLVMRealOGT,
			[PIPE_FUNC_NOTEQUAL] = LLVMRealONE,
			[PIPE_FUNC_GEQUAL] = LLVMRealOGE,
		};
		LLVMRealPredicate cond = cond_map[ctx->shader->key.part.ps.epilog.alpha_func];
		assert(cond);

		LLVMValueRef alpha_ref = LLVMGetParam(ctx->main_fn,
				SI_PARAM_ALPHA_REF);
		LLVMValueRef alpha_pass =
			LLVMBuildFCmp(ctx->ac.builder, cond, alpha, alpha_ref, "");
		ac_build_kill_if_false(&ctx->ac, alpha_pass);
	} else {
		/* NEVER: kill all fragments. */
		ac_build_kill_if_false(&ctx->ac, ctx->i1false);
	}
}
1782
1783 static LLVMValueRef si_scale_alpha_by_sample_mask(struct si_shader_context *ctx,
1784 LLVMValueRef alpha,
1785 unsigned samplemask_param)
1786 {
1787 LLVMValueRef coverage;
1788
1789 /* alpha = alpha * popcount(coverage) / SI_NUM_SMOOTH_AA_SAMPLES */
1790 coverage = LLVMGetParam(ctx->main_fn,
1791 samplemask_param);
1792 coverage = ac_to_integer(&ctx->ac, coverage);
1793
1794 coverage = ac_build_intrinsic(&ctx->ac, "llvm.ctpop.i32",
1795 ctx->i32,
1796 &coverage, 1, AC_FUNC_ATTR_READNONE);
1797
1798 coverage = LLVMBuildUIToFP(ctx->ac.builder, coverage,
1799 ctx->f32, "");
1800
1801 coverage = LLVMBuildFMul(ctx->ac.builder, coverage,
1802 LLVMConstReal(ctx->f32,
1803 1.0 / SI_NUM_SMOOTH_AA_SAMPLES), "");
1804
1805 return LLVMBuildFMul(ctx->ac.builder, alpha, coverage, "");
1806 }
1807
/* Compute user clip-plane distances from the clip vertex and fill the two
 * clip-distance export slots pos[2] and pos[3].
 *
 * out_elts is the 4-component clip vertex; plane coefficients are read from
 * the SI_VS_CONST_CLIP_PLANES internal constant buffer.
 */
static void si_llvm_emit_clipvertex(struct si_shader_context *ctx,
				    struct ac_export_args *pos, LLVMValueRef *out_elts)
{
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
	LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
						   SI_VS_CONST_CLIP_PLANES, 0);
	LLVMValueRef const_resource = ac_build_load_to_sgpr(&ctx->ac, ptr, constbuf_index);

	/* Two export registers, four clip distances each. */
	for (reg_index = 0; reg_index < 2; reg_index ++) {
		struct ac_export_args *args = &pos[2 + reg_index];

		args->out[0] =
		args->out[1] =
		args->out[2] =
		args->out[3] = LLVMConstReal(ctx->f32, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < 4; chan++) {
			for (const_chan = 0; const_chan < 4; const_chan++) {
				/* Byte offset of plane (reg_index*4+chan),
				 * component const_chan (vec4 layout).
				 */
				LLVMValueRef addr =
					LLVMConstInt(ctx->i32, ((reg_index * 4 + chan) * 4 +
								const_chan) * 4, 0);
				base_elt = buffer_load_const(ctx, const_resource,
							     addr);
				args->out[chan] = ac_build_fmad(&ctx->ac, base_elt,
								out_elts[const_chan], args->out[chan]);
			}
		}

		args->enabled_channels = 0xf;
		args->valid_mask = 0;
		args->done = 0;
		args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
		args->compr = 0;
	}
}
1848
1849 static void si_dump_streamout(struct pipe_stream_output_info *so)
1850 {
1851 unsigned i;
1852
1853 if (so->num_outputs)
1854 fprintf(stderr, "STREAMOUT\n");
1855
1856 for (i = 0; i < so->num_outputs; i++) {
1857 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
1858 so->output[i].start_component;
1859 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
1860 i, so->output[i].output_buffer,
1861 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
1862 so->output[i].register_index,
1863 mask & 1 ? "x" : "",
1864 mask & 2 ? "y" : "",
1865 mask & 4 ? "z" : "",
1866 mask & 8 ? "w" : "");
1867 }
1868 }
1869
/* Store one streamout output (1-4 components) into its transform feedback
 * buffer at the precomputed per-thread write offset.
 */
void si_emit_streamout_output(struct si_shader_context *ctx,
			      LLVMValueRef const *so_buffers,
			      LLVMValueRef const *so_write_offsets,
			      struct pipe_stream_output *stream_out,
			      struct si_shader_output_values *shader_out)
{
	unsigned buf_idx = stream_out->output_buffer;
	unsigned start = stream_out->start_component;
	unsigned num_comps = stream_out->num_components;
	LLVMValueRef out[4];

	assert(num_comps && num_comps <= 4);
	if (!num_comps || num_comps > 4)
		return;

	/* Load the output as int. */
	for (int j = 0; j < num_comps; j++) {
		assert(stream_out->stream == shader_out->vertex_stream[start + j]);

		out[j] = ac_to_integer(&ctx->ac, shader_out->values[start + j]);
	}

	/* Pack the output. */
	LLVMValueRef vdata = NULL;

	switch (num_comps) {
	case 1: /* as i32 */
		vdata = out[0];
		break;
	case 2: /* as v2i32 */
	case 3: /* as v3i32 */
		if (ac_has_vec3_support(ctx->screen->info.chip_class, false)) {
			vdata = ac_build_gather_values(&ctx->ac, out, num_comps);
			break;
		}
		/* Chips without v3 stores pad to a vec4 with an undef lane. */
		/* as v4i32 (aligned to 4) */
		out[3] = LLVMGetUndef(ctx->i32);
		/* fall through */
	case 4: /* as v4i32 */
		vdata = ac_build_gather_values(&ctx->ac, out, util_next_power_of_two(num_comps));
		break;
	}

	ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
				    vdata, num_comps,
				    so_write_offsets[buf_idx],
				    ctx->i32_0,
				    stream_out->dst_offset * 4, ac_glc | ac_slc);
}
1919
1920 /**
1921 * Write streamout data to buffers for vertex stream @p stream (different
1922 * vertex streams can occur for GS copy shaders).
1923 */
static void si_llvm_emit_streamout(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput, unsigned stream)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	struct pipe_stream_output_info *so = &sel->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	int i;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		si_unpack_param(ctx, ctx->streamout_config, 16, 7);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	ac_build_ifcc(&ctx->ac, can_emit, 6501);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */

		LLVMValueRef so_write_index =
			ac_get_arg(&ctx->ac,
				   ctx->streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Load the descriptor and compute the write offset for each
		 * enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		LLVMValueRef so_buffers[4];
		LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac,
						  ctx->rw_buffers);

		for (i = 0; i < 4; i++) {
			/* A zero stride means the buffer slot is unused. */
			if (!so->stride[i])
				continue;

			LLVMValueRef offset = LLVMConstInt(ctx->i32,
							   SI_VS_STREAMOUT_BUF0 + i, 0);

			so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);

			/* streamout_offset is in dwords; convert to bytes. */
			LLVMValueRef so_offset = ac_get_arg(&ctx->ac,
							    ctx->streamout_offset[i]);
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");

			/* write_offset = so_write_index * stride_in_bytes + so_offset */
			so_write_offset[i] = ac_build_imad(&ctx->ac, so_write_index,
							   LLVMConstInt(ctx->i32, so->stride[i]*4, 0),
							   so_offset);
		}

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned reg = so->output[i].register_index;

			/* Skip outputs that don't exist in this shader. */
			if (reg >= noutput)
				continue;

			/* Only write outputs belonging to the requested stream. */
			if (stream != so->output[i].stream)
				continue;

			si_emit_streamout_output(ctx, so_buffers, so_write_offset,
						 &so->output[i], &outputs[reg]);
		}
	}
	ac_build_endif(&ctx->ac, 6501);
}
2002
2003 static void si_export_param(struct si_shader_context *ctx, unsigned index,
2004 LLVMValueRef *values)
2005 {
2006 struct ac_export_args args;
2007
2008 si_llvm_init_export_args(ctx, values,
2009 V_008DFC_SQ_EXP_PARAM + index, &args);
2010 ac_build_export(&ctx->ac, &args);
2011 }
2012
/* Export all parameter (non-position) outputs for interpolation by the next
 * stage and record each one's PARAM slot in vs_output_param_offset. */
static void si_build_param_exports(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput)
{
	struct si_shader *shader = ctx->shader;
	unsigned param_count = 0;

	for (unsigned i = 0; i < noutput; i++) {
		unsigned semantic_name = outputs[i].semantic_name;
		unsigned semantic_index = outputs[i].semantic_index;

		/* Skip outputs where no component belongs to vertex stream 0;
		 * only stream 0 is rasterized. */
		if (outputs[i].vertex_stream[0] != 0 &&
		    outputs[i].vertex_stream[1] != 0 &&
		    outputs[i].vertex_stream[2] != 0 &&
		    outputs[i].vertex_stream[3] != 0)
			continue;

		/* Only these semantics are exported as parameters. */
		switch (semantic_name) {
		case TGSI_SEMANTIC_LAYER:
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
		case TGSI_SEMANTIC_CLIPDIST:
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			break;
		default:
			continue;
		}

		/* Skip outputs the shader key marks as unused by the next
		 * stage (kill_outputs). GENERIC indices >= SI_MAX_IO_GENERIC
		 * have no unique-index bit and are always exported. */
		if ((semantic_name != TGSI_SEMANTIC_GENERIC ||
		     semantic_index < SI_MAX_IO_GENERIC) &&
		    shader->key.opt.kill_outputs &
		    (1ull << si_shader_io_get_unique_index(semantic_name,
							   semantic_index, true)))
			continue;

		si_export_param(ctx, param_count, outputs[i].values);

		assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
		shader->info.vs_output_param_offset[i] = param_count++;
	}

	shader->info.nr_param_exports = param_count;
}
2060
2061 /**
2062 * Vertex color clamping.
2063 *
2064 * This uses a state constant loaded in a user data SGPR and
2065 * an IF statement is added that clamps all colors if the constant
2066 * is true.
2067 */
static void si_vertex_color_clamping(struct si_shader_context *ctx,
				     struct si_shader_output_values *outputs,
				     unsigned noutput)
{
	LLVMValueRef addr[SI_MAX_VS_OUTPUTS][4];
	bool has_colors = false;

	/* Store original colors to alloca variables. The allocas merge the
	 * clamped/unclamped values across the conditional without building
	 * phi nodes by hand. */
	for (unsigned i = 0; i < noutput; i++) {
		if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
		    outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
			continue;

		for (unsigned j = 0; j < 4; j++) {
			addr[i][j] = ac_build_alloca_undef(&ctx->ac, ctx->f32, "");
			LLVMBuildStore(ctx->ac.builder, outputs[i].values[j], addr[i][j]);
		}
		has_colors = true;
	}

	/* Nothing to clamp if the shader writes no COLOR/BCOLOR outputs. */
	if (!has_colors)
		return;

	/* The state is in the first bit of the user SGPR. */
	LLVMValueRef cond = ac_get_arg(&ctx->ac, ctx->vs_state_bits);
	cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->i1, "");

	ac_build_ifcc(&ctx->ac, cond, 6502);

	/* Store clamped colors to alloca variables within the conditional block,
	 * overwriting the originals stored above. */
	for (unsigned i = 0; i < noutput; i++) {
		if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
		    outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
			continue;

		for (unsigned j = 0; j < 4; j++) {
			LLVMBuildStore(ctx->ac.builder,
				       ac_build_clamp(&ctx->ac, outputs[i].values[j]),
				       addr[i][j]);
		}
	}
	ac_build_endif(&ctx->ac, 6502);

	/* Load clamped colors (or the originals if the state bit was 0). */
	for (unsigned i = 0; i < noutput; i++) {
		if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
		    outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
			continue;

		for (unsigned j = 0; j < 4; j++) {
			outputs[i].values[j] =
				LLVMBuildLoad(ctx->ac.builder, addr[i][j], "");
		}
	}
}
2123
2124 /* Generate export instructions for hardware VS shader stage or NGG GS stage
2125 * (position and parameter data only).
2126 */
void si_llvm_export_vs(struct si_shader_context *ctx,
		       struct si_shader_output_values *outputs,
		       unsigned noutput)
{
	struct si_shader *shader = ctx->shader;
	struct ac_export_args pos_args[4] = {};
	LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
	unsigned pos_idx;
	int i;

	/* Optionally clamp COLOR/BCOLOR outputs based on a state bit. */
	si_vertex_color_clamping(ctx, outputs, noutput);

	/* Build position exports. Misc outputs (psize, layer, viewport,
	 * edgeflag) are only collected here and packed into POS1 below. */
	for (i = 0; i < noutput; i++) {
		switch (outputs[i].semantic_name) {
		case TGSI_SEMANTIC_POSITION:
			si_llvm_init_export_args(ctx, outputs[i].values,
						 V_008DFC_SQ_EXP_POS, &pos_args[0]);
			break;
		case TGSI_SEMANTIC_PSIZE:
			psize_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_LAYER:
			layer_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
			viewport_index_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_EDGEFLAG:
			edgeflag_value = outputs[i].values[0];
			break;
		case TGSI_SEMANTIC_CLIPDIST:
			if (!shader->key.opt.clip_disable) {
				/* Clip distances occupy POS2..POS3 slots. */
				unsigned index = 2 + outputs[i].semantic_index;
				si_llvm_init_export_args(ctx, outputs[i].values,
							 V_008DFC_SQ_EXP_POS + index,
							 &pos_args[index]);
			}
			break;
		case TGSI_SEMANTIC_CLIPVERTEX:
			if (!shader->key.opt.clip_disable) {
				si_llvm_emit_clipvertex(ctx, pos_args,
							outputs[i].values);
			}
			break;
		}
	}

	/* We need to add the position output manually if it's missing. */
	if (!pos_args[0].out[0]) {
		pos_args[0].enabled_channels = 0xf; /* writemask */
		pos_args[0].valid_mask = 0; /* EXEC mask */
		pos_args[0].done = 0; /* last export? */
		pos_args[0].target = V_008DFC_SQ_EXP_POS;
		pos_args[0].compr = 0; /* COMPR flag */
		pos_args[0].out[0] = ctx->ac.f32_0; /* X */
		pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[0].out[3] = ctx->ac.f32_1; /* W */
	}

	/* The edge flag goes through POS1 only for the legacy (non-NGG)
	 * pipeline. */
	bool pos_writes_edgeflag = shader->selector->info.writes_edgeflag &&
				   !shader->key.as_ngg;

	/* Write the misc vector (point size, edgeflag, layer, viewport). */
	if (shader->selector->info.writes_psize ||
	    pos_writes_edgeflag ||
	    shader->selector->info.writes_viewport_index ||
	    shader->selector->info.writes_layer) {
		pos_args[1].enabled_channels = shader->selector->info.writes_psize |
					       (pos_writes_edgeflag << 1) |
					       (shader->selector->info.writes_layer << 2);

		pos_args[1].valid_mask = 0; /* EXEC mask */
		pos_args[1].done = 0; /* last export? */
		pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
		pos_args[1].compr = 0; /* COMPR flag */
		pos_args[1].out[0] = ctx->ac.f32_0; /* X */
		pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
		pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
		pos_args[1].out[3] = ctx->ac.f32_0; /* W */

		if (shader->selector->info.writes_psize)
			pos_args[1].out[0] = psize_value;

		if (pos_writes_edgeflag) {
			/* The output is a float, but the hw expects an integer
			 * with the first bit containing the edge flag. */
			edgeflag_value = LLVMBuildFPToUI(ctx->ac.builder,
							 edgeflag_value,
							 ctx->i32, "");
			edgeflag_value = ac_build_umin(&ctx->ac,
						       edgeflag_value,
						       ctx->i32_1);

			/* The LLVM intrinsic expects a float. */
			pos_args[1].out[1] = ac_to_float(&ctx->ac, edgeflag_value);
		}

		if (ctx->screen->info.chip_class >= GFX9) {
			/* GFX9 has the layer in out.z[10:0] and the viewport
			 * index in out.z[19:16].
			 */
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				LLVMValueRef v = viewport_index_value;

				/* OR the viewport index into bits [19:16] of Z. */
				v = ac_to_integer(&ctx->ac, v);
				v = LLVMBuildShl(ctx->ac.builder, v,
						 LLVMConstInt(ctx->i32, 16, 0), "");
				v = LLVMBuildOr(ctx->ac.builder, v,
						ac_to_integer(&ctx->ac,  pos_args[1].out[2]), "");
				pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
				pos_args[1].enabled_channels |= 1 << 2;
			}
		} else {
			/* Pre-GFX9: layer in Z, viewport index in W. */
			if (shader->selector->info.writes_layer)
				pos_args[1].out[2] = layer_value;

			if (shader->selector->info.writes_viewport_index) {
				pos_args[1].out[3] = viewport_index_value;
				pos_args[1].enabled_channels |= 1 << 3;
			}
		}
	}

	for (i = 0; i < 4; i++)
		if (pos_args[i].out[0])
			shader->info.nr_pos_exports++;

	/* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
	 * Setting valid_mask=1 prevents it and has no other effect.
	 */
	if (ctx->screen->info.family == CHIP_NAVI10 ||
	    ctx->screen->info.family == CHIP_NAVI12 ||
	    ctx->screen->info.family == CHIP_NAVI14)
		pos_args[0].valid_mask = 1;

	/* Emit the used position exports with consecutive targets; mark
	 * the final one with DONE. */
	pos_idx = 0;
	for (i = 0; i < 4; i++) {
		if (!pos_args[i].out[0])
			continue;

		/* Specify the target we are exporting */
		pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;

		if (pos_idx == shader->info.nr_pos_exports)
			/* Specify that this is the last export */
			pos_args[i].done = 1;

		ac_build_export(&ctx->ac, &pos_args[i]);
	}

	/* Build parameter exports. */
	si_build_param_exports(ctx, outputs, noutput);
}
2285
2286 /**
2287 * Forward all outputs from the vertex shader to the TES. This is only used
2288 * for the fixed function TCS.
2289 */
static void si_copy_tcs_inputs(struct si_shader_context *ctx)
{
	LLVMValueRef invocation_id, buffer, buffer_offset;
	LLVMValueRef lds_vertex_stride, lds_base;
	uint64_t inputs;

	invocation_id = si_unpack_param(ctx, ctx->args.tcs_rel_ids, 8, 5);
	buffer = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
	buffer_offset = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);

	/* LDS address of this invocation's vertex within the current patch. */
	lds_vertex_stride = get_tcs_in_vertex_dw_stride(ctx);
	lds_base = get_tcs_in_current_patch_offset(ctx);
	lds_base = ac_build_imad(&ctx->ac, invocation_id, lds_vertex_stride,
				 lds_base);

	/* Copy each input slot selected by the shader key from LDS to the
	 * offchip buffer that TES reads. */
	inputs = ctx->shader->key.mono.u.ff_tcs_inputs_to_copy;
	while (inputs) {
		unsigned i = u_bit_scan64(&inputs);

		/* Each slot is 4 dwords. */
		LLVMValueRef lds_ptr = LLVMBuildAdd(ctx->ac.builder, lds_base,
		                                    LLVMConstInt(ctx->i32, 4 * i, 0),
		                                     "");

		LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
		                                              get_rel_patch_id(ctx),
		                                                      invocation_id,
		                                                      LLVMConstInt(ctx->i32, i, 0));

		LLVMValueRef value = lshs_lds_load(ctx, ctx->ac.i32, ~0, lds_ptr);

		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buffer_addr,
					    buffer_offset, 0, ac_glc);
	}
}
2324
/* Store the tessellation factors for the current patch: into the tess-factor
 * ring for the fixed-function tessellator and, if TES reads them, also into
 * the offchip buffer. Only invocation 0 performs the stores. */
static void si_write_tess_factors(struct si_shader_context *ctx,
				  LLVMValueRef rel_patch_id,
				  LLVMValueRef invocation_id,
				  LLVMValueRef tcs_out_current_patch_data_offset,
				  LLVMValueRef invoc0_tf_outer[4],
				  LLVMValueRef invoc0_tf_inner[2])
{
	struct si_shader *shader = ctx->shader;
	unsigned tess_inner_index, tess_outer_index;
	LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
	LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
	unsigned stride, outer_comps, inner_comps, i, offset;

	/* Add a barrier before loading tess factors from LDS. */
	if (!shader->key.part.tcs.epilog.invoc0_tess_factors_are_def)
		si_llvm_emit_barrier(ctx);

	/* Do this only for invocation 0, because the tess levels are per-patch,
	 * not per-vertex.
	 *
	 * This can't jump, because invocation 0 executes this. It should
	 * at least mask out the loads and stores for other invocations.
	 */
	ac_build_ifcc(&ctx->ac,
		      LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				    invocation_id, ctx->i32_0, ""), 6503);

	/* Determine the layout of one tess factor element in the buffer. */
	switch (shader->key.part.tcs.epilog.prim_mode) {
	case PIPE_PRIM_LINES:
		stride = 2; /* 2 dwords, 1 vec2 store */
		outer_comps = 2;
		inner_comps = 0;
		break;
	case PIPE_PRIM_TRIANGLES:
		stride = 4; /* 4 dwords, 1 vec4 store */
		outer_comps = 3;
		inner_comps = 1;
		break;
	case PIPE_PRIM_QUADS:
		stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
		outer_comps = 4;
		inner_comps = 2;
		break;
	default:
		assert(0);
		return;
	}

	/* Unused components stay undef. */
	for (i = 0; i < 4; i++) {
		inner[i] = LLVMGetUndef(ctx->i32);
		outer[i] = LLVMGetUndef(ctx->i32);
	}

	if (shader->key.part.tcs.epilog.invoc0_tess_factors_are_def) {
		/* Tess factors are in VGPRs. */
		for (i = 0; i < outer_comps; i++)
			outer[i] = out[i] = invoc0_tf_outer[i];
		for (i = 0; i < inner_comps; i++)
			inner[i] = out[outer_comps+i] = invoc0_tf_inner[i];
	} else {
		/* Load tess_inner and tess_outer from LDS.
		 * Any invocation can write them, so we can't get them from a temporary.
		 */
		tess_inner_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0);
		tess_outer_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0);

		lds_base = tcs_out_current_patch_data_offset;
		lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->i32,
						      tess_inner_index * 4, 0), "");
		lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
					 LLVMConstInt(ctx->i32,
						      tess_outer_index * 4, 0), "");

		for (i = 0; i < outer_comps; i++) {
			outer[i] = out[i] =
				lshs_lds_load(ctx, ctx->ac.i32, i, lds_outer);
		}
		for (i = 0; i < inner_comps; i++) {
			inner[i] = out[outer_comps+i] =
				lshs_lds_load(ctx, ctx->ac.i32, i, lds_inner);
		}
	}

	if (shader->key.part.tcs.epilog.prim_mode == PIPE_PRIM_LINES) {
		/* For isolines, the hardware expects tess factors in the
		 * reverse order from what GLSL / TGSI specify.
		 */
		LLVMValueRef tmp = out[0];
		out[0] = out[1];
		out[1] = tmp;
	}

	/* Convert the outputs to vectors for stores. */
	vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
	vec1 = NULL;

	if (stride > 4)
		vec1 = ac_build_gather_values(&ctx->ac, out+4, stride - 4);

	/* Get the buffer. */
	buffer = get_tess_ring_descriptor(ctx, TCS_FACTOR_RING);

	/* Get the offset. */
	tf_base = ac_get_arg(&ctx->ac,
			     ctx->tcs_factor_offset);
	byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
				  LLVMConstInt(ctx->i32, 4 * stride, 0), "");

	ac_build_ifcc(&ctx->ac,
		      LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				    rel_patch_id, ctx->i32_0, ""), 6504);

	/* Store the dynamic HS control word. Only needed on GFX8 and older,
	 * and only written once per ring (by patch 0). */
	offset = 0;
	if (ctx->screen->info.chip_class <= GFX8) {
		ac_build_buffer_store_dword(&ctx->ac, buffer,
					    LLVMConstInt(ctx->i32, 0x80000000, 0),
					    1, ctx->i32_0, tf_base,
					    offset, ac_glc);
		offset += 4;
	}

	ac_build_endif(&ctx->ac, 6504);

	/* Store the tessellation factors. */
	ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
				    MIN2(stride, 4), byteoffset, tf_base,
				    offset, ac_glc);
	offset += 16;
	if (vec1)
		ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
					    stride - 4, byteoffset, tf_base,
					    offset, ac_glc);

	/* Store the tess factors into the offchip buffer if TES reads them. */
	if (shader->key.part.tcs.epilog.tes_reads_tess_factors) {
		LLVMValueRef buf, base, inner_vec, outer_vec, tf_outer_offset;
		LLVMValueRef tf_inner_offset;
		unsigned param_outer, param_inner;

		buf = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
		base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);

		param_outer = si_shader_io_get_unique_index_patch(
				      TGSI_SEMANTIC_TESSOUTER, 0);
		tf_outer_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_outer, 0));

		/* Pad vec3 to vec4 on chips without vec3 buffer-store support. */
		unsigned outer_vec_size =
			ac_has_vec3_support(ctx->screen->info.chip_class, false) ?
				outer_comps : util_next_power_of_two(outer_comps);
		outer_vec = ac_build_gather_values(&ctx->ac, outer, outer_vec_size);

		ac_build_buffer_store_dword(&ctx->ac, buf, outer_vec,
					    outer_comps, tf_outer_offset,
					    base, 0, ac_glc);
		if (inner_comps) {
			param_inner = si_shader_io_get_unique_index_patch(
					      TGSI_SEMANTIC_TESSINNER, 0);
			tf_inner_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
					LLVMConstInt(ctx->i32, param_inner, 0));

			inner_vec = inner_comps == 1 ? inner[0] :
				    ac_build_gather_values(&ctx->ac, inner, inner_comps);
			ac_build_buffer_store_dword(&ctx->ac, buf, inner_vec,
						    inner_comps, tf_inner_offset,
						    base, 0, ac_glc);
		}
	}

	ac_build_endif(&ctx->ac, 6503);
}
2499
2500 static LLVMValueRef
2501 si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
2502 struct ac_arg param, unsigned return_index)
2503 {
2504 return LLVMBuildInsertValue(ctx->ac.builder, ret,
2505 ac_get_arg(&ctx->ac, param),
2506 return_index, "");
2507 }
2508
2509 static LLVMValueRef
2510 si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
2511 struct ac_arg param, unsigned return_index)
2512 {
2513 LLVMBuilderRef builder = ctx->ac.builder;
2514 LLVMValueRef p = ac_get_arg(&ctx->ac, param);
2515
2516 return LLVMBuildInsertValue(builder, ret,
2517 ac_to_float(&ctx->ac, p),
2518 return_index, "");
2519 }
2520
2521 static LLVMValueRef
2522 si_insert_input_ptr(struct si_shader_context *ctx, LLVMValueRef ret,
2523 struct ac_arg param, unsigned return_index)
2524 {
2525 LLVMBuilderRef builder = ctx->ac.builder;
2526 LLVMValueRef ptr = ac_get_arg(&ctx->ac, param);
2527 ptr = LLVMBuildPtrToInt(builder, ptr, ctx->i32, "");
2528 return LLVMBuildInsertValue(builder, ret, ptr, return_index, "");
2529 }
2530
2531 /* This only writes the tessellation factor levels. */
/* This only writes the tessellation factor levels. */
static void si_llvm_emit_tcs_epilogue(struct ac_shader_abi *abi,
				      unsigned max_outputs,
				      LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;

	si_copy_tcs_inputs(ctx);

	rel_patch_id = get_rel_patch_id(ctx);
	invocation_id = si_unpack_param(ctx, ctx->args.tcs_rel_ids, 8, 5);
	tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);

	if (ctx->screen->info.chip_class >= GFX9) {
		/* GFX9 merged shaders: close the wrap-if and merge the values
		 * computed inside it with undef/skip values for the threads
		 * that never entered the TCS part, using explicit phis. */
		LLVMBasicBlockRef blocks[2] = {
			LLVMGetInsertBlock(builder),
			ctx->merged_wrap_if_entry_block
		};
		LLVMValueRef values[2];

		ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);

		values[0] = rel_patch_id;
		values[1] = LLVMGetUndef(ctx->i32);
		rel_patch_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);

		values[0] = tf_lds_offset;
		values[1] = LLVMGetUndef(ctx->i32);
		tf_lds_offset = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);

		values[0] = invocation_id;
		values[1] = ctx->i32_1; /* cause the epilog to skip threads */
		invocation_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
	}

	/* Return epilog parameters from this function. */
	LLVMValueRef ret = ctx->return_value;
	unsigned vgpr;

	if (ctx->screen->info.chip_class >= GFX9) {
		ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout,
					  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout,
					  8 + GFX9_SGPR_TCS_OUT_LAYOUT);
		/* Tess offchip and tess factor offsets are at the beginning. */
		ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 2);
		ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset, 4);
		vgpr = 8 + GFX9_SGPR_TCS_OUT_LAYOUT + 1;
	} else {
		ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout,
					  GFX6_SGPR_TCS_OFFCHIP_LAYOUT);
		ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout,
					  GFX6_SGPR_TCS_OUT_LAYOUT);
		/* Tess offchip and tess factor offsets are after user SGPRs. */
		ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset,
					  GFX6_TCS_NUM_USER_SGPR);
		ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset,
					  GFX6_TCS_NUM_USER_SGPR + 1);
		vgpr = GFX6_TCS_NUM_USER_SGPR + 2;
	}

	/* VGPRs */
	rel_patch_id = ac_to_float(&ctx->ac, rel_patch_id);
	invocation_id = ac_to_float(&ctx->ac, invocation_id);
	tf_lds_offset = ac_to_float(&ctx->ac, tf_lds_offset);

	/* Leave a hole corresponding to the two input VGPRs. This ensures that
	 * the invocation_id output does not alias the tcs_rel_ids input,
	 * which saves a V_MOV on gfx9.
	 */
	vgpr += 2;

	ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
	ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");

	if (ctx->shader->selector->tcs_info.tessfactors_are_def_in_all_invocs) {
		vgpr++; /* skip the tess factor LDS offset */
		/* Pass the tess factor values themselves to the epilog. */
		for (unsigned i = 0; i < 6; i++) {
			LLVMValueRef value =
				LLVMBuildLoad(builder, ctx->invoc0_tess_factors[i], "");
			value = ac_to_float(&ctx->ac, value);
			ret = LLVMBuildInsertValue(builder, ret, value, vgpr++, "");
		}
	} else {
		ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
	}
	ctx->return_value = ret;
}
2621
2622 /* Pass TCS inputs from LS to TCS on GFX9. */
/* Pass TCS inputs from LS to TCS on GFX9. */
static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	/* SGPRs 0..5: shared merged-shader arguments. */
	ret = si_insert_input_ptr(ctx, ret, ctx->other_const_and_shader_buffers, 0);
	ret = si_insert_input_ptr(ctx, ret, ctx->other_samplers_and_images, 1);
	ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->merged_wave_info, 3);
	ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset, 4);
	ret = si_insert_input_ret(ctx, ret, ctx->merged_scratch_offset, 5);

	/* User SGPRs start at slot 8. */
	ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers,
				  8 + SI_SGPR_RW_BUFFERS);
	ret = si_insert_input_ptr(ctx, ret,
				  ctx->bindless_samplers_and_images,
				  8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);

	ret = si_insert_input_ret(ctx, ret, ctx->vs_state_bits,
				  8 + SI_SGPR_VS_STATE_BITS);

	ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout,
				  8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
	ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_offsets,
				  8 + GFX9_SGPR_TCS_OUT_OFFSETS);
	ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout,
				  8 + GFX9_SGPR_TCS_OUT_LAYOUT);

	/* VGPRs follow all user SGPRs; returned as floats. */
	unsigned vgpr = 8 + GFX9_TCS_NUM_USER_SGPR;
	ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
				   ac_to_float(&ctx->ac,
					       ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id)),
				   vgpr++, "");
	ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
				   ac_to_float(&ctx->ac,
					       ac_get_arg(&ctx->ac, ctx->args.tcs_rel_ids)),
				   vgpr++, "");
	ctx->return_value = ret;
}
2661
2662 /* Pass GS inputs from ES to GS on GFX9. */
/* Pass GS inputs from ES to GS on GFX9. */
static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
{
	LLVMValueRef ret = ctx->return_value;

	ret = si_insert_input_ptr(ctx, ret, ctx->other_const_and_shader_buffers, 0);
	ret = si_insert_input_ptr(ctx, ret, ctx->other_samplers_and_images, 1);
	/* Slot 2 carries gs_tg_info for NGG, gs2vs_offset for legacy GS. */
	if (ctx->shader->key.as_ngg)
		ret = si_insert_input_ptr(ctx, ret, ctx->gs_tg_info, 2);
	else
		ret = si_insert_input_ret(ctx, ret, ctx->gs2vs_offset, 2);
	ret = si_insert_input_ret(ctx, ret, ctx->merged_wave_info, 3);
	/* Note: return slot 4 is not written here. */
	ret = si_insert_input_ret(ctx, ret, ctx->merged_scratch_offset, 5);

	/* User SGPRs start at slot 8. */
	ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers,
				  8 + SI_SGPR_RW_BUFFERS);
	ret = si_insert_input_ptr(ctx, ret,
				  ctx->bindless_samplers_and_images,
				  8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
	if (ctx->screen->use_ngg) {
		ret = si_insert_input_ptr(ctx, ret, ctx->vs_state_bits,
					  8 + SI_SGPR_VS_STATE_BITS);
	}

	/* VGPRs follow the user SGPRs; the count differs between the
	 * VS->GS and TES->GS merged variants. */
	unsigned vgpr;
	if (ctx->type == PIPE_SHADER_VERTEX)
		vgpr = 8 + GFX9_VSGS_NUM_USER_SGPR;
	else
		vgpr = 8 + GFX9_TESGS_NUM_USER_SGPR;

	ret = si_insert_input_ret_float(ctx, ret, ctx->gs_vtx01_offset, vgpr++);
	ret = si_insert_input_ret_float(ctx, ret, ctx->gs_vtx23_offset, vgpr++);
	ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_prim_id, vgpr++);
	ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_invocation_id, vgpr++);
	ret = si_insert_input_ret_float(ctx, ret, ctx->gs_vtx45_offset, vgpr++);
	ctx->return_value = ret;
}
2699
/* Epilogue for VS-as-LS: store all outputs to LDS for the TCS to read. */
static void si_llvm_emit_ls_epilogue(struct ac_shader_abi *abi,
				     unsigned max_outputs,
				     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	unsigned i, chan;
	LLVMValueRef vertex_id = ac_get_arg(&ctx->ac, ctx->rel_auto_id);
	LLVMValueRef vertex_dw_stride = get_tcs_in_vertex_dw_stride(ctx);
	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
						 vertex_dw_stride, "");

	/* Write outputs to LDS. The next shader (TCS aka HS) will read
	 * its inputs from it. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned name = info->output_semantic_name[i];
		unsigned index = info->output_semantic_index[i];

		/* The ARB_shader_viewport_layer_array spec contains the
		 * following issue:
		 *
		 *    2) What happens if gl_ViewportIndex or gl_Layer is
		 *    written in the vertex shader and a geometry shader is
		 *    present?
		 *
		 *    RESOLVED: The value written by the last vertex processing
		 *    stage is used. If the last vertex processing stage
		 *    (vertex, tessellation evaluation or geometry) does not
		 *    statically assign to gl_ViewportIndex or gl_Layer, index
		 *    or layer zero is assumed.
		 *
		 * So writes to those outputs in VS-as-LS are simply ignored.
		 */
		if (name == TGSI_SEMANTIC_LAYER ||
		    name == TGSI_SEMANTIC_VIEWPORT_INDEX)
			continue;

		/* Each output slot occupies 4 dwords in LDS. */
		int param = si_shader_io_get_unique_index(name, index, false);
		LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
					LLVMConstInt(ctx->i32, param * 4, 0), "");

		for (chan = 0; chan < 4; chan++) {
			/* Only store components the shader actually writes. */
			if (!(info->output_usagemask[i] & (1 << chan)))
				continue;

			lshs_lds_store(ctx, chan, dw_addr,
				       LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], ""));
		}
	}

	/* GFX9 merges LS+HS: forward the inputs through the return value. */
	if (ctx->screen->info.chip_class >= GFX9)
		si_set_ls_return_value_for_tcs(ctx);
}
2754
/* Epilogue for VS/TES-as-ES: store all outputs to the ESGS ring
 * (LDS on GFX9+, a memory ring on older chips) for the GS to read. */
static void si_llvm_emit_es_epilogue(struct ac_shader_abi *abi,
				     unsigned max_outputs,
				     LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *es = ctx->shader;
	struct tgsi_shader_info *info = &es->selector->info;
	LLVMValueRef lds_base = NULL;
	unsigned chan;
	int i;

	if (ctx->screen->info.chip_class >= GFX9 && info->num_outputs) {
		/* Compute this vertex's base dword address in the LDS ESGS
		 * ring from its position within the threadgroup. */
		unsigned itemsize_dw = es->selector->esgs_itemsize / 4;
		LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
		LLVMValueRef wave_idx = si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
		vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
					 LLVMBuildMul(ctx->ac.builder, wave_idx,
						      LLVMConstInt(ctx->i32, ctx->ac.wave_size, false), ""), "");
		lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
					LLVMConstInt(ctx->i32, itemsize_dw, 0), "");
	}

	for (i = 0; i < info->num_outputs; i++) {
		int param;

		/* These are consumed by the last vertex processing stage only
		 * and are not forwarded to the GS (see the spec note in the
		 * LS epilogue). */
		if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
		    info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
			continue;

		param = si_shader_io_get_unique_index(info->output_semantic_name[i],
						      info->output_semantic_index[i], false);

		for (chan = 0; chan < 4; chan++) {
			/* Only store components the shader actually writes. */
			if (!(info->output_usagemask[i] & (1 << chan)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
			out_val = ac_to_integer(&ctx->ac, out_val);

			/* GFX9 has the ESGS ring in LDS. */
			if (ctx->screen->info.chip_class >= GFX9) {
				LLVMValueRef idx = LLVMConstInt(ctx->i32, param * 4 + chan, false);
				idx = LLVMBuildAdd(ctx->ac.builder, lds_base, idx, "");
				ac_build_indexed_store(&ctx->ac, ctx->esgs_ring, idx, out_val);
				continue;
			}

			/* Pre-GFX9: buffer store to the memory ESGS ring. */
			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->esgs_ring,
						    out_val, 1, NULL,
						    ac_get_arg(&ctx->ac, ctx->es2gs_offset),
						    (4 * param + chan) * 4,
						    ac_glc | ac_slc | ac_swizzled);
		}
	}

	/* GFX9 merges ES+GS: forward the inputs through the return value. */
	if (ctx->screen->info.chip_class >= GFX9)
		si_set_es_return_value_for_gs(ctx);
}
2814
2815 static LLVMValueRef si_get_gs_wave_id(struct si_shader_context *ctx)
2816 {
2817 if (ctx->screen->info.chip_class >= GFX9)
2818 return si_unpack_param(ctx, ctx->merged_wave_info, 16, 8);
2819 else
2820 return ac_get_arg(&ctx->ac, ctx->gs_wave_id);
2821 }
2822
/* Finish the GS: either hand off to the NGG epilogue, or send the legacy
 * GS_DONE message to the hardware. */
static void emit_gs_epilogue(struct si_shader_context *ctx)
{
	if (ctx->shader->key.as_ngg) {
		gfx10_ngg_gs_emit_epilogue(ctx);
		return;
	}

	/* GFX10 requires a release fence before the GS_DONE message. */
	if (ctx->screen->info.chip_class >= GFX10)
		LLVMBuildFence(ctx->ac.builder, LLVMAtomicOrderingRelease, false, "");

	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
			 si_get_gs_wave_id(ctx));

	/* Close the merged-shader wrap-if opened for GFX9+ ES+GS. */
	if (ctx->screen->info.chip_class >= GFX9)
		ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
}
2839
2840 static void si_llvm_emit_gs_epilogue(struct ac_shader_abi *abi,
2841 unsigned max_outputs,
2842 LLVMValueRef *addrs)
2843 {
2844 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2845 struct tgsi_shader_info UNUSED *info = &ctx->shader->selector->info;
2846
2847 assert(info->num_outputs <= max_outputs);
2848
2849 emit_gs_epilogue(ctx);
2850 }
2851
2852 static void si_llvm_emit_vs_epilogue(struct ac_shader_abi *abi,
2853 unsigned max_outputs,
2854 LLVMValueRef *addrs)
2855 {
2856 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2857 struct tgsi_shader_info *info = &ctx->shader->selector->info;
2858 struct si_shader_output_values *outputs = NULL;
2859 int i,j;
2860
2861 assert(!ctx->shader->is_gs_copy_shader);
2862 assert(info->num_outputs <= max_outputs);
2863
2864 outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));
2865
2866 for (i = 0; i < info->num_outputs; i++) {
2867 outputs[i].semantic_name = info->output_semantic_name[i];
2868 outputs[i].semantic_index = info->output_semantic_index[i];
2869
2870 for (j = 0; j < 4; j++) {
2871 outputs[i].values[j] =
2872 LLVMBuildLoad(ctx->ac.builder,
2873 addrs[4 * i + j],
2874 "");
2875 outputs[i].vertex_stream[j] =
2876 (info->output_streams[i] >> (2 * j)) & 3;
2877 }
2878 }
2879
2880 if (!ctx->screen->use_ngg_streamout &&
2881 ctx->shader->selector->so.num_outputs)
2882 si_llvm_emit_streamout(ctx, outputs, i, 0);
2883
2884 /* Export PrimitiveID. */
2885 if (ctx->shader->key.mono.u.vs_export_prim_id) {
2886 outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
2887 outputs[i].semantic_index = 0;
2888 outputs[i].values[0] = ac_to_float(&ctx->ac, si_get_primitive_id(ctx, 0));
2889 for (j = 1; j < 4; j++)
2890 outputs[i].values[j] = LLVMConstReal(ctx->f32, 0);
2891
2892 memset(outputs[i].vertex_stream, 0,
2893 sizeof(outputs[i].vertex_stream));
2894 i++;
2895 }
2896
2897 si_llvm_export_vs(ctx, outputs, i);
2898 FREE(outputs);
2899 }
2900
2901 static void si_llvm_emit_prim_discard_cs_epilogue(struct ac_shader_abi *abi,
2902 unsigned max_outputs,
2903 LLVMValueRef *addrs)
2904 {
2905 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2906 struct tgsi_shader_info *info = &ctx->shader->selector->info;
2907 LLVMValueRef pos[4] = {};
2908
2909 assert(info->num_outputs <= max_outputs);
2910
2911 for (unsigned i = 0; i < info->num_outputs; i++) {
2912 if (info->output_semantic_name[i] != TGSI_SEMANTIC_POSITION)
2913 continue;
2914
2915 for (unsigned chan = 0; chan < 4; chan++)
2916 pos[chan] = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
2917 break;
2918 }
2919 assert(pos[0] != NULL);
2920
2921 /* Return the position output. */
2922 LLVMValueRef ret = ctx->return_value;
2923 for (unsigned chan = 0; chan < 4; chan++)
2924 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, pos[chan], chan, "");
2925 ctx->return_value = ret;
2926 }
2927
/* Accumulates pixel shader export instructions (color MRTs and Z/stencil/
 * samplemask) so they can be emitted together by si_emit_ps_exports. */
struct si_ps_exports {
	unsigned num;                   /* number of valid entries in args[] */
	struct ac_export_args args[10]; /* queued export arguments */
};
2932
2933 static void si_export_mrt_z(struct si_shader_context *ctx,
2934 LLVMValueRef depth, LLVMValueRef stencil,
2935 LLVMValueRef samplemask, struct si_ps_exports *exp)
2936 {
2937 struct ac_export_args args;
2938
2939 ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);
2940
2941 memcpy(&exp->args[exp->num++], &args, sizeof(args));
2942 }
2943
/* Queue the color export(s) for one PS color output.
 *
 * Applies the epilog key's fixups first (clamp, alpha-to-one, alpha test,
 * line/poly smoothing), then builds either a single MRT export or, for
 * FS_COLOR0_WRITES_ALL_CBUFS, a broadcast to all colorbuffers.
 * is_last marks the final export so the DONE bit can be set. */
static void si_export_mrt_color(struct si_shader_context *ctx,
				LLVMValueRef *color, unsigned index,
				unsigned samplemask_param,
				bool is_last, struct si_ps_exports *exp)
{
	int i;

	/* Clamp color */
	if (ctx->shader->key.part.ps.epilog.clamp_color)
		for (i = 0; i < 4; i++)
			color[i] = ac_build_clamp(&ctx->ac, color[i]);

	/* Alpha to one */
	if (ctx->shader->key.part.ps.epilog.alpha_to_one)
		color[3] = ctx->ac.f32_1;

	/* Alpha test: only performed against color output 0. */
	if (index == 0 &&
	    ctx->shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS)
		si_alpha_test(ctx, color[3]);

	/* Line & polygon smoothing */
	if (ctx->shader->key.part.ps.epilog.poly_line_smoothing)
		color[3] = si_scale_alpha_by_sample_mask(ctx, color[3],
							 samplemask_param);

	/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
	if (ctx->shader->key.part.ps.epilog.last_cbuf > 0) {
		struct ac_export_args args[8];
		int c, last = -1;

		/* Get the export arguments, also find out what the last one is. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			si_llvm_init_export_args(ctx, color,
						 V_008DFC_SQ_EXP_MRT + c, &args[c]);
			if (args[c].enabled_channels)
				last = c;
		}

		/* Emit all exports. */
		for (c = 0; c <= ctx->shader->key.part.ps.epilog.last_cbuf; c++) {
			if (is_last && last == c) {
				args[c].valid_mask = 1; /* whether the EXEC mask is valid */
				args[c].done = 1; /* DONE bit */
			} else if (!args[c].enabled_channels)
				continue; /* unnecessary NULL export */

			memcpy(&exp->args[exp->num++], &args[c], sizeof(args[c]));
		}
	} else {
		struct ac_export_args args;

		/* Export */
		si_llvm_init_export_args(ctx, color, V_008DFC_SQ_EXP_MRT + index,
					 &args);
		if (is_last) {
			args.valid_mask = 1; /* whether the EXEC mask is valid */
			args.done = 1; /* DONE bit */
		} else if (!args.enabled_channels)
			return; /* unnecessary NULL export */

		memcpy(&exp->args[exp->num++], &args, sizeof(args));
	}
}
3008
3009 static void si_emit_ps_exports(struct si_shader_context *ctx,
3010 struct si_ps_exports *exp)
3011 {
3012 for (unsigned i = 0; i < exp->num; i++)
3013 ac_build_export(&ctx->ac, &exp->args[i]);
3014 }
3015
/**
 * Return PS outputs in this order:
 *
 * v[0:3] = color0.xyzw
 * v[4:7] = color1.xyzw
 * ...
 * vN+0 = Depth
 * vN+1 = Stencil
 * vN+2 = SampleMask
 * vN+3 = SampleMaskIn (used for OpenGL smoothing)
 *
 * The alpha-ref SGPR is returned via its original location.
 */
static void si_llvm_return_fs_outputs(struct ac_shader_abi *abi,
				      unsigned max_outputs,
				      LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *shader = ctx->shader;
	struct tgsi_shader_info *info = &shader->selector->info;
	LLVMBuilderRef builder = ctx->ac.builder;
	unsigned i, j, first_vgpr, vgpr;

	LLVMValueRef color[8][4] = {};
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	LLVMValueRef ret;

	/* Apply a deferred discard before returning the outputs. */
	if (ctx->postponed_kill)
		ac_build_kill_if_false(&ctx->ac, LLVMBuildLoad(builder, ctx->postponed_kill, ""));

	/* Read the output values. */
	for (i = 0; i < info->num_outputs; i++) {
		unsigned semantic_name = info->output_semantic_name[i];
		unsigned semantic_index = info->output_semantic_index[i];

		switch (semantic_name) {
		case TGSI_SEMANTIC_COLOR:
			assert(semantic_index < 8);
			for (j = 0; j < 4; j++) {
				LLVMValueRef ptr = addrs[4 * i + j];
				LLVMValueRef result = LLVMBuildLoad(builder, ptr, "");
				color[semantic_index][j] = result;
			}
			break;
		case TGSI_SEMANTIC_POSITION:
			/* Only the .z channel (fragment depth) is returned. */
			depth = LLVMBuildLoad(builder,
					      addrs[4 * i + 2], "");
			break;
		case TGSI_SEMANTIC_STENCIL:
			/* Stencil export lives in the .y channel. */
			stencil = LLVMBuildLoad(builder,
						addrs[4 * i + 1], "");
			break;
		case TGSI_SEMANTIC_SAMPLEMASK:
			samplemask = LLVMBuildLoad(builder,
						   addrs[4 * i + 0], "");
			break;
		default:
			fprintf(stderr, "Warning: GFX6 unhandled fs output type:%d\n",
				semantic_name);
		}
	}

	/* Fill the return structure. */
	ret = ctx->return_value;

	/* Set SGPRs. */
	ret = LLVMBuildInsertValue(builder, ret,
				   ac_to_integer(&ctx->ac,
						 LLVMGetParam(ctx->main_fn,
							      SI_PARAM_ALPHA_REF)),
				   SI_SGPR_ALPHA_REF, "");

	/* Set VGPRs: colors first (only written semantic indices), then the
	 * optional depth/stencil/samplemask values. */
	first_vgpr = vgpr = SI_SGPR_ALPHA_REF + 1;
	for (i = 0; i < ARRAY_SIZE(color); i++) {
		if (!color[i][0])
			continue;

		for (j = 0; j < 4; j++)
			ret = LLVMBuildInsertValue(builder, ret, color[i][j], vgpr++, "");
	}
	if (depth)
		ret = LLVMBuildInsertValue(builder, ret, depth, vgpr++, "");
	if (stencil)
		ret = LLVMBuildInsertValue(builder, ret, stencil, vgpr++, "");
	if (samplemask)
		ret = LLVMBuildInsertValue(builder, ret, samplemask, vgpr++, "");

	/* Add the input sample mask for smoothing at the end. It goes no
	 * earlier than PS_EPILOG_SAMPLEMASK_MIN_LOC so the epilog can find
	 * it at a fixed minimum location. */
	if (vgpr < first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC)
		vgpr = first_vgpr + PS_EPILOG_SAMPLEMASK_MIN_LOC;
	ret = LLVMBuildInsertValue(builder, ret,
				   LLVMGetParam(ctx->main_fn,
						SI_PARAM_SAMPLE_COVERAGE), vgpr++, "");

	ctx->return_value = ret;
}
3113
/* Emit one vertex from the geometry shader (legacy path writes the vertex
 * attributes to the GSVS ring; NGG has its own implementation). */
static void si_llvm_emit_vertex(struct ac_shader_abi *abi,
				unsigned stream,
				LLVMValueRef *addrs)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);

	if (ctx->shader->key.as_ngg) {
		gfx10_ngg_gs_emit_vertex(ctx, stream, addrs);
		return;
	}

	struct tgsi_shader_info *info = &ctx->shader->selector->info;
	struct si_shader *shader = ctx->shader;
	LLVMValueRef soffset = ac_get_arg(&ctx->ac, ctx->gs2vs_offset);
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	unsigned chan, offset;
	int i;

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, skip the write: excessive vertex emissions are not
	 * supposed to have any effect.
	 *
	 * If the shader has no writes to memory, kill it instead. This skips
	 * further memory loads and may allow LLVM to skip to the end
	 * altogether.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->i32,
					      shader->selector->gs_max_out_vertices, 0), "");

	bool use_kill = !info->writes_memory;
	if (use_kill) {
		ac_build_kill_if_false(&ctx->ac, can_emit);
	} else {
		ac_build_ifcc(&ctx->ac, can_emit, 6505);
	}

	offset = 0;
	for (i = 0; i < info->num_outputs; i++) {
		for (chan = 0; chan < 4; chan++) {
			/* Skip unused channels and channels belonging to
			 * other vertex streams. */
			if (!(info->output_usagemask[i] & (1 << chan)) ||
			    ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
			/* Components are laid out gs_max_out_vertices apart,
			 * indexed by this thread's current vertex count. */
			LLVMValueRef voffset =
				LLVMConstInt(ctx->i32, offset *
					     shader->selector->gs_max_out_vertices, 0);
			offset++;

			voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
			voffset = LLVMBuildMul(ctx->ac.builder, voffset,
					       LLVMConstInt(ctx->i32, 4, 0), "");

			out_val = ac_to_integer(&ctx->ac, out_val);

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset, soffset, 0,
						    ac_glc | ac_slc | ac_swizzled);
		}
	}

	/* Bump and store the per-stream vertex counter. */
	gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex, ctx->i32_1, "");
	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	/* Signal vertex emission if vertex data was written. */
	if (offset) {
		ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
				 si_get_gs_wave_id(ctx));
	}

	if (!use_kill)
		ac_build_endif(&ctx->ac, 6505);
}
3197
3198 /* Cut one primitive from the geometry shader */
3199 static void si_llvm_emit_primitive(struct ac_shader_abi *abi,
3200 unsigned stream)
3201 {
3202 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
3203
3204 if (ctx->shader->key.as_ngg) {
3205 LLVMBuildStore(ctx->ac.builder, ctx->ac.i32_0, ctx->gs_curprim_verts[stream]);
3206 return;
3207 }
3208
3209 /* Signal primitive cut */
3210 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
3211 si_get_gs_wave_id(ctx));
3212 }
3213
3214 static void si_llvm_emit_barrier(struct si_shader_context *ctx)
3215 {
3216 /* GFX6 only (thanks to a hw bug workaround):
3217 * The real barrier instruction isn’t needed, because an entire patch
3218 * always fits into a single wave.
3219 */
3220 if (ctx->screen->info.chip_class == GFX6 &&
3221 ctx->type == PIPE_SHADER_TESS_CTRL) {
3222 ac_build_waitcnt(&ctx->ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
3223 return;
3224 }
3225
3226 ac_build_s_barrier(&ctx->ac);
3227 }
3228
/* Create the main LLVM function for a shader part and set common function
 * attributes: the high 32 bits of 32-bit addressing, no-signed-zeros FP
 * math, and the workgroup size (which LLVM uses when reasoning about
 * barriers). */
void si_create_function(struct si_shader_context *ctx,
			const char *name,
			LLVMTypeRef *returns, unsigned num_returns,
			unsigned max_workgroup_size)
{
	si_llvm_create_func(ctx, name, returns, num_returns);
	/* Start from undef; individual members are filled via InsertValue. */
	ctx->return_value = LLVMGetUndef(ctx->return_type);

	if (ctx->screen->info.address32_hi) {
		ac_llvm_add_target_dep_function_attr(ctx->main_fn,
						     "amdgpu-32bit-address-high-bits",
						     ctx->screen->info.address32_hi);
	}

	LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
					   "no-signed-zeros-fp-math",
					   "true");

	ac_llvm_set_workgroup_size(ctx->main_fn, max_workgroup_size);
}
3249
/* Declare the SGPRs carrying streamout (transform feedback) state.
 * The declaration order defines the hw input layout, so it must not change.
 *
 * With NGG streamout the legacy SGPRs are unused; TES still reserves one
 * slot (NOTE(review): presumably to keep its SGPR layout fixed — confirm). */
static void declare_streamout_params(struct si_shader_context *ctx,
				     struct pipe_stream_output_info *so)
{
	if (ctx->screen->use_ngg_streamout) {
		if (ctx->type == PIPE_SHADER_TESS_EVAL)
			ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
		return;
	}

	/* Streamout SGPRs. */
	if (so->num_outputs) {
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_config);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_write_index);
	} else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
	}

	/* A streamout buffer offset is loaded if the stride is non-zero. */
	for (int i = 0; i < 4; i++) {
		if (!so->stride[i])
			continue;

		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_offset[i]);
	}
}
3275
3276 static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
3277 {
3278 switch (shader->selector->type) {
3279 case PIPE_SHADER_VERTEX:
3280 case PIPE_SHADER_TESS_EVAL:
3281 return shader->key.as_ngg ? 128 : 0;
3282
3283 case PIPE_SHADER_TESS_CTRL:
3284 /* Return this so that LLVM doesn't remove s_barrier
3285 * instructions on chips where we use s_barrier. */
3286 return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 0;
3287
3288 case PIPE_SHADER_GEOMETRY:
3289 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 0;
3290
3291 case PIPE_SHADER_COMPUTE:
3292 break; /* see below */
3293
3294 default:
3295 return 0;
3296 }
3297
3298 const unsigned *properties = shader->selector->info.properties;
3299 unsigned max_work_group_size =
3300 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
3301 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
3302 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
3303
3304 if (!max_work_group_size) {
3305 /* This is a variable group size compute shader,
3306 * compile it for the maximum possible group size.
3307 */
3308 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
3309 }
3310 return max_work_group_size;
3311 }
3312
3313 static void declare_const_and_shader_buffers(struct si_shader_context *ctx,
3314 bool assign_params)
3315 {
3316 enum ac_arg_type const_shader_buf_type;
3317
3318 if (ctx->shader->selector->info.const_buffers_declared == 1 &&
3319 ctx->shader->selector->info.shader_buffers_declared == 0)
3320 const_shader_buf_type = AC_ARG_CONST_FLOAT_PTR;
3321 else
3322 const_shader_buf_type = AC_ARG_CONST_DESC_PTR;
3323
3324 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, const_shader_buf_type,
3325 assign_params ? &ctx->const_and_shader_buffers :
3326 &ctx->other_const_and_shader_buffers);
3327 }
3328
3329 static void declare_samplers_and_images(struct si_shader_context *ctx,
3330 bool assign_params)
3331 {
3332 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
3333 assign_params ? &ctx->samplers_and_images :
3334 &ctx->other_samplers_and_images);
3335 }
3336
/* Declare the per-stage descriptor pointers (const/shader buffers and
 * samplers/images). When assign_params is false the args are stored in
 * the "other_*" fields instead — NOTE(review): used by the merged-shader
 * cases in create_function for the non-active stage; confirm. */
static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
					    bool assign_params)
{
	declare_const_and_shader_buffers(ctx, assign_params);
	declare_samplers_and_images(ctx, assign_params);
}
3343
/* Declare the descriptor pointers common to all stages: the internal RW
 * buffer descriptors and the bindless sampler/image descriptors. */
static void declare_global_desc_pointers(struct si_shader_context *ctx)
{
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
		   &ctx->rw_buffers);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
		   &ctx->bindless_samplers_and_images);
}
3351
/* Declare the VS-specific input SGPRs: the shader state bits and, except
 * for the GS copy shader, the draw parameters (BaseVertex, StartInstance,
 * DrawID). */
static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx)
{
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
	if (!ctx->shader->is_gs_copy_shader) {
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.base_vertex);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.start_instance);
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.draw_id);
	}
}
3361
/* Declare the VS input VGPRs. The ordering is hardware-defined and
 * differs between chips and between LS (VS merged with TCS) and plain VS.
 * The vertex load indices at the end are produced by the VS prolog, so
 * num_prolog_vgprs is bumped by the number of vertex inputs. */
static void declare_vs_input_vgprs(struct si_shader_context *ctx,
				   unsigned *num_prolog_vgprs)
{
	struct si_shader *shader = ctx->shader;

	ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.vertex_id);
	if (shader->key.as_ls) {
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->rel_auto_id);
		if (ctx->screen->info.chip_class >= GFX10) {
			ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
			ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
		} else {
			ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
			ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
		}
	} else if (ctx->screen->info.chip_class >= GFX10) {
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
			   &ctx->vs_prim_id); /* user vgpr or PrimID (legacy) */
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
	} else {
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->vs_prim_id);
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
	}

	if (!shader->is_gs_copy_shader) {
		/* Vertex load indices. */
		if (shader->selector->info.num_inputs) {
			ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
				   &ctx->vertex_index0);
			for (unsigned i = 1; i < shader->selector->info.num_inputs; i++)
				ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
		}
		*num_prolog_vgprs += shader->selector->info.num_inputs;
	}
}
3399
/* Declare the SGPR inputs of the internal blit vertex shaders. The first
 * SGPRs carry the two rectangle corners and the depth; the rest depend on
 * whether the blit carries a constant color or texcoords. */
static void declare_vs_blit_inputs(struct si_shader_context *ctx,
				   unsigned vs_blit_property)
{
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
		   &ctx->vs_blit_inputs); /* i16 x1, y1 */
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* i16 x2, y2 (second corner; original comment duplicated x1, y1) */
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* depth */

	if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color0 */
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color1 */
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color2 */
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color3 */
	} else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x1 */
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y1 */
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x2 */
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y2 */
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.z */
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.w */
	}
}
3422
/* Declare TES input VGPRs: the tess coord (u, v), the relative patch id,
 * and the patch primitive id. */
static void declare_tes_input_vgprs(struct si_shader_context *ctx)
{
	ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_u);
	ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_v);
	ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->tes_rel_patch_id);
	ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tes_patch_id);
}
3430
enum {
	/* Convenient merged shader definitions. These extend the
	 * PIPE_SHADER_* namespace for GFX9+ merged hw stages. */
	SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES, /* LS or HS */
	SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY, /* ES or GS */
};
3436
/* Add an argument while asserting that it lands at the expected index,
 * keeping the argument list in sync with a fixed SI_PARAM_* layout. */
static void add_arg_checked(struct ac_shader_args *args,
			    enum ac_arg_regfile file,
			    unsigned registers, enum ac_arg_type type,
			    struct ac_arg *arg,
			    unsigned idx)
{
	assert(args->arg_count == idx);
	ac_add_arg(args, file, registers, type, arg);
}
3446
3447 static void create_function(struct si_shader_context *ctx)
3448 {
3449 struct si_shader *shader = ctx->shader;
3450 LLVMTypeRef returns[16+32*4];
3451 unsigned i, num_return_sgprs;
3452 unsigned num_returns = 0;
3453 unsigned num_prolog_vgprs = 0;
3454 unsigned type = ctx->type;
3455 unsigned vs_blit_property =
3456 shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
3457
3458 memset(&ctx->args, 0, sizeof(ctx->args));
3459
3460 /* Set MERGED shaders. */
3461 if (ctx->screen->info.chip_class >= GFX9) {
3462 if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
3463 type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
3464 else if (shader->key.as_es || shader->key.as_ngg || type == PIPE_SHADER_GEOMETRY)
3465 type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
3466 }
3467
3468 switch (type) {
3469 case PIPE_SHADER_VERTEX:
3470 declare_global_desc_pointers(ctx);
3471
3472 if (vs_blit_property) {
3473 declare_vs_blit_inputs(ctx, vs_blit_property);
3474
3475 /* VGPRs */
3476 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
3477 break;
3478 }
3479
3480 declare_per_stage_desc_pointers(ctx, true);
3481 declare_vs_specific_input_sgprs(ctx);
3482 if (!shader->is_gs_copy_shader) {
3483 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
3484 &ctx->vertex_buffers);
3485 }
3486
3487 if (shader->key.as_es) {
3488 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
3489 &ctx->es2gs_offset);
3490 } else if (shader->key.as_ls) {
3491 /* no extra parameters */
3492 } else {
3493 /* The locations of the other parameters are assigned dynamically. */
3494 declare_streamout_params(ctx, &shader->selector->so);
3495 }
3496
3497 /* VGPRs */
3498 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
3499
3500 /* Return values */
3501 if (shader->key.opt.vs_as_prim_discard_cs) {
3502 for (i = 0; i < 4; i++)
3503 returns[num_returns++] = ctx->f32; /* VGPRs */
3504 }
3505 break;
3506
3507 case PIPE_SHADER_TESS_CTRL: /* GFX6-GFX8 */
3508 declare_global_desc_pointers(ctx);
3509 declare_per_stage_desc_pointers(ctx, true);
3510 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
3511 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
3512 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
3513 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
3514 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3515 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
3516
3517 /* VGPRs */
3518 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
3519 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
3520
3521 /* param_tcs_offchip_offset and param_tcs_factor_offset are
3522 * placed after the user SGPRs.
3523 */
3524 for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
3525 returns[num_returns++] = ctx->i32; /* SGPRs */
3526 for (i = 0; i < 11; i++)
3527 returns[num_returns++] = ctx->f32; /* VGPRs */
3528 break;
3529
3530 case SI_SHADER_MERGED_VERTEX_TESSCTRL:
3531 /* Merged stages have 8 system SGPRs at the beginning. */
3532 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_HS */
3533 declare_per_stage_desc_pointers(ctx,
3534 ctx->type == PIPE_SHADER_TESS_CTRL);
3535 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3536 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
3537 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
3538 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
3539 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
3540 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
3541
3542 declare_global_desc_pointers(ctx);
3543 declare_per_stage_desc_pointers(ctx,
3544 ctx->type == PIPE_SHADER_VERTEX);
3545 declare_vs_specific_input_sgprs(ctx);
3546
3547 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
3548 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
3549 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
3550 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->vertex_buffers);
3551
3552 /* VGPRs (first TCS, then VS) */
3553 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
3554 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
3555
3556 if (ctx->type == PIPE_SHADER_VERTEX) {
3557 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
3558
3559 /* LS return values are inputs to the TCS main shader part. */
3560 for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
3561 returns[num_returns++] = ctx->i32; /* SGPRs */
3562 for (i = 0; i < 2; i++)
3563 returns[num_returns++] = ctx->f32; /* VGPRs */
3564 } else {
3565 /* TCS return values are inputs to the TCS epilog.
3566 *
3567 * param_tcs_offchip_offset, param_tcs_factor_offset,
3568 * param_tcs_offchip_layout, and param_rw_buffers
3569 * should be passed to the epilog.
3570 */
3571 for (i = 0; i <= 8 + GFX9_SGPR_TCS_OUT_LAYOUT; i++)
3572 returns[num_returns++] = ctx->i32; /* SGPRs */
3573 for (i = 0; i < 11; i++)
3574 returns[num_returns++] = ctx->f32; /* VGPRs */
3575 }
3576 break;
3577
3578 case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
3579 /* Merged stages have 8 system SGPRs at the beginning. */
3580 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_GS */
3581 declare_per_stage_desc_pointers(ctx,
3582 ctx->type == PIPE_SHADER_GEOMETRY);
3583
3584 if (ctx->shader->key.as_ngg)
3585 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_tg_info);
3586 else
3587 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
3588
3589 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
3590 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3591 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
3592 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */
3593 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
3594
3595 declare_global_desc_pointers(ctx);
3596 if (ctx->type != PIPE_SHADER_VERTEX || !vs_blit_property) {
3597 declare_per_stage_desc_pointers(ctx,
3598 (ctx->type == PIPE_SHADER_VERTEX ||
3599 ctx->type == PIPE_SHADER_TESS_EVAL));
3600 }
3601
3602 if (ctx->type == PIPE_SHADER_VERTEX) {
3603 if (vs_blit_property)
3604 declare_vs_blit_inputs(ctx, vs_blit_property);
3605 else
3606 declare_vs_specific_input_sgprs(ctx);
3607 } else {
3608 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
3609 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
3610 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
3611 /* Declare as many input SGPRs as the VS has. */
3612 }
3613
3614 if (ctx->type == PIPE_SHADER_VERTEX) {
3615 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
3616 &ctx->vertex_buffers);
3617 }
3618
3619 /* VGPRs (first GS, then VS/TES) */
3620 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx01_offset);
3621 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx23_offset);
3622 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
3623 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
3624 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx45_offset);
3625
3626 if (ctx->type == PIPE_SHADER_VERTEX) {
3627 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
3628 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
3629 declare_tes_input_vgprs(ctx);
3630 }
3631
3632 if (ctx->shader->key.as_es &&
3633 (ctx->type == PIPE_SHADER_VERTEX ||
3634 ctx->type == PIPE_SHADER_TESS_EVAL)) {
3635 unsigned num_user_sgprs;
3636
3637 if (ctx->type == PIPE_SHADER_VERTEX)
3638 num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR;
3639 else
3640 num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
3641
3642 /* ES return values are inputs to GS. */
3643 for (i = 0; i < 8 + num_user_sgprs; i++)
3644 returns[num_returns++] = ctx->i32; /* SGPRs */
3645 for (i = 0; i < 5; i++)
3646 returns[num_returns++] = ctx->f32; /* VGPRs */
3647 }
3648 break;
3649
3650 case PIPE_SHADER_TESS_EVAL:
3651 declare_global_desc_pointers(ctx);
3652 declare_per_stage_desc_pointers(ctx, true);
3653 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
3654 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
3655 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
3656
3657 if (shader->key.as_es) {
3658 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3659 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
3660 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->es2gs_offset);
3661 } else {
3662 declare_streamout_params(ctx, &shader->selector->so);
3663 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3664 }
3665
3666 /* VGPRs */
3667 declare_tes_input_vgprs(ctx);
3668 break;
3669
3670 case PIPE_SHADER_GEOMETRY:
3671 declare_global_desc_pointers(ctx);
3672 declare_per_stage_desc_pointers(ctx, true);
3673 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
3674 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_wave_id);
3675
3676 /* VGPRs */
3677 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[0]);
3678 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[1]);
3679 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
3680 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[2]);
3681 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[3]);
3682 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[4]);
3683 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[5]);
3684 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
3685 break;
3686
3687 case PIPE_SHADER_FRAGMENT:
3688 declare_global_desc_pointers(ctx);
3689 declare_per_stage_desc_pointers(ctx, true);
3690 add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL,
3691 SI_PARAM_ALPHA_REF);
3692 add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
3693 &ctx->args.prim_mask, SI_PARAM_PRIM_MASK);
3694
3695 add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_sample,
3696 SI_PARAM_PERSP_SAMPLE);
3697 add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3698 &ctx->args.persp_center, SI_PARAM_PERSP_CENTER);
3699 add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3700 &ctx->args.persp_centroid, SI_PARAM_PERSP_CENTROID);
3701 add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
3702 NULL, SI_PARAM_PERSP_PULL_MODEL);
3703 add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3704 &ctx->args.linear_sample, SI_PARAM_LINEAR_SAMPLE);
3705 add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3706 &ctx->args.linear_center, SI_PARAM_LINEAR_CENTER);
3707 add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3708 &ctx->args.linear_centroid, SI_PARAM_LINEAR_CENTROID);
3709 add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_FLOAT,
3710 NULL, SI_PARAM_LINE_STIPPLE_TEX);
3711 add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3712 &ctx->args.frag_pos[0], SI_PARAM_POS_X_FLOAT);
3713 add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3714 &ctx->args.frag_pos[1], SI_PARAM_POS_Y_FLOAT);
3715 add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3716 &ctx->args.frag_pos[2], SI_PARAM_POS_Z_FLOAT);
3717 add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3718 &ctx->args.frag_pos[3], SI_PARAM_POS_W_FLOAT);
3719 shader->info.face_vgpr_index = ctx->args.num_vgprs_used;
3720 add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
3721 &ctx->args.front_face, SI_PARAM_FRONT_FACE);
3722 shader->info.ancillary_vgpr_index = ctx->args.num_vgprs_used;
3723 add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
3724 &ctx->args.ancillary, SI_PARAM_ANCILLARY);
3725 add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3726 &ctx->args.sample_coverage, SI_PARAM_SAMPLE_COVERAGE);
3727 add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
3728 &ctx->pos_fixed_pt, SI_PARAM_POS_FIXED_PT);
3729
3730 /* Color inputs from the prolog. */
3731 if (shader->selector->info.colors_read) {
3732 unsigned num_color_elements =
3733 util_bitcount(shader->selector->info.colors_read);
3734
3735 for (i = 0; i < num_color_elements; i++)
3736 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, NULL);
3737
3738 num_prolog_vgprs += num_color_elements;
3739 }
3740
3741 /* Outputs for the epilog. */
3742 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
3743 num_returns =
3744 num_return_sgprs +
3745 util_bitcount(shader->selector->info.colors_written) * 4 +
3746 shader->selector->info.writes_z +
3747 shader->selector->info.writes_stencil +
3748 shader->selector->info.writes_samplemask +
3749 1 /* SampleMaskIn */;
3750
3751 num_returns = MAX2(num_returns,
3752 num_return_sgprs +
3753 PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
3754
3755 for (i = 0; i < num_return_sgprs; i++)
3756 returns[i] = ctx->i32;
3757 for (; i < num_returns; i++)
3758 returns[i] = ctx->f32;
3759 break;
3760
3761 case PIPE_SHADER_COMPUTE:
3762 declare_global_desc_pointers(ctx);
3763 declare_per_stage_desc_pointers(ctx, true);
3764 if (shader->selector->info.uses_grid_size)
3765 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT,
3766 &ctx->args.num_work_groups);
3767 if (shader->selector->info.uses_block_size &&
3768 shader->selector->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
3769 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT, &ctx->block_size);
3770
3771 unsigned cs_user_data_dwords =
3772 shader->selector->info.properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD];
3773 if (cs_user_data_dwords) {
3774 ac_add_arg(&ctx->args, AC_ARG_SGPR, cs_user_data_dwords, AC_ARG_INT,
3775 &ctx->cs_user_data);
3776 }
3777
3778 /* Hardware SGPRs. */
3779 for (i = 0; i < 3; i++) {
3780 if (shader->selector->info.uses_block_id[i]) {
3781 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
3782 &ctx->args.workgroup_ids[i]);
3783 }
3784 }
3785 if (shader->selector->info.uses_subgroup_info)
3786 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tg_size);
3787
3788 /* Hardware VGPRs. */
3789 ac_add_arg(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
3790 &ctx->args.local_invocation_ids);
3791 break;
3792 default:
3793 assert(0 && "unimplemented shader");
3794 return;
3795 }
3796
3797 si_create_function(ctx, "main", returns, num_returns,
3798 si_get_max_workgroup_size(shader));
3799
3800 /* Reserve register locations for VGPR inputs the PS prolog may need. */
3801 if (ctx->type == PIPE_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
3802 ac_llvm_add_target_dep_function_attr(ctx->main_fn,
3803 "InitialPSInputAddr",
3804 S_0286D0_PERSP_SAMPLE_ENA(1) |
3805 S_0286D0_PERSP_CENTER_ENA(1) |
3806 S_0286D0_PERSP_CENTROID_ENA(1) |
3807 S_0286D0_LINEAR_SAMPLE_ENA(1) |
3808 S_0286D0_LINEAR_CENTER_ENA(1) |
3809 S_0286D0_LINEAR_CENTROID_ENA(1) |
3810 S_0286D0_FRONT_FACE_ENA(1) |
3811 S_0286D0_ANCILLARY_ENA(1) |
3812 S_0286D0_POS_FIXED_PT_ENA(1));
3813 }
3814
3815 shader->info.num_input_sgprs = ctx->args.num_sgprs_used;
3816 shader->info.num_input_vgprs = ctx->args.num_vgprs_used;
3817
3818 assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
3819 shader->info.num_input_vgprs -= num_prolog_vgprs;
3820
3821 if (shader->key.as_ls || ctx->type == PIPE_SHADER_TESS_CTRL) {
3822 if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
3823 /* The LSHS size is not known until draw time, so we append it
3824 * at the end of whatever LDS use there may be in the rest of
3825 * the shader (currently none, unless LLVM decides to do its
3826 * own LDS-based lowering).
3827 */
3828 ctx->ac.lds = LLVMAddGlobalInAddressSpace(
3829 ctx->ac.module, LLVMArrayType(ctx->i32, 0),
3830 "__lds_end", AC_ADDR_SPACE_LDS);
3831 LLVMSetAlignment(ctx->ac.lds, 256);
3832 } else {
3833 ac_declare_lds_as_pointer(&ctx->ac);
3834 }
3835 }
3836
3837 /* Unlike radv, we override these arguments in the prolog, so to the
3838 * API shader they appear as normal arguments.
3839 */
3840 if (ctx->type == PIPE_SHADER_VERTEX) {
3841 ctx->abi.vertex_id = ac_get_arg(&ctx->ac, ctx->args.vertex_id);
3842 ctx->abi.instance_id = ac_get_arg(&ctx->ac, ctx->args.instance_id);
3843 } else if (ctx->type == PIPE_SHADER_FRAGMENT) {
3844 ctx->abi.persp_centroid = ac_get_arg(&ctx->ac, ctx->args.persp_centroid);
3845 ctx->abi.linear_centroid = ac_get_arg(&ctx->ac, ctx->args.linear_centroid);
3846 }
3847 }
3848
3849 /* Ensure that the esgs ring is declared.
3850 *
3851 * We declare it with 64KB alignment as a hint that the
3852 * pointer value will always be 0.
3853 */
3854 static void declare_esgs_ring(struct si_shader_context *ctx)
3855 {
3856 if (ctx->esgs_ring)
3857 return;
3858
3859 assert(!LLVMGetNamedGlobal(ctx->ac.module, "esgs_ring"));
3860
3861 ctx->esgs_ring = LLVMAddGlobalInAddressSpace(
3862 ctx->ac.module, LLVMArrayType(ctx->i32, 0),
3863 "esgs_ring",
3864 AC_ADDR_SPACE_LDS);
3865 LLVMSetLinkage(ctx->esgs_ring, LLVMExternalLinkage);
3866 LLVMSetAlignment(ctx->esgs_ring, 64 * 1024);
3867 }
3868
3869 /**
3870 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
3871 * for later use.
3872 */
3873 static void preload_ring_buffers(struct si_shader_context *ctx)
3874 {
3875 LLVMBuilderRef builder = ctx->ac.builder;
3876
3877 LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
3878
3879 if (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY) {
3880 if (ctx->screen->info.chip_class <= GFX8) {
3881 unsigned ring =
3882 ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
3883 : SI_ES_RING_ESGS;
3884 LLVMValueRef offset = LLVMConstInt(ctx->i32, ring, 0);
3885
3886 ctx->esgs_ring =
3887 ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
3888 } else {
3889 if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
3890 /* Declare the ESGS ring as an explicit LDS symbol. */
3891 declare_esgs_ring(ctx);
3892 } else {
3893 ac_declare_lds_as_pointer(&ctx->ac);
3894 ctx->esgs_ring = ctx->ac.lds;
3895 }
3896 }
3897 }
3898
3899 if (ctx->shader->is_gs_copy_shader) {
3900 LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
3901
3902 ctx->gsvs_ring[0] =
3903 ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
3904 } else if (ctx->type == PIPE_SHADER_GEOMETRY) {
3905 const struct si_shader_selector *sel = ctx->shader->selector;
3906 LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
3907 LLVMValueRef base_ring;
3908
3909 base_ring = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
3910
3911 /* The conceptual layout of the GSVS ring is
3912 * v0c0 .. vLv0 v0c1 .. vLc1 ..
3913 * but the real memory layout is swizzled across
3914 * threads:
3915 * t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
3916 * t16v0c0 ..
3917 * Override the buffer descriptor accordingly.
3918 */
3919 LLVMTypeRef v2i64 = LLVMVectorType(ctx->i64, 2);
3920 uint64_t stream_offset = 0;
3921
3922 for (unsigned stream = 0; stream < 4; ++stream) {
3923 unsigned num_components;
3924 unsigned stride;
3925 unsigned num_records;
3926 LLVMValueRef ring, tmp;
3927
3928 num_components = sel->info.num_stream_output_components[stream];
3929 if (!num_components)
3930 continue;
3931
3932 stride = 4 * num_components * sel->gs_max_out_vertices;
3933
3934 /* Limit on the stride field for <= GFX7. */
3935 assert(stride < (1 << 14));
3936
3937 num_records = ctx->ac.wave_size;
3938
3939 ring = LLVMBuildBitCast(builder, base_ring, v2i64, "");
3940 tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_0, "");
3941 tmp = LLVMBuildAdd(builder, tmp,
3942 LLVMConstInt(ctx->i64,
3943 stream_offset, 0), "");
3944 stream_offset += stride * ctx->ac.wave_size;
3945
3946 ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_0, "");
3947 ring = LLVMBuildBitCast(builder, ring, ctx->v4i32, "");
3948 tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_1, "");
3949 tmp = LLVMBuildOr(builder, tmp,
3950 LLVMConstInt(ctx->i32,
3951 S_008F04_STRIDE(stride) |
3952 S_008F04_SWIZZLE_ENABLE(1), 0), "");
3953 ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_1, "");
3954 ring = LLVMBuildInsertElement(builder, ring,
3955 LLVMConstInt(ctx->i32, num_records, 0),
3956 LLVMConstInt(ctx->i32, 2, 0), "");
3957
3958 uint32_t rsrc3 =
3959 S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
3960 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
3961 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
3962 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
3963 S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
3964 S_008F0C_ADD_TID_ENABLE(1);
3965
3966 if (ctx->ac.chip_class >= GFX10) {
3967 rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
3968 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) |
3969 S_008F0C_RESOURCE_LEVEL(1);
3970 } else {
3971 rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
3972 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
3973 S_008F0C_ELEMENT_SIZE(1); /* element_size = 4 (bytes) */
3974 }
3975
3976 ring = LLVMBuildInsertElement(builder, ring,
3977 LLVMConstInt(ctx->i32, rsrc3, false),
3978 LLVMConstInt(ctx->i32, 3, 0), "");
3979
3980 ctx->gsvs_ring[stream] = ring;
3981 }
3982 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
3983 ctx->tess_offchip_ring = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TES);
3984 }
3985 }
3986
/* Kill the fragment if the corresponding bit of the 32x32 polygon stipple
 * pattern is 0.
 *
 * \param param_rw_buffers    descriptor array containing the stipple buffer
 * \param param_pos_fixed_pt  fixed-point fragment position: X in bits [15:0],
 *                            Y in bits [31:16] (only 5 LSBs of each are used)
 */
static void si_llvm_emit_polygon_stipple(struct si_shader_context *ctx,
					 LLVMValueRef param_rw_buffers,
					 struct ac_arg param_pos_fixed_pt)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef slot, desc, offset, row, bit, address[2];

	/* Use the fixed-point gl_FragCoord input.
	 * Since the stipple pattern is 32x32 and it repeats, just get 5 bits
	 * per coordinate to get the repeating effect.
	 */
	address[0] = si_unpack_param(ctx, param_pos_fixed_pt, 0, 5);
	address[1] = si_unpack_param(ctx, param_pos_fixed_pt, 16, 5);

	/* Load the buffer descriptor. */
	slot = LLVMConstInt(ctx->i32, SI_PS_CONST_POLY_STIPPLE, 0);
	desc = ac_build_load_to_sgpr(&ctx->ac, param_rw_buffers, slot);

	/* The stipple pattern is 32x32, each row has 32 bits. */
	offset = LLVMBuildMul(builder, address[1],
			      LLVMConstInt(ctx->i32, 4, 0), "");
	row = buffer_load_const(ctx, desc, offset);
	row = ac_to_integer(&ctx->ac, row);
	/* Select bit X of row Y and discard the fragment when it is 0. */
	bit = LLVMBuildLShr(builder, row, address[0], "");
	bit = LLVMBuildTrunc(builder, bit, ctx->i1, "");
	ac_build_kill_if_false(&ctx->ac, bit);
}
4014
4015 /* For the UMR disassembler. */
4016 #define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
4017 #define DEBUGGER_NUM_MARKERS 5
4018
/* Link all parts of \p shader (prolog, previous stage, main part, epilog)
 * into one relocatable binary, and derive the combined LDS allocation.
 *
 * \return the result of ac_rtld_open(); on success the caller owns \p rtld
 *         and must release it with ac_rtld_close()
 */
static bool si_shader_binary_open(struct si_screen *screen,
				  struct si_shader *shader,
				  struct ac_rtld_binary *rtld)
{
	const struct si_shader_selector *sel = shader->selector;
	const char *part_elfs[5];
	size_t part_sizes[5];
	unsigned num_parts = 0;

	/* Collect the ELFs of all present shader parts in execution order. */
#define add_part(shader_or_part) \
	if (shader_or_part) { \
		part_elfs[num_parts] = (shader_or_part)->binary.elf_buffer; \
		part_sizes[num_parts] = (shader_or_part)->binary.elf_size; \
		num_parts++; \
	}

	add_part(shader->prolog);
	add_part(shader->previous_stage);
	add_part(shader->prolog2);
	add_part(shader);
	add_part(shader->epilog);

#undef add_part

	struct ac_rtld_symbol lds_symbols[2];
	unsigned num_lds_symbols = 0;

	if (sel && screen->info.chip_class >= GFX9 && !shader->is_gs_copy_shader &&
	    (sel->type == PIPE_SHADER_GEOMETRY || shader->key.as_ngg)) {
		/* We add this symbol even on LLVM <= 8 to ensure that
		 * shader->config.lds_size is set correctly below.
		 */
		struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
		sym->name = "esgs_ring";
		sym->size = shader->gs_info.esgs_ring_size;
		sym->align = 64 * 1024;
	}

	/* NGG GS also needs LDS space for the vertex emit buffer. */
	if (shader->key.as_ngg && sel->type == PIPE_SHADER_GEOMETRY) {
		struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
		sym->name = "ngg_emit";
		sym->size = shader->ngg.ngg_emit_size * 4;
		sym->align = 4;
	}

	bool ok = ac_rtld_open(rtld, (struct ac_rtld_open_info){
			.info = &screen->info,
			.options = {
				.halt_at_entry = screen->options.halt_shaders,
			},
			.shader_type = tgsi_processor_to_shader_stage(sel->type),
			.wave_size = si_get_shader_wave_size(shader),
			.num_parts = num_parts,
			.elf_ptrs = part_elfs,
			.elf_sizes = part_sizes,
			.num_shared_lds_symbols = num_lds_symbols,
			.shared_lds_symbols = lds_symbols });

	if (rtld->lds_size > 0) {
		/* Convert bytes to the hardware LDS allocation granule:
		 * 512 bytes on GFX7+, 256 bytes before. */
		unsigned alloc_granularity = screen->info.chip_class >= GFX7 ? 512 : 256;
		shader->config.lds_size =
			align(rtld->lds_size, alloc_granularity) / alloc_granularity;
	}

	return ok;
}
4085
4086 static unsigned si_get_shader_binary_size(struct si_screen *screen, struct si_shader *shader)
4087 {
4088 struct ac_rtld_binary rtld;
4089 si_shader_binary_open(screen, shader, &rtld);
4090 return rtld.exec_size;
4091 }
4092
4093 static bool si_get_external_symbol(void *data, const char *name, uint64_t *value)
4094 {
4095 uint64_t *scratch_va = data;
4096
4097 if (!strcmp(scratch_rsrc_dword0_symbol, name)) {
4098 *value = (uint32_t)*scratch_va;
4099 return true;
4100 }
4101 if (!strcmp(scratch_rsrc_dword1_symbol, name)) {
4102 /* Enable scratch coalescing. */
4103 *value = S_008F04_BASE_ADDRESS_HI(*scratch_va >> 32) |
4104 S_008F04_SWIZZLE_ENABLE(1);
4105 return true;
4106 }
4107
4108 return false;
4109 }
4110
4111 bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader,
4112 uint64_t scratch_va)
4113 {
4114 struct ac_rtld_binary binary;
4115 if (!si_shader_binary_open(sscreen, shader, &binary))
4116 return false;
4117
4118 si_resource_reference(&shader->bo, NULL);
4119 shader->bo = si_aligned_buffer_create(&sscreen->b,
4120 sscreen->info.cpdma_prefetch_writes_memory ?
4121 0 : SI_RESOURCE_FLAG_READ_ONLY,
4122 PIPE_USAGE_IMMUTABLE,
4123 align(binary.rx_size, SI_CPDMA_ALIGNMENT),
4124 256);
4125 if (!shader->bo)
4126 return false;
4127
4128 /* Upload. */
4129 struct ac_rtld_upload_info u = {};
4130 u.binary = &binary;
4131 u.get_external_symbol = si_get_external_symbol;
4132 u.cb_data = &scratch_va;
4133 u.rx_va = shader->bo->gpu_address;
4134 u.rx_ptr = sscreen->ws->buffer_map(shader->bo->buf, NULL,
4135 PIPE_TRANSFER_READ_WRITE |
4136 PIPE_TRANSFER_UNSYNCHRONIZED |
4137 RADEON_TRANSFER_TEMPORARY);
4138 if (!u.rx_ptr)
4139 return false;
4140
4141 bool ok = ac_rtld_upload(&u);
4142
4143 sscreen->ws->buffer_unmap(shader->bo->buf);
4144 ac_rtld_close(&binary);
4145
4146 return ok;
4147 }
4148
4149 static void si_shader_dump_disassembly(struct si_screen *screen,
4150 const struct si_shader_binary *binary,
4151 enum pipe_shader_type shader_type,
4152 unsigned wave_size,
4153 struct pipe_debug_callback *debug,
4154 const char *name, FILE *file)
4155 {
4156 struct ac_rtld_binary rtld_binary;
4157
4158 if (!ac_rtld_open(&rtld_binary, (struct ac_rtld_open_info){
4159 .info = &screen->info,
4160 .shader_type = tgsi_processor_to_shader_stage(shader_type),
4161 .wave_size = wave_size,
4162 .num_parts = 1,
4163 .elf_ptrs = &binary->elf_buffer,
4164 .elf_sizes = &binary->elf_size }))
4165 return;
4166
4167 const char *disasm;
4168 size_t nbytes;
4169
4170 if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm, &nbytes))
4171 goto out;
4172
4173 if (nbytes > INT_MAX)
4174 goto out;
4175
4176 if (debug && debug->debug_message) {
4177 /* Very long debug messages are cut off, so send the
4178 * disassembly one line at a time. This causes more
4179 * overhead, but on the plus side it simplifies
4180 * parsing of resulting logs.
4181 */
4182 pipe_debug_message(debug, SHADER_INFO,
4183 "Shader Disassembly Begin");
4184
4185 uint64_t line = 0;
4186 while (line < nbytes) {
4187 int count = nbytes - line;
4188 const char *nl = memchr(disasm + line, '\n', nbytes - line);
4189 if (nl)
4190 count = nl - (disasm + line);
4191
4192 if (count) {
4193 pipe_debug_message(debug, SHADER_INFO,
4194 "%.*s", count, disasm + line);
4195 }
4196
4197 line += count + 1;
4198 }
4199
4200 pipe_debug_message(debug, SHADER_INFO,
4201 "Shader Disassembly End");
4202 }
4203
4204 if (file) {
4205 fprintf(file, "Shader %s disassembly:\n", name);
4206 fprintf(file, "%*s", (int)nbytes, disasm);
4207 }
4208
4209 out:
4210 ac_rtld_close(&rtld_binary);
4211 }
4212
/* Estimate the maximum number of waves that can be resident on one SIMD,
 * limited by SGPR, VGPR and LDS usage, and store the result in
 * shader->info.max_simd_waves (used for statistics/shader-db).
 */
static void si_calculate_max_simd_waves(struct si_shader *shader)
{
	struct si_screen *sscreen = shader->selector->screen;
	struct ac_shader_config *conf = &shader->config;
	unsigned num_inputs = shader->selector->info.num_inputs;
	/* LDS allocation granule: 512 bytes on GFX7+, 256 bytes before. */
	unsigned lds_increment = sscreen->info.chip_class >= GFX7 ? 512 : 256;
	unsigned lds_per_wave = 0;
	unsigned max_simd_waves;

	max_simd_waves = sscreen->info.max_wave64_per_simd;

	/* Compute LDS usage for PS. */
	switch (shader->selector->type) {
	case PIPE_SHADER_FRAGMENT:
		/* The minimum usage per wave is (num_inputs * 48). The maximum
		 * usage is (num_inputs * 48 * 16).
		 * We can get anything in between and it varies between waves.
		 *
		 * The 48 bytes per input for a single primitive is equal to
		 * 4 bytes/component * 4 components/input * 3 points.
		 *
		 * Other stages don't know the size at compile time or don't
		 * allocate LDS per wave, but instead they do it per thread group.
		 */
		lds_per_wave = conf->lds_size * lds_increment +
			       align(num_inputs * 48, lds_increment);
		break;
	case PIPE_SHADER_COMPUTE:
		if (shader->selector) {
			unsigned max_workgroup_size =
				si_get_max_workgroup_size(shader);
			/* Spread the workgroup's LDS evenly over its waves. */
			lds_per_wave = (conf->lds_size * lds_increment) /
				       DIV_ROUND_UP(max_workgroup_size,
						    sscreen->compute_wave_size);
		}
		break;
	default:;
	}

	/* Compute the per-SIMD wave counts. */
	if (conf->num_sgprs) {
		max_simd_waves =
			MIN2(max_simd_waves,
			     sscreen->info.num_physical_sgprs_per_simd / conf->num_sgprs);
	}

	if (conf->num_vgprs) {
		/* Always print wave limits as Wave64, so that we can compare
		 * Wave32 and Wave64 with shader-db fairly. */
		unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd;
		max_simd_waves = MIN2(max_simd_waves, max_vgprs / conf->num_vgprs);
	}

	/* LDS is 64KB per CU (4 SIMDs) on GFX6-9, which is 16KB per SIMD (usage above
	 * 16KB makes some SIMDs unoccupied).
	 *
	 * LDS is 128KB in WGP mode and 64KB in CU mode. Assume the WGP mode is used.
	 */
	unsigned max_lds_size = sscreen->info.chip_class >= GFX10 ? 128*1024 : 64*1024;
	unsigned max_lds_per_simd = max_lds_size / 4;
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);

	shader->info.max_simd_waves = max_simd_waves;
}
4278
4279 void si_shader_dump_stats_for_shader_db(struct si_screen *screen,
4280 struct si_shader *shader,
4281 struct pipe_debug_callback *debug)
4282 {
4283 const struct ac_shader_config *conf = &shader->config;
4284
4285 if (screen->options.debug_disassembly)
4286 si_shader_dump_disassembly(screen, &shader->binary,
4287 shader->selector->type,
4288 si_get_shader_wave_size(shader),
4289 debug, "main", NULL);
4290
4291 pipe_debug_message(debug, SHADER_INFO,
4292 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
4293 "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
4294 "Spilled VGPRs: %d PrivMem VGPRs: %d",
4295 conf->num_sgprs, conf->num_vgprs,
4296 si_get_shader_binary_size(screen, shader),
4297 conf->lds_size, conf->scratch_bytes_per_wave,
4298 shader->info.max_simd_waves, conf->spilled_sgprs,
4299 conf->spilled_vgprs, shader->info.private_mem_vgprs);
4300 }
4301
4302 static void si_shader_dump_stats(struct si_screen *sscreen,
4303 struct si_shader *shader,
4304 FILE *file,
4305 bool check_debug_option)
4306 {
4307 const struct ac_shader_config *conf = &shader->config;
4308
4309 if (!check_debug_option ||
4310 si_can_dump_shader(sscreen, shader->selector->type)) {
4311 if (shader->selector->type == PIPE_SHADER_FRAGMENT) {
4312 fprintf(file, "*** SHADER CONFIG ***\n"
4313 "SPI_PS_INPUT_ADDR = 0x%04x\n"
4314 "SPI_PS_INPUT_ENA = 0x%04x\n",
4315 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
4316 }
4317
4318 fprintf(file, "*** SHADER STATS ***\n"
4319 "SGPRS: %d\n"
4320 "VGPRS: %d\n"
4321 "Spilled SGPRs: %d\n"
4322 "Spilled VGPRs: %d\n"
4323 "Private memory VGPRs: %d\n"
4324 "Code Size: %d bytes\n"
4325 "LDS: %d blocks\n"
4326 "Scratch: %d bytes per wave\n"
4327 "Max Waves: %d\n"
4328 "********************\n\n\n",
4329 conf->num_sgprs, conf->num_vgprs,
4330 conf->spilled_sgprs, conf->spilled_vgprs,
4331 shader->info.private_mem_vgprs,
4332 si_get_shader_binary_size(sscreen, shader),
4333 conf->lds_size, conf->scratch_bytes_per_wave,
4334 shader->info.max_simd_waves);
4335 }
4336 }
4337
4338 const char *si_get_shader_name(const struct si_shader *shader)
4339 {
4340 switch (shader->selector->type) {
4341 case PIPE_SHADER_VERTEX:
4342 if (shader->key.as_es)
4343 return "Vertex Shader as ES";
4344 else if (shader->key.as_ls)
4345 return "Vertex Shader as LS";
4346 else if (shader->key.opt.vs_as_prim_discard_cs)
4347 return "Vertex Shader as Primitive Discard CS";
4348 else if (shader->key.as_ngg)
4349 return "Vertex Shader as ESGS";
4350 else
4351 return "Vertex Shader as VS";
4352 case PIPE_SHADER_TESS_CTRL:
4353 return "Tessellation Control Shader";
4354 case PIPE_SHADER_TESS_EVAL:
4355 if (shader->key.as_es)
4356 return "Tessellation Evaluation Shader as ES";
4357 else if (shader->key.as_ngg)
4358 return "Tessellation Evaluation Shader as ESGS";
4359 else
4360 return "Tessellation Evaluation Shader as VS";
4361 case PIPE_SHADER_GEOMETRY:
4362 if (shader->is_gs_copy_shader)
4363 return "GS Copy Shader as VS";
4364 else
4365 return "Geometry Shader";
4366 case PIPE_SHADER_FRAGMENT:
4367 return "Pixel Shader";
4368 case PIPE_SHADER_COMPUTE:
4369 return "Compute Shader";
4370 default:
4371 return "Unknown Shader";
4372 }
4373 }
4374
/* Dump the shader key, the recorded LLVM IR (if any), the disassembly of
 * every shader part, and the shader statistics.
 *
 * \param check_debug_option  when true, honor the per-stage shader dump
 *                            debug flags; when false, always dump
 */
void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
		    struct pipe_debug_callback *debug,
		    FILE *file, bool check_debug_option)
{
	enum pipe_shader_type shader_type = shader->selector->type;

	if (!check_debug_option ||
	    si_can_dump_shader(sscreen, shader_type))
		si_dump_shader_key(shader, file);

	/* llvm_ir_string is only recorded when sscreen->record_llvm_ir is set. */
	if (!check_debug_option && shader->binary.llvm_ir_string) {
		if (shader->previous_stage &&
		    shader->previous_stage->binary.llvm_ir_string) {
			fprintf(file, "\n%s - previous stage - LLVM IR:\n\n",
				si_get_shader_name(shader));
			fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
		}

		fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
			si_get_shader_name(shader));
		fprintf(file, "%s\n", shader->binary.llvm_ir_string);
	}

	if (!check_debug_option ||
	    (si_can_dump_shader(sscreen, shader_type) &&
	     !(sscreen->debug_flags & DBG(NO_ASM)))) {
		unsigned wave_size = si_get_shader_wave_size(shader);

		fprintf(file, "\n%s:\n", si_get_shader_name(shader));

		/* Dump every present shader part in execution order. */
		if (shader->prolog)
			si_shader_dump_disassembly(sscreen, &shader->prolog->binary,
						   shader_type, wave_size, debug, "prolog", file);
		if (shader->previous_stage)
			si_shader_dump_disassembly(sscreen, &shader->previous_stage->binary,
						   shader_type, wave_size, debug, "previous stage", file);
		if (shader->prolog2)
			si_shader_dump_disassembly(sscreen, &shader->prolog2->binary,
						   shader_type, wave_size, debug, "prolog2", file);

		si_shader_dump_disassembly(sscreen, &shader->binary, shader_type,
					   wave_size, debug, "main", file);

		if (shader->epilog)
			si_shader_dump_disassembly(sscreen, &shader->epilog->binary,
						   shader_type, wave_size, debug, "epilog", file);
		fprintf(file, "\n");
	}

	si_shader_dump_stats(sscreen, shader, file, check_debug_option);
}
4426
/* Compile an LLVM module to machine code and read back the shader config.
 *
 * \param binary          receives the ELF and (optionally) the LLVM IR string
 * \param conf            receives the register/LDS/scratch configuration
 * \param less_optimized  use the less optimized compiler pass pipeline
 * \return 0 on success, the si_llvm_compile() error code on compile failure,
 *         or -1 if the resulting ELF/config cannot be read
 */
static int si_compile_llvm(struct si_screen *sscreen,
			   struct si_shader_binary *binary,
			   struct ac_shader_config *conf,
			   struct ac_llvm_compiler *compiler,
			   LLVMModuleRef mod,
			   struct pipe_debug_callback *debug,
			   enum pipe_shader_type shader_type,
			   unsigned wave_size,
			   const char *name,
			   bool less_optimized)
{
	unsigned count = p_atomic_inc_return(&sscreen->num_compilations);

	if (si_can_dump_shader(sscreen, shader_type)) {
		fprintf(stderr, "radeonsi: Compiling shader %d\n", count);

		if (!(sscreen->debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
			fprintf(stderr, "%s LLVM IR:\n\n", name);
			ac_dump_module(mod);
			fprintf(stderr, "\n");
		}
	}

	/* Keep a copy of the IR for later dumping when requested. */
	if (sscreen->record_llvm_ir) {
		char *ir = LLVMPrintModuleToString(mod);
		binary->llvm_ir_string = strdup(ir);
		LLVMDisposeMessage(ir);
	}

	/* si_replace_shader may substitute a binary loaded from disk (debug
	 * feature); only run the compiler when it doesn't. */
	if (!si_replace_shader(count, binary)) {
		unsigned r = si_llvm_compile(mod, binary, compiler, debug,
					     less_optimized, wave_size);
		if (r)
			return r;
	}

	/* Read the config registers (SGPR/VGPR counts, etc.) from the ELF. */
	struct ac_rtld_binary rtld;
	if (!ac_rtld_open(&rtld, (struct ac_rtld_open_info){
			.info = &sscreen->info,
			.shader_type = tgsi_processor_to_shader_stage(shader_type),
			.wave_size = wave_size,
			.num_parts = 1,
			.elf_ptrs = &binary->elf_buffer,
			.elf_sizes = &binary->elf_size }))
		return -1;

	bool ok = ac_rtld_read_config(&rtld, conf);
	ac_rtld_close(&rtld);
	if (!ok)
		return -1;

	/* Enable 64-bit and 16-bit denormals, because there is no performance
	 * cost.
	 *
	 * If denormals are enabled, all floating-point output modifiers are
	 * ignored.
	 *
	 * Don't enable denormals for 32-bit floats, because:
	 * - Floating-point output modifiers would be ignored by the hw.
	 * - Some opcodes don't support denormals, such as v_mad_f32. We would
	 *   have to stop using those.
	 * - GFX6 & GFX7 would be very slow.
	 */
	conf->float_mode |= V_00B028_FP_64_DENORMS;

	return 0;
}
4494
4495 static void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret)
4496 {
4497 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
4498 LLVMBuildRetVoid(ctx->ac.builder);
4499 else
4500 LLVMBuildRet(ctx->ac.builder, ret);
4501 }
4502
/* Generate code for the hardware VS shader stage to go with a geometry shader
 *
 * The GS copy shader reads the GS outputs of the selected vertex stream from
 * the GSVS ring, performs streamout when needed, and exports the stream-0
 * vertices as a hardware VS.
 *
 * \return the new compiled shader, or NULL on failure
 */
struct si_shader *
si_generate_gs_copy_shader(struct si_screen *sscreen,
			   struct ac_llvm_compiler *compiler,
			   struct si_shader_selector *gs_selector,
			   struct pipe_debug_callback *debug)
{
	struct si_shader_context ctx;
	struct si_shader *shader;
	LLVMBuilderRef builder;
	struct si_shader_output_values outputs[SI_MAX_VS_OUTPUTS];
	struct tgsi_shader_info *gsinfo = &gs_selector->info;
	int i;


	shader = CALLOC_STRUCT(si_shader);
	if (!shader)
		return NULL;

	/* We can leave the fence as permanently signaled because the GS copy
	 * shader only becomes visible globally after it has been compiled. */
	util_queue_fence_init(&shader->ready);

	shader->selector = gs_selector;
	shader->is_gs_copy_shader = true;

	/* The copy shader runs as a hardware VS. */
	si_llvm_context_init(&ctx, sscreen, compiler,
			     si_get_wave_size(sscreen, PIPE_SHADER_VERTEX, false, false),
			     64);
	ctx.shader = shader;
	ctx.type = PIPE_SHADER_VERTEX;

	builder = ctx.ac.builder;

	create_function(&ctx);
	preload_ring_buffers(&ctx);

	/* Ring offset of the current vertex (4 bytes per dword element). */
	LLVMValueRef voffset =
		LLVMBuildMul(ctx.ac.builder, ctx.abi.vertex_id,
			     LLVMConstInt(ctx.i32, 4, 0), "");

	/* Fetch the vertex stream ID.*/
	LLVMValueRef stream_id;

	if (!sscreen->use_ngg_streamout && gs_selector->so.num_outputs)
		stream_id = si_unpack_param(&ctx, ctx.streamout_config, 24, 2);
	else
		stream_id = ctx.i32_0;

	/* Fill in output information. */
	for (i = 0; i < gsinfo->num_outputs; ++i) {
		outputs[i].semantic_name = gsinfo->output_semantic_name[i];
		outputs[i].semantic_index = gsinfo->output_semantic_index[i];

		/* 2 bits per channel encode the stream each channel belongs to. */
		for (int chan = 0; chan < 4; chan++) {
			outputs[i].vertex_stream[chan] =
				(gsinfo->output_streams[i] >> (2 * chan)) & 3;
		}
	}

	/* Build a switch on the stream ID; each case handles one stream. */
	LLVMBasicBlockRef end_bb;
	LLVMValueRef switch_inst;

	end_bb = LLVMAppendBasicBlockInContext(ctx.ac.context, ctx.main_fn, "end");
	switch_inst = LLVMBuildSwitch(builder, stream_id, end_bb, 4);

	for (int stream = 0; stream < 4; stream++) {
		LLVMBasicBlockRef bb;
		unsigned offset;

		if (!gsinfo->num_stream_output_components[stream])
			continue;

		/* Streams > 0 only matter for streamout. */
		if (stream > 0 && !gs_selector->so.num_outputs)
			continue;

		bb = LLVMInsertBasicBlockInContext(ctx.ac.context, end_bb, "out");
		LLVMAddCase(switch_inst, LLVMConstInt(ctx.i32, stream, 0), bb);
		LLVMPositionBuilderAtEnd(builder, bb);

		/* Fetch vertex data from GSVS ring */
		offset = 0;
		for (i = 0; i < gsinfo->num_outputs; ++i) {
			for (unsigned chan = 0; chan < 4; chan++) {
				/* Channels not written or belonging to another
				 * stream are undefined. */
				if (!(gsinfo->output_usagemask[i] & (1 << chan)) ||
				    outputs[i].vertex_stream[chan] != stream) {
					outputs[i].values[chan] = LLVMGetUndef(ctx.f32);
					continue;
				}

				LLVMValueRef soffset = LLVMConstInt(ctx.i32,
					offset * gs_selector->gs_max_out_vertices * 16 * 4, 0);
				offset++;

				outputs[i].values[chan] =
					ac_build_buffer_load(&ctx.ac,
							     ctx.gsvs_ring[0], 1,
							     ctx.i32_0, voffset,
							     soffset, 0, ac_glc | ac_slc,
							     true, false);
			}
		}

		/* Streamout and exports. */
		if (!sscreen->use_ngg_streamout && gs_selector->so.num_outputs) {
			si_llvm_emit_streamout(&ctx, outputs,
					       gsinfo->num_outputs,
					       stream);
		}

		/* Only stream 0 is rasterized/exported. */
		if (stream == 0)
			si_llvm_export_vs(&ctx, outputs, gsinfo->num_outputs);

		LLVMBuildBr(builder, end_bb);
	}

	LLVMPositionBuilderAtEnd(builder, end_bb);

	LLVMBuildRetVoid(ctx.ac.builder);

	ctx.type = PIPE_SHADER_GEOMETRY; /* override for shader dumping */
	si_llvm_optimize_module(&ctx);

	bool ok = false;
	if (si_compile_llvm(sscreen, &ctx.shader->binary,
			    &ctx.shader->config, ctx.compiler,
			    ctx.ac.module,
			    debug, PIPE_SHADER_GEOMETRY, ctx.ac.wave_size,
			    "GS Copy Shader", false) == 0) {
		if (si_can_dump_shader(sscreen, PIPE_SHADER_GEOMETRY))
			fprintf(stderr, "GS Copy Shader:\n");
		si_shader_dump(sscreen, ctx.shader, debug, stderr, true);

		/* Upload now unless a scratch buffer is needed (then the
		 * upload happens later with the scratch address). */
		if (!ctx.shader->config.scratch_bytes_per_wave)
			ok = si_shader_binary_upload(sscreen, ctx.shader, 0);
		else
			ok = true;
	}

	si_llvm_dispose(&ctx);

	if (!ok) {
		FREE(shader);
		shader = NULL;
	} else {
		si_fix_resource_usage(sscreen, shader);
	}
	return shader;
}
4652
4653 static void si_dump_shader_key_vs(const struct si_shader_key *key,
4654 const struct si_vs_prolog_bits *prolog,
4655 const char *prefix, FILE *f)
4656 {
4657 fprintf(f, " %s.instance_divisor_is_one = %u\n",
4658 prefix, prolog->instance_divisor_is_one);
4659 fprintf(f, " %s.instance_divisor_is_fetched = %u\n",
4660 prefix, prolog->instance_divisor_is_fetched);
4661 fprintf(f, " %s.unpack_instance_id_from_vertex_id = %u\n",
4662 prefix, prolog->unpack_instance_id_from_vertex_id);
4663 fprintf(f, " %s.ls_vgpr_fix = %u\n",
4664 prefix, prolog->ls_vgpr_fix);
4665
4666 fprintf(f, " mono.vs.fetch_opencode = %x\n", key->mono.vs_fetch_opencode);
4667 fprintf(f, " mono.vs.fix_fetch = {");
4668 for (int i = 0; i < SI_MAX_ATTRIBS; i++) {
4669 union si_vs_fix_fetch fix = key->mono.vs_fix_fetch[i];
4670 if (i)
4671 fprintf(f, ", ");
4672 if (!fix.bits)
4673 fprintf(f, "0");
4674 else
4675 fprintf(f, "%u.%u.%u.%u", fix.u.reverse, fix.u.log_size,
4676 fix.u.num_channels_m1, fix.u.format);
4677 }
4678 fprintf(f, "}\n");
4679 }
4680
/* Dump the complete shader key for a compiled shader variant in
 * human-readable form (used by the shader-dump debug paths).
 * Output is written to f. */
static void si_dump_shader_key(const struct si_shader *shader, FILE *f)
{
	const struct si_shader_key *key = &shader->key;
	enum pipe_shader_type shader_type = shader->selector->type;

	fprintf(f, "SHADER KEY\n");

	switch (shader_type) {
	case PIPE_SHADER_VERTEX:
		si_dump_shader_key_vs(key, &key->part.vs.prolog,
				      "part.vs.prolog", f);
		fprintf(f, " as_es = %u\n", key->as_es);
		fprintf(f, " as_ls = %u\n", key->as_ls);
		fprintf(f, " as_ngg = %u\n", key->as_ngg);
		fprintf(f, " mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		/* Fields below describe the VS compiled as a primitive
		 * discard compute shader. */
		fprintf(f, " opt.vs_as_prim_discard_cs = %u\n",
			key->opt.vs_as_prim_discard_cs);
		fprintf(f, " opt.cs_prim_type = %s\n",
			tgsi_primitive_names[key->opt.cs_prim_type]);
		fprintf(f, " opt.cs_indexed = %u\n",
			key->opt.cs_indexed);
		fprintf(f, " opt.cs_instancing = %u\n",
			key->opt.cs_instancing);
		fprintf(f, " opt.cs_primitive_restart = %u\n",
			key->opt.cs_primitive_restart);
		fprintf(f, " opt.cs_provoking_vertex_first = %u\n",
			key->opt.cs_provoking_vertex_first);
		fprintf(f, " opt.cs_need_correct_orientation = %u\n",
			key->opt.cs_need_correct_orientation);
		fprintf(f, " opt.cs_cull_front = %u\n",
			key->opt.cs_cull_front);
		fprintf(f, " opt.cs_cull_back = %u\n",
			key->opt.cs_cull_back);
		fprintf(f, " opt.cs_cull_z = %u\n",
			key->opt.cs_cull_z);
		fprintf(f, " opt.cs_halfz_clip_space = %u\n",
			key->opt.cs_halfz_clip_space);
		break;

	case PIPE_SHADER_TESS_CTRL:
		/* On GFX9+ the LS (vertex) stage is merged into the HS,
		 * so the TCS key also carries the LS prolog state. */
		if (shader->selector->screen->info.chip_class >= GFX9) {
			si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
					      "part.tcs.ls_prolog", f);
		}
		fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
		fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
		break;

	case PIPE_SHADER_TESS_EVAL:
		fprintf(f, " as_es = %u\n", key->as_es);
		fprintf(f, " as_ngg = %u\n", key->as_ngg);
		fprintf(f, " mono.u.vs_export_prim_id = %u\n",
			key->mono.u.vs_export_prim_id);
		break;

	case PIPE_SHADER_GEOMETRY:
		/* The GS copy shader has no meaningful key of its own. */
		if (shader->is_gs_copy_shader)
			break;

		/* On GFX9+ the ES stage is merged into the GS; print the
		 * embedded VS prolog state when the ES was a VS. */
		if (shader->selector->screen->info.chip_class >= GFX9 &&
		    key->part.gs.es->type == PIPE_SHADER_VERTEX) {
			si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
					      "part.gs.vs_prolog", f);
		}
		fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
		fprintf(f, " part.gs.prolog.gfx9_prev_is_vs = %u\n", key->part.gs.prolog.gfx9_prev_is_vs);
		fprintf(f, " as_ngg = %u\n", key->as_ngg);
		break;

	case PIPE_SHADER_COMPUTE:
		/* Compute shaders have no key fields to print. */
		break;

	case PIPE_SHADER_FRAGMENT:
		fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
		fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
		fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
		fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
		fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
		fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
		fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
		fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
		fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
		fprintf(f, " part.ps.prolog.samplemask_log_ps_iter = %u\n", key->part.ps.prolog.samplemask_log_ps_iter);
		fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
		fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
		fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
		fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
		fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
		fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
		fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
		fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
		fprintf(f, " mono.u.ps.interpolate_at_sample_force_center = %u\n", key->mono.u.ps.interpolate_at_sample_force_center);
		fprintf(f, " mono.u.ps.fbfetch_msaa = %u\n", key->mono.u.ps.fbfetch_msaa);
		fprintf(f, " mono.u.ps.fbfetch_is_1D = %u\n", key->mono.u.ps.fbfetch_is_1D);
		fprintf(f, " mono.u.ps.fbfetch_layered = %u\n", key->mono.u.ps.fbfetch_layered);
		break;

	default:
		assert(0);
	}

	/* These optimizations only apply to a shader running as the last
	 * pre-rasterization (hardware VS) stage, i.e. not as ES or LS. */
	if ((shader_type == PIPE_SHADER_GEOMETRY ||
	     shader_type == PIPE_SHADER_TESS_EVAL ||
	     shader_type == PIPE_SHADER_VERTEX) &&
	    !key->as_es && !key->as_ls) {
		fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
		fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
	}
}
4791
4792 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
4793 {
4794 struct si_shader *shader = ctx->shader;
4795 struct tgsi_shader_info *info = &shader->selector->info;
4796
4797 if ((ctx->type != PIPE_SHADER_VERTEX &&
4798 ctx->type != PIPE_SHADER_TESS_EVAL) ||
4799 shader->key.as_ls ||
4800 shader->key.as_es)
4801 return;
4802
4803 ac_optimize_vs_outputs(&ctx->ac,
4804 ctx->main_fn,
4805 shader->info.vs_output_param_offset,
4806 info->num_outputs,
4807 &shader->info.nr_param_exports);
4808 }
4809
4810 static void si_init_exec_from_input(struct si_shader_context *ctx,
4811 struct ac_arg param, unsigned bitoffset)
4812 {
4813 LLVMValueRef args[] = {
4814 ac_get_arg(&ctx->ac, param),
4815 LLVMConstInt(ctx->i32, bitoffset, 0),
4816 };
4817 ac_build_intrinsic(&ctx->ac,
4818 "llvm.amdgcn.init.exec.from.input",
4819 ctx->voidt, args, 2, AC_FUNC_ATTR_CONVERGENT);
4820 }
4821
4822 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
4823 const struct si_vs_prolog_bits *key)
4824 {
4825 /* VGPR initialization fixup for Vega10 and Raven is always done in the
4826 * VS prolog. */
4827 return sel->vs_needs_prolog ||
4828 key->ls_vgpr_fix ||
4829 key->unpack_instance_id_from_vertex_id;
4830 }
4831
4832 LLVMValueRef si_is_es_thread(struct si_shader_context *ctx)
4833 {
4834 /* Return true if the current thread should execute an ES thread. */
4835 return LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
4836 ac_get_thread_id(&ctx->ac),
4837 si_unpack_param(ctx, ctx->merged_wave_info, 0, 8), "");
4838 }
4839
4840 LLVMValueRef si_is_gs_thread(struct si_shader_context *ctx)
4841 {
4842 /* Return true if the current thread should execute a GS thread. */
4843 return LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
4844 ac_get_thread_id(&ctx->ac),
4845 si_unpack_param(ctx, ctx->merged_wave_info, 8, 8), "");
4846 }
4847
4848 static void si_llvm_emit_kill(struct ac_shader_abi *abi, LLVMValueRef visible)
4849 {
4850 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
4851 LLVMBuilderRef builder = ctx->ac.builder;
4852
4853 if (ctx->shader->selector->force_correct_derivs_after_kill) {
4854 /* Kill immediately while maintaining WQM. */
4855 ac_build_kill_if_false(&ctx->ac,
4856 ac_build_wqm_vote(&ctx->ac, visible));
4857
4858 LLVMValueRef mask = LLVMBuildLoad(builder, ctx->postponed_kill, "");
4859 mask = LLVMBuildAnd(builder, mask, visible, "");
4860 LLVMBuildStore(builder, mask, ctx->postponed_kill);
4861 return;
4862 }
4863
4864 ac_build_kill_if_false(&ctx->ac, visible);
4865 }
4866
/* Translate the main part of a shader from NIR into LLVM IR.
 *
 * Sets up the per-stage ABI callbacks, creates the LLVM function and
 * ring-buffer preloads, allocates stage-specific allocas/LDS, emits the
 * GFX9 merged-shader EXEC handling, and finally runs the NIR->LLVM
 * translation.
 *
 * \param nir       the shader to translate
 * \param free_nir  whether to ralloc_free(nir) after translation
 * \return true on success, false if NIR->LLVM translation failed
 */
static bool si_compile_tgsi_main(struct si_shader_context *ctx,
				 struct nir_shader *nir, bool free_nir)
{
	struct si_shader *shader = ctx->shader;
	struct si_shader_selector *sel = shader->selector;

	/* Select per-stage ABI callbacks (epilogue emission, input loads). */
	// TODO clean all this up!
	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		/* The epilogue depends on which stage the VS is compiled
		 * as: LS (before TCS), ES (before GS), primitive-discard
		 * compute shader, NGG, or plain hardware VS. */
		if (shader->key.as_ls)
			ctx->abi.emit_outputs = si_llvm_emit_ls_epilogue;
		else if (shader->key.as_es)
			ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
		else if (shader->key.opt.vs_as_prim_discard_cs)
			ctx->abi.emit_outputs = si_llvm_emit_prim_discard_cs_epilogue;
		else if (shader->key.as_ngg)
			ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
		else
			ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
		ctx->abi.load_base_vertex = get_base_vertex;
		break;
	case PIPE_SHADER_TESS_CTRL:
		ctx->abi.load_tess_varyings = si_nir_load_tcs_varyings;
		ctx->abi.load_tess_level = si_load_tess_level;
		ctx->abi.store_tcs_outputs = si_nir_store_output_tcs;
		ctx->abi.emit_outputs = si_llvm_emit_tcs_epilogue;
		ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
		break;
	case PIPE_SHADER_TESS_EVAL:
		ctx->abi.load_tess_varyings = si_nir_load_input_tes;
		ctx->abi.load_tess_coord = si_load_tess_coord;
		ctx->abi.load_tess_level = si_load_tess_level;
		ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
		if (shader->key.as_es)
			ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
		else if (shader->key.as_ngg)
			ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
		else
			ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
		break;
	case PIPE_SHADER_GEOMETRY:
		ctx->abi.load_inputs = si_nir_load_input_gs;
		ctx->abi.emit_vertex = si_llvm_emit_vertex;
		ctx->abi.emit_primitive = si_llvm_emit_primitive;
		ctx->abi.emit_outputs = si_llvm_emit_gs_epilogue;
		break;
	case PIPE_SHADER_FRAGMENT:
		ctx->abi.emit_outputs = si_llvm_return_fs_outputs;
		ctx->abi.load_sample_position = load_sample_position;
		ctx->abi.load_sample_mask_in = load_sample_mask_in;
		ctx->abi.emit_fbfetch = si_nir_emit_fbfetch;
		ctx->abi.emit_kill = si_llvm_emit_kill;
		break;
	case PIPE_SHADER_COMPUTE:
		ctx->abi.load_local_group_size = get_block_size;
		break;
	default:
		assert(!"Unsupported shader type");
		return false;
	}

	ctx->abi.load_ubo = load_ubo;
	ctx->abi.load_ssbo = load_ssbo;

	create_function(ctx);
	preload_ring_buffers(ctx);

	/* TCS: allocas for the 6 tess factors written by invocation 0,
	 * used when all invocations define the tess factors. */
	if (ctx->type == PIPE_SHADER_TESS_CTRL &&
	    sel->tcs_info.tessfactors_are_def_in_all_invocs) {
		for (unsigned i = 0; i < 6; i++) {
			ctx->invoc0_tess_factors[i] =
				ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
		}
	}

	if (ctx->type == PIPE_SHADER_GEOMETRY) {
		/* Per-stream vertex counters for EMIT_VERTEX. */
		for (unsigned i = 0; i < 4; i++) {
			ctx->gs_next_vertex[i] =
				ac_build_alloca(&ctx->ac, ctx->i32, "");
		}
		if (shader->key.as_ngg) {
			/* NGG GS: per-stream primitive tracking counters. */
			for (unsigned i = 0; i < 4; ++i) {
				ctx->gs_curprim_verts[i] =
					ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
				ctx->gs_generated_prims[i] =
					ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
			}

			/* Streamout needs a larger LDS scratch area. */
			unsigned scratch_size = 8;
			if (sel->so.num_outputs)
				scratch_size = 44;

			LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, scratch_size);
			ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
				ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
			LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(ai32));
			LLVMSetAlignment(ctx->gs_ngg_scratch, 4);

			/* Unsized LDS array for emitted GS vertices; actual
			 * size is determined at link time (external linkage). */
			ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(ctx->ac.module,
				LLVMArrayType(ctx->i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
			LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
			LLVMSetAlignment(ctx->gs_ngg_emit, 4);
		}
	}

	if (ctx->type != PIPE_SHADER_GEOMETRY &&
	    (shader->key.as_ngg && !shader->key.as_es)) {
		/* Unconditionally declare scratch space base for streamout and
		 * vertex compaction. Whether space is actually allocated is
		 * determined during linking / PM4 creation.
		 *
		 * Add an extra dword per vertex to ensure an odd stride, which
		 * avoids bank conflicts for SoA accesses.
		 */
		if (!gfx10_is_ngg_passthrough(shader))
			declare_esgs_ring(ctx);

		/* This is really only needed when streamout and / or vertex
		 * compaction is enabled.
		 */
		if (sel->so.num_outputs && !ctx->gs_ngg_scratch) {
			LLVMTypeRef asi32 = LLVMArrayType(ctx->i32, 8);
			ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
				asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
			LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(asi32));
			LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
		}
	}

	/* For GFX9 merged shaders:
	 * - Set EXEC for the first shader. If the prolog is present, set
	 *   EXEC there instead.
	 * - Add a barrier before the second shader.
	 * - In the second shader, reset EXEC to ~0 and wrap the main part in
	 *   an if-statement. This is required for correctness in geometry
	 *   shaders, to ensure that empty GS waves do not send GS_EMIT and
	 *   GS_CUT messages.
	 *
	 * For monolithic merged shaders, the first shader is wrapped in an
	 * if-block together with its prolog in si_build_wrapper_function.
	 *
	 * NGG vertex and tess eval shaders running as the last
	 * vertex/geometry stage handle execution explicitly using
	 * if-statements.
	 */
	if (ctx->screen->info.chip_class >= GFX9) {
		if (!shader->is_monolithic &&
		    sel->info.num_instructions > 1 && /* not empty shader */
		    (shader->key.as_es || shader->key.as_ls) &&
		    (ctx->type == PIPE_SHADER_TESS_EVAL ||
		     (ctx->type == PIPE_SHADER_VERTEX &&
		      !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog)))) {
			/* First half of a merged shader with no prolog:
			 * initialize EXEC from the wave-info SGPR here. */
			si_init_exec_from_input(ctx,
						ctx->merged_wave_info, 0);
		} else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
			   ctx->type == PIPE_SHADER_GEOMETRY ||
			   (shader->key.as_ngg && !shader->key.as_es)) {
			LLVMValueRef thread_enabled;
			bool nested_barrier;

			if (!shader->is_monolithic ||
			    (ctx->type == PIPE_SHADER_TESS_EVAL &&
			     (shader->key.as_ngg && !shader->key.as_es)))
				ac_init_exec_full_mask(&ctx->ac);

			if (ctx->type == PIPE_SHADER_TESS_CTRL ||
			    ctx->type == PIPE_SHADER_GEOMETRY) {
				if (ctx->type == PIPE_SHADER_GEOMETRY && shader->key.as_ngg) {
					/* NGG GS handles its own barrier in
					 * the prologue. */
					gfx10_ngg_gs_emit_prologue(ctx);
					nested_barrier = false;
				} else {
					nested_barrier = true;
				}

				thread_enabled = si_is_gs_thread(ctx);
			} else {
				thread_enabled = si_is_es_thread(ctx);
				nested_barrier = false;
			}

			/* Wrap the main part in an if-statement so disabled
			 * threads skip it entirely. */
			ctx->merged_wrap_if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
			ctx->merged_wrap_if_label = 11500;
			ac_build_ifcc(&ctx->ac, thread_enabled, ctx->merged_wrap_if_label);

			if (nested_barrier) {
				/* Execute a barrier before the second shader in
				 * a merged shader.
				 *
				 * Execute the barrier inside the conditional block,
				 * so that empty waves can jump directly to s_endpgm,
				 * which will also signal the barrier.
				 *
				 * This is possible in gfx9, because an empty wave
				 * for the second shader does not participate in
				 * the epilogue. With NGG, empty waves may still
				 * be required to export data (e.g. GS output vertices),
				 * so we cannot let them exit early.
				 *
				 * If the shader is TCS and the TCS epilog is present
				 * and contains a barrier, it will wait there and then
				 * reach s_endpgm.
				 */
				si_llvm_emit_barrier(ctx);
			}
		}
	}

	/* Alloca holding the accumulated kill mask for postponed kills
	 * (see si_llvm_emit_kill); true = don't kill. */
	if (sel->force_correct_derivs_after_kill) {
		ctx->postponed_kill = ac_build_alloca_undef(&ctx->ac, ctx->i1, "");
		/* true = don't kill. */
		LLVMBuildStore(ctx->ac.builder, ctx->i1true,
			       ctx->postponed_kill);
	}

	bool success = si_nir_build_llvm(ctx, nir);
	if (free_nir)
		ralloc_free(nir);
	if (!success) {
		fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
		return false;
	}

	si_llvm_build_ret(ctx, ctx->return_value);
	return true;
}
5092
5093 /**
5094 * Compute the VS prolog key, which contains all the information needed to
5095 * build the VS prolog function, and set shader->info bits where needed.
5096 *
5097 * \param info Shader info of the vertex shader.
5098 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
5099 * \param prolog_key Key of the VS prolog
5100 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
5101 * \param key Output shader part key.
5102 */
5103 static void si_get_vs_prolog_key(const struct tgsi_shader_info *info,
5104 unsigned num_input_sgprs,
5105 const struct si_vs_prolog_bits *prolog_key,
5106 struct si_shader *shader_out,
5107 union si_shader_part_key *key)
5108 {
5109 memset(key, 0, sizeof(*key));
5110 key->vs_prolog.states = *prolog_key;
5111 key->vs_prolog.num_input_sgprs = num_input_sgprs;
5112 key->vs_prolog.num_inputs = info->num_inputs;
5113 key->vs_prolog.as_ls = shader_out->key.as_ls;
5114 key->vs_prolog.as_es = shader_out->key.as_es;
5115 key->vs_prolog.as_ngg = shader_out->key.as_ngg;
5116
5117 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
5118 key->vs_prolog.as_ls = 1;
5119 key->vs_prolog.num_merged_next_stage_vgprs = 2;
5120 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
5121 key->vs_prolog.as_es = 1;
5122 key->vs_prolog.num_merged_next_stage_vgprs = 5;
5123 } else if (shader_out->key.as_ngg) {
5124 key->vs_prolog.num_merged_next_stage_vgprs = 5;
5125 }
5126
5127 /* Enable loading the InstanceID VGPR. */
5128 uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);
5129
5130 if ((key->vs_prolog.states.instance_divisor_is_one |
5131 key->vs_prolog.states.instance_divisor_is_fetched) & input_mask)
5132 shader_out->info.uses_instanceid = true;
5133 }
5134
5135 /**
5136 * Compute the PS prolog key, which contains all the information needed to
5137 * build the PS prolog function, and set related bits in shader->config.
5138 */
5139 static void si_get_ps_prolog_key(struct si_shader *shader,
5140 union si_shader_part_key *key,
5141 bool separate_prolog)
5142 {
5143 struct tgsi_shader_info *info = &shader->selector->info;
5144
5145 memset(key, 0, sizeof(*key));
5146 key->ps_prolog.states = shader->key.part.ps.prolog;
5147 key->ps_prolog.colors_read = info->colors_read;
5148 key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
5149 key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
5150 key->ps_prolog.wqm = info->uses_derivatives &&
5151 (key->ps_prolog.colors_read ||
5152 key->ps_prolog.states.force_persp_sample_interp ||
5153 key->ps_prolog.states.force_linear_sample_interp ||
5154 key->ps_prolog.states.force_persp_center_interp ||
5155 key->ps_prolog.states.force_linear_center_interp ||
5156 key->ps_prolog.states.bc_optimize_for_persp ||
5157 key->ps_prolog.states.bc_optimize_for_linear);
5158 key->ps_prolog.ancillary_vgpr_index = shader->info.ancillary_vgpr_index;
5159
5160 if (info->colors_read) {
5161 unsigned *color = shader->selector->color_attr_index;
5162
5163 if (shader->key.part.ps.prolog.color_two_side) {
5164 /* BCOLORs are stored after the last input. */
5165 key->ps_prolog.num_interp_inputs = info->num_inputs;
5166 key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
5167 if (separate_prolog)
5168 shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
5169 }
5170
5171 for (unsigned i = 0; i < 2; i++) {
5172 unsigned interp = info->input_interpolate[color[i]];
5173 unsigned location = info->input_interpolate_loc[color[i]];
5174
5175 if (!(info->colors_read & (0xf << i*4)))
5176 continue;
5177
5178 key->ps_prolog.color_attr_index[i] = color[i];
5179
5180 if (shader->key.part.ps.prolog.flatshade_colors &&
5181 interp == TGSI_INTERPOLATE_COLOR)
5182 interp = TGSI_INTERPOLATE_CONSTANT;
5183
5184 switch (interp) {
5185 case TGSI_INTERPOLATE_CONSTANT:
5186 key->ps_prolog.color_interp_vgpr_index[i] = -1;
5187 break;
5188 case TGSI_INTERPOLATE_PERSPECTIVE:
5189 case TGSI_INTERPOLATE_COLOR:
5190 /* Force the interpolation location for colors here. */
5191 if (shader->key.part.ps.prolog.force_persp_sample_interp)
5192 location = TGSI_INTERPOLATE_LOC_SAMPLE;
5193 if (shader->key.part.ps.prolog.force_persp_center_interp)
5194 location = TGSI_INTERPOLATE_LOC_CENTER;
5195
5196 switch (location) {
5197 case TGSI_INTERPOLATE_LOC_SAMPLE:
5198 key->ps_prolog.color_interp_vgpr_index[i] = 0;
5199 if (separate_prolog) {
5200 shader->config.spi_ps_input_ena |=
5201 S_0286CC_PERSP_SAMPLE_ENA(1);
5202 }
5203 break;
5204 case TGSI_INTERPOLATE_LOC_CENTER:
5205 key->ps_prolog.color_interp_vgpr_index[i] = 2;
5206 if (separate_prolog) {
5207 shader->config.spi_ps_input_ena |=
5208 S_0286CC_PERSP_CENTER_ENA(1);
5209 }
5210 break;
5211 case TGSI_INTERPOLATE_LOC_CENTROID:
5212 key->ps_prolog.color_interp_vgpr_index[i] = 4;
5213 if (separate_prolog) {
5214 shader->config.spi_ps_input_ena |=
5215 S_0286CC_PERSP_CENTROID_ENA(1);
5216 }
5217 break;
5218 default:
5219 assert(0);
5220 }
5221 break;
5222 case TGSI_INTERPOLATE_LINEAR:
5223 /* Force the interpolation location for colors here. */
5224 if (shader->key.part.ps.prolog.force_linear_sample_interp)
5225 location = TGSI_INTERPOLATE_LOC_SAMPLE;
5226 if (shader->key.part.ps.prolog.force_linear_center_interp)
5227 location = TGSI_INTERPOLATE_LOC_CENTER;
5228
5229 /* The VGPR assignment for non-monolithic shaders
5230 * works because InitialPSInputAddr is set on the
5231 * main shader and PERSP_PULL_MODEL is never used.
5232 */
5233 switch (location) {
5234 case TGSI_INTERPOLATE_LOC_SAMPLE:
5235 key->ps_prolog.color_interp_vgpr_index[i] =
5236 separate_prolog ? 6 : 9;
5237 if (separate_prolog) {
5238 shader->config.spi_ps_input_ena |=
5239 S_0286CC_LINEAR_SAMPLE_ENA(1);
5240 }
5241 break;
5242 case TGSI_INTERPOLATE_LOC_CENTER:
5243 key->ps_prolog.color_interp_vgpr_index[i] =
5244 separate_prolog ? 8 : 11;
5245 if (separate_prolog) {
5246 shader->config.spi_ps_input_ena |=
5247 S_0286CC_LINEAR_CENTER_ENA(1);
5248 }
5249 break;
5250 case TGSI_INTERPOLATE_LOC_CENTROID:
5251 key->ps_prolog.color_interp_vgpr_index[i] =
5252 separate_prolog ? 10 : 13;
5253 if (separate_prolog) {
5254 shader->config.spi_ps_input_ena |=
5255 S_0286CC_LINEAR_CENTROID_ENA(1);
5256 }
5257 break;
5258 default:
5259 assert(0);
5260 }
5261 break;
5262 default:
5263 assert(0);
5264 }
5265 }
5266 }
5267 }
5268
5269 /**
5270 * Check whether a PS prolog is required based on the key.
5271 */
5272 static bool si_need_ps_prolog(const union si_shader_part_key *key)
5273 {
5274 return key->ps_prolog.colors_read ||
5275 key->ps_prolog.states.force_persp_sample_interp ||
5276 key->ps_prolog.states.force_linear_sample_interp ||
5277 key->ps_prolog.states.force_persp_center_interp ||
5278 key->ps_prolog.states.force_linear_center_interp ||
5279 key->ps_prolog.states.bc_optimize_for_persp ||
5280 key->ps_prolog.states.bc_optimize_for_linear ||
5281 key->ps_prolog.states.poly_stipple ||
5282 key->ps_prolog.states.samplemask_log_ps_iter;
5283 }
5284
5285 /**
5286 * Compute the PS epilog key, which contains all the information needed to
5287 * build the PS epilog function.
5288 */
5289 static void si_get_ps_epilog_key(struct si_shader *shader,
5290 union si_shader_part_key *key)
5291 {
5292 struct tgsi_shader_info *info = &shader->selector->info;
5293 memset(key, 0, sizeof(*key));
5294 key->ps_epilog.colors_written = info->colors_written;
5295 key->ps_epilog.writes_z = info->writes_z;
5296 key->ps_epilog.writes_stencil = info->writes_stencil;
5297 key->ps_epilog.writes_samplemask = info->writes_samplemask;
5298 key->ps_epilog.states = shader->key.part.ps.epilog;
5299 }
5300
5301 /**
5302 * Build the GS prolog function. Rotate the input vertices for triangle strips
5303 * with adjacency.
5304 */
5305 static void si_build_gs_prolog_function(struct si_shader_context *ctx,
5306 union si_shader_part_key *key)
5307 {
5308 unsigned num_sgprs, num_vgprs;
5309 LLVMBuilderRef builder = ctx->ac.builder;
5310 LLVMTypeRef returns[48];
5311 LLVMValueRef func, ret;
5312
5313 memset(&ctx->args, 0, sizeof(ctx->args));
5314
5315 if (ctx->screen->info.chip_class >= GFX9) {
5316 if (key->gs_prolog.states.gfx9_prev_is_vs)
5317 num_sgprs = 8 + GFX9_VSGS_NUM_USER_SGPR;
5318 else
5319 num_sgprs = 8 + GFX9_TESGS_NUM_USER_SGPR;
5320 num_vgprs = 5; /* ES inputs are not needed by GS */
5321 } else {
5322 num_sgprs = GFX6_GS_NUM_USER_SGPR + 2;
5323 num_vgprs = 8;
5324 }
5325
5326 for (unsigned i = 0; i < num_sgprs; ++i) {
5327 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5328 returns[i] = ctx->i32;
5329 }
5330
5331 for (unsigned i = 0; i < num_vgprs; ++i) {
5332 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
5333 returns[num_sgprs + i] = ctx->f32;
5334 }
5335
5336 /* Create the function. */
5337 si_create_function(ctx, "gs_prolog", returns, num_sgprs + num_vgprs,
5338 0);
5339 func = ctx->main_fn;
5340
5341 /* Set the full EXEC mask for the prolog, because we are only fiddling
5342 * with registers here. The main shader part will set the correct EXEC
5343 * mask.
5344 */
5345 if (ctx->screen->info.chip_class >= GFX9 && !key->gs_prolog.is_monolithic)
5346 ac_init_exec_full_mask(&ctx->ac);
5347
5348 /* Copy inputs to outputs. This should be no-op, as the registers match,
5349 * but it will prevent the compiler from overwriting them unintentionally.
5350 */
5351 ret = ctx->return_value;
5352 for (unsigned i = 0; i < num_sgprs; i++) {
5353 LLVMValueRef p = LLVMGetParam(func, i);
5354 ret = LLVMBuildInsertValue(builder, ret, p, i, "");
5355 }
5356 for (unsigned i = 0; i < num_vgprs; i++) {
5357 LLVMValueRef p = LLVMGetParam(func, num_sgprs + i);
5358 p = ac_to_float(&ctx->ac, p);
5359 ret = LLVMBuildInsertValue(builder, ret, p, num_sgprs + i, "");
5360 }
5361
5362 if (key->gs_prolog.states.tri_strip_adj_fix) {
5363 /* Remap the input vertices for every other primitive. */
5364 const struct ac_arg gfx6_vtx_params[6] = {
5365 { .used = true, .arg_index = num_sgprs },
5366 { .used = true, .arg_index = num_sgprs + 1 },
5367 { .used = true, .arg_index = num_sgprs + 3 },
5368 { .used = true, .arg_index = num_sgprs + 4 },
5369 { .used = true, .arg_index = num_sgprs + 5 },
5370 { .used = true, .arg_index = num_sgprs + 6 },
5371 };
5372 const struct ac_arg gfx9_vtx_params[3] = {
5373 { .used = true, .arg_index = num_sgprs },
5374 { .used = true, .arg_index = num_sgprs + 1 },
5375 { .used = true, .arg_index = num_sgprs + 4 },
5376 };
5377 LLVMValueRef vtx_in[6], vtx_out[6];
5378 LLVMValueRef prim_id, rotate;
5379
5380 if (ctx->screen->info.chip_class >= GFX9) {
5381 for (unsigned i = 0; i < 3; i++) {
5382 vtx_in[i*2] = si_unpack_param(ctx, gfx9_vtx_params[i], 0, 16);
5383 vtx_in[i*2+1] = si_unpack_param(ctx, gfx9_vtx_params[i], 16, 16);
5384 }
5385 } else {
5386 for (unsigned i = 0; i < 6; i++)
5387 vtx_in[i] = ac_get_arg(&ctx->ac, gfx6_vtx_params[i]);
5388 }
5389
5390 prim_id = LLVMGetParam(func, num_sgprs + 2);
5391 rotate = LLVMBuildTrunc(builder, prim_id, ctx->i1, "");
5392
5393 for (unsigned i = 0; i < 6; ++i) {
5394 LLVMValueRef base, rotated;
5395 base = vtx_in[i];
5396 rotated = vtx_in[(i + 4) % 6];
5397 vtx_out[i] = LLVMBuildSelect(builder, rotate, rotated, base, "");
5398 }
5399
5400 if (ctx->screen->info.chip_class >= GFX9) {
5401 for (unsigned i = 0; i < 3; i++) {
5402 LLVMValueRef hi, out;
5403
5404 hi = LLVMBuildShl(builder, vtx_out[i*2+1],
5405 LLVMConstInt(ctx->i32, 16, 0), "");
5406 out = LLVMBuildOr(builder, vtx_out[i*2], hi, "");
5407 out = ac_to_float(&ctx->ac, out);
5408 ret = LLVMBuildInsertValue(builder, ret, out,
5409 gfx9_vtx_params[i].arg_index, "");
5410 }
5411 } else {
5412 for (unsigned i = 0; i < 6; i++) {
5413 LLVMValueRef out;
5414
5415 out = ac_to_float(&ctx->ac, vtx_out[i]);
5416 ret = LLVMBuildInsertValue(builder, ret, out,
5417 gfx6_vtx_params[i].arg_index, "");
5418 }
5419 }
5420 }
5421
5422 LLVMBuildRet(builder, ret);
5423 }
5424
5425 /**
5426 * Given a list of shader part functions, build a wrapper function that
5427 * runs them in sequence to form a monolithic shader.
5428 */
/* Build a wrapper function that inlines and chains the given shader parts
 * (prolog / main / epilog, or the two halves of a gfx9 merged shader) into
 * one monolithic LLVM function.
 *
 * The wrapper's parameters mirror the SGPR/VGPR inputs of the first part;
 * each part's return values are unpacked and fed as arguments into the next
 * part.
 *
 * \param parts                  array of LLVM functions to chain, in order
 * \param num_parts              number of entries in \p parts
 * \param main_part              index of the main shader part; its parameter
 *                               types (esp. pointer-ness) define the wrapper
 *                               signature
 * \param next_shader_first_part index of the first part of the second shader
 *                               in a merged shader, 0 otherwise
 */
static void si_build_wrapper_function(struct si_shader_context *ctx,
				      LLVMValueRef *parts,
				      unsigned num_parts,
				      unsigned main_part,
				      unsigned next_shader_first_part)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	/* PS epilog has one arg per color component; gfx9 merged shader
	 * prologs need to forward 32 user SGPRs.
	 */
	LLVMValueRef initial[64], out[64];
	LLVMTypeRef function_type;
	unsigned num_first_params;
	unsigned num_out, initial_num_out;
	ASSERTED unsigned num_out_sgpr; /* used in debug checks */
	ASSERTED unsigned initial_num_out_sgpr; /* used in debug checks */
	unsigned num_sgprs, num_vgprs;
	unsigned gprs;

	memset(&ctx->args, 0, sizeof(ctx->args));

	/* Force all parts to be inlined into the wrapper; private linkage
	 * lets LLVM discard the standalone part bodies afterwards. */
	for (unsigned i = 0; i < num_parts; ++i) {
		ac_add_function_attr(ctx->ac.context, parts[i], -1,
				     AC_FUNC_ATTR_ALWAYSINLINE);
		LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
	}

	/* The parameters of the wrapper function correspond to those of the
	 * first part in terms of SGPRs and VGPRs, but we use the types of the
	 * main part to get the right types. This is relevant for the
	 * dereferenceable attribute on descriptor table pointers.
	 */
	num_sgprs = 0;
	num_vgprs = 0;

	function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
	num_first_params = LLVMCountParamTypes(function_type);

	/* Count input SGPRs and VGPRs of the first part (SGPRs always come
	 * first, hence the assert below). */
	for (unsigned i = 0; i < num_first_params; ++i) {
		LLVMValueRef param = LLVMGetParam(parts[0], i);

		if (ac_is_sgpr_param(param)) {
			assert(num_vgprs == 0);
			num_sgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
		} else {
			num_vgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
		}
	}

	/* Declare wrapper arguments covering the same GPR budget, taking the
	 * types from the main part. */
	gprs = 0;
	while (gprs < num_sgprs + num_vgprs) {
		LLVMValueRef param = LLVMGetParam(parts[main_part], ctx->args.arg_count);
		LLVMTypeRef type = LLVMTypeOf(param);
		unsigned size = ac_get_type_size(type) / 4;

		/* This is going to get casted anyways, so we don't have to
		 * have the exact same type. But we do have to preserve the
		 * pointer-ness so that LLVM knows about it.
		 */
		enum ac_arg_type arg_type = AC_ARG_INT;
		if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) {
			arg_type = AC_ARG_CONST_PTR;
		}

		ac_add_arg(&ctx->args, gprs < num_sgprs ? AC_ARG_SGPR : AC_ARG_VGPR,
			   size, arg_type, NULL);

		assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
		/* An argument must not straddle the SGPR/VGPR boundary. */
		assert(gprs + size <= num_sgprs + num_vgprs &&
		       (gprs >= num_sgprs || gprs + size <= num_sgprs));

		gprs += size;
	}

	/* Prepare the return type. */
	unsigned num_returns = 0;
	LLVMTypeRef returns[32], last_func_type, return_type;

	/* The wrapper returns whatever the last part returns. */
	last_func_type = LLVMGetElementType(LLVMTypeOf(parts[num_parts - 1]));
	return_type = LLVMGetReturnType(last_func_type);

	switch (LLVMGetTypeKind(return_type)) {
	case LLVMStructTypeKind:
		num_returns = LLVMCountStructElementTypes(return_type);
		assert(num_returns <= ARRAY_SIZE(returns));
		LLVMGetStructElementTypes(return_type, returns);
		break;
	case LLVMVoidTypeKind:
		break;
	default:
		unreachable("unexpected type");
	}

	si_create_function(ctx, "wrapper", returns, num_returns,
			   si_get_max_workgroup_size(ctx->shader));

	if (is_merged_shader(ctx))
		ac_init_exec_full_mask(&ctx->ac);

	/* Record the arguments of the function as if they were an output of
	 * a previous part.
	 */
	num_out = 0;
	num_out_sgpr = 0;

	/* Flatten every wrapper argument into scalar i32/f32 values in out[],
	 * splitting vectors and converting pointers to integers. */
	for (unsigned i = 0; i < ctx->args.arg_count; ++i) {
		LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
		LLVMTypeRef param_type = LLVMTypeOf(param);
		LLVMTypeRef out_type = ctx->args.args[i].file == AC_ARG_SGPR ? ctx->i32 : ctx->f32;
		unsigned size = ac_get_type_size(param_type) / 4;

		if (size == 1) {
			if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
				param = LLVMBuildPtrToInt(builder, param, ctx->i32, "");
				param_type = ctx->i32;
			}

			if (param_type != out_type)
				param = LLVMBuildBitCast(builder, param, out_type, "");
			out[num_out++] = param;
		} else {
			LLVMTypeRef vector_type = LLVMVectorType(out_type, size);

			if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
				param = LLVMBuildPtrToInt(builder, param, ctx->i64, "");
				param_type = ctx->i64;
			}

			if (param_type != vector_type)
				param = LLVMBuildBitCast(builder, param, vector_type, "");

			for (unsigned j = 0; j < size; ++j)
				out[num_out++] = LLVMBuildExtractElement(
					builder, param, LLVMConstInt(ctx->i32, j, 0), "");
		}

		if (ctx->args.args[i].file == AC_ARG_SGPR)
			num_out_sgpr = num_out;
	}

	/* Save a copy of the flattened inputs; the second half of a merged
	 * shader restarts from these instead of the previous part's return. */
	memcpy(initial, out, sizeof(out));
	initial_num_out = num_out;
	initial_num_out_sgpr = num_out_sgpr;

	/* Now chain the parts. */
	LLVMValueRef ret = NULL;
	for (unsigned part = 0; part < num_parts; ++part) {
		LLVMValueRef in[48];
		LLVMTypeRef ret_type;
		unsigned out_idx = 0;
		unsigned num_params = LLVMCountParams(parts[part]);

		/* Merged shaders are executed conditionally depending
		 * on the number of enabled threads passed in the input SGPRs. */
		if (is_multi_part_shader(ctx) && part == 0) {
			LLVMValueRef ena, count = initial[3];

			/* Thread count is in the low 7 bits of SGPR 3
			 * (merged wave info). */
			count = LLVMBuildAnd(builder, count,
					     LLVMConstInt(ctx->i32, 0x7f, 0), "");
			ena = LLVMBuildICmp(builder, LLVMIntULT,
					    ac_get_thread_id(&ctx->ac), count, "");
			ac_build_ifcc(&ctx->ac, ena, 6506);
		}

		/* Derive arguments for the next part from outputs of the
		 * previous one.
		 */
		for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
			LLVMValueRef param;
			LLVMTypeRef param_type;
			bool is_sgpr;
			unsigned param_size;
			LLVMValueRef arg = NULL;

			param = LLVMGetParam(parts[part], param_idx);
			param_type = LLVMTypeOf(param);
			param_size = ac_get_type_size(param_type) / 4;
			is_sgpr = ac_is_sgpr_param(param);

			if (is_sgpr) {
				ac_add_function_attr(ctx->ac.context, parts[part],
						     param_idx + 1, AC_FUNC_ATTR_INREG);
			} else if (out_idx < num_out_sgpr) {
				/* Skip returned SGPRs the current part doesn't
				 * declare on the input. */
				out_idx = num_out_sgpr;
			}

			assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));

			/* Re-pack the flattened scalars into the shape the
			 * next part expects. */
			if (param_size == 1)
				arg = out[out_idx];
			else
				arg = ac_build_gather_values(&ctx->ac, &out[out_idx], param_size);

			if (LLVMTypeOf(arg) != param_type) {
				if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
					/* 32-bit pointers were flattened to one
					 * i32, all others to an i64. */
					if (LLVMGetPointerAddressSpace(param_type) ==
					    AC_ADDR_SPACE_CONST_32BIT) {
						arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");
						arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
					} else {
						arg = LLVMBuildBitCast(builder, arg, ctx->i64, "");
						arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
					}
				} else {
					arg = LLVMBuildBitCast(builder, arg, param_type, "");
				}
			}

			in[param_idx] = arg;
			out_idx += param_size;
		}

		ret = ac_build_call(&ctx->ac, parts[part], in, num_params);

		if (is_multi_part_shader(ctx) &&
		    part + 1 == next_shader_first_part) {
			ac_build_endif(&ctx->ac, 6506);

			/* The second half of the merged shader should use
			 * the inputs from the toplevel (wrapper) function,
			 * not the return value from the last call.
			 *
			 * That's because the last call was executed condi-
			 * tionally, so we can't consume it in the main
			 * block.
			 */
			memcpy(out, initial, sizeof(initial));
			num_out = initial_num_out;
			num_out_sgpr = initial_num_out_sgpr;
			continue;
		}

		/* Extract the returned GPRs. */
		ret_type = LLVMTypeOf(ret);
		num_out = 0;
		num_out_sgpr = 0;

		if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
			assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);

			unsigned ret_size = LLVMCountStructElementTypes(ret_type);

			for (unsigned i = 0; i < ret_size; ++i) {
				LLVMValueRef val =
					LLVMBuildExtractValue(builder, ret, i, "");

				assert(num_out < ARRAY_SIZE(out));
				out[num_out++] = val;

				/* i32 elements are SGPR returns; they must all
				 * precede the (float) VGPR returns. */
				if (LLVMTypeOf(val) == ctx->i32) {
					assert(num_out_sgpr + 1 == num_out);
					num_out_sgpr = num_out;
				}
			}
		}
	}

	/* Return the value from the last part. */
	if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
		LLVMBuildRetVoid(builder);
	else
		LLVMBuildRet(builder, ret);
}
5694
5695 static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
5696 struct si_shader_selector *sel)
5697 {
5698 if (!compiler->low_opt_passes)
5699 return false;
5700
5701 /* Assume a slow CPU. */
5702 assert(!sel->screen->info.has_dedicated_vram &&
5703 sel->screen->info.chip_class <= GFX8);
5704
5705 /* For a crazy dEQP test containing 2597 memory opcodes, mostly
5706 * buffer stores. */
5707 return sel->type == PIPE_SHADER_COMPUTE &&
5708 sel->info.num_memory_instructions > 1000;
5709 }
5710
5711 static struct nir_shader *get_nir_shader(struct si_shader_selector *sel,
5712 bool *free_nir)
5713 {
5714 *free_nir = false;
5715
5716 if (sel->nir) {
5717 return sel->nir;
5718 } else if (sel->nir_binary) {
5719 struct pipe_screen *screen = &sel->screen->b;
5720 const void *options =
5721 screen->get_compiler_options(screen, PIPE_SHADER_IR_NIR,
5722 sel->type);
5723
5724 struct blob_reader blob_reader;
5725 blob_reader_init(&blob_reader, sel->nir_binary, sel->nir_size);
5726 *free_nir = true;
5727 return nir_deserialize(NULL, options, &blob_reader);
5728 }
5729 return NULL;
5730 }
5731
/* Compile one shader variant to hardware bytecode.
 *
 * For monolithic shaders, this also builds the required prologs/epilogs
 * (and, on gfx9+, the main part of the previous merged stage) and glues
 * everything together with si_build_wrapper_function().
 *
 * \return 0 on success, negative on failure.
 */
int si_compile_shader(struct si_screen *sscreen,
		      struct ac_llvm_compiler *compiler,
		      struct si_shader *shader,
		      struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader_context ctx;
	bool free_nir;
	struct nir_shader *nir = get_nir_shader(sel, &free_nir);
	int r = -1;

	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
	 * conversion fails. */
	if (si_can_dump_shader(sscreen, sel->type) &&
	    !(sscreen->debug_flags & DBG(NO_TGSI))) {
		nir_print_shader(nir, stderr);
		si_dump_streamout(&sel->so);
	}

	si_llvm_context_init(&ctx, sscreen, compiler, si_get_shader_wave_size(shader), 64);
	si_llvm_context_set_ir(&ctx, shader);

	memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
	       sizeof(shader->info.vs_output_param_offset));

	shader->info.uses_instanceid = sel->info.uses_instanceid;

	if (!si_compile_tgsi_main(&ctx, nir, free_nir)) {
		si_llvm_dispose(&ctx);
		return -1;
	}

	if (shader->is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
		/* Monolithic VS: optional prolog + main part. */
		LLVMValueRef parts[2];
		bool need_prolog = si_vs_needs_prolog(sel, &shader->key.part.vs.prolog);

		parts[1] = ctx.main_fn;

		if (need_prolog) {
			union si_shader_part_key prolog_key;
			si_get_vs_prolog_key(&sel->info,
					     shader->info.num_input_sgprs,
					     &shader->key.part.vs.prolog,
					     shader, &prolog_key);
			prolog_key.vs_prolog.is_monolithic = true;
			si_build_vs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_build_wrapper_function(&ctx, parts + !need_prolog,
					  1 + need_prolog, need_prolog, 0);

		if (ctx.shader->key.opt.vs_as_prim_discard_cs)
			si_build_prim_discard_compute_shader(&ctx);
	} else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
		if (sscreen->info.chip_class >= GFX9) {
			/* gfx9 merged LS-HS: [VS prolog,] VS-as-LS main,
			 * TCS main, TCS epilog. */
			struct si_shader_selector *ls = shader->key.part.tcs.ls;
			LLVMValueRef parts[4];
			bool vs_needs_prolog =
				si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog);

			/* TCS main part */
			parts[2] = ctx.main_fn;

			/* TCS epilog */
			union si_shader_part_key tcs_epilog_key;
			memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
			tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &tcs_epilog_key);
			parts[3] = ctx.main_fn;

			/* VS as LS main part */
			nir = get_nir_shader(ls, &free_nir);
			struct si_shader shader_ls = {};
			shader_ls.selector = ls;
			shader_ls.key.as_ls = 1;
			shader_ls.key.mono = shader->key.mono;
			shader_ls.key.opt = shader->key.opt;
			shader_ls.is_monolithic = true;
			si_llvm_context_set_ir(&ctx, &shader_ls);

			if (!si_compile_tgsi_main(&ctx, nir, free_nir)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= ls->info.uses_instanceid;
			parts[1] = ctx.main_fn;

			/* LS prolog */
			if (vs_needs_prolog) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&ls->info,
						     shader_ls.info.num_input_sgprs,
						     &shader->key.part.tcs.ls_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				parts[0] = ctx.main_fn;
			}

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_TESS_CTRL;

			/* The TCS half starts at index 2 (or 1 without the
			 * VS prolog); the wrapper re-reads its inputs there. */
			si_build_wrapper_function(&ctx,
						  parts + !vs_needs_prolog,
						  4 - !vs_needs_prolog, vs_needs_prolog,
						  vs_needs_prolog ? 2 : 1);
		} else {
			/* Pre-gfx9 TCS: main part + epilog. */
			LLVMValueRef parts[2];
			union si_shader_part_key epilog_key;

			parts[0] = ctx.main_fn;

			memset(&epilog_key, 0, sizeof(epilog_key));
			epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
			si_build_tcs_epilog_function(&ctx, &epilog_key);
			parts[1] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 0, 0);
		}
	} else if (shader->is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
		if (ctx.screen->info.chip_class >= GFX9) {
			/* gfx9 merged ES-GS: [ES prolog,] ES main, GS prolog,
			 * GS main. */
			struct si_shader_selector *es = shader->key.part.gs.es;
			LLVMValueRef es_prolog = NULL;
			LLVMValueRef es_main = NULL;
			LLVMValueRef gs_prolog = NULL;
			LLVMValueRef gs_main = ctx.main_fn;

			/* GS prolog */
			union si_shader_part_key gs_prolog_key;
			memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
			gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			gs_prolog_key.gs_prolog.is_monolithic = true;
			gs_prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
			si_build_gs_prolog_function(&ctx, &gs_prolog_key);
			gs_prolog = ctx.main_fn;

			/* ES main part */
			nir = get_nir_shader(es, &free_nir);
			struct si_shader shader_es = {};
			shader_es.selector = es;
			shader_es.key.as_es = 1;
			shader_es.key.as_ngg = shader->key.as_ngg;
			shader_es.key.mono = shader->key.mono;
			shader_es.key.opt = shader->key.opt;
			shader_es.is_monolithic = true;
			si_llvm_context_set_ir(&ctx, &shader_es);

			if (!si_compile_tgsi_main(&ctx, nir, free_nir)) {
				si_llvm_dispose(&ctx);
				return -1;
			}
			shader->info.uses_instanceid |= es->info.uses_instanceid;
			es_main = ctx.main_fn;

			/* ES prolog */
			if (es->type == PIPE_SHADER_VERTEX &&
			    si_vs_needs_prolog(es, &shader->key.part.gs.vs_prolog)) {
				union si_shader_part_key vs_prolog_key;
				si_get_vs_prolog_key(&es->info,
						     shader_es.info.num_input_sgprs,
						     &shader->key.part.gs.vs_prolog,
						     shader, &vs_prolog_key);
				vs_prolog_key.vs_prolog.is_monolithic = true;
				si_build_vs_prolog_function(&ctx, &vs_prolog_key);
				es_prolog = ctx.main_fn;
			}

			/* Reset the shader context. */
			ctx.shader = shader;
			ctx.type = PIPE_SHADER_GEOMETRY;

			/* Prepare the array of shader parts. */
			LLVMValueRef parts[4];
			unsigned num_parts = 0, main_part, next_first_part;

			if (es_prolog)
				parts[num_parts++] = es_prolog;

			parts[main_part = num_parts++] = es_main;
			parts[next_first_part = num_parts++] = gs_prolog;
			parts[num_parts++] = gs_main;

			si_build_wrapper_function(&ctx, parts, num_parts,
						  main_part, next_first_part);
		} else {
			/* Pre-gfx9 GS: prolog + main part. */
			LLVMValueRef parts[2];
			union si_shader_part_key prolog_key;

			parts[1] = ctx.main_fn;

			memset(&prolog_key, 0, sizeof(prolog_key));
			prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
			si_build_gs_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;

			si_build_wrapper_function(&ctx, parts, 2, 1, 0);
		}
	} else if (shader->is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
		/* Monolithic PS: [prolog,] main part, epilog. */
		LLVMValueRef parts[3];
		union si_shader_part_key prolog_key;
		union si_shader_part_key epilog_key;
		bool need_prolog;

		si_get_ps_prolog_key(shader, &prolog_key, false);
		need_prolog = si_need_ps_prolog(&prolog_key);

		parts[need_prolog ? 1 : 0] = ctx.main_fn;

		if (need_prolog) {
			si_build_ps_prolog_function(&ctx, &prolog_key);
			parts[0] = ctx.main_fn;
		}

		si_get_ps_epilog_key(shader, &epilog_key);
		si_build_ps_epilog_function(&ctx, &epilog_key);
		parts[need_prolog ? 2 : 1] = ctx.main_fn;

		si_build_wrapper_function(&ctx, parts, need_prolog ? 3 : 2,
					  need_prolog ? 1 : 0, 0);
	}

	si_llvm_optimize_module(&ctx);

	/* Post-optimization transformations and analysis. */
	si_optimize_vs_outputs(&ctx);

	if ((debug && debug->debug_message) ||
	    si_can_dump_shader(sscreen, ctx.type)) {
		ctx.shader->info.private_mem_vgprs =
			ac_count_scratch_private_memory(ctx.main_fn);
	}

	/* Make sure the input is a pointer and not integer followed by inttoptr. */
	assert(LLVMGetTypeKind(LLVMTypeOf(LLVMGetParam(ctx.main_fn, 0))) ==
	       LLVMPointerTypeKind);

	/* Compile to bytecode. */
	r = si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler,
			    ctx.ac.module, debug, ctx.type, ctx.ac.wave_size,
			    si_get_shader_name(shader),
			    si_should_optimize_less(compiler, shader->selector));
	si_llvm_dispose(&ctx);
	if (r) {
		fprintf(stderr, "LLVM failed to compile shader\n");
		return r;
	}

	/* Validate SGPR and VGPR usage for compute to detect compiler bugs.
	 * LLVM 3.9svn has this bug.
	 */
	if (sel->type == PIPE_SHADER_COMPUTE) {
		unsigned wave_size = sscreen->compute_wave_size;
		unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd *
				     (wave_size == 32 ? 2 : 1);
		unsigned max_sgprs = sscreen->info.num_physical_sgprs_per_simd;
		unsigned max_sgprs_per_wave = 128;
		unsigned simds_per_tg = 4; /* assuming WGP mode on gfx10 */
		unsigned threads_per_tg = si_get_max_workgroup_size(shader);
		unsigned waves_per_tg = DIV_ROUND_UP(threads_per_tg, wave_size);
		unsigned waves_per_simd = DIV_ROUND_UP(waves_per_tg, simds_per_tg);

		max_vgprs = max_vgprs / waves_per_simd;
		max_sgprs = MIN2(max_sgprs / waves_per_simd, max_sgprs_per_wave);

		if (shader->config.num_sgprs > max_sgprs ||
		    shader->config.num_vgprs > max_vgprs) {
			fprintf(stderr, "LLVM failed to compile a shader correctly: "
				"SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
				shader->config.num_sgprs, shader->config.num_vgprs,
				max_sgprs, max_vgprs);

			/* Just terminate the process, because dependent
			 * shaders can hang due to bad input data, but use
			 * the env var to allow shader-db to work.
			 */
			if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
				abort();
		}
	}

	/* Add the scratch offset to input SGPRs. */
	if (shader->config.scratch_bytes_per_wave && !is_merged_shader(&ctx))
		shader->info.num_input_sgprs += 1; /* scratch byte offset */

	/* Calculate the number of fragment input VGPRs. */
	if (ctx.type == PIPE_SHADER_FRAGMENT) {
		shader->info.num_input_vgprs = ac_get_fs_input_vgpr_cnt(&shader->config,
								&shader->info.face_vgpr_index,
								&shader->info.ancillary_vgpr_index);
	}

	si_calculate_max_simd_waves(shader);
	si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
	return 0;
}
6029
6030 /**
6031 * Create, compile and return a shader part (prolog or epilog).
6032 *
6033 * \param sscreen screen
6034 * \param list list of shader parts of the same category
6035 * \param type shader type
6036 * \param key shader part key
6037 * \param prolog whether the part being requested is a prolog
 * \param compiler LLVM compiler (replaces the old target-machine parameter)
6039 * \param debug debug callback
6040 * \param build the callback responsible for building the main function
6041 * \return non-NULL on success
6042 */
6043 static struct si_shader_part *
6044 si_get_shader_part(struct si_screen *sscreen,
6045 struct si_shader_part **list,
6046 enum pipe_shader_type type,
6047 bool prolog,
6048 union si_shader_part_key *key,
6049 struct ac_llvm_compiler *compiler,
6050 struct pipe_debug_callback *debug,
6051 void (*build)(struct si_shader_context *,
6052 union si_shader_part_key *),
6053 const char *name)
6054 {
6055 struct si_shader_part *result;
6056
6057 simple_mtx_lock(&sscreen->shader_parts_mutex);
6058
6059 /* Find existing. */
6060 for (result = *list; result; result = result->next) {
6061 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
6062 simple_mtx_unlock(&sscreen->shader_parts_mutex);
6063 return result;
6064 }
6065 }
6066
6067 /* Compile a new one. */
6068 result = CALLOC_STRUCT(si_shader_part);
6069 result->key = *key;
6070
6071 struct si_shader shader = {};
6072
6073 switch (type) {
6074 case PIPE_SHADER_VERTEX:
6075 shader.key.as_ls = key->vs_prolog.as_ls;
6076 shader.key.as_es = key->vs_prolog.as_es;
6077 shader.key.as_ngg = key->vs_prolog.as_ngg;
6078 break;
6079 case PIPE_SHADER_TESS_CTRL:
6080 assert(!prolog);
6081 shader.key.part.tcs.epilog = key->tcs_epilog.states;
6082 break;
6083 case PIPE_SHADER_GEOMETRY:
6084 assert(prolog);
6085 shader.key.as_ngg = key->gs_prolog.as_ngg;
6086 break;
6087 case PIPE_SHADER_FRAGMENT:
6088 if (prolog)
6089 shader.key.part.ps.prolog = key->ps_prolog.states;
6090 else
6091 shader.key.part.ps.epilog = key->ps_epilog.states;
6092 break;
6093 default:
6094 unreachable("bad shader part");
6095 }
6096
6097 struct si_shader_context ctx;
6098 si_llvm_context_init(&ctx, sscreen, compiler,
6099 si_get_wave_size(sscreen, type, shader.key.as_ngg,
6100 shader.key.as_es),
6101 64);
6102 ctx.shader = &shader;
6103 ctx.type = type;
6104
6105 build(&ctx, key);
6106
6107 /* Compile. */
6108 si_llvm_optimize_module(&ctx);
6109
6110 if (si_compile_llvm(sscreen, &result->binary, &result->config, compiler,
6111 ctx.ac.module, debug, ctx.type, ctx.ac.wave_size,
6112 name, false)) {
6113 FREE(result);
6114 result = NULL;
6115 goto out;
6116 }
6117
6118 result->next = *list;
6119 *list = result;
6120
6121 out:
6122 si_llvm_dispose(&ctx);
6123 simple_mtx_unlock(&sscreen->shader_parts_mutex);
6124 return result;
6125 }
6126
6127 static LLVMValueRef si_prolog_get_rw_buffers(struct si_shader_context *ctx)
6128 {
6129 LLVMValueRef ptr[2], list;
6130 bool merged_shader = is_merged_shader(ctx);
6131
6132 ptr[0] = LLVMGetParam(ctx->main_fn, (merged_shader ? 8 : 0) + SI_SGPR_RW_BUFFERS);
6133 list = LLVMBuildIntToPtr(ctx->ac.builder, ptr[0],
6134 ac_array_in_const32_addr_space(ctx->v4i32), "");
6135 return list;
6136 }
6137
6138 /**
6139 * Build the vertex shader prolog function.
6140 *
6141 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
6142 * All inputs are returned unmodified. The vertex load indices are
6143 * stored after them, which will be used by the API VS for fetching inputs.
6144 *
6145 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
6146 * input_v0,
6147 * input_v1,
6148 * input_v2,
6149 * input_v3,
6150 * (VertexID + BaseVertex),
6151 * (InstanceID + StartInstance),
6152 * (InstanceID / 2 + StartInstance)
6153 */
6154 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
6155 union si_shader_part_key *key)
6156 {
6157 LLVMTypeRef *returns;
6158 LLVMValueRef ret, func;
6159 int num_returns, i;
6160 unsigned first_vs_vgpr = key->vs_prolog.num_merged_next_stage_vgprs;
6161 unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4;
6162 struct ac_arg input_sgpr_param[key->vs_prolog.num_input_sgprs];
6163 struct ac_arg input_vgpr_param[9];
6164 LLVMValueRef input_vgprs[9];
6165 unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs +
6166 num_input_vgprs;
6167 unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;
6168
6169 memset(&ctx->args, 0, sizeof(ctx->args));
6170
6171 /* 4 preloaded VGPRs + vertex load indices as prolog outputs */
6172 returns = alloca((num_all_input_regs + key->vs_prolog.num_inputs) *
6173 sizeof(LLVMTypeRef));
6174 num_returns = 0;
6175
6176 /* Declare input and output SGPRs. */
6177 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
6178 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
6179 &input_sgpr_param[i]);
6180 returns[num_returns++] = ctx->i32;
6181 }
6182
6183 struct ac_arg merged_wave_info = input_sgpr_param[3];
6184
6185 /* Preloaded VGPRs (outputs must be floats) */
6186 for (i = 0; i < num_input_vgprs; i++) {
6187 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &input_vgpr_param[i]);
6188 returns[num_returns++] = ctx->f32;
6189 }
6190
6191 /* Vertex load indices. */
6192 for (i = 0; i < key->vs_prolog.num_inputs; i++)
6193 returns[num_returns++] = ctx->f32;
6194
6195 /* Create the function. */
6196 si_create_function(ctx, "vs_prolog", returns, num_returns, 0);
6197 func = ctx->main_fn;
6198
6199 for (i = 0; i < num_input_vgprs; i++) {
6200 input_vgprs[i] = ac_get_arg(&ctx->ac, input_vgpr_param[i]);
6201 }
6202
6203 if (key->vs_prolog.num_merged_next_stage_vgprs) {
6204 if (!key->vs_prolog.is_monolithic)
6205 si_init_exec_from_input(ctx, merged_wave_info, 0);
6206
6207 if (key->vs_prolog.as_ls &&
6208 ctx->screen->info.has_ls_vgpr_init_bug) {
6209 /* If there are no HS threads, SPI loads the LS VGPRs
6210 * starting at VGPR 0. Shift them back to where they
6211 * belong.
6212 */
6213 LLVMValueRef has_hs_threads =
6214 LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
6215 si_unpack_param(ctx, input_sgpr_param[3], 8, 8),
6216 ctx->i32_0, "");
6217
6218 for (i = 4; i > 0; --i) {
6219 input_vgprs[i + 1] =
6220 LLVMBuildSelect(ctx->ac.builder, has_hs_threads,
6221 input_vgprs[i + 1],
6222 input_vgprs[i - 1], "");
6223 }
6224 }
6225 }
6226
6227 unsigned vertex_id_vgpr = first_vs_vgpr;
6228 unsigned instance_id_vgpr =
6229 ctx->screen->info.chip_class >= GFX10 ?
6230 first_vs_vgpr + 3 :
6231 first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1);
6232
6233 ctx->abi.vertex_id = input_vgprs[vertex_id_vgpr];
6234 ctx->abi.instance_id = input_vgprs[instance_id_vgpr];
6235
6236 /* InstanceID = VertexID >> 16;
6237 * VertexID = VertexID & 0xffff;
6238 */
6239 if (key->vs_prolog.states.unpack_instance_id_from_vertex_id) {
6240 ctx->abi.instance_id = LLVMBuildLShr(ctx->ac.builder, ctx->abi.vertex_id,
6241 LLVMConstInt(ctx->i32, 16, 0), "");
6242 ctx->abi.vertex_id = LLVMBuildAnd(ctx->ac.builder, ctx->abi.vertex_id,
6243 LLVMConstInt(ctx->i32, 0xffff, 0), "");
6244 }
6245
6246 /* Copy inputs to outputs. This should be no-op, as the registers match,
6247 * but it will prevent the compiler from overwriting them unintentionally.
6248 */
6249 ret = ctx->return_value;
6250 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
6251 LLVMValueRef p = LLVMGetParam(func, i);
6252 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
6253 }
6254 for (i = 0; i < num_input_vgprs; i++) {
6255 LLVMValueRef p = input_vgprs[i];
6256
6257 if (i == vertex_id_vgpr)
6258 p = ctx->abi.vertex_id;
6259 else if (i == instance_id_vgpr)
6260 p = ctx->abi.instance_id;
6261
6262 p = ac_to_float(&ctx->ac, p);
6263 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p,
6264 key->vs_prolog.num_input_sgprs + i, "");
6265 }
6266
6267 /* Compute vertex load indices from instance divisors. */
6268 LLVMValueRef instance_divisor_constbuf = NULL;
6269
6270 if (key->vs_prolog.states.instance_divisor_is_fetched) {
6271 LLVMValueRef list = si_prolog_get_rw_buffers(ctx);
6272 LLVMValueRef buf_index =
6273 LLVMConstInt(ctx->i32, SI_VS_CONST_INSTANCE_DIVISORS, 0);
6274 instance_divisor_constbuf =
6275 ac_build_load_to_sgpr(&ctx->ac, list, buf_index);
6276 }
6277
6278 for (i = 0; i < key->vs_prolog.num_inputs; i++) {
6279 bool divisor_is_one =
6280 key->vs_prolog.states.instance_divisor_is_one & (1u << i);
6281 bool divisor_is_fetched =
6282 key->vs_prolog.states.instance_divisor_is_fetched & (1u << i);
6283 LLVMValueRef index = NULL;
6284
6285 if (divisor_is_one) {
6286 index = ctx->abi.instance_id;
6287 } else if (divisor_is_fetched) {
6288 LLVMValueRef udiv_factors[4];
6289
6290 for (unsigned j = 0; j < 4; j++) {
6291 udiv_factors[j] =
6292 buffer_load_const(ctx, instance_divisor_constbuf,
6293 LLVMConstInt(ctx->i32, i*16 + j*4, 0));
6294 udiv_factors[j] = ac_to_integer(&ctx->ac, udiv_factors[j]);
6295 }
6296 /* The faster NUW version doesn't work when InstanceID == UINT_MAX.
6297 * Such InstanceID might not be achievable in a reasonable time though.
6298 */
6299 index = ac_build_fast_udiv_nuw(&ctx->ac, ctx->abi.instance_id,
6300 udiv_factors[0], udiv_factors[1],
6301 udiv_factors[2], udiv_factors[3]);
6302 }
6303
6304 if (divisor_is_one || divisor_is_fetched) {
6305 /* Add StartInstance. */
6306 index = LLVMBuildAdd(ctx->ac.builder, index,
6307 LLVMGetParam(ctx->main_fn, user_sgpr_base +
6308 SI_SGPR_START_INSTANCE), "");
6309 } else {
6310 /* VertexID + BaseVertex */
6311 index = LLVMBuildAdd(ctx->ac.builder,
6312 ctx->abi.vertex_id,
6313 LLVMGetParam(func, user_sgpr_base +
6314 SI_SGPR_BASE_VERTEX), "");
6315 }
6316
6317 index = ac_to_float(&ctx->ac, index);
6318 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, index,
6319 ctx->args.arg_count + i, "");
6320 }
6321
6322 si_llvm_build_ret(ctx, ret);
6323 }
6324
6325 static bool si_get_vs_prolog(struct si_screen *sscreen,
6326 struct ac_llvm_compiler *compiler,
6327 struct si_shader *shader,
6328 struct pipe_debug_callback *debug,
6329 struct si_shader *main_part,
6330 const struct si_vs_prolog_bits *key)
6331 {
6332 struct si_shader_selector *vs = main_part->selector;
6333
6334 if (!si_vs_needs_prolog(vs, key))
6335 return true;
6336
6337 /* Get the prolog. */
6338 union si_shader_part_key prolog_key;
6339 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
6340 key, shader, &prolog_key);
6341
6342 shader->prolog =
6343 si_get_shader_part(sscreen, &sscreen->vs_prologs,
6344 PIPE_SHADER_VERTEX, true, &prolog_key, compiler,
6345 debug, si_build_vs_prolog_function,
6346 "Vertex Shader Prolog");
6347 return shader->prolog != NULL;
6348 }
6349
6350 /**
6351 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
6352 */
6353 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
6354 struct ac_llvm_compiler *compiler,
6355 struct si_shader *shader,
6356 struct pipe_debug_callback *debug)
6357 {
6358 return si_get_vs_prolog(sscreen, compiler, shader, debug, shader,
6359 &shader->key.part.vs.prolog);
6360 }
6361
6362 /**
6363 * Compile the TCS epilog function. This writes tesselation factors to memory
6364 * based on the output primitive type of the tesselator (determined by TES).
6365 */
6366 static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
6367 union si_shader_part_key *key)
6368 {
6369 memset(&ctx->args, 0, sizeof(ctx->args));
6370
6371 if (ctx->screen->info.chip_class >= GFX9) {
6372 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6373 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6374 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
6375 &ctx->tcs_offchip_offset);
6376 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* wave info */
6377 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
6378 &ctx->tcs_factor_offset);
6379 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6380 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6381 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6382 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6383 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6384 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6385 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6386 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6387 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6388 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6389 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6390 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
6391 &ctx->tcs_offchip_layout);
6392 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6393 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
6394 &ctx->tcs_out_lds_layout);
6395 } else {
6396 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6397 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6398 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6399 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6400 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
6401 &ctx->tcs_offchip_layout);
6402 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6403 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
6404 &ctx->tcs_out_lds_layout);
6405 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
6406 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
6407 &ctx->tcs_offchip_offset);
6408 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
6409 &ctx->tcs_factor_offset);
6410 }
6411
6412 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* VGPR gap */
6413 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* VGPR gap */
6414 struct ac_arg rel_patch_id; /* patch index within the wave (REL_PATCH_ID) */
6415 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &rel_patch_id);
6416 struct ac_arg invocation_id; /* invocation ID within the patch */
6417 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &invocation_id);
6418 struct ac_arg tcs_out_current_patch_data_offset; /* LDS offset where tess factors should be loaded from */
6419 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
6420 &tcs_out_current_patch_data_offset);
6421
6422 struct ac_arg tess_factors[6];
6423 for (unsigned i = 0; i < 6; i++)
6424 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &tess_factors[i]);
6425
6426 /* Create the function. */
6427 si_create_function(ctx, "tcs_epilog", NULL, 0,
6428 ctx->screen->info.chip_class >= GFX7 ? 128 : 0);
6429 ac_declare_lds_as_pointer(&ctx->ac);
6430
6431 LLVMValueRef invoc0_tess_factors[6];
6432 for (unsigned i = 0; i < 6; i++)
6433 invoc0_tess_factors[i] = ac_get_arg(&ctx->ac, tess_factors[i]);
6434
6435 si_write_tess_factors(ctx,
6436 ac_get_arg(&ctx->ac, rel_patch_id),
6437 ac_get_arg(&ctx->ac, invocation_id),
6438 ac_get_arg(&ctx->ac, tcs_out_current_patch_data_offset),
6439 invoc0_tess_factors, invoc0_tess_factors + 4);
6440
6441 LLVMBuildRetVoid(ctx->ac.builder);
6442 }
6443
6444 /**
6445 * Select and compile (or reuse) TCS parts (epilog).
6446 */
6447 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
6448 struct ac_llvm_compiler *compiler,
6449 struct si_shader *shader,
6450 struct pipe_debug_callback *debug)
6451 {
6452 if (sscreen->info.chip_class >= GFX9) {
6453 struct si_shader *ls_main_part =
6454 shader->key.part.tcs.ls->main_shader_part_ls;
6455
6456 if (!si_get_vs_prolog(sscreen, compiler, shader, debug, ls_main_part,
6457 &shader->key.part.tcs.ls_prolog))
6458 return false;
6459
6460 shader->previous_stage = ls_main_part;
6461 }
6462
6463 /* Get the epilog. */
6464 union si_shader_part_key epilog_key;
6465 memset(&epilog_key, 0, sizeof(epilog_key));
6466 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
6467
6468 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
6469 PIPE_SHADER_TESS_CTRL, false,
6470 &epilog_key, compiler, debug,
6471 si_build_tcs_epilog_function,
6472 "Tessellation Control Shader Epilog");
6473 return shader->epilog != NULL;
6474 }
6475
6476 /**
6477 * Select and compile (or reuse) GS parts (prolog).
6478 */
6479 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
6480 struct ac_llvm_compiler *compiler,
6481 struct si_shader *shader,
6482 struct pipe_debug_callback *debug)
6483 {
6484 if (sscreen->info.chip_class >= GFX9) {
6485 struct si_shader *es_main_part;
6486 enum pipe_shader_type es_type = shader->key.part.gs.es->type;
6487
6488 if (shader->key.as_ngg)
6489 es_main_part = shader->key.part.gs.es->main_shader_part_ngg_es;
6490 else
6491 es_main_part = shader->key.part.gs.es->main_shader_part_es;
6492
6493 if (es_type == PIPE_SHADER_VERTEX &&
6494 !si_get_vs_prolog(sscreen, compiler, shader, debug, es_main_part,
6495 &shader->key.part.gs.vs_prolog))
6496 return false;
6497
6498 shader->previous_stage = es_main_part;
6499 }
6500
6501 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
6502 return true;
6503
6504 union si_shader_part_key prolog_key;
6505 memset(&prolog_key, 0, sizeof(prolog_key));
6506 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
6507 prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
6508
6509 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
6510 PIPE_SHADER_GEOMETRY, true,
6511 &prolog_key, compiler, debug,
6512 si_build_gs_prolog_function,
6513 "Geometry Shader Prolog");
6514 return shader->prolog2 != NULL;
6515 }
6516
/**
 * Build the pixel shader prolog function. This handles:
 * - two-side color selection and interpolation
 * - overriding interpolation parameters for the API PS
 * - polygon stippling
 *
 * All preloaded SGPRs and VGPRs are passed through unmodified unless they are
 * overridden by other states. (e.g. per-sample interpolation)
 * Interpolated colors are stored after the preloaded VGPRs.
 *
 * The prolog's return value mirrors its argument list (SGPRs, then VGPRs),
 * so insert/extract indices into "ret" below must match the argument layout
 * declared here exactly.
 */
static void si_build_ps_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	LLVMValueRef ret, func;
	int num_returns, i, num_color_channels;

	/* The prolog must actually have work to do, otherwise it's skipped. */
	assert(si_need_ps_prolog(key));

	memset(&ctx->args, 0, sizeof(ctx->args));

	/* Declare inputs. */
	LLVMTypeRef return_types[AC_MAX_ARGS];
	num_returns = 0;
	num_color_channels = util_bitcount(key->ps_prolog.colors_read);
	assert(key->ps_prolog.num_input_sgprs +
	       key->ps_prolog.num_input_vgprs +
	       num_color_channels <= AC_MAX_ARGS);
	/* All input SGPRs are passed through and returned unmodified. */
	for (i = 0; i < key->ps_prolog.num_input_sgprs; i++) {
		ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
		return_types[num_returns++] = ctx->i32;

	}

	/* Track the VGPRs this prolog needs by position:
	 * - ancillary (contains the sample ID, see unpack below)
	 * - the sample coverage mask, which follows ancillary
	 * - POS_FIXED_PT, used for polygon stippling
	 */
	struct ac_arg pos_fixed_pt;
	struct ac_arg ancillary;
	struct ac_arg param_sample_mask;
	for (i = 0; i < key->ps_prolog.num_input_vgprs; i++) {
		struct ac_arg *arg = NULL;
		if (i == key->ps_prolog.ancillary_vgpr_index) {
			arg = &ancillary;
		} else if (i == key->ps_prolog.ancillary_vgpr_index + 1) {
			arg = &param_sample_mask;
		} else if (i == key->ps_prolog.num_input_vgprs - 1) {
			/* POS_FIXED_PT is always last. */
			arg = &pos_fixed_pt;
		}
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, arg);
		return_types[num_returns++] = ctx->f32;
	}

	/* Declare outputs (same as inputs + add colors if needed) */
	for (i = 0; i < num_color_channels; i++)
		return_types[num_returns++] = ctx->f32;

	/* Create the function. */
	si_create_function(ctx, "ps_prolog", return_types, num_returns, 0);
	func = ctx->main_fn;

	/* Copy inputs to outputs. This should be no-op, as the registers match,
	 * but it will prevent the compiler from overwriting them unintentionally.
	 */
	ret = ctx->return_value;
	for (i = 0; i < ctx->args.arg_count; i++) {
		LLVMValueRef p = LLVMGetParam(func, i);
		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
	}

	/* Polygon stippling. */
	if (key->ps_prolog.states.poly_stipple) {
		LLVMValueRef list = si_prolog_get_rw_buffers(ctx);

		si_llvm_emit_polygon_stipple(ctx, list, pos_fixed_pt);
	}

	if (key->ps_prolog.states.bc_optimize_for_persp ||
	    key->ps_prolog.states.bc_optimize_for_linear) {
		/* "base" is the first VGPR index; the barycentric pairs live at
		 * fixed offsets from it (PERSP at +0/+2/+4, LINEAR at +6/+8/+10). */
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef center[2], centroid[2], tmp, bc_optimize;

		/* The shader should do: if (PRIM_MASK[31]) CENTROID = CENTER;
		 * The hw doesn't compute CENTROID if the whole wave only
		 * contains fully-covered quads.
		 *
		 * PRIM_MASK is after user SGPRs.
		 */
		bc_optimize = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);
		bc_optimize = LLVMBuildLShr(ctx->ac.builder, bc_optimize,
					    LLVMConstInt(ctx->i32, 31, 0), "");
		bc_optimize = LLVMBuildTrunc(ctx->ac.builder, bc_optimize,
					     ctx->i1, "");

		if (key->ps_prolog.states.bc_optimize_for_persp) {
			/* Read PERSP_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 2 + i);
			/* Read PERSP_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 4 + i);
			/* Select PERSP_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(ctx->ac.builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
							   tmp, base + 4 + i, "");
			}
		}
		if (key->ps_prolog.states.bc_optimize_for_linear) {
			/* Read LINEAR_CENTER. */
			for (i = 0; i < 2; i++)
				center[i] = LLVMGetParam(func, base + 8 + i);
			/* Read LINEAR_CENTROID. */
			for (i = 0; i < 2; i++)
				centroid[i] = LLVMGetParam(func, base + 10 + i);
			/* Select LINEAR_CENTROID. */
			for (i = 0; i < 2; i++) {
				tmp = LLVMBuildSelect(ctx->ac.builder, bc_optimize,
						      center[i], centroid[i], "");
				ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
							   tmp, base + 10 + i, "");
			}
		}
	}

	/* Force per-sample interpolation: copy the SAMPLE (i,j) pair over
	 * the CENTER and CENTROID slots so the main shader interpolates
	 * everything per sample. */
	if (key->ps_prolog.states.force_persp_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_sample[2];

		/* Read PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			persp_sample[i] = LLVMGetParam(func, base + i);
		/* Overwrite PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_sample[i], base + 2 + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_sample[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_sample_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_sample[2];

		/* Read LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			linear_sample[i] = LLVMGetParam(func, base + 6 + i);
		/* Overwrite LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_sample[i], base + 8 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_sample[i], base + 10 + i, "");
	}

	/* Force center interpolation: the inverse override — copy the CENTER
	 * pair over the SAMPLE and CENTROID slots. */
	if (key->ps_prolog.states.force_persp_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef persp_center[2];

		/* Read PERSP_CENTER. */
		for (i = 0; i < 2; i++)
			persp_center[i] = LLVMGetParam(func, base + 2 + i);
		/* Overwrite PERSP_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_center[i], base + i, "");
		/* Overwrite PERSP_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   persp_center[i], base + 4 + i, "");
	}
	if (key->ps_prolog.states.force_linear_center_interp) {
		unsigned i, base = key->ps_prolog.num_input_sgprs;
		LLVMValueRef linear_center[2];

		/* Read LINEAR_CENTER. */
		for (i = 0; i < 2; i++)
			linear_center[i] = LLVMGetParam(func, base + 8 + i);
		/* Overwrite LINEAR_SAMPLE. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_center[i], base + 6 + i, "");
		/* Overwrite LINEAR_CENTROID. */
		for (i = 0; i < 2; i++)
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
						   linear_center[i], base + 10 + i, "");
	}

	/* Interpolate colors. i indexes the two two-sided color attributes;
	 * each has a 4-bit channel mask within colors_read. */
	unsigned color_out_idx = 0;
	for (i = 0; i < 2; i++) {
		unsigned writemask = (key->ps_prolog.colors_read >> (i * 4)) & 0xf;
		unsigned face_vgpr = key->ps_prolog.num_input_sgprs +
				     key->ps_prolog.face_vgpr_index;
		LLVMValueRef interp[2], color[4];
		LLVMValueRef interp_ij = NULL, prim_mask = NULL, face = NULL;

		if (!writemask)
			continue;

		/* If the interpolation qualifier is not CONSTANT (-1). */
		if (key->ps_prolog.color_interp_vgpr_index[i] != -1) {
			unsigned interp_vgpr = key->ps_prolog.num_input_sgprs +
					       key->ps_prolog.color_interp_vgpr_index[i];

			/* Get the (i,j) updated by bc_optimize handling. */
			interp[0] = LLVMBuildExtractValue(ctx->ac.builder, ret,
							  interp_vgpr, "");
			interp[1] = LLVMBuildExtractValue(ctx->ac.builder, ret,
							  interp_vgpr + 1, "");
			interp_ij = ac_build_gather_values(&ctx->ac, interp, 2);
		}

		/* Use the absolute location of the input. */
		prim_mask = LLVMGetParam(func, SI_PS_NUM_USER_SGPR);

		/* Two-sided colors need the front-face VGPR to pick the
		 * front or back attribute. */
		if (key->ps_prolog.states.color_two_side) {
			face = LLVMGetParam(func, face_vgpr);
			face = ac_to_integer(&ctx->ac, face);
		}

		interp_fs_color(ctx,
				key->ps_prolog.color_attr_index[i], i,
				key->ps_prolog.num_interp_inputs,
				key->ps_prolog.colors_read, interp_ij,
				prim_mask, face, color);

		/* Interpolated color channels are appended after all
		 * pass-through args in the return value. */
		while (writemask) {
			unsigned chan = u_bit_scan(&writemask);
			ret = LLVMBuildInsertValue(ctx->ac.builder, ret, color[chan],
						   ctx->args.arg_count + color_out_idx++, "");
		}
	}

	/* Section 15.2.2 (Shader Inputs) of the OpenGL 4.5 (Core Profile) spec
	 * says:
	 *
	 *    "When per-sample shading is active due to the use of a fragment
	 *     input qualified by sample or due to the use of the gl_SampleID
	 *     or gl_SamplePosition variables, only the bit for the current
	 *     sample is set in gl_SampleMaskIn. When state specifies multiple
	 *     fragment shader invocations for a given fragment, the sample
	 *     mask for any single fragment shader invocation may specify a
	 *     subset of the covered samples for the fragment. In this case,
	 *     the bit corresponding to each covered sample will be set in
	 *     exactly one fragment shader invocation."
	 *
	 * The samplemask loaded by hardware is always the coverage of the
	 * entire pixel/fragment, so mask bits out based on the sample ID.
	 */
	if (key->ps_prolog.states.samplemask_log_ps_iter) {
		/* The bit pattern matches that used by fixed function fragment
		 * processing. */
		static const uint16_t ps_iter_masks[] = {
			0xffff, /* not used */
			0x5555,
			0x1111,
			0x0101,
			0x0001,
		};
		assert(key->ps_prolog.states.samplemask_log_ps_iter < ARRAY_SIZE(ps_iter_masks));

		uint32_t ps_iter_mask = ps_iter_masks[key->ps_prolog.states.samplemask_log_ps_iter];
		/* The sample ID lives in bits [11:8] of the ancillary VGPR. */
		LLVMValueRef sampleid = si_unpack_param(ctx, ancillary, 8, 4);
		LLVMValueRef samplemask = ac_get_arg(&ctx->ac, param_sample_mask);

		/* new_mask = hw_mask & (ps_iter_mask << sample_id) */
		samplemask = ac_to_integer(&ctx->ac, samplemask);
		samplemask = LLVMBuildAnd(
			ctx->ac.builder,
			samplemask,
			LLVMBuildShl(ctx->ac.builder,
				     LLVMConstInt(ctx->i32, ps_iter_mask, false),
				     sampleid, ""),
			"");
		samplemask = ac_to_float(&ctx->ac, samplemask);

		ret = LLVMBuildInsertValue(ctx->ac.builder, ret, samplemask,
					   param_sample_mask.arg_index, "");
	}

	/* Tell LLVM to insert WQM instruction sequence when needed. */
	if (key->ps_prolog.wqm) {
		LLVMAddTargetDependentFunctionAttr(func,
						   "amdgpu-ps-wqm-outputs", "");
	}

	si_llvm_build_ret(ctx, ret);
}
6808
/**
 * Build the pixel shader epilog function. This handles everything that must be
 * emulated for pixel shader exports. (alpha-test, format conversions, etc)
 *
 * Inputs are the epilog SGPRs (resource descriptors + alpha ref) followed by
 * one VGPR per written color channel, then Z, stencil, and samplemask VGPRs
 * if written. The function only emits exports and returns void.
 */
static void si_build_ps_epilog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key)
{
	LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
	int i;
	struct si_ps_exports exp = {};

	memset(&ctx->args, 0, sizeof(ctx->args));

	/* Declare input SGPRs. */
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->rw_buffers);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
		   &ctx->bindless_samplers_and_images);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
		   &ctx->const_and_shader_buffers);
	ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
		   &ctx->samplers_and_images);
	/* The alpha reference value must land at SI_PARAM_ALPHA_REF;
	 * add_arg_checked asserts the position. */
	add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT,
			NULL, SI_PARAM_ALPHA_REF);

	/* Declare input VGPRs: 4 per written color buffer, plus one each for
	 * Z, stencil, and samplemask when written. */
	unsigned required_num_params =
		ctx->args.num_sgprs_used +
		util_bitcount(key->ps_epilog.colors_written) * 4 +
		key->ps_epilog.writes_z +
		key->ps_epilog.writes_stencil +
		key->ps_epilog.writes_samplemask;

	/* Guarantee at least PS_EPILOG_SAMPLEMASK_MIN_LOC + 1 VGPR params so
	 * the epilog's input layout matches what the main part passes. */
	required_num_params = MAX2(required_num_params,
				   ctx->args.num_sgprs_used + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);

	while (ctx->args.arg_count < required_num_params)
		ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, NULL);

	/* Create the function. */
	si_create_function(ctx, "ps_epilog", NULL, 0, 0);
	/* Disable elimination of unused inputs. */
	ac_llvm_add_target_dep_function_attr(ctx->main_fn,
					     "InitialPSInputAddr", 0xffffff);

	/* Process colors. */
	unsigned vgpr = ctx->args.num_sgprs_used;
	unsigned colors_written = key->ps_epilog.colors_written;
	int last_color_export = -1;

	/* Find the last color export. The last export gets the "done" bit;
	 * it only matters when no Z/stencil/samplemask export follows. */
	if (!key->ps_epilog.writes_z &&
	    !key->ps_epilog.writes_stencil &&
	    !key->ps_epilog.writes_samplemask) {
		unsigned spi_format = key->ps_epilog.states.spi_shader_col_format;

		/* If last_cbuf > 0, FS_COLOR0_WRITES_ALL_CBUFS is true. */
		if (colors_written == 0x1 && key->ps_epilog.states.last_cbuf > 0) {
			/* Just set this if any of the colorbuffers are enabled. */
			if (spi_format &
			    ((1ull << (4 * (key->ps_epilog.states.last_cbuf + 1))) - 1))
				last_color_export = 0;
		} else {
			/* Otherwise: the highest MRT that is both written and
			 * has a non-NONE export format (4 bits per MRT). */
			for (i = 0; i < 8; i++)
				if (colors_written & (1 << i) &&
				    (spi_format >> (i * 4)) & 0xf)
					last_color_export = i;
		}
	}

	/* Consume 4 VGPRs per written MRT and emit its export. */
	while (colors_written) {
		LLVMValueRef color[4];
		int mrt = u_bit_scan(&colors_written);

		for (i = 0; i < 4; i++)
			color[i] = LLVMGetParam(ctx->main_fn, vgpr++);

		/* arg_count - 1 is the index of the last input param;
		 * presumably the samplemask input for alpha-to-coverage —
		 * TODO confirm against si_export_mrt_color. */
		si_export_mrt_color(ctx, color, mrt,
				    ctx->args.arg_count - 1,
				    mrt == last_color_export, &exp);
	}

	/* Process depth, stencil, samplemask. */
	if (key->ps_epilog.writes_z)
		depth = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_stencil)
		stencil = LLVMGetParam(ctx->main_fn, vgpr++);
	if (key->ps_epilog.writes_samplemask)
		samplemask = LLVMGetParam(ctx->main_fn, vgpr++);

	if (depth || stencil || samplemask)
		si_export_mrt_z(ctx, depth, stencil, samplemask, &exp);
	else if (last_color_export == -1)
		/* A PS must export something; emit a null export if nothing
		 * else was exported. */
		ac_build_export_null(&ctx->ac);

	if (exp.num)
		si_emit_ps_exports(ctx, &exp);

	/* Compile. */
	LLVMBuildRetVoid(ctx->ac.builder);
}
6909
/**
 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
 *
 * Also patches shader->config.spi_ps_input_ena so the enabled PS inputs
 * match what the selected prolog/epilog actually require.
 *
 * \return true on success, false if a part failed to compile.
 */
static bool si_shader_select_ps_parts(struct si_screen *sscreen,
				      struct ac_llvm_compiler *compiler,
				      struct si_shader *shader,
				      struct pipe_debug_callback *debug)
{
	union si_shader_part_key prolog_key;
	union si_shader_part_key epilog_key;

	/* Get the prolog. */
	si_get_ps_prolog_key(shader, &prolog_key, true);

	/* The prolog is a no-op if these aren't set. */
	if (si_need_ps_prolog(&prolog_key)) {
		shader->prolog =
			si_get_shader_part(sscreen, &sscreen->ps_prologs,
					   PIPE_SHADER_FRAGMENT, true,
					   &prolog_key, compiler, debug,
					   si_build_ps_prolog_function,
					   "Fragment Shader Prolog");
		if (!shader->prolog)
			return false;
	}

	/* Get the epilog. The epilog is always required (it does the exports). */
	si_get_ps_epilog_key(shader, &epilog_key);

	shader->epilog =
		si_get_shader_part(sscreen, &sscreen->ps_epilogs,
				   PIPE_SHADER_FRAGMENT, false,
				   &epilog_key, compiler, debug,
				   si_build_ps_epilog_function,
				   "Fragment Shader Epilog");
	if (!shader->epilog)
		return false;

	/* Enable POS_FIXED_PT if polygon stippling is enabled. */
	if (shader->key.part.ps.prolog.poly_stipple) {
		shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
		assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
	}

	/* Set up the enable bits for per-sample shading if needed.
	 * Each forced-interpolation mode swaps which barycentric pair the
	 * hardware provides: disable the overridden pairs and enable the
	 * forced one, mirroring what the prolog overwrites. */
	if (shader->key.part.ps.prolog.force_persp_sample_interp &&
	    (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_sample_interp &&
	    (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_persp_center_interp &&
	    (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
	}
	if (shader->key.part.ps.prolog.force_linear_center_interp &&
	    (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
	     G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
		shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
	}

	/* POS_W_FLOAT requires that one of the perspective weights is enabled. */
	if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
	    !(shader->config.spi_ps_input_ena & 0xf)) {
		shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
		assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* At least one pair of interpolation weights must be enabled. */
	if (!(shader->config.spi_ps_input_ena & 0x7f)) {
		shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
		assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
	}

	/* Samplemask fixup requires the sample ID. */
	if (shader->key.part.ps.prolog.samplemask_log_ps_iter) {
		shader->config.spi_ps_input_ena |= S_0286CC_ANCILLARY_ENA(1);
		assert(G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr));
	}

	/* The sample mask input is always enabled, because the API shader always
	 * passes it through to the epilog. Disable it here if it's unused.
	 */
	if (!shader->key.part.ps.epilog.poly_line_smoothing &&
	    !shader->selector->info.reads_samplemask)
		shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;

	return true;
}
7012
/**
 * Workaround for an SPI barrier-management bug on some GFX7 parts.
 *
 * NOTE(review): the workaround is intentionally disabled by the
 * unconditional return below — the code after it is deliberately kept
 * but unreachable. Presumably tessellation is all offchip and on-chip GS
 * is unused in this driver configuration, making the fixup unnecessary;
 * confirm before re-enabling.
 */
void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
				      unsigned *lds_size)
{
	/* If tessellation is all offchip and on-chip GS isn't used, this
	 * workaround is not needed.
	 */
	return;

	/* SPI barrier management bug:
	 * Make sure we have at least 4k of LDS in use to avoid the bug.
	 * It applies to workgroup sizes of more than one wavefront.
	 */
	if (sscreen->info.family == CHIP_BONAIRE ||
	    sscreen->info.family == CHIP_KABINI)
		*lds_size = MAX2(*lds_size, 8);
}
7029
7030 static void si_fix_resource_usage(struct si_screen *sscreen,
7031 struct si_shader *shader)
7032 {
7033 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
7034
7035 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
7036
7037 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
7038 si_get_max_workgroup_size(shader) > sscreen->compute_wave_size) {
7039 si_multiwave_lds_size_workaround(sscreen,
7040 &shader->config.lds_size);
7041 }
7042 }
7043
/**
 * Create a complete, uploadable shader variant.
 *
 * Either compiles the whole shader monolithically, or assembles it from the
 * precompiled main part plus selected prolog/epilog parts, then fixes up
 * resource usage, dumps it for debugging, and uploads the binary.
 *
 * \return true on success, false on compile/assembly/upload failure.
 */
bool si_shader_create(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
		      struct si_shader *shader,
		      struct pipe_debug_callback *debug)
{
	struct si_shader_selector *sel = shader->selector;
	struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
	int r;

	/* LS, ES, VS are compiled on demand if the main part hasn't been
	 * compiled for that stage.
	 *
	 * GS are compiled on demand if the main part hasn't been compiled
	 * for the chosen NGG-ness.
	 *
	 * Vertex shaders are compiled on demand when a vertex fetch
	 * workaround must be applied.
	 */
	if (shader->is_monolithic) {
		/* Monolithic shader (compiled as a whole, has many variants,
		 * may take a long time to compile).
		 */
		r = si_compile_shader(sscreen, compiler, shader, debug);
		if (r)
			return false;
	} else {
		/* The shader consists of several parts:
		 *
		 * - the middle part is the user shader, it has 1 variant only
		 *   and it was compiled during the creation of the shader
		 *   selector
		 * - the prolog part is inserted at the beginning
		 * - the epilog part is inserted at the end
		 *
		 * The prolog and epilog have many (but simple) variants.
		 *
		 * Starting with gfx9, geometry and tessellation control
		 * shaders also contain the prolog and user shader parts of
		 * the previous shader stage.
		 */

		if (!mainp)
			return false;

		/* Copy the compiled TGSI shader data over. The binary is
		 * shared with the main part (see is_binary_shared below and
		 * si_shader_destroy). */
		shader->is_binary_shared = true;
		shader->binary = mainp->binary;
		shader->config = mainp->config;
		shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
		shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
		shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
		shader->info.ancillary_vgpr_index = mainp->info.ancillary_vgpr_index;
		memcpy(shader->info.vs_output_param_offset,
		       mainp->info.vs_output_param_offset,
		       sizeof(mainp->info.vs_output_param_offset));
		shader->info.uses_instanceid = mainp->info.uses_instanceid;
		shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
		shader->info.nr_param_exports = mainp->info.nr_param_exports;

		/* Select prologs and/or epilogs. */
		switch (sel->type) {
		case PIPE_SHADER_VERTEX:
			if (!si_shader_select_vs_parts(sscreen, compiler, shader, debug))
				return false;
			break;
		case PIPE_SHADER_TESS_CTRL:
			if (!si_shader_select_tcs_parts(sscreen, compiler, shader, debug))
				return false;
			break;
		case PIPE_SHADER_TESS_EVAL:
			break;
		case PIPE_SHADER_GEOMETRY:
			if (!si_shader_select_gs_parts(sscreen, compiler, shader, debug))
				return false;
			break;
		case PIPE_SHADER_FRAGMENT:
			if (!si_shader_select_ps_parts(sscreen, compiler, shader, debug))
				return false;

			/* Make sure we have at least as many VGPRs as there
			 * are allocated inputs.
			 */
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->info.num_input_vgprs);
			break;
		default:;
		}

		/* Update SGPR and VGPR counts: the combined shader must
		 * reserve the maximum used by any of its parts. */
		if (shader->prolog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog->config.num_vgprs);
		}
		if (shader->previous_stage) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->previous_stage->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->previous_stage->config.num_vgprs);
			shader->config.spilled_sgprs =
				MAX2(shader->config.spilled_sgprs,
				     shader->previous_stage->config.spilled_sgprs);
			shader->config.spilled_vgprs =
				MAX2(shader->config.spilled_vgprs,
				     shader->previous_stage->config.spilled_vgprs);
			shader->info.private_mem_vgprs =
				MAX2(shader->info.private_mem_vgprs,
				     shader->previous_stage->info.private_mem_vgprs);
			shader->config.scratch_bytes_per_wave =
				MAX2(shader->config.scratch_bytes_per_wave,
				     shader->previous_stage->config.scratch_bytes_per_wave);
			shader->info.uses_instanceid |=
				shader->previous_stage->info.uses_instanceid;
		}
		if (shader->prolog2) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->prolog2->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->prolog2->config.num_vgprs);
		}
		if (shader->epilog) {
			shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
							shader->epilog->config.num_sgprs);
			shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
							shader->epilog->config.num_vgprs);
		}
		si_calculate_max_simd_waves(shader);
	}

	/* Compute subgroup/GS grouping info for merged-stage hardware. */
	if (shader->key.as_ngg) {
		assert(!shader->key.as_es && !shader->key.as_ls);
		gfx10_ngg_calculate_subgroup_info(shader);
	} else if (sscreen->info.chip_class >= GFX9 && sel->type == PIPE_SHADER_GEOMETRY) {
		gfx9_get_gs_info(shader->previous_stage_sel, sel, &shader->gs_info);
	}

	si_fix_resource_usage(sscreen, shader);
	si_shader_dump(sscreen, shader, debug, stderr, true);

	/* Upload. */
	if (!si_shader_binary_upload(sscreen, shader, 0)) {
		fprintf(stderr, "LLVM failed to upload shader\n");
		return false;
	}

	return true;
}
7191
7192 void si_shader_destroy(struct si_shader *shader)
7193 {
7194 if (shader->scratch_bo)
7195 si_resource_reference(&shader->scratch_bo, NULL);
7196
7197 si_resource_reference(&shader->bo, NULL);
7198
7199 if (!shader->is_binary_shared)
7200 si_shader_binary_clean(&shader->binary);
7201
7202 free(shader->shader_log);
7203 }