radeonsi: move PS LLVM code into si_shader_llvm_ps.c
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <llvm/Config/llvm-config.h>

#include "util/u_memory.h"
#include "tgsi/tgsi_strings.h"
#include "tgsi/tgsi_from_mesa.h"

#include "ac_exp_param.h"
#include "ac_shader_util.h"
#include "ac_rtld.h"
#include "ac_llvm_util.h"
#include "si_shader_internal.h"
#include "si_pipe.h"
#include "sid.h"

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_serialize.h"

static const char scratch_rsrc_dword0_symbol[] =
	"SCRATCH_RSRC_DWORD0";

static const char scratch_rsrc_dword1_symbol[] =
	"SCRATCH_RSRC_DWORD1";

static void si_llvm_emit_barrier(struct si_shader_context *ctx);

static void si_dump_shader_key(const struct si_shader *shader, FILE *f);

static void si_build_vs_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);
static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
					 union si_shader_part_key *key);
static void si_fix_resource_usage(struct si_screen *sscreen,
				  struct si_shader *shader);

static bool llvm_type_is_64bit(struct si_shader_context *ctx,
			       LLVMTypeRef type)
{
	if (type == ctx->ac.i64 || type == ctx->ac.f64)
		return true;

	return false;
}

/** Whether the shader runs as a combination of multiple API shaders */
static bool is_multi_part_shader(struct si_shader_context *ctx)
{
	if (ctx->screen->info.chip_class <= GFX8)
		return false;

	return ctx->shader->key.as_ls ||
	       ctx->shader->key.as_es ||
	       ctx->type == PIPE_SHADER_TESS_CTRL ||
	       ctx->type == PIPE_SHADER_GEOMETRY;
}

/** Whether the shader runs on a merged HW stage (LSHS or ESGS) */
bool si_is_merged_shader(struct si_shader_context *ctx)
{
	return ctx->shader->key.as_ngg || is_multi_part_shader(ctx);
}

/**
 * Returns a unique index for a per-patch semantic name and index. The index
 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
 * can be calculated.
 */
unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
{
	switch (semantic_name) {
	case TGSI_SEMANTIC_TESSOUTER:
		return 0;
	case TGSI_SEMANTIC_TESSINNER:
		return 1;
	case TGSI_SEMANTIC_PATCH:
		assert(index < 30);
		return 2 + index;

	default:
		assert(!"invalid semantic name");
		return 0;
	}
}

/**
 * Returns a unique index for a semantic name and index. The index must be
 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
 * calculated.
 */
unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index,
				       unsigned is_varying)
{
	switch (semantic_name) {
	case TGSI_SEMANTIC_POSITION:
		return 0;
	case TGSI_SEMANTIC_GENERIC:
		/* Since some shader stages use the highest used IO index
		 * to determine the size to allocate for inputs/outputs
		 * (in LDS, tess and GS rings), GENERIC should be placed
		 * right after POSITION to make that size as small as
		 * possible.
		 */
		if (index < SI_MAX_IO_GENERIC)
			return 1 + index;

		assert(!"invalid generic index");
		return 0;
	case TGSI_SEMANTIC_FOG:
		return SI_MAX_IO_GENERIC + 1;
	case TGSI_SEMANTIC_COLOR:
		assert(index < 2);
		return SI_MAX_IO_GENERIC + 2 + index;
	case TGSI_SEMANTIC_BCOLOR:
		assert(index < 2);
		/* If it's a varying, COLOR and BCOLOR alias. */
		if (is_varying)
			return SI_MAX_IO_GENERIC + 2 + index;
		else
			return SI_MAX_IO_GENERIC + 4 + index;
	case TGSI_SEMANTIC_TEXCOORD:
		assert(index < 8);
		return SI_MAX_IO_GENERIC + 6 + index;

	/* These are rarely used between LS and HS or ES and GS. */
	case TGSI_SEMANTIC_CLIPDIST:
		assert(index < 2);
		return SI_MAX_IO_GENERIC + 6 + 8 + index;
	case TGSI_SEMANTIC_CLIPVERTEX:
		return SI_MAX_IO_GENERIC + 6 + 8 + 2;
	case TGSI_SEMANTIC_PSIZE:
		return SI_MAX_IO_GENERIC + 6 + 8 + 3;

	/* These can't be written by LS, HS, and ES. */
	case TGSI_SEMANTIC_LAYER:
		return SI_MAX_IO_GENERIC + 6 + 8 + 4;
	case TGSI_SEMANTIC_VIEWPORT_INDEX:
		return SI_MAX_IO_GENERIC + 6 + 8 + 5;
	case TGSI_SEMANTIC_PRIMID:
		STATIC_ASSERT(SI_MAX_IO_GENERIC + 6 + 8 + 6 <= 63);
		return SI_MAX_IO_GENERIC + 6 + 8 + 6;
	default:
		fprintf(stderr, "invalid semantic name = %u\n", semantic_name);
		assert(!"invalid semantic name");
		return 0;
	}
}

/**
 * Get the value of a shader input parameter and extract a bitfield.
 */
static LLVMValueRef unpack_llvm_param(struct si_shader_context *ctx,
				      LLVMValueRef value, unsigned rshift,
				      unsigned bitwidth)
{
	if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
		value = ac_to_integer(&ctx->ac, value);

	if (rshift)
		value = LLVMBuildLShr(ctx->ac.builder, value,
				      LLVMConstInt(ctx->i32, rshift, 0), "");

	if (rshift + bitwidth < 32) {
		unsigned mask = (1 << bitwidth) - 1;
		value = LLVMBuildAnd(ctx->ac.builder, value,
				     LLVMConstInt(ctx->i32, mask, 0), "");
	}

	return value;
}
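
/* Worked example (comment added for clarity, derived from the logic above):
 * unpack_llvm_param(ctx, v, 8, 13) computes (v >> 8) & 0x1fff, i.e. it
 * extracts a 13-bit field starting at bit 8. */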

LLVMValueRef si_unpack_param(struct si_shader_context *ctx,
			     struct ac_arg param, unsigned rshift,
			     unsigned bitwidth)
{
	LLVMValueRef value = ac_get_arg(&ctx->ac, param);

	return unpack_llvm_param(ctx, value, rshift, bitwidth);
}

static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
{
	switch (ctx->type) {
	case PIPE_SHADER_TESS_CTRL:
		return si_unpack_param(ctx, ctx->args.tcs_rel_ids, 0, 8);

	case PIPE_SHADER_TESS_EVAL:
		return ac_get_arg(&ctx->ac, ctx->tes_rel_patch_id);

	default:
		assert(0);
		return NULL;
	}
}

/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
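
/* A sketch of the resulting address arithmetic (derived from the helpers
 * below, in dword units): a per-vertex TCS output lives at
 *   get_tcs_out_patch0_offset + rel_patch_id * out_patch_stride +
 *   vertex_index * out_vertex_dw_stride + (unique_index + param_index) * 4
 * while a per-patch output drops the vertex term and starts at
 * get_tcs_out_patch0_patch_data_offset instead. */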

static LLVMValueRef
get_tcs_in_patch_stride(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->vs_state_bits, 8, 13);
}

static unsigned get_tcs_out_vertex_dw_stride_constant(struct si_shader_context *ctx)
{
	assert(ctx->type == PIPE_SHADER_TESS_CTRL);

	if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
		return util_last_bit64(ctx->shader->key.mono.u.ff_tcs_inputs_to_copy) * 4;

	return util_last_bit64(ctx->shader->selector->outputs_written) * 4;
}

static LLVMValueRef get_tcs_out_vertex_dw_stride(struct si_shader_context *ctx)
{
	unsigned stride = get_tcs_out_vertex_dw_stride_constant(ctx);

	return LLVMConstInt(ctx->i32, stride, 0);
}

static LLVMValueRef get_tcs_out_patch_stride(struct si_shader_context *ctx)
{
	if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
		return si_unpack_param(ctx, ctx->tcs_out_lds_layout, 0, 13);

	const struct si_shader_info *info = &ctx->shader->selector->info;
	unsigned tcs_out_vertices = info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
	unsigned vertex_dw_stride = get_tcs_out_vertex_dw_stride_constant(ctx);
	unsigned num_patch_outputs = util_last_bit64(ctx->shader->selector->patch_outputs_written);
	unsigned patch_dw_stride = tcs_out_vertices * vertex_dw_stride +
				   num_patch_outputs * 4;
	return LLVMConstInt(ctx->i32, patch_dw_stride, 0);
}

static LLVMValueRef
get_tcs_out_patch0_offset(struct si_shader_context *ctx)
{
	return LLVMBuildMul(ctx->ac.builder,
			    si_unpack_param(ctx, ctx->tcs_out_lds_offsets, 0, 16),
			    LLVMConstInt(ctx->i32, 4, 0), "");
}

static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
{
	return LLVMBuildMul(ctx->ac.builder,
			    si_unpack_param(ctx, ctx->tcs_out_lds_offsets, 16, 16),
			    LLVMConstInt(ctx->i32, 4, 0), "");
}

static LLVMValueRef
get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}

static LLVMValueRef
get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id, patch0_offset);
}

static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id, patch0_patch_data_offset);
}

static LLVMValueRef get_num_tcs_out_vertices(struct si_shader_context *ctx)
{
	unsigned tcs_out_vertices =
		ctx->shader->selector ?
		ctx->shader->selector->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] : 0;

	/* If !tcs_out_vertices, it's either the fixed-func TCS or the TCS epilog. */
	if (ctx->type == PIPE_SHADER_TESS_CTRL && tcs_out_vertices)
		return LLVMConstInt(ctx->i32, tcs_out_vertices, 0);

	return si_unpack_param(ctx, ctx->tcs_offchip_layout, 6, 6);
}

static LLVMValueRef get_tcs_in_vertex_dw_stride(struct si_shader_context *ctx)
{
	unsigned stride;

	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		stride = ctx->shader->selector->lshs_vertex_stride / 4;
		return LLVMConstInt(ctx->i32, stride, 0);

	case PIPE_SHADER_TESS_CTRL:
		if (ctx->screen->info.chip_class >= GFX9 &&
		    ctx->shader->is_monolithic) {
			stride = ctx->shader->key.part.tcs.ls->lshs_vertex_stride / 4;
			return LLVMConstInt(ctx->i32, stride, 0);
		}
		return si_unpack_param(ctx, ctx->vs_state_bits, 24, 8);

	default:
		assert(0);
		return NULL;
	}
}

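/* unpack_sint16 below splits a 32-bit value into its two signed 16-bit
 * halves. Worked example (not in the original source): for i32 = 0xffff0002,
 * index 0 yields 2 (truncate to i16, then sign-extend) and index 1 yields -1
 * (arithmetic shift right by 16). */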
static LLVMValueRef unpack_sint16(struct si_shader_context *ctx,
				  LLVMValueRef i32, unsigned index)
{
	assert(index <= 1);

	if (index == 1)
		return LLVMBuildAShr(ctx->ac.builder, i32,
				     LLVMConstInt(ctx->i32, 16, 0), "");

	return LLVMBuildSExt(ctx->ac.builder,
			     LLVMBuildTrunc(ctx->ac.builder, i32,
					    ctx->ac.i16, ""),
			     ctx->i32, "");
}

void si_llvm_load_input_vs(
	struct si_shader_context *ctx,
	unsigned input_index,
	LLVMValueRef out[4])
{
	const struct si_shader_info *info = &ctx->shader->selector->info;
	unsigned vs_blit_property = info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];

	if (vs_blit_property) {
		LLVMValueRef vertex_id = ctx->abi.vertex_id;
		LLVMValueRef sel_x1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntULE, vertex_id,
						    ctx->i32_1, "");
		/* Use LLVMIntNE, because we have 3 vertices and only
		 * the middle one should use y2.
		 */
		LLVMValueRef sel_y1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntNE, vertex_id,
						    ctx->i32_1, "");

		unsigned param_vs_blit_inputs = ctx->vs_blit_inputs.arg_index;
		if (input_index == 0) {
			/* Position: */
			LLVMValueRef x1y1 = LLVMGetParam(ctx->main_fn,
							 param_vs_blit_inputs);
			LLVMValueRef x2y2 = LLVMGetParam(ctx->main_fn,
							 param_vs_blit_inputs + 1);

			LLVMValueRef x1 = unpack_sint16(ctx, x1y1, 0);
			LLVMValueRef y1 = unpack_sint16(ctx, x1y1, 1);
			LLVMValueRef x2 = unpack_sint16(ctx, x2y2, 0);
			LLVMValueRef y2 = unpack_sint16(ctx, x2y2, 1);

			LLVMValueRef x = LLVMBuildSelect(ctx->ac.builder, sel_x1,
							 x1, x2, "");
			LLVMValueRef y = LLVMBuildSelect(ctx->ac.builder, sel_y1,
							 y1, y2, "");

			out[0] = LLVMBuildSIToFP(ctx->ac.builder, x, ctx->f32, "");
			out[1] = LLVMBuildSIToFP(ctx->ac.builder, y, ctx->f32, "");
			out[2] = LLVMGetParam(ctx->main_fn,
					      param_vs_blit_inputs + 2);
			out[3] = ctx->ac.f32_1;
			return;
		}

		/* Color or texture coordinates: */
		assert(input_index == 1);

		if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
			for (int i = 0; i < 4; i++) {
				out[i] = LLVMGetParam(ctx->main_fn,
						      param_vs_blit_inputs + 3 + i);
			}
		} else {
			assert(vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD);
			LLVMValueRef x1 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 3);
			LLVMValueRef y1 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 4);
			LLVMValueRef x2 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 5);
			LLVMValueRef y2 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 6);

			out[0] = LLVMBuildSelect(ctx->ac.builder, sel_x1,
						 x1, x2, "");
			out[1] = LLVMBuildSelect(ctx->ac.builder, sel_y1,
						 y1, y2, "");
			out[2] = LLVMGetParam(ctx->main_fn,
					      param_vs_blit_inputs + 7);
			out[3] = LLVMGetParam(ctx->main_fn,
					      param_vs_blit_inputs + 8);
		}
		return;
	}

	unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
	union si_vs_fix_fetch fix_fetch;
	LLVMValueRef vb_desc;
	LLVMValueRef vertex_index;
	LLVMValueRef tmp;

	if (input_index < num_vbos_in_user_sgprs) {
		vb_desc = ac_get_arg(&ctx->ac, ctx->vb_descriptors[input_index]);
	} else {
		unsigned index = input_index - num_vbos_in_user_sgprs;
		vb_desc = ac_build_load_to_sgpr(&ctx->ac,
						ac_get_arg(&ctx->ac, ctx->vertex_buffers),
						LLVMConstInt(ctx->i32, index, 0));
	}

	vertex_index = LLVMGetParam(ctx->main_fn,
				    ctx->vertex_index0.arg_index +
				    input_index);

	/* Use the open-coded implementation for all loads of doubles and
	 * of dword-sized data that needs fixups. We need to insert conversion
	 * code anyway, and the amd/common code does it for us.
	 *
	 * Note: On LLVM <= 8, we can only open-code formats with
	 * channel size >= 4 bytes.
	 */
	bool opencode = ctx->shader->key.mono.vs_fetch_opencode & (1 << input_index);
	fix_fetch.bits = ctx->shader->key.mono.vs_fix_fetch[input_index].bits;
	if (opencode ||
	    (fix_fetch.u.log_size == 3 && fix_fetch.u.format == AC_FETCH_FORMAT_FLOAT) ||
	    (fix_fetch.u.log_size == 2)) {
		tmp = ac_build_opencoded_load_format(
				&ctx->ac, fix_fetch.u.log_size, fix_fetch.u.num_channels_m1 + 1,
				fix_fetch.u.format, fix_fetch.u.reverse, !opencode,
				vb_desc, vertex_index, ctx->ac.i32_0, ctx->ac.i32_0, 0, true);
		for (unsigned i = 0; i < 4; ++i)
			out[i] = LLVMBuildExtractElement(ctx->ac.builder, tmp, LLVMConstInt(ctx->i32, i, false), "");
		return;
	}

	/* Do multiple loads for special formats. */
	unsigned required_channels = util_last_bit(info->input_usage_mask[input_index]);
	LLVMValueRef fetches[4];
	unsigned num_fetches;
	unsigned fetch_stride;
	unsigned channels_per_fetch;

	if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2) {
		num_fetches = MIN2(required_channels, 3);
		fetch_stride = 1 << fix_fetch.u.log_size;
		channels_per_fetch = 1;
	} else {
		num_fetches = 1;
		fetch_stride = 0;
		channels_per_fetch = required_channels;
	}

	for (unsigned i = 0; i < num_fetches; ++i) {
		LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);
		fetches[i] = ac_build_buffer_load_format(&ctx->ac, vb_desc, vertex_index, voffset,
							 channels_per_fetch, 0, true);
	}

	if (num_fetches == 1 && channels_per_fetch > 1) {
		LLVMValueRef fetch = fetches[0];
		for (unsigned i = 0; i < channels_per_fetch; ++i) {
			tmp = LLVMConstInt(ctx->i32, i, false);
			fetches[i] = LLVMBuildExtractElement(
				ctx->ac.builder, fetch, tmp, "");
		}
		num_fetches = channels_per_fetch;
		channels_per_fetch = 1;
	}

	for (unsigned i = num_fetches; i < 4; ++i)
		fetches[i] = LLVMGetUndef(ctx->f32);

	if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2 &&
	    required_channels == 4) {
		if (fix_fetch.u.format == AC_FETCH_FORMAT_UINT || fix_fetch.u.format == AC_FETCH_FORMAT_SINT)
			fetches[3] = ctx->ac.i32_1;
		else
			fetches[3] = ctx->ac.f32_1;
	} else if (fix_fetch.u.log_size == 3 &&
		   (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ||
		    fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED ||
		    fix_fetch.u.format == AC_FETCH_FORMAT_SINT) &&
		   required_channels == 4) {
		/* For 2_10_10_10, the hardware returns an unsigned value;
		 * convert it to a signed one.
		 */
		LLVMValueRef tmp = fetches[3];
		LLVMValueRef c30 = LLVMConstInt(ctx->i32, 30, 0);

		/* First, recover the sign-extended signed integer value. */
		if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED)
			tmp = LLVMBuildFPToUI(ctx->ac.builder, tmp, ctx->i32, "");
		else
			tmp = ac_to_integer(&ctx->ac, tmp);

		/* For the integer-like cases, do a natural sign extension.
		 *
		 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
		 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
		 * exponent.
		 */
		tmp = LLVMBuildShl(ctx->ac.builder, tmp,
				   fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ?
				   LLVMConstInt(ctx->i32, 7, 0) : c30, "");
		tmp = LLVMBuildAShr(ctx->ac.builder, tmp, c30, "");
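
		/* Example of the trick above (comment added for clarity, not
		 * in the original source): the shift left places the 2-bit
		 * alpha in bits 30..31, so the arithmetic shift right by 30
		 * sign-extends it, e.g. 0b11 -> -1 and 0b10 -> -2. */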

		/* Convert back to the right type. */
		if (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM) {
			LLVMValueRef clamp;
			LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
			clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, tmp, neg_one, "");
			tmp = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, tmp, "");
		} else if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED) {
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
		}

		fetches[3] = tmp;
	}

	for (unsigned i = 0; i < 4; ++i)
		out[i] = ac_to_float(&ctx->ac, fetches[i]);
}

LLVMValueRef si_get_primitive_id(struct si_shader_context *ctx,
				 unsigned swizzle)
{
	if (swizzle > 0)
		return ctx->i32_0;

	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		return ac_get_arg(&ctx->ac, ctx->vs_prim_id);
	case PIPE_SHADER_TESS_CTRL:
		return ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id);
	case PIPE_SHADER_TESS_EVAL:
		return ac_get_arg(&ctx->ac, ctx->args.tes_patch_id);
	case PIPE_SHADER_GEOMETRY:
		return ac_get_arg(&ctx->ac, ctx->args.gs_prim_id);
	default:
		assert(0);
		return ctx->i32_0;
	}
}

static LLVMValueRef get_dw_address_from_generic_indices(struct si_shader_context *ctx,
							LLVMValueRef vertex_dw_stride,
							LLVMValueRef base_addr,
							LLVMValueRef vertex_index,
							LLVMValueRef param_index,
							ubyte name, ubyte index)
{
	if (vertex_dw_stride) {
		base_addr = ac_build_imad(&ctx->ac, vertex_index,
					  vertex_dw_stride, base_addr);
	}

	if (param_index) {
		base_addr = ac_build_imad(&ctx->ac, param_index,
					  LLVMConstInt(ctx->i32, 4, 0), base_addr);
	}

	int param = name == TGSI_SEMANTIC_PATCH ||
		    name == TGSI_SEMANTIC_TESSINNER ||
		    name == TGSI_SEMANTIC_TESSOUTER ?
			    si_shader_io_get_unique_index_patch(name, index) :
			    si_shader_io_get_unique_index(name, index, false);

	/* Add the base address of the element. */
	return LLVMBuildAdd(ctx->ac.builder, base_addr,
			    LLVMConstInt(ctx->i32, param * 4, 0), "");
}

/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
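
/* In byte units (derived from get_tcs_tes_buffer_address below): a per-vertex
 * attribute lands at
 *   16 * (attr * num_patches * verts_per_patch +
 *         rel_patch_id * verts_per_patch + vertex_index)
 * and a per-patch attribute at
 *   patch_data_offset + 16 * (attr * num_patches + rel_patch_id). */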
static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
					       LLVMValueRef rel_patch_id,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
	LLVMValueRef param_stride, constant16;

	vertices_per_patch = get_num_tcs_out_vertices(ctx);
	num_patches = si_unpack_param(ctx, ctx->tcs_offchip_layout, 0, 6);
	total_vertices = LLVMBuildMul(ctx->ac.builder, vertices_per_patch,
				      num_patches, "");

	constant16 = LLVMConstInt(ctx->i32, 16, 0);
	if (vertex_index) {
		base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
					  vertices_per_patch, vertex_index);
		param_stride = total_vertices;
	} else {
		base_addr = rel_patch_id;
		param_stride = num_patches;
	}

	base_addr = ac_build_imad(&ctx->ac, param_index, param_stride, base_addr);
	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		LLVMValueRef patch_data_offset =
			si_unpack_param(ctx, ctx->tcs_offchip_layout, 12, 20);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}

static LLVMValueRef get_tcs_tes_buffer_address_from_generic_indices(
					struct si_shader_context *ctx,
					LLVMValueRef vertex_index,
					LLVMValueRef param_index,
					ubyte name, ubyte index)
{
	unsigned param_index_base;

	param_index_base = name == TGSI_SEMANTIC_PATCH ||
			   name == TGSI_SEMANTIC_TESSINNER ||
			   name == TGSI_SEMANTIC_TESSOUTER ?
				   si_shader_io_get_unique_index_patch(name, index) :
				   si_shader_io_get_unique_index(name, index, false);

	if (param_index) {
		param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
					   LLVMConstInt(ctx->i32, param_index_base, 0),
					   "");
	} else {
		param_index = LLVMConstInt(ctx->i32, param_index_base, 0);
	}

	return get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx),
					  vertex_index, param_index);
}

static LLVMValueRef si_build_gather_64bit(struct si_shader_context *ctx,
					  LLVMTypeRef type,
					  LLVMValueRef val1,
					  LLVMValueRef val2)
{
	LLVMValueRef values[2] = {
		ac_to_integer(&ctx->ac, val1),
		ac_to_integer(&ctx->ac, val2),
	};
	LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, 2);
	return LLVMBuildBitCast(ctx->ac.builder, result, type, "");
}

static LLVMValueRef buffer_load(struct si_shader_context *ctx,
				LLVMTypeRef type, unsigned swizzle,
				LLVMValueRef buffer, LLVMValueRef offset,
				LLVMValueRef base, bool can_speculate)
{
	LLVMValueRef value, value2;
	LLVMTypeRef vec_type = LLVMVectorType(type, 4);

	if (swizzle == ~0) {
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, ac_glc, can_speculate, false);

		return LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
	}

	if (!llvm_type_is_64bit(ctx, type)) {
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, ac_glc, can_speculate, false);

		value = LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
		return LLVMBuildExtractElement(ctx->ac.builder, value,
					       LLVMConstInt(ctx->i32, swizzle, 0), "");
	}

	value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				     swizzle * 4, ac_glc, can_speculate, false);

	value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				      swizzle * 4 + 4, ac_glc, can_speculate, false);

	return si_build_gather_64bit(ctx, type, value, value2);
}

/**
 * Load from LSHS LDS storage.
 *
 * \param type		output value type
 * \param swizzle	offset (typically 0..3); it can be ~0, which loads a vec4
 * \param dw_addr	address in dwords
 */
static LLVMValueRef lshs_lds_load(struct si_shader_context *ctx,
				  LLVMTypeRef type, unsigned swizzle,
				  LLVMValueRef dw_addr)
{
	LLVMValueRef value;

	if (swizzle == ~0) {
		LLVMValueRef values[4];

		for (unsigned chan = 0; chan < 4; chan++)
			values[chan] = lshs_lds_load(ctx, type, chan, dw_addr);

		return ac_build_gather_values(&ctx->ac, values, 4);
	}

	/* Split 64-bit loads. */
	if (llvm_type_is_64bit(ctx, type)) {
		LLVMValueRef lo, hi;

		lo = lshs_lds_load(ctx, ctx->i32, swizzle, dw_addr);
		hi = lshs_lds_load(ctx, ctx->i32, swizzle + 1, dw_addr);
		return si_build_gather_64bit(ctx, type, lo, hi);
	}

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->i32, swizzle, 0), "");

	value = ac_lds_load(&ctx->ac, dw_addr);

	return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
}

/**
 * Store to LSHS LDS storage.
 *
 * \param dw_offset_imm	offset in dwords (typically 0..3)
 * \param dw_addr	address in dwords
 * \param value		value to store
 */
static void lshs_lds_store(struct si_shader_context *ctx,
			   unsigned dw_offset_imm, LLVMValueRef dw_addr,
			   LLVMValueRef value)
{
	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->i32, dw_offset_imm, 0), "");

	ac_lds_store(&ctx->ac, dw_addr, value);
}

enum si_tess_ring {
	TCS_FACTOR_RING,
	TESS_OFFCHIP_RING_TCS,
	TESS_OFFCHIP_RING_TES,
};

static LLVMValueRef get_tess_ring_descriptor(struct si_shader_context *ctx,
					     enum si_tess_ring ring)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef addr = ac_get_arg(&ctx->ac,
				       ring == TESS_OFFCHIP_RING_TES ?
				       ctx->tes_offchip_addr :
				       ctx->tcs_out_lds_layout);

	/* TCS only receives high 13 bits of the address. */
	if (ring == TESS_OFFCHIP_RING_TCS || ring == TCS_FACTOR_RING) {
		addr = LLVMBuildAnd(builder, addr,
				    LLVMConstInt(ctx->i32, 0xfff80000, 0), "");
	}

	if (ring == TCS_FACTOR_RING) {
		unsigned tf_offset = ctx->screen->tess_offchip_ring_size;
		addr = LLVMBuildAdd(builder, addr,
				    LLVMConstInt(ctx->i32, tf_offset, 0), "");
	}

	uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

	if (ctx->screen->info.chip_class >= GFX10)
		rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
			 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
			 S_008F0C_RESOURCE_LEVEL(1);
	else
		rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

	LLVMValueRef desc[4];
	desc[0] = addr;
	desc[1] = LLVMConstInt(ctx->i32,
			       S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);
	desc[2] = LLVMConstInt(ctx->i32, 0xffffffff, 0);
	desc[3] = LLVMConstInt(ctx->i32, rsrc3, false);

	return ac_build_gather_values(&ctx->ac, desc, 4);
}
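
/* The four dwords built above follow the hardware buffer-descriptor layout:
 * dword 0 holds the base address, dword 1 the high address bits (with a
 * stride of 0), dword 2 the number of records (0xffffffff here, i.e.
 * effectively unbounded), and dword 3 the dst_sel/format fields. */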

static LLVMValueRef si_nir_load_tcs_varyings(struct ac_shader_abi *abi,
					     LLVMTypeRef type,
					     LLVMValueRef vertex_index,
					     LLVMValueRef param_index,
					     unsigned const_index,
					     unsigned location,
					     unsigned driver_location,
					     unsigned component,
					     unsigned num_components,
					     bool is_patch,
					     bool is_compact,
					     bool load_input)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef dw_addr, stride;
	ubyte name, index;

	driver_location = driver_location / 4;

	if (load_input) {
		name = info->input_semantic_name[driver_location];
		index = info->input_semantic_index[driver_location];
	} else {
		name = info->output_semantic_name[driver_location];
		index = info->output_semantic_index[driver_location];
	}

	assert((name == TGSI_SEMANTIC_PATCH ||
		name == TGSI_SEMANTIC_TESSINNER ||
		name == TGSI_SEMANTIC_TESSOUTER) == is_patch);

	if (load_input) {
		stride = get_tcs_in_vertex_dw_stride(ctx);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (is_patch) {
			stride = NULL;
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		} else {
			stride = get_tcs_out_vertex_dw_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		}
	}

	if (!param_index) {
		param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
						      vertex_index, param_index,
						      name, index);

	LLVMValueRef value[4];
	for (unsigned i = 0; i < num_components; i++) {
		unsigned offset = i;
		if (llvm_type_is_64bit(ctx, type))
			offset *= 2;

		offset += component;
		value[i + component] = lshs_lds_load(ctx, type, offset, dw_addr);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}

LLVMValueRef si_nir_load_input_tes(struct ac_shader_abi *abi,
				   LLVMTypeRef type,
				   LLVMValueRef vertex_index,
				   LLVMValueRef param_index,
				   unsigned const_index,
				   unsigned location,
				   unsigned driver_location,
				   unsigned component,
				   unsigned num_components,
				   bool is_patch,
				   bool is_compact,
				   bool load_input)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef base, addr;

	driver_location = driver_location / 4;
	ubyte name = info->input_semantic_name[driver_location];
	ubyte index = info->input_semantic_index[driver_location];

	assert((name == TGSI_SEMANTIC_PATCH ||
		name == TGSI_SEMANTIC_TESSINNER ||
		name == TGSI_SEMANTIC_TESSOUTER) == is_patch);

	base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);

	if (!param_index) {
		param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index,
							       name, index);

	/* TODO: This will generate rather ordinary llvm code, although it
	 * should be easy for the optimiser to fix up. In future we might want
	 * to refactor buffer_load().
	 */
	LLVMValueRef value[4];
	for (unsigned i = 0; i < num_components; i++) {
		unsigned offset = i;
		if (llvm_type_is_64bit(ctx, type)) {
			offset *= 2;
			if (offset == 4) {
				ubyte name = info->input_semantic_name[driver_location + 1];
				ubyte index = info->input_semantic_index[driver_location + 1];
				addr = get_tcs_tes_buffer_address_from_generic_indices(ctx,
										       vertex_index,
										       param_index,
										       name, index);
			}

			offset = offset % 4;
		}

		offset += component;
		value[i + component] = buffer_load(ctx, type, offset,
						   ctx->tess_offchip_ring, base, addr, true);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}

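/* si_nir_store_output_tcs below writes each output up to twice: once to LDS
 * (so the TCS itself or its epilog can read it back) and once to the off-chip
 * ring buffer (where the TES reads it), skipping whichever copy is provably
 * unused. */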
static void si_nir_store_output_tcs(struct ac_shader_abi *abi,
				    const struct nir_variable *var,
				    LLVMValueRef vertex_index,
				    LLVMValueRef param_index,
				    unsigned const_index,
				    LLVMValueRef src,
				    unsigned writemask)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_info *info = &ctx->shader->selector->info;
	const unsigned component = var->data.location_frac;
	unsigned driver_location = var->data.driver_location;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef buffer, base, addr;
	LLVMValueRef values[8];
	bool skip_lds_store;
	bool is_tess_factor = false, is_tess_inner = false;

	driver_location = driver_location / 4;
	ubyte name = info->output_semantic_name[driver_location];
	ubyte index = info->output_semantic_index[driver_location];

	bool is_const = !param_index;
	if (!param_index)
		param_index = LLVMConstInt(ctx->i32, const_index, 0);

	const bool is_patch = var->data.patch ||
			      var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
			      var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;

	assert((name == TGSI_SEMANTIC_PATCH ||
		name == TGSI_SEMANTIC_TESSINNER ||
		name == TGSI_SEMANTIC_TESSOUTER) == is_patch);

	if (!is_patch) {
		stride = get_tcs_out_vertex_dw_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
							      vertex_index, param_index,
							      name, index);

		skip_lds_store = !info->reads_pervertex_outputs;
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address_from_generic_indices(ctx, NULL, dw_addr,
							      vertex_index, param_index,
							      name, index);

		skip_lds_store = !info->reads_perpatch_outputs;

		if (is_const && const_index == 0) {
			int name = info->output_semantic_name[driver_location];

			/* Always write tess factors into LDS for the TCS epilog. */
			if (name == TGSI_SEMANTIC_TESSINNER ||
			    name == TGSI_SEMANTIC_TESSOUTER) {
				/* The epilog doesn't read LDS if invocation 0 defines tess factors. */
				skip_lds_store = !info->reads_tessfactor_outputs &&
						 ctx->shader->selector->info.tessfactors_are_def_in_all_invocs;
				is_tess_factor = true;
				is_tess_inner = name == TGSI_SEMANTIC_TESSINNER;
			}
		}
	}

	buffer = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);

	base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);

	addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index, name, index);

	for (unsigned chan = component; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);

		unsigned buffer_store_offset = chan % 4;
		if (chan == 4) {
			ubyte name = info->output_semantic_name[driver_location + 1];
			ubyte index = info->output_semantic_index[driver_location + 1];
			addr = get_tcs_tes_buffer_address_from_generic_indices(ctx,
									       vertex_index,
									       param_index,
									       name, index);
		}

		/* Skip LDS stores if there is no LDS read of this output. */
		if (!skip_lds_store)
			lshs_lds_store(ctx, chan, dw_addr, value);

		value = ac_to_integer(&ctx->ac, value);
		values[chan] = value;

		if (writemask != 0xF && !is_tess_factor) {
			ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
						    addr, base,
						    4 * buffer_store_offset,
						    ac_glc);
		}

		/* Write tess factors into VGPRs for the epilog. */
		if (is_tess_factor &&
		    ctx->shader->selector->info.tessfactors_are_def_in_all_invocs) {
			if (!is_tess_inner) {
				LLVMBuildStore(ctx->ac.builder, value, /* outer */
					       ctx->invoc0_tess_factors[chan]);
			} else if (chan < 2) {
				LLVMBuildStore(ctx->ac.builder, value, /* inner */
					       ctx->invoc0_tess_factors[4 + chan]);
			}
		}
	}

	if (writemask == 0xF && !is_tess_factor) {
		LLVMValueRef value = ac_build_gather_values(&ctx->ac,
							    values, 4);
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, addr,
					    base, 0, ac_glc);
	}
}

static LLVMValueRef si_llvm_load_input_gs(struct ac_shader_abi *abi,
					  unsigned input_index,
					  unsigned vtx_offset_param,
					  LLVMTypeRef type,
					  unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *shader = ctx->shader;
	LLVMValueRef vtx_offset, soffset;
	struct si_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[input_index];
	unsigned semantic_index = info->input_semantic_index[input_index];
	unsigned param;
	LLVMValueRef value;

	param = si_shader_io_get_unique_index(semantic_name, semantic_index, false);

	/* GFX9 has the ESGS ring in LDS. */
	if (ctx->screen->info.chip_class >= GFX9) {
		unsigned index = vtx_offset_param;

		switch (index / 2) {
		case 0:
			vtx_offset = si_unpack_param(ctx, ctx->gs_vtx01_offset,
						     index % 2 ? 16 : 0, 16);
			break;
		case 1:
			vtx_offset = si_unpack_param(ctx, ctx->gs_vtx23_offset,
						     index % 2 ? 16 : 0, 16);
			break;
		case 2:
			vtx_offset = si_unpack_param(ctx, ctx->gs_vtx45_offset,
						     index % 2 ? 16 : 0, 16);
			break;
		default:
			assert(0);
			return NULL;
		}

		unsigned offset = param * 4 + swizzle;
		vtx_offset = LLVMBuildAdd(ctx->ac.builder, vtx_offset,
					  LLVMConstInt(ctx->i32, offset, false), "");

		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->esgs_ring, vtx_offset);
		LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, ptr, "");
		if (llvm_type_is_64bit(ctx, type)) {
			ptr = LLVMBuildGEP(ctx->ac.builder, ptr,
					   &ctx->ac.i32_1, 1, "");
			LLVMValueRef values[2] = {
				value,
				LLVMBuildLoad(ctx->ac.builder, ptr, "")
			};
			value = ac_build_gather_values(&ctx->ac, values, 2);
		}
		return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
	}

	/* GFX6: input load from the ESGS ring in memory. */
	if (swizzle == ~0) {
		LLVMValueRef values[4];
		unsigned chan;
		for (chan = 0; chan < 4; chan++) {
			values[chan] = si_llvm_load_input_gs(abi, input_index, vtx_offset_param,
							     type, chan);
		}
		return ac_build_gather_values(&ctx->ac, values, 4);
	}

	/* Get the vertex offset parameter on GFX6. */
	LLVMValueRef gs_vtx_offset = ac_get_arg(&ctx->ac,
						ctx->gs_vtx_offset[vtx_offset_param]);

	vtx_offset = LLVMBuildMul(ctx->ac.builder, gs_vtx_offset,
				  LLVMConstInt(ctx->i32, 4, 0), "");

	soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle) * 256, 0);

	value = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1, ctx->i32_0,
				     vtx_offset, soffset, 0, ac_glc, true, false);
	if (llvm_type_is_64bit(ctx, type)) {
		LLVMValueRef value2;
		soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle + 1) * 256, 0);

		value2 = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1,
					      ctx->i32_0, vtx_offset, soffset,
					      0, ac_glc, true, false);
		return si_build_gather_64bit(ctx, type, value, value2);
	}
	return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
}

static LLVMValueRef si_nir_load_input_gs(struct ac_shader_abi *abi,
					 unsigned location,
					 unsigned driver_location,
					 unsigned component,
					 unsigned num_components,
					 unsigned vertex_index,
					 unsigned const_index,
					 LLVMTypeRef type)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);

	LLVMValueRef value[4];
	for (unsigned i = 0; i < num_components; i++) {
		unsigned offset = i;
		if (llvm_type_is_64bit(ctx, type))
			offset *= 2;

		offset += component;
		value[i + component] = si_llvm_load_input_gs(&ctx->abi, driver_location / 4 + const_index,
							     vertex_index, type, offset);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}

static LLVMValueRef get_base_vertex(struct ac_shader_abi *abi)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);

	/* For non-indexed draws, the base vertex set by the driver
	 * (for direct draws) or the CP (for indirect draws) is the
	 * first vertex ID, but GLSL expects 0 to be returned.
	 */
	LLVMValueRef vs_state = ac_get_arg(&ctx->ac,
					   ctx->vs_state_bits);
	LLVMValueRef indexed;

	indexed = LLVMBuildLShr(ctx->ac.builder, vs_state, ctx->i32_1, "");
	indexed = LLVMBuildTrunc(ctx->ac.builder, indexed, ctx->i1, "");

	return LLVMBuildSelect(ctx->ac.builder, indexed,
			       ac_get_arg(&ctx->ac, ctx->args.base_vertex),
			       ctx->i32_0, "");
}
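
/* In other words, get_base_vertex returns base_vertex only when bit 1 of
 * vs_state_bits (the "indexed draw" flag) is set, and 0 otherwise. */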

static LLVMValueRef get_block_size(struct ac_shader_abi *abi)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);

	LLVMValueRef values[3];
	LLVMValueRef result;
	unsigned i;
	unsigned *properties = ctx->shader->selector->info.properties;

	if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
		unsigned sizes[3] = {
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
		};

		for (i = 0; i < 3; ++i)
			values[i] = LLVMConstInt(ctx->i32, sizes[i], 0);

		result = ac_build_gather_values(&ctx->ac, values, 3);
	} else {
		result = ac_get_arg(&ctx->ac, ctx->block_size);
	}

	return result;
}

static LLVMValueRef si_load_tess_coord(struct ac_shader_abi *abi)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	LLVMValueRef coord[4] = {
		ac_get_arg(&ctx->ac, ctx->tes_u),
		ac_get_arg(&ctx->ac, ctx->tes_v),
		ctx->ac.f32_0,
		ctx->ac.f32_0
	};

	/* For triangles, the vector should be (u, v, 1-u-v). */
	if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
	    PIPE_PRIM_TRIANGLES) {
		coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
					 LLVMBuildFAdd(ctx->ac.builder,
						       coord[0], coord[1], ""), "");
	}
	return ac_build_gather_values(&ctx->ac, coord, 4);
}
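
/* So quads and isolines get (u, v, 0, 0), while triangles get the
 * barycentric (u, v, 1-u-v, 0); e.g. u = 0.25, v = 0.25 yields a third
 * coordinate of 0.5. */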

static LLVMValueRef load_tess_level(struct si_shader_context *ctx,
				    unsigned semantic_name)
{
	LLVMValueRef base, addr;

	int param = si_shader_io_get_unique_index_patch(semantic_name, 0);

	base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
	addr = get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), NULL,
					  LLVMConstInt(ctx->i32, param, 0));

	return buffer_load(ctx, ctx->f32,
			   ~0, ctx->tess_offchip_ring, base, addr, true);
}

static LLVMValueRef load_tess_level_default(struct si_shader_context *ctx,
					    unsigned semantic_name)
{
	LLVMValueRef buf, slot, val[4];
	int i, offset;

	slot = LLVMConstInt(ctx->i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
	buf = ac_get_arg(&ctx->ac, ctx->rw_buffers);
	buf = ac_build_load_to_sgpr(&ctx->ac, buf, slot);
	offset = semantic_name == TGSI_SEMANTIC_TESS_DEFAULT_INNER_LEVEL ? 4 : 0;

	for (i = 0; i < 4; i++)
		val[i] = si_buffer_load_const(ctx, buf,
					      LLVMConstInt(ctx->i32, (offset + i) * 4, 0));
	return ac_build_gather_values(&ctx->ac, val, 4);
}

static LLVMValueRef si_load_tess_level(struct ac_shader_abi *abi,
				       unsigned varying_id,
				       bool load_default_state)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	unsigned semantic_name;

	if (load_default_state) {
		switch (varying_id) {
		case VARYING_SLOT_TESS_LEVEL_INNER:
			semantic_name = TGSI_SEMANTIC_TESS_DEFAULT_INNER_LEVEL;
			break;
		case VARYING_SLOT_TESS_LEVEL_OUTER:
			semantic_name = TGSI_SEMANTIC_TESS_DEFAULT_OUTER_LEVEL;
			break;
		default:
			unreachable("unknown tess level");
		}
		return load_tess_level_default(ctx, semantic_name);
	}

	switch (varying_id) {
	case VARYING_SLOT_TESS_LEVEL_INNER:
		semantic_name = TGSI_SEMANTIC_TESSINNER;
		break;
	case VARYING_SLOT_TESS_LEVEL_OUTER:
		semantic_name = TGSI_SEMANTIC_TESSOUTER;
		break;
	default:
		unreachable("unknown tess level");
	}

	return load_tess_level(ctx, semantic_name);
}

static LLVMValueRef si_load_patch_vertices_in(struct ac_shader_abi *abi)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	if (ctx->type == PIPE_SHADER_TESS_CTRL)
		return si_unpack_param(ctx, ctx->tcs_out_lds_layout, 13, 6);
	else if (ctx->type == PIPE_SHADER_TESS_EVAL)
		return get_num_tcs_out_vertices(ctx);
	else
		unreachable("invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
}

void si_declare_compute_memory(struct si_shader_context *ctx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	unsigned lds_size = sel->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE];

	LLVMTypeRef i8p = LLVMPointerType(ctx->i8, AC_ADDR_SPACE_LDS);
	LLVMValueRef var;

	assert(!ctx->ac.lds);

	var = LLVMAddGlobalInAddressSpace(ctx->ac.module,
					  LLVMArrayType(ctx->i8, lds_size),
					  "compute_lds",
					  AC_ADDR_SPACE_LDS);
	LLVMSetAlignment(var, 64 * 1024);

	ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, var, i8p, "");
}

static LLVMValueRef load_const_buffer_desc_fast_path(struct si_shader_context *ctx)
{
	LLVMValueRef ptr =
		ac_get_arg(&ctx->ac, ctx->const_and_shader_buffers);
	struct si_shader_selector *sel = ctx->shader->selector;

	/* Do the bounds checking with a descriptor, because
	 * doing computation and manual bounds checking of 64-bit
	 * addresses generates horrible VALU code with very high
	 * VGPR usage and very low SIMD occupancy.
	 */
	ptr = LLVMBuildPtrToInt(ctx->ac.builder, ptr, ctx->ac.intptr, "");

	LLVMValueRef desc0, desc1;
	desc0 = ptr;
	desc1 = LLVMConstInt(ctx->i32,
			     S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);

	uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

	if (ctx->screen->info.chip_class >= GFX10)
		rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
			 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
			 S_008F0C_RESOURCE_LEVEL(1);
	else
		rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

	LLVMValueRef desc_elems[] = {
		desc0,
		desc1,
		LLVMConstInt(ctx->i32, sel->info.constbuf0_num_slots * 16, 0),
		LLVMConstInt(ctx->i32, rsrc3, false)
	};

	return ac_build_gather_values(&ctx->ac, desc_elems, 4);
}
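
/* Unlike the tess ring descriptor, the num_records field here is set to the
 * actual size of constant buffer 0 (constbuf0_num_slots * 16 bytes), so
 * out-of-bounds constant reads are clamped by the descriptor rather than by
 * generated address-checking code. */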

static LLVMValueRef load_ubo(struct ac_shader_abi *abi, LLVMValueRef index)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_selector *sel = ctx->shader->selector;

	LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->const_and_shader_buffers);

	if (sel->info.const_buffers_declared == 1 &&
	    sel->info.shader_buffers_declared == 0) {
		return load_const_buffer_desc_fast_path(ctx);
	}

	index = si_llvm_bound_index(ctx, index, ctx->num_const_buffers);
	index = LLVMBuildAdd(ctx->ac.builder, index,
			     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");

	return ac_build_load_to_sgpr(&ctx->ac, ptr, index);
}

static LLVMValueRef
load_ssbo(struct ac_shader_abi *abi, LLVMValueRef index, bool write)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	LLVMValueRef rsrc_ptr = ac_get_arg(&ctx->ac,
					   ctx->const_and_shader_buffers);

	index = si_llvm_bound_index(ctx, index, ctx->num_shader_buffers);
	index = LLVMBuildSub(ctx->ac.builder,
			     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS - 1, 0),
			     index, "");

	return ac_build_load_to_sgpr(&ctx->ac, rsrc_ptr, index);
}

/* Initialize arguments for the shader export intrinsic */
static void si_llvm_init_vs_export_args(struct si_shader_context *ctx,
					LLVMValueRef *values,
					unsigned target,
					struct ac_export_args *args)
{
	args->enabled_channels = 0xf; /* writemask - default is 0xf */
	args->valid_mask = 0; /* Specify whether the EXEC mask represents the valid mask */
	args->done = 0; /* Specify whether this is the last export */
	args->target = target; /* Specify the target we are exporting */
	args->compr = false;

	memcpy(&args->out[0], values, sizeof(values[0]) * 4);
}

static void si_llvm_emit_clipvertex(struct si_shader_context *ctx,
				    struct ac_export_args *pos, LLVMValueRef *out_elts)
{
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
	LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
						   SI_VS_CONST_CLIP_PLANES, 0);
	LLVMValueRef const_resource = ac_build_load_to_sgpr(&ctx->ac, ptr, constbuf_index);

	for (reg_index = 0; reg_index < 2; reg_index++) {
		struct ac_export_args *args = &pos[2 + reg_index];

		args->out[0] =
		args->out[1] =
		args->out[2] =
		args->out[3] = LLVMConstReal(ctx->f32, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < 4; chan++) {
			for (const_chan = 0; const_chan < 4; const_chan++) {
				LLVMValueRef addr =
					LLVMConstInt(ctx->i32, ((reg_index * 4 + chan) * 4 +
								const_chan) * 4, 0);
				base_elt = si_buffer_load_const(ctx, const_resource,
								addr);
				args->out[chan] = ac_build_fmad(&ctx->ac, base_elt,
								out_elts[const_chan], args->out[chan]);
			}
		}

		args->enabled_channels = 0xf;
		args->valid_mask = 0;
		args->done = 0;
		args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
		args->compr = 0;
	}
}
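
/* Each exported channel above is the plane equation evaluated at the vertex:
 * out[chan] = dot(clip_plane[reg_index * 4 + chan], clip_vertex), i.e. the
 * clip distances for user clip planes 0..3 and 4..7. */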
1523
1524 static void si_dump_streamout(struct pipe_stream_output_info *so)
1525 {
1526 unsigned i;
1527
1528 if (so->num_outputs)
1529 fprintf(stderr, "STREAMOUT\n");
1530
1531 for (i = 0; i < so->num_outputs; i++) {
1532 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
1533 so->output[i].start_component;
1534 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
1535 i, so->output[i].output_buffer,
1536 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
1537 so->output[i].register_index,
1538 mask & 1 ? "x" : "",
1539 mask & 2 ? "y" : "",
1540 mask & 4 ? "z" : "",
1541 mask & 8 ? "w" : "");
1542 }
1543 }
1544
1545 void si_emit_streamout_output(struct si_shader_context *ctx,
1546 LLVMValueRef const *so_buffers,
1547 LLVMValueRef const *so_write_offsets,
1548 struct pipe_stream_output *stream_out,
1549 struct si_shader_output_values *shader_out)
1550 {
1551 unsigned buf_idx = stream_out->output_buffer;
1552 unsigned start = stream_out->start_component;
1553 unsigned num_comps = stream_out->num_components;
1554 LLVMValueRef out[4];
1555
1556 assert(num_comps && num_comps <= 4);
1557 if (!num_comps || num_comps > 4)
1558 return;
1559
1560 /* Load the output as int. */
1561 for (int j = 0; j < num_comps; j++) {
1562 assert(stream_out->stream == shader_out->vertex_stream[start + j]);
1563
1564 out[j] = ac_to_integer(&ctx->ac, shader_out->values[start + j]);
1565 }
1566
1567 /* Pack the output. */
1568 LLVMValueRef vdata = NULL;
1569
1570 switch (num_comps) {
1571 case 1: /* as i32 */
1572 vdata = out[0];
1573 break;
1574 case 2: /* as v2i32 */
1575 case 3: /* as v3i32 */
1576 if (ac_has_vec3_support(ctx->screen->info.chip_class, false)) {
1577 vdata = ac_build_gather_values(&ctx->ac, out, num_comps);
1578 break;
1579 }
1580 /* as v4i32 (aligned to 4) */
1581 out[3] = LLVMGetUndef(ctx->i32);
1582 /* fall through */
1583 case 4: /* as v4i32 */
1584 vdata = ac_build_gather_values(&ctx->ac, out, util_next_power_of_two(num_comps));
1585 break;
1586 }
1587
1588 ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
1589 vdata, num_comps,
1590 so_write_offsets[buf_idx],
1591 ctx->i32_0,
1592 stream_out->dst_offset * 4, ac_glc | ac_slc);
1593 }
1594
1595 /**
1596 * Write streamout data to buffers for vertex stream @p stream (different
1597 * vertex streams can occur for GS copy shaders).
1598 */
1599 static void si_llvm_emit_streamout(struct si_shader_context *ctx,
1600 struct si_shader_output_values *outputs,
1601 unsigned noutput, unsigned stream)
1602 {
1603 struct si_shader_selector *sel = ctx->shader->selector;
1604 struct pipe_stream_output_info *so = &sel->so;
1605 LLVMBuilderRef builder = ctx->ac.builder;
1606 int i;
1607
1608 /* Get bits [22:16], i.e. (so_param >> 16) & 127; */
1609 LLVMValueRef so_vtx_count =
1610 si_unpack_param(ctx, ctx->streamout_config, 16, 7);
1611
1612 LLVMValueRef tid = ac_get_thread_id(&ctx->ac);
1613
1614 /* can_emit = tid < so_vtx_count; */
1615 LLVMValueRef can_emit =
1616 LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");
1617
1618 /* Emit the streamout code conditionally. This actually avoids
1619 * out-of-bounds buffer access. The hw tells us via the SGPR
1620 * (so_vtx_count) which threads are allowed to emit streamout data. */
1621 ac_build_ifcc(&ctx->ac, can_emit, 6501);
1622 {
1623 /* The buffer offset is computed as follows:
1624 * ByteOffset = streamout_offset[buffer_id]*4 +
1625 * (streamout_write_index + thread_id)*stride[buffer_id] +
1626 * attrib_offset
1627 */
1628
1629 LLVMValueRef so_write_index =
1630 ac_get_arg(&ctx->ac,
1631 ctx->streamout_write_index);
1632
1633 /* Compute (streamout_write_index + thread_id). */
1634 so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");
1635
1636 /* Load the descriptor and compute the write offset for each
1637 * enabled buffer. */
1638 LLVMValueRef so_write_offset[4] = {};
1639 LLVMValueRef so_buffers[4];
1640 LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac,
1641 ctx->rw_buffers);
1642
1643 for (i = 0; i < 4; i++) {
1644 if (!so->stride[i])
1645 continue;
1646
1647 LLVMValueRef offset = LLVMConstInt(ctx->i32,
1648 SI_VS_STREAMOUT_BUF0 + i, 0);
1649
1650 so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
1651
1652 LLVMValueRef so_offset = ac_get_arg(&ctx->ac,
1653 ctx->streamout_offset[i]);
1654 so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");
1655
1656 so_write_offset[i] = ac_build_imad(&ctx->ac, so_write_index,
1657 LLVMConstInt(ctx->i32, so->stride[i]*4, 0),
1658 so_offset);
1659 }
1660
1661 /* Write streamout data. */
1662 for (i = 0; i < so->num_outputs; i++) {
1663 unsigned reg = so->output[i].register_index;
1664
1665 if (reg >= noutput)
1666 continue;
1667
1668 if (stream != so->output[i].stream)
1669 continue;
1670
1671 si_emit_streamout_output(ctx, so_buffers, so_write_offset,
1672 &so->output[i], &outputs[reg]);
1673 }
1674 }
1675 ac_build_endif(&ctx->ac, 6501);
1676 }
1677
1678 static void si_export_param(struct si_shader_context *ctx, unsigned index,
1679 LLVMValueRef *values)
1680 {
1681 struct ac_export_args args;
1682
1683 si_llvm_init_vs_export_args(ctx, values,
1684 V_008DFC_SQ_EXP_PARAM + index, &args);
1685 ac_build_export(&ctx->ac, &args);
1686 }
1687
1688 static void si_build_param_exports(struct si_shader_context *ctx,
1689 struct si_shader_output_values *outputs,
1690 unsigned noutput)
1691 {
1692 struct si_shader *shader = ctx->shader;
1693 unsigned param_count = 0;
1694
1695 for (unsigned i = 0; i < noutput; i++) {
1696 unsigned semantic_name = outputs[i].semantic_name;
1697 unsigned semantic_index = outputs[i].semantic_index;
1698
1699 if (outputs[i].vertex_stream[0] != 0 &&
1700 outputs[i].vertex_stream[1] != 0 &&
1701 outputs[i].vertex_stream[2] != 0 &&
1702 outputs[i].vertex_stream[3] != 0)
1703 continue;
1704
1705 switch (semantic_name) {
1706 case TGSI_SEMANTIC_LAYER:
1707 case TGSI_SEMANTIC_VIEWPORT_INDEX:
1708 case TGSI_SEMANTIC_CLIPDIST:
1709 case TGSI_SEMANTIC_COLOR:
1710 case TGSI_SEMANTIC_BCOLOR:
1711 case TGSI_SEMANTIC_PRIMID:
1712 case TGSI_SEMANTIC_FOG:
1713 case TGSI_SEMANTIC_TEXCOORD:
1714 case TGSI_SEMANTIC_GENERIC:
1715 break;
1716 default:
1717 continue;
1718 }
1719
1720 if ((semantic_name != TGSI_SEMANTIC_GENERIC ||
1721 semantic_index < SI_MAX_IO_GENERIC) &&
1722 shader->key.opt.kill_outputs &
1723 (1ull << si_shader_io_get_unique_index(semantic_name,
1724 semantic_index, true)))
1725 continue;
1726
1727 si_export_param(ctx, param_count, outputs[i].values);
1728
1729 assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
1730 shader->info.vs_output_param_offset[i] = param_count++;
1731 }
1732
1733 shader->info.nr_param_exports = param_count;
1734 }
1735
1736 /**
1737 * Vertex color clamping.
1738 *
1739 * This loads a state constant from a user data SGPR and adds
1740 * an IF statement that clamps all colors if the constant
1741 * is true.
1742 */
1743 static void si_vertex_color_clamping(struct si_shader_context *ctx,
1744 struct si_shader_output_values *outputs,
1745 unsigned noutput)
1746 {
1747 LLVMValueRef addr[SI_MAX_VS_OUTPUTS][4];
1748 bool has_colors = false;
1749
1750 /* Store original colors to alloca variables. */
1751 for (unsigned i = 0; i < noutput; i++) {
1752 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
1753 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
1754 continue;
1755
1756 for (unsigned j = 0; j < 4; j++) {
1757 addr[i][j] = ac_build_alloca_undef(&ctx->ac, ctx->f32, "");
1758 LLVMBuildStore(ctx->ac.builder, outputs[i].values[j], addr[i][j]);
1759 }
1760 has_colors = true;
1761 }
1762
1763 if (!has_colors)
1764 return;
1765
1766 /* The state is in the first bit of the user SGPR. */
1767 LLVMValueRef cond = ac_get_arg(&ctx->ac, ctx->vs_state_bits);
1768 cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->i1, "");
1769
1770 ac_build_ifcc(&ctx->ac, cond, 6502);
1771
1772 /* Store clamped colors to alloca variables within the conditional block. */
1773 for (unsigned i = 0; i < noutput; i++) {
1774 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
1775 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
1776 continue;
1777
1778 for (unsigned j = 0; j < 4; j++) {
1779 LLVMBuildStore(ctx->ac.builder,
1780 ac_build_clamp(&ctx->ac, outputs[i].values[j]),
1781 addr[i][j]);
1782 }
1783 }
1784 ac_build_endif(&ctx->ac, 6502);
1785
1786 /* Load clamped colors */
1787 for (unsigned i = 0; i < noutput; i++) {
1788 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
1789 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
1790 continue;
1791
1792 for (unsigned j = 0; j < 4; j++) {
1793 outputs[i].values[j] =
1794 LLVMBuildLoad(ctx->ac.builder, addr[i][j], "");
1795 }
1796 }
1797 }
1798
1799 /* Generate export instructions for hardware VS shader stage or NGG GS stage
1800 * (position and parameter data only).
1801 */
1802 void si_llvm_export_vs(struct si_shader_context *ctx,
1803 struct si_shader_output_values *outputs,
1804 unsigned noutput)
1805 {
1806 struct si_shader *shader = ctx->shader;
1807 struct ac_export_args pos_args[4] = {};
1808 LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
1809 unsigned pos_idx;
1810 int i;
1811
1812 si_vertex_color_clamping(ctx, outputs, noutput);
1813
1814 /* Build position exports. */
1815 for (i = 0; i < noutput; i++) {
1816 switch (outputs[i].semantic_name) {
1817 case TGSI_SEMANTIC_POSITION:
1818 si_llvm_init_vs_export_args(ctx, outputs[i].values,
1819 V_008DFC_SQ_EXP_POS, &pos_args[0]);
1820 break;
1821 case TGSI_SEMANTIC_PSIZE:
1822 psize_value = outputs[i].values[0];
1823 break;
1824 case TGSI_SEMANTIC_LAYER:
1825 layer_value = outputs[i].values[0];
1826 break;
1827 case TGSI_SEMANTIC_VIEWPORT_INDEX:
1828 viewport_index_value = outputs[i].values[0];
1829 break;
1830 case TGSI_SEMANTIC_EDGEFLAG:
1831 edgeflag_value = outputs[i].values[0];
1832 break;
1833 case TGSI_SEMANTIC_CLIPDIST:
1834 if (!shader->key.opt.clip_disable) {
1835 unsigned index = 2 + outputs[i].semantic_index;
1836 si_llvm_init_vs_export_args(ctx, outputs[i].values,
1837 V_008DFC_SQ_EXP_POS + index,
1838 &pos_args[index]);
1839 }
1840 break;
1841 case TGSI_SEMANTIC_CLIPVERTEX:
1842 if (!shader->key.opt.clip_disable) {
1843 si_llvm_emit_clipvertex(ctx, pos_args,
1844 outputs[i].values);
1845 }
1846 break;
1847 }
1848 }
1849
1850 /* We need to add the position output manually if it's missing. */
1851 if (!pos_args[0].out[0]) {
1852 pos_args[0].enabled_channels = 0xf; /* writemask */
1853 pos_args[0].valid_mask = 0; /* EXEC mask */
1854 pos_args[0].done = 0; /* last export? */
1855 pos_args[0].target = V_008DFC_SQ_EXP_POS;
1856 pos_args[0].compr = 0; /* COMPR flag */
1857 pos_args[0].out[0] = ctx->ac.f32_0; /* X */
1858 pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
1859 pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
1860 pos_args[0].out[3] = ctx->ac.f32_1; /* W */
1861 }
1862
1863 bool pos_writes_edgeflag = shader->selector->info.writes_edgeflag &&
1864 !shader->key.as_ngg;
1865
1866 /* Write the misc vector (point size, edgeflag, layer, viewport). */
1867 if (shader->selector->info.writes_psize ||
1868 pos_writes_edgeflag ||
1869 shader->selector->info.writes_viewport_index ||
1870 shader->selector->info.writes_layer) {
1871 pos_args[1].enabled_channels = shader->selector->info.writes_psize |
1872 (pos_writes_edgeflag << 1) |
1873 (shader->selector->info.writes_layer << 2);
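/* For example, a shader that writes only the point size sets
 * enabled_channels = 0x1 here (X only); the viewport index channel
 * is OR'ed in below depending on the chip generation.
 */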
1874
1875 pos_args[1].valid_mask = 0; /* EXEC mask */
1876 pos_args[1].done = 0; /* last export? */
1877 pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
1878 pos_args[1].compr = 0; /* COMPR flag */
1879 pos_args[1].out[0] = ctx->ac.f32_0; /* X */
1880 pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
1881 pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
1882 pos_args[1].out[3] = ctx->ac.f32_0; /* W */
1883
1884 if (shader->selector->info.writes_psize)
1885 pos_args[1].out[0] = psize_value;
1886
1887 if (pos_writes_edgeflag) {
1888 /* The output is a float, but the hw expects an integer
1889 * with the first bit containing the edge flag. */
1890 edgeflag_value = LLVMBuildFPToUI(ctx->ac.builder,
1891 edgeflag_value,
1892 ctx->i32, "");
1893 edgeflag_value = ac_build_umin(&ctx->ac,
1894 edgeflag_value,
1895 ctx->i32_1);
1896
1897 /* The LLVM intrinsic expects a float. */
1898 pos_args[1].out[1] = ac_to_float(&ctx->ac, edgeflag_value);
1899 }
1900
1901 if (ctx->screen->info.chip_class >= GFX9) {
1902 /* GFX9 has the layer in out.z[10:0] and the viewport
1903 * index in out.z[19:16].
1904 */
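/* For illustration (hypothetical values): layer = 3 with viewport
 * index = 2 is packed as (2 << 16) | 3 = 0x20003 in out.z.
 */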
1905 if (shader->selector->info.writes_layer)
1906 pos_args[1].out[2] = layer_value;
1907
1908 if (shader->selector->info.writes_viewport_index) {
1909 LLVMValueRef v = viewport_index_value;
1910
1911 v = ac_to_integer(&ctx->ac, v);
1912 v = LLVMBuildShl(ctx->ac.builder, v,
1913 LLVMConstInt(ctx->i32, 16, 0), "");
1914 v = LLVMBuildOr(ctx->ac.builder, v,
1915 ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
1916 pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
1917 pos_args[1].enabled_channels |= 1 << 2;
1918 }
1919 } else {
1920 if (shader->selector->info.writes_layer)
1921 pos_args[1].out[2] = layer_value;
1922
1923 if (shader->selector->info.writes_viewport_index) {
1924 pos_args[1].out[3] = viewport_index_value;
1925 pos_args[1].enabled_channels |= 1 << 3;
1926 }
1927 }
1928 }
1929
1930 for (i = 0; i < 4; i++)
1931 if (pos_args[i].out[0])
1932 shader->info.nr_pos_exports++;
1933
1934 /* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
1935 * Setting valid_mask=1 prevents it and has no other effect.
1936 */
1937 if (ctx->screen->info.family == CHIP_NAVI10 ||
1938 ctx->screen->info.family == CHIP_NAVI12 ||
1939 ctx->screen->info.family == CHIP_NAVI14)
1940 pos_args[0].valid_mask = 1;
1941
1942 pos_idx = 0;
1943 for (i = 0; i < 4; i++) {
1944 if (!pos_args[i].out[0])
1945 continue;
1946
1947 /* Specify the target we are exporting */
1948 pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
1949
1950 if (pos_idx == shader->info.nr_pos_exports)
1951 /* Specify that this is the last export */
1952 pos_args[i].done = 1;
1953
1954 ac_build_export(&ctx->ac, &pos_args[i]);
1955 }
1956
1957 /* Build parameter exports. */
1958 si_build_param_exports(ctx, outputs, noutput);
1959 }
1960
1961 /**
1962 * Forward all outputs from the vertex shader to the TES. This is only used
1963 * for the fixed function TCS.
1964 */
1965 static void si_copy_tcs_inputs(struct si_shader_context *ctx)
1966 {
1967 LLVMValueRef invocation_id, buffer, buffer_offset;
1968 LLVMValueRef lds_vertex_stride, lds_base;
1969 uint64_t inputs;
1970
1971 invocation_id = si_unpack_param(ctx, ctx->args.tcs_rel_ids, 8, 5);
1972 buffer = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
1973 buffer_offset = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
1974
1975 lds_vertex_stride = get_tcs_in_vertex_dw_stride(ctx);
1976 lds_base = get_tcs_in_current_patch_offset(ctx);
1977 lds_base = ac_build_imad(&ctx->ac, invocation_id, lds_vertex_stride,
1978 lds_base);
1979
1980 inputs = ctx->shader->key.mono.u.ff_tcs_inputs_to_copy;
1981 while (inputs) {
1982 unsigned i = u_bit_scan64(&inputs);
1983
1984 LLVMValueRef lds_ptr = LLVMBuildAdd(ctx->ac.builder, lds_base,
1985 LLVMConstInt(ctx->i32, 4 * i, 0),
1986 "");
1987
1988 LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
1989 get_rel_patch_id(ctx),
1990 invocation_id,
1991 LLVMConstInt(ctx->i32, i, 0));
1992
1993 LLVMValueRef value = lshs_lds_load(ctx, ctx->ac.i32, ~0, lds_ptr);
1994
1995 ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buffer_addr,
1996 buffer_offset, 0, ac_glc);
1997 }
1998 }
1999
2000 static void si_write_tess_factors(struct si_shader_context *ctx,
2001 LLVMValueRef rel_patch_id,
2002 LLVMValueRef invocation_id,
2003 LLVMValueRef tcs_out_current_patch_data_offset,
2004 LLVMValueRef invoc0_tf_outer[4],
2005 LLVMValueRef invoc0_tf_inner[2])
2006 {
2007 struct si_shader *shader = ctx->shader;
2008 unsigned tess_inner_index, tess_outer_index;
2009 LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
2010 LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
2011 unsigned stride, outer_comps, inner_comps, i, offset;
2012
2013 /* Add a barrier before loading tess factors from LDS. */
2014 if (!shader->key.part.tcs.epilog.invoc0_tess_factors_are_def)
2015 si_llvm_emit_barrier(ctx);
2016
2017 /* Do this only for invocation 0, because the tess levels are per-patch,
2018 * not per-vertex.
2019 *
2020 * This can't be turned into a jump, because invocation 0 executes it.
2021 * The branch should at least mask out the loads and stores for other invocations.
2022 */
2023 ac_build_ifcc(&ctx->ac,
2024 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2025 invocation_id, ctx->i32_0, ""), 6503);
2026
2027 /* Determine the layout of one tess factor element in the buffer. */
2028 switch (shader->key.part.tcs.epilog.prim_mode) {
2029 case PIPE_PRIM_LINES:
2030 stride = 2; /* 2 dwords, 1 vec2 store */
2031 outer_comps = 2;
2032 inner_comps = 0;
2033 break;
2034 case PIPE_PRIM_TRIANGLES:
2035 stride = 4; /* 4 dwords, 1 vec4 store */
2036 outer_comps = 3;
2037 inner_comps = 1;
2038 break;
2039 case PIPE_PRIM_QUADS:
2040 stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
2041 outer_comps = 4;
2042 inner_comps = 2;
2043 break;
2044 default:
2045 assert(0);
2046 return;
2047 }
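/* The element is stored as the outer factors followed by the inner
 * factors, e.g. for triangles: {outer[0], outer[1], outer[2], inner[0]}.
 * (Isolines are special-cased below because the hw order is reversed.)
 */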
2048
2049 for (i = 0; i < 4; i++) {
2050 inner[i] = LLVMGetUndef(ctx->i32);
2051 outer[i] = LLVMGetUndef(ctx->i32);
2052 }
2053
2054 if (shader->key.part.tcs.epilog.invoc0_tess_factors_are_def) {
2055 /* Tess factors are in VGPRs. */
2056 for (i = 0; i < outer_comps; i++)
2057 outer[i] = out[i] = invoc0_tf_outer[i];
2058 for (i = 0; i < inner_comps; i++)
2059 inner[i] = out[outer_comps+i] = invoc0_tf_inner[i];
2060 } else {
2061 /* Load tess_inner and tess_outer from LDS.
2062 * Any invocation can write them, so we can't get them from a temporary.
2063 */
2064 tess_inner_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0);
2065 tess_outer_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0);
2066
2067 lds_base = tcs_out_current_patch_data_offset;
2068 lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
2069 LLVMConstInt(ctx->i32,
2070 tess_inner_index * 4, 0), "");
2071 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
2072 LLVMConstInt(ctx->i32,
2073 tess_outer_index * 4, 0), "");
2074
2075 for (i = 0; i < outer_comps; i++) {
2076 outer[i] = out[i] =
2077 lshs_lds_load(ctx, ctx->ac.i32, i, lds_outer);
2078 }
2079 for (i = 0; i < inner_comps; i++) {
2080 inner[i] = out[outer_comps+i] =
2081 lshs_lds_load(ctx, ctx->ac.i32, i, lds_inner);
2082 }
2083 }
2084
2085 if (shader->key.part.tcs.epilog.prim_mode == PIPE_PRIM_LINES) {
2086 /* For isolines, the hardware expects tess factors in the
2087 * reverse order from what NIR specifies.
2088 */
2089 LLVMValueRef tmp = out[0];
2090 out[0] = out[1];
2091 out[1] = tmp;
2092 }
2093
2094 /* Convert the outputs to vectors for stores. */
2095 vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
2096 vec1 = NULL;
2097
2098 if (stride > 4)
2099 vec1 = ac_build_gather_values(&ctx->ac, out+4, stride - 4);
2100
2101 /* Get the buffer. */
2102 buffer = get_tess_ring_descriptor(ctx, TCS_FACTOR_RING);
2103
2104 /* Get the offset. */
2105 tf_base = ac_get_arg(&ctx->ac,
2106 ctx->tcs_factor_offset);
2107 byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
2108 LLVMConstInt(ctx->i32, 4 * stride, 0), "");
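/* E.g. for triangles (stride = 4 dwords), patch 3's factors start
 * 3 * 16 = 48 bytes into the ring (plus the control-word offset on
 * GFX8 and earlier).
 */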
2109
2110 ac_build_ifcc(&ctx->ac,
2111 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2112 rel_patch_id, ctx->i32_0, ""), 6504);
2113
2114 /* Store the dynamic HS control word. */
2115 offset = 0;
2116 if (ctx->screen->info.chip_class <= GFX8) {
2117 ac_build_buffer_store_dword(&ctx->ac, buffer,
2118 LLVMConstInt(ctx->i32, 0x80000000, 0),
2119 1, ctx->i32_0, tf_base,
2120 offset, ac_glc);
2121 offset += 4;
2122 }
2123
2124 ac_build_endif(&ctx->ac, 6504);
2125
2126 /* Store the tessellation factors. */
2127 ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
2128 MIN2(stride, 4), byteoffset, tf_base,
2129 offset, ac_glc);
2130 offset += 16;
2131 if (vec1)
2132 ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
2133 stride - 4, byteoffset, tf_base,
2134 offset, ac_glc);
2135
2136 /* Store the tess factors into the offchip buffer if TES reads them. */
2137 if (shader->key.part.tcs.epilog.tes_reads_tess_factors) {
2138 LLVMValueRef buf, base, inner_vec, outer_vec, tf_outer_offset;
2139 LLVMValueRef tf_inner_offset;
2140 unsigned param_outer, param_inner;
2141
2142 buf = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
2143 base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
2144
2145 param_outer = si_shader_io_get_unique_index_patch(
2146 TGSI_SEMANTIC_TESSOUTER, 0);
2147 tf_outer_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
2148 LLVMConstInt(ctx->i32, param_outer, 0));
2149
2150 unsigned outer_vec_size =
2151 ac_has_vec3_support(ctx->screen->info.chip_class, false) ?
2152 outer_comps : util_next_power_of_two(outer_comps);
2153 outer_vec = ac_build_gather_values(&ctx->ac, outer, outer_vec_size);
2154
2155 ac_build_buffer_store_dword(&ctx->ac, buf, outer_vec,
2156 outer_comps, tf_outer_offset,
2157 base, 0, ac_glc);
2158 if (inner_comps) {
2159 param_inner = si_shader_io_get_unique_index_patch(
2160 TGSI_SEMANTIC_TESSINNER, 0);
2161 tf_inner_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
2162 LLVMConstInt(ctx->i32, param_inner, 0));
2163
2164 inner_vec = inner_comps == 1 ? inner[0] :
2165 ac_build_gather_values(&ctx->ac, inner, inner_comps);
2166 ac_build_buffer_store_dword(&ctx->ac, buf, inner_vec,
2167 inner_comps, tf_inner_offset,
2168 base, 0, ac_glc);
2169 }
2170 }
2171
2172 ac_build_endif(&ctx->ac, 6503);
2173 }
2174
2175 static LLVMValueRef
2176 si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
2177 struct ac_arg param, unsigned return_index)
2178 {
2179 return LLVMBuildInsertValue(ctx->ac.builder, ret,
2180 ac_get_arg(&ctx->ac, param),
2181 return_index, "");
2182 }
2183
2184 static LLVMValueRef
2185 si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
2186 struct ac_arg param, unsigned return_index)
2187 {
2188 LLVMBuilderRef builder = ctx->ac.builder;
2189 LLVMValueRef p = ac_get_arg(&ctx->ac, param);
2190
2191 return LLVMBuildInsertValue(builder, ret,
2192 ac_to_float(&ctx->ac, p),
2193 return_index, "");
2194 }
2195
2196 static LLVMValueRef
2197 si_insert_input_ptr(struct si_shader_context *ctx, LLVMValueRef ret,
2198 struct ac_arg param, unsigned return_index)
2199 {
2200 LLVMBuilderRef builder = ctx->ac.builder;
2201 LLVMValueRef ptr = ac_get_arg(&ctx->ac, param);
2202 ptr = LLVMBuildPtrToInt(builder, ptr, ctx->i32, "");
2203 return LLVMBuildInsertValue(builder, ret, ptr, return_index, "");
2204 }
2205
2206 /* This only writes the tessellation factor levels. */
2207 static void si_llvm_emit_tcs_epilogue(struct ac_shader_abi *abi,
2208 unsigned max_outputs,
2209 LLVMValueRef *addrs)
2210 {
2211 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2212 LLVMBuilderRef builder = ctx->ac.builder;
2213 LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;
2214
2215 si_copy_tcs_inputs(ctx);
2216
2217 rel_patch_id = get_rel_patch_id(ctx);
2218 invocation_id = si_unpack_param(ctx, ctx->args.tcs_rel_ids, 8, 5);
2219 tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);
2220
2221 if (ctx->screen->info.chip_class >= GFX9) {
2222 LLVMBasicBlockRef blocks[2] = {
2223 LLVMGetInsertBlock(builder),
2224 ctx->merged_wrap_if_entry_block
2225 };
2226 LLVMValueRef values[2];
2227
2228 ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
2229
2230 values[0] = rel_patch_id;
2231 values[1] = LLVMGetUndef(ctx->i32);
2232 rel_patch_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
2233
2234 values[0] = tf_lds_offset;
2235 values[1] = LLVMGetUndef(ctx->i32);
2236 tf_lds_offset = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
2237
2238 values[0] = invocation_id;
2239 values[1] = ctx->i32_1; /* cause the epilog to skip threads */
2240 invocation_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
2241 }
2242
2243 /* Return epilog parameters from this function. */
2244 LLVMValueRef ret = ctx->return_value;
2245 unsigned vgpr;
2246
2247 if (ctx->screen->info.chip_class >= GFX9) {
2248 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout,
2249 8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
2250 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout,
2251 8 + GFX9_SGPR_TCS_OUT_LAYOUT);
2252 /* Tess offchip and tess factor offsets are at the beginning. */
2253 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 2);
2254 ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset, 4);
2255 vgpr = 8 + GFX9_SGPR_TCS_OUT_LAYOUT + 1;
2256 } else {
2257 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout,
2258 GFX6_SGPR_TCS_OFFCHIP_LAYOUT);
2259 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout,
2260 GFX6_SGPR_TCS_OUT_LAYOUT);
2261 /* Tess offchip and tess factor offsets are after user SGPRs. */
2262 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset,
2263 GFX6_TCS_NUM_USER_SGPR);
2264 ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset,
2265 GFX6_TCS_NUM_USER_SGPR + 1);
2266 vgpr = GFX6_TCS_NUM_USER_SGPR + 2;
2267 }
2268
2269 /* VGPRs */
2270 rel_patch_id = ac_to_float(&ctx->ac, rel_patch_id);
2271 invocation_id = ac_to_float(&ctx->ac, invocation_id);
2272 tf_lds_offset = ac_to_float(&ctx->ac, tf_lds_offset);
2273
2274 /* Leave a hole corresponding to the two input VGPRs. This ensures that
2275 * the invocation_id output does not alias the tcs_rel_ids input,
2276 * which saves a V_MOV on gfx9.
2277 */
2278 vgpr += 2;
2279
2280 ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
2281 ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");
2282
2283 if (ctx->shader->selector->info.tessfactors_are_def_in_all_invocs) {
2284 vgpr++; /* skip the tess factor LDS offset */
2285 for (unsigned i = 0; i < 6; i++) {
2286 LLVMValueRef value =
2287 LLVMBuildLoad(builder, ctx->invoc0_tess_factors[i], "");
2288 value = ac_to_float(&ctx->ac, value);
2289 ret = LLVMBuildInsertValue(builder, ret, value, vgpr++, "");
2290 }
2291 } else {
2292 ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
2293 }
2294 ctx->return_value = ret;
2295 }
2296
2297 /* Pass TCS inputs from LS to TCS on GFX9. */
2298 static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
2299 {
2300 LLVMValueRef ret = ctx->return_value;
2301
2302 ret = si_insert_input_ptr(ctx, ret, ctx->other_const_and_shader_buffers, 0);
2303 ret = si_insert_input_ptr(ctx, ret, ctx->other_samplers_and_images, 1);
2304 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 2);
2305 ret = si_insert_input_ret(ctx, ret, ctx->merged_wave_info, 3);
2306 ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset, 4);
2307 ret = si_insert_input_ret(ctx, ret, ctx->merged_scratch_offset, 5);
2308
2309 ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers,
2310 8 + SI_SGPR_RW_BUFFERS);
2311 ret = si_insert_input_ptr(ctx, ret,
2312 ctx->bindless_samplers_and_images,
2313 8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
2314
2315 ret = si_insert_input_ret(ctx, ret, ctx->vs_state_bits,
2316 8 + SI_SGPR_VS_STATE_BITS);
2317
2318 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout,
2319 8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
2320 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_offsets,
2321 8 + GFX9_SGPR_TCS_OUT_OFFSETS);
2322 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout,
2323 8 + GFX9_SGPR_TCS_OUT_LAYOUT);
2324
2325 unsigned vgpr = 8 + GFX9_TCS_NUM_USER_SGPR;
2326 ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
2327 ac_to_float(&ctx->ac,
2328 ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id)),
2329 vgpr++, "");
2330 ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
2331 ac_to_float(&ctx->ac,
2332 ac_get_arg(&ctx->ac, ctx->args.tcs_rel_ids)),
2333 vgpr++, "");
2334 ctx->return_value = ret;
2335 }
2336
2337 /* Pass GS inputs from ES to GS on GFX9. */
2338 static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
2339 {
2340 LLVMValueRef ret = ctx->return_value;
2341
2342 ret = si_insert_input_ptr(ctx, ret, ctx->other_const_and_shader_buffers, 0);
2343 ret = si_insert_input_ptr(ctx, ret, ctx->other_samplers_and_images, 1);
2344 if (ctx->shader->key.as_ngg)
2345 ret = si_insert_input_ptr(ctx, ret, ctx->gs_tg_info, 2);
2346 else
2347 ret = si_insert_input_ret(ctx, ret, ctx->gs2vs_offset, 2);
2348 ret = si_insert_input_ret(ctx, ret, ctx->merged_wave_info, 3);
2349 ret = si_insert_input_ret(ctx, ret, ctx->merged_scratch_offset, 5);
2350
2351 ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers,
2352 8 + SI_SGPR_RW_BUFFERS);
2353 ret = si_insert_input_ptr(ctx, ret,
2354 ctx->bindless_samplers_and_images,
2355 8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
2356 if (ctx->screen->use_ngg) {
2357 ret = si_insert_input_ptr(ctx, ret, ctx->vs_state_bits,
2358 8 + SI_SGPR_VS_STATE_BITS);
2359 }
2360
2361 unsigned vgpr;
2362 if (ctx->type == PIPE_SHADER_VERTEX)
2363 vgpr = 8 + GFX9_VSGS_NUM_USER_SGPR;
2364 else
2365 vgpr = 8 + GFX9_TESGS_NUM_USER_SGPR;
2366
2367 ret = si_insert_input_ret_float(ctx, ret, ctx->gs_vtx01_offset, vgpr++);
2368 ret = si_insert_input_ret_float(ctx, ret, ctx->gs_vtx23_offset, vgpr++);
2369 ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_prim_id, vgpr++);
2370 ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_invocation_id, vgpr++);
2371 ret = si_insert_input_ret_float(ctx, ret, ctx->gs_vtx45_offset, vgpr++);
2372 ctx->return_value = ret;
2373 }
2374
2375 static void si_llvm_emit_ls_epilogue(struct ac_shader_abi *abi,
2376 unsigned max_outputs,
2377 LLVMValueRef *addrs)
2378 {
2379 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2380 struct si_shader *shader = ctx->shader;
2381 struct si_shader_info *info = &shader->selector->info;
2382 unsigned i, chan;
2383 LLVMValueRef vertex_id = ac_get_arg(&ctx->ac, ctx->rel_auto_id);
2384 LLVMValueRef vertex_dw_stride = get_tcs_in_vertex_dw_stride(ctx);
2385 LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
2386 vertex_dw_stride, "");
2387
2388 /* Write outputs to LDS. The next shader (TCS aka HS) will read
2389 * its inputs from it. */
2390 for (i = 0; i < info->num_outputs; i++) {
2391 unsigned name = info->output_semantic_name[i];
2392 unsigned index = info->output_semantic_index[i];
2393
2394 /* The ARB_shader_viewport_layer_array spec contains the
2395 * following issue:
2396 *
2397 * 2) What happens if gl_ViewportIndex or gl_Layer is
2398 * written in the vertex shader and a geometry shader is
2399 * present?
2400 *
2401 * RESOLVED: The value written by the last vertex processing
2402 * stage is used. If the last vertex processing stage
2403 * (vertex, tessellation evaluation or geometry) does not
2404 * statically assign to gl_ViewportIndex or gl_Layer, index
2405 * or layer zero is assumed.
2406 *
2407 * So writes to those outputs in VS-as-LS are simply ignored.
2408 */
2409 if (name == TGSI_SEMANTIC_LAYER ||
2410 name == TGSI_SEMANTIC_VIEWPORT_INDEX)
2411 continue;
2412
2413 int param = si_shader_io_get_unique_index(name, index, false);
2414 LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
2415 LLVMConstInt(ctx->i32, param * 4, 0), "");
2416
2417 for (chan = 0; chan < 4; chan++) {
2418 if (!(info->output_usagemask[i] & (1 << chan)))
2419 continue;
2420
2421 lshs_lds_store(ctx, chan, dw_addr,
2422 LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], ""));
2423 }
2424 }
2425
2426 if (ctx->screen->info.chip_class >= GFX9)
2427 si_set_ls_return_value_for_tcs(ctx);
2428 }
2429
2430 static void si_llvm_emit_es_epilogue(struct ac_shader_abi *abi,
2431 unsigned max_outputs,
2432 LLVMValueRef *addrs)
2433 {
2434 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2435 struct si_shader *es = ctx->shader;
2436 struct si_shader_info *info = &es->selector->info;
2437 LLVMValueRef lds_base = NULL;
2438 unsigned chan;
2439 int i;
2440
2441 if (ctx->screen->info.chip_class >= GFX9 && info->num_outputs) {
2442 unsigned itemsize_dw = es->selector->esgs_itemsize / 4;
2443 LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
2444 LLVMValueRef wave_idx = si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
2445 vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
2446 LLVMBuildMul(ctx->ac.builder, wave_idx,
2447 LLVMConstInt(ctx->i32, ctx->ac.wave_size, false), ""), "");
2448 lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
2449 LLVMConstInt(ctx->i32, itemsize_dw, 0), "");
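/* E.g. (hypothetical values) with wave_size = 64, thread 5 of wave 2
 * gets vertex_idx = 5 | (2 * 64) = 133; with an ESGS itemsize of
 * 16 bytes (4 dwords), its lds_base is 133 * 4 = 532 dwords.
 */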
2450 }
2451
2452 for (i = 0; i < info->num_outputs; i++) {
2453 int param;
2454
2455 if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
2456 info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
2457 continue;
2458
2459 param = si_shader_io_get_unique_index(info->output_semantic_name[i],
2460 info->output_semantic_index[i], false);
2461
2462 for (chan = 0; chan < 4; chan++) {
2463 if (!(info->output_usagemask[i] & (1 << chan)))
2464 continue;
2465
2466 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
2467 out_val = ac_to_integer(&ctx->ac, out_val);
2468
2469 /* GFX9 has the ESGS ring in LDS. */
2470 if (ctx->screen->info.chip_class >= GFX9) {
2471 LLVMValueRef idx = LLVMConstInt(ctx->i32, param * 4 + chan, false);
2472 idx = LLVMBuildAdd(ctx->ac.builder, lds_base, idx, "");
2473 ac_build_indexed_store(&ctx->ac, ctx->esgs_ring, idx, out_val);
2474 continue;
2475 }
2476
2477 ac_build_buffer_store_dword(&ctx->ac,
2478 ctx->esgs_ring,
2479 out_val, 1, NULL,
2480 ac_get_arg(&ctx->ac, ctx->es2gs_offset),
2481 (4 * param + chan) * 4,
2482 ac_glc | ac_slc | ac_swizzled);
2483 }
2484 }
2485
2486 if (ctx->screen->info.chip_class >= GFX9)
2487 si_set_es_return_value_for_gs(ctx);
2488 }
2489
2490 static LLVMValueRef si_get_gs_wave_id(struct si_shader_context *ctx)
2491 {
2492 if (ctx->screen->info.chip_class >= GFX9)
2493 return si_unpack_param(ctx, ctx->merged_wave_info, 16, 8);
2494 else
2495 return ac_get_arg(&ctx->ac, ctx->gs_wave_id);
2496 }
2497
2498 static void emit_gs_epilogue(struct si_shader_context *ctx)
2499 {
2500 if (ctx->shader->key.as_ngg) {
2501 gfx10_ngg_gs_emit_epilogue(ctx);
2502 return;
2503 }
2504
2505 if (ctx->screen->info.chip_class >= GFX10)
2506 LLVMBuildFence(ctx->ac.builder, LLVMAtomicOrderingRelease, false, "");
2507
2508 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
2509 si_get_gs_wave_id(ctx));
2510
2511 if (ctx->screen->info.chip_class >= GFX9)
2512 ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
2513 }
2514
2515 static void si_llvm_emit_gs_epilogue(struct ac_shader_abi *abi,
2516 unsigned max_outputs,
2517 LLVMValueRef *addrs)
2518 {
2519 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2520 struct si_shader_info UNUSED *info = &ctx->shader->selector->info;
2521
2522 assert(info->num_outputs <= max_outputs);
2523
2524 emit_gs_epilogue(ctx);
2525 }
2526
2527 static void si_llvm_emit_vs_epilogue(struct ac_shader_abi *abi,
2528 unsigned max_outputs,
2529 LLVMValueRef *addrs)
2530 {
2531 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2532 struct si_shader_info *info = &ctx->shader->selector->info;
2533 struct si_shader_output_values *outputs = NULL;
2534 int i, j;
2535
2536 assert(!ctx->shader->is_gs_copy_shader);
2537 assert(info->num_outputs <= max_outputs);
2538
2539 outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));
2540
2541 for (i = 0; i < info->num_outputs; i++) {
2542 outputs[i].semantic_name = info->output_semantic_name[i];
2543 outputs[i].semantic_index = info->output_semantic_index[i];
2544
2545 for (j = 0; j < 4; j++) {
2546 outputs[i].values[j] =
2547 LLVMBuildLoad(ctx->ac.builder,
2548 addrs[4 * i + j],
2549 "");
2550 outputs[i].vertex_stream[j] =
2551 (info->output_streams[i] >> (2 * j)) & 3;
2552 }
2553 }
2554
2555 if (!ctx->screen->use_ngg_streamout &&
2556 ctx->shader->selector->so.num_outputs)
2557 si_llvm_emit_streamout(ctx, outputs, i, 0);
2558
2559 /* Export PrimitiveID. */
2560 if (ctx->shader->key.mono.u.vs_export_prim_id) {
2561 outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
2562 outputs[i].semantic_index = 0;
2563 outputs[i].values[0] = ac_to_float(&ctx->ac, si_get_primitive_id(ctx, 0));
2564 for (j = 1; j < 4; j++)
2565 outputs[i].values[j] = LLVMConstReal(ctx->f32, 0);
2566
2567 memset(outputs[i].vertex_stream, 0,
2568 sizeof(outputs[i].vertex_stream));
2569 i++;
2570 }
2571
2572 si_llvm_export_vs(ctx, outputs, i);
2573 FREE(outputs);
2574 }
2575
2576 static void si_llvm_emit_prim_discard_cs_epilogue(struct ac_shader_abi *abi,
2577 unsigned max_outputs,
2578 LLVMValueRef *addrs)
2579 {
2580 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2581 struct si_shader_info *info = &ctx->shader->selector->info;
2582 LLVMValueRef pos[4] = {};
2583
2584 assert(info->num_outputs <= max_outputs);
2585
2586 for (unsigned i = 0; i < info->num_outputs; i++) {
2587 if (info->output_semantic_name[i] != TGSI_SEMANTIC_POSITION)
2588 continue;
2589
2590 for (unsigned chan = 0; chan < 4; chan++)
2591 pos[chan] = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
2592 break;
2593 }
2594 assert(pos[0] != NULL);
2595
2596 /* Return the position output. */
2597 LLVMValueRef ret = ctx->return_value;
2598 for (unsigned chan = 0; chan < 4; chan++)
2599 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, pos[chan], chan, "");
2600 ctx->return_value = ret;
2601 }
2602
2603 /* Emit one vertex from the geometry shader */
2604 static void si_llvm_emit_vertex(struct ac_shader_abi *abi,
2605 unsigned stream,
2606 LLVMValueRef *addrs)
2607 {
2608 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2609
2610 if (ctx->shader->key.as_ngg) {
2611 gfx10_ngg_gs_emit_vertex(ctx, stream, addrs);
2612 return;
2613 }
2614
2615 struct si_shader_info *info = &ctx->shader->selector->info;
2616 struct si_shader *shader = ctx->shader;
2617 LLVMValueRef soffset = ac_get_arg(&ctx->ac, ctx->gs2vs_offset);
2618 LLVMValueRef gs_next_vertex;
2619 LLVMValueRef can_emit;
2620 unsigned chan, offset;
2621 int i;
2622
2623 /* Write vertex attribute values to GSVS ring */
2624 gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
2625 ctx->gs_next_vertex[stream],
2626 "");
2627
2628 /* If this thread has already emitted the declared maximum number of
2629 * vertices, skip the write: excessive vertex emissions are not
2630 * supposed to have any effect.
2631 *
2632 * If the shader has no writes to memory, kill it instead. This skips
2633 * further memory loads and may allow LLVM to skip to the end
2634 * altogether.
2635 */
2636 can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
2637 LLVMConstInt(ctx->i32,
2638 shader->selector->gs_max_out_vertices, 0), "");
2639
2640 bool use_kill = !info->writes_memory;
2641 if (use_kill) {
2642 ac_build_kill_if_false(&ctx->ac, can_emit);
2643 } else {
2644 ac_build_ifcc(&ctx->ac, can_emit, 6505);
2645 }
2646
2647 offset = 0;
2648 for (i = 0; i < info->num_outputs; i++) {
2649 for (chan = 0; chan < 4; chan++) {
2650 if (!(info->output_usagemask[i] & (1 << chan)) ||
2651 ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
2652 continue;
2653
2654 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
2655 LLVMValueRef voffset =
2656 LLVMConstInt(ctx->i32, offset *
2657 shader->selector->gs_max_out_vertices, 0);
2658 offset++;
2659
2660 voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
2661 voffset = LLVMBuildMul(ctx->ac.builder, voffset,
2662 LLVMConstInt(ctx->i32, 4, 0), "");
2663
2664 out_val = ac_to_integer(&ctx->ac, out_val);
2665
2666 ac_build_buffer_store_dword(&ctx->ac,
2667 ctx->gsvs_ring[stream],
2668 out_val, 1,
2669 voffset, soffset, 0,
2670 ac_glc | ac_slc | ac_swizzled);
2671 }
2672 }
2673
2674 gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex, ctx->i32_1, "");
2675 LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);
2676
2677 /* Signal vertex emission if vertex data was written. */
2678 if (offset) {
2679 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
2680 si_get_gs_wave_id(ctx));
2681 }
2682
2683 if (!use_kill)
2684 ac_build_endif(&ctx->ac, 6505);
2685 }
2686
2687 /* Cut one primitive from the geometry shader */
2688 static void si_llvm_emit_primitive(struct ac_shader_abi *abi,
2689 unsigned stream)
2690 {
2691 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2692
2693 if (ctx->shader->key.as_ngg) {
2694 LLVMBuildStore(ctx->ac.builder, ctx->ac.i32_0, ctx->gs_curprim_verts[stream]);
2695 return;
2696 }
2697
2698 /* Signal primitive cut */
2699 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
2700 si_get_gs_wave_id(ctx));
2701 }
2702
2703 static void si_llvm_emit_barrier(struct si_shader_context *ctx)
2704 {
2705 /* GFX6 only (thanks to a hw bug workaround):
2706 * The real barrier instruction isn't needed, because an entire patch
2707 * always fits into a single wave.
2708 */
2709 if (ctx->screen->info.chip_class == GFX6 &&
2710 ctx->type == PIPE_SHADER_TESS_CTRL) {
2711 ac_build_waitcnt(&ctx->ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
2712 return;
2713 }
2714
2715 ac_build_s_barrier(&ctx->ac);
2716 }
2717
2718 static void declare_streamout_params(struct si_shader_context *ctx,
2719 struct pipe_stream_output_info *so)
2720 {
2721 if (ctx->screen->use_ngg_streamout) {
2722 if (ctx->type == PIPE_SHADER_TESS_EVAL)
2723 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
2724 return;
2725 }
2726
2727 /* Streamout SGPRs. */
2728 if (so->num_outputs) {
2729 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_config);
2730 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_write_index);
2731 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
2732 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
2733 }
2734
2735 /* A streamout buffer offset is loaded if the stride is non-zero. */
2736 for (int i = 0; i < 4; i++) {
2737 if (!so->stride[i])
2738 continue;
2739
2740 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_offset[i]);
2741 }
2742 }
2743
2744 static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
2745 {
2746 switch (shader->selector->type) {
2747 case PIPE_SHADER_VERTEX:
2748 case PIPE_SHADER_TESS_EVAL:
2749 return shader->key.as_ngg ? 128 : 0;
2750
2751 case PIPE_SHADER_TESS_CTRL:
2752 /* Return this so that LLVM doesn't remove s_barrier
2753 * instructions on chips where we use s_barrier. */
2754 return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 0;
2755
2756 case PIPE_SHADER_GEOMETRY:
2757 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 0;
2758
2759 case PIPE_SHADER_COMPUTE:
2760 break; /* see below */
2761
2762 default:
2763 return 0;
2764 }
2765
2766 const unsigned *properties = shader->selector->info.properties;
2767 unsigned max_work_group_size =
2768 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
2769 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
2770 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
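/* For example, a fixed 8x8x1 block declares 64 threads per group. */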
2771
2772 if (!max_work_group_size) {
2773 /* This is a variable group size compute shader;
2774 * compile it for the maximum possible group size.
2775 */
2776 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
2777 }
2778 return max_work_group_size;
2779 }
2780
2781 static void declare_const_and_shader_buffers(struct si_shader_context *ctx,
2782 bool assign_params)
2783 {
2784 enum ac_arg_type const_shader_buf_type;
2785
2786 if (ctx->shader->selector->info.const_buffers_declared == 1 &&
2787 ctx->shader->selector->info.shader_buffers_declared == 0)
2788 const_shader_buf_type = AC_ARG_CONST_FLOAT_PTR;
2789 else
2790 const_shader_buf_type = AC_ARG_CONST_DESC_PTR;
2791
2792 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, const_shader_buf_type,
2793 assign_params ? &ctx->const_and_shader_buffers :
2794 &ctx->other_const_and_shader_buffers);
2795 }
2796
2797 static void declare_samplers_and_images(struct si_shader_context *ctx,
2798 bool assign_params)
2799 {
2800 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
2801 assign_params ? &ctx->samplers_and_images :
2802 &ctx->other_samplers_and_images);
2803 }
2804
2805 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
2806 bool assign_params)
2807 {
2808 declare_const_and_shader_buffers(ctx, assign_params);
2809 declare_samplers_and_images(ctx, assign_params);
2810 }
2811
2812 static void declare_global_desc_pointers(struct si_shader_context *ctx)
2813 {
2814 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
2815 &ctx->rw_buffers);
2816 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
2817 &ctx->bindless_samplers_and_images);
2818 }
2819
2820 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx)
2821 {
2822 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
2823 if (!ctx->shader->is_gs_copy_shader) {
2824 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.base_vertex);
2825 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.start_instance);
2826 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.draw_id);
2827 }
2828 }
2829
2830 static void declare_vb_descriptor_input_sgprs(struct si_shader_context *ctx)
2831 {
2832 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->vertex_buffers);
2833
2834 unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
2835 if (num_vbos_in_user_sgprs) {
2836 unsigned user_sgprs = ctx->args.num_sgprs_used;
2837
2838 if (si_is_merged_shader(ctx))
2839 user_sgprs -= 8;
2840 assert(user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
2841
2842 /* Declare unused SGPRs to align VB descriptors to 4 SGPRs (hw requirement). */
2843 for (unsigned i = user_sgprs; i < SI_SGPR_VS_VB_DESCRIPTOR_FIRST; i++)
2844 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
2845
2846 assert(num_vbos_in_user_sgprs <= ARRAY_SIZE(ctx->vb_descriptors));
2847 for (unsigned i = 0; i < num_vbos_in_user_sgprs; i++)
2848 ac_add_arg(&ctx->args, AC_ARG_SGPR, 4, AC_ARG_INT, &ctx->vb_descriptors[i]);
2849 }
2850 }
2851
2852 static void declare_vs_input_vgprs(struct si_shader_context *ctx,
2853 unsigned *num_prolog_vgprs)
2854 {
2855 struct si_shader *shader = ctx->shader;
2856
2857 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.vertex_id);
2858 if (shader->key.as_ls) {
2859 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->rel_auto_id);
2860 if (ctx->screen->info.chip_class >= GFX10) {
2861 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
2862 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
2863 } else {
2864 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
2865 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
2866 }
2867 } else if (ctx->screen->info.chip_class >= GFX10) {
2868 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
2869 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
2870 &ctx->vs_prim_id); /* user vgpr or PrimID (legacy) */
2871 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
2872 } else {
2873 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
2874 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->vs_prim_id);
2875 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
2876 }
2877
2878 if (!shader->is_gs_copy_shader) {
2879 /* Vertex load indices. */
2880 if (shader->selector->info.num_inputs) {
2881 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
2882 &ctx->vertex_index0);
2883 for (unsigned i = 1; i < shader->selector->info.num_inputs; i++)
2884 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
2885 }
2886 *num_prolog_vgprs += shader->selector->info.num_inputs;
2887 }
2888 }
2889
2890 static void declare_vs_blit_inputs(struct si_shader_context *ctx,
2891 unsigned vs_blit_property)
2892 {
2893 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
2894 &ctx->vs_blit_inputs); /* i16 x1, y1 */
2895 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* i16 x2, y2 */
2896 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* depth */
2897
2898 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
2899 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color0 */
2900 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color1 */
2901 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color2 */
2902 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color3 */
2903 } else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
2904 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x1 */
2905 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y1 */
2906 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x2 */
2907 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y2 */
2908 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.z */
2909 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.w */
2910 }
2911 }
2912
2913 static void declare_tes_input_vgprs(struct si_shader_context *ctx)
2914 {
2915 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_u);
2916 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_v);
2917 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->tes_rel_patch_id);
2918 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tes_patch_id);
2919 }
2920
2921 enum {
2922 /* Convenient merged shader definitions. */
2923 SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
2924 SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
2925 };
2926
2927 void si_add_arg_checked(struct ac_shader_args *args,
2928 enum ac_arg_regfile file,
2929 unsigned registers, enum ac_arg_type type,
2930 struct ac_arg *arg,
2931 unsigned idx)
2932 {
2933 assert(args->arg_count == idx);
2934 ac_add_arg(args, file, registers, type, arg);
2935 }
2936
2937 static void create_function(struct si_shader_context *ctx)
2938 {
2939 struct si_shader *shader = ctx->shader;
2940 LLVMTypeRef returns[AC_MAX_ARGS];
2941 unsigned i, num_return_sgprs;
2942 unsigned num_returns = 0;
2943 unsigned num_prolog_vgprs = 0;
2944 unsigned type = ctx->type;
2945 unsigned vs_blit_property =
2946 shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
2947
2948 memset(&ctx->args, 0, sizeof(ctx->args));
2949
2950 /* Set MERGED shaders. */
2951 if (ctx->screen->info.chip_class >= GFX9) {
2952 if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
2953 type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
2954 else if (shader->key.as_es || shader->key.as_ngg || type == PIPE_SHADER_GEOMETRY)
2955 type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
2956 }
2957
2958 switch (type) {
2959 case PIPE_SHADER_VERTEX:
2960 declare_global_desc_pointers(ctx);
2961
2962 if (vs_blit_property) {
2963 declare_vs_blit_inputs(ctx, vs_blit_property);
2964
2965 /* VGPRs */
2966 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
2967 break;
2968 }
2969
2970 declare_per_stage_desc_pointers(ctx, true);
2971 declare_vs_specific_input_sgprs(ctx);
2972 if (!shader->is_gs_copy_shader)
2973 declare_vb_descriptor_input_sgprs(ctx);
2974
2975 if (shader->key.as_es) {
2976 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
2977 &ctx->es2gs_offset);
2978 } else if (shader->key.as_ls) {
2979 /* no extra parameters */
2980 } else {
2981 /* The locations of the other parameters are assigned dynamically. */
2982 declare_streamout_params(ctx, &shader->selector->so);
2983 }
2984
2985 /* VGPRs */
2986 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
2987
2988 /* Return values */
2989 if (shader->key.opt.vs_as_prim_discard_cs) {
2990 for (i = 0; i < 4; i++)
2991 returns[num_returns++] = ctx->f32; /* VGPRs */
2992 }
2993 break;
2994
2995 case PIPE_SHADER_TESS_CTRL: /* GFX6-GFX8 */
2996 declare_global_desc_pointers(ctx);
2997 declare_per_stage_desc_pointers(ctx, true);
2998 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
2999 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
3000 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
3001 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
3002 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3003 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
3004
3005 /* VGPRs */
3006 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
3007 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
3008
3009 /* param_tcs_offchip_offset and param_tcs_factor_offset are
3010 * placed after the user SGPRs.
3011 */
3012 for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
3013 returns[num_returns++] = ctx->i32; /* SGPRs */
3014 for (i = 0; i < 11; i++)
3015 returns[num_returns++] = ctx->f32; /* VGPRs */
3016 break;
3017
3018 case SI_SHADER_MERGED_VERTEX_TESSCTRL:
3019 /* Merged stages have 8 system SGPRs at the beginning. */
3020 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_HS */
3021 declare_per_stage_desc_pointers(ctx,
3022 ctx->type == PIPE_SHADER_TESS_CTRL);
3023 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3024 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
3025 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
3026 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
3027 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
3028 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
3029
3030 declare_global_desc_pointers(ctx);
3031 declare_per_stage_desc_pointers(ctx,
3032 ctx->type == PIPE_SHADER_VERTEX);
3033 declare_vs_specific_input_sgprs(ctx);
3034
3035 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
3036 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
3037 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
3038 declare_vb_descriptor_input_sgprs(ctx);
3039
3040 /* VGPRs (first TCS, then VS) */
3041 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
3042 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
3043
3044 if (ctx->type == PIPE_SHADER_VERTEX) {
3045 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
3046
3047 /* LS return values are inputs to the TCS main shader part. */
3048 for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
3049 returns[num_returns++] = ctx->i32; /* SGPRs */
3050 for (i = 0; i < 2; i++)
3051 returns[num_returns++] = ctx->f32; /* VGPRs */
3052 } else {
3053 /* TCS return values are inputs to the TCS epilog.
3054 *
3055 * param_tcs_offchip_offset, param_tcs_factor_offset,
3056 * param_tcs_offchip_layout, and param_rw_buffers
3057 * should be passed to the epilog.
3058 */
3059 for (i = 0; i <= 8 + GFX9_SGPR_TCS_OUT_LAYOUT; i++)
3060 returns[num_returns++] = ctx->i32; /* SGPRs */
3061 for (i = 0; i < 11; i++)
3062 returns[num_returns++] = ctx->f32; /* VGPRs */
3063 }
3064 break;
3065
3066 case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
3067 /* Merged stages have 8 system SGPRs at the beginning. */
3068 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_GS */
3069 declare_per_stage_desc_pointers(ctx,
3070 ctx->type == PIPE_SHADER_GEOMETRY);
3071
3072 if (ctx->shader->key.as_ngg)
3073 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_tg_info);
3074 else
3075 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
3076
3077 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
3078 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3079 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
3080 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */
3081 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
3082
3083 declare_global_desc_pointers(ctx);
3084 if (ctx->type != PIPE_SHADER_VERTEX || !vs_blit_property) {
3085 declare_per_stage_desc_pointers(ctx,
3086 (ctx->type == PIPE_SHADER_VERTEX ||
3087 ctx->type == PIPE_SHADER_TESS_EVAL));
3088 }
3089
3090 if (ctx->type == PIPE_SHADER_VERTEX) {
3091 if (vs_blit_property)
3092 declare_vs_blit_inputs(ctx, vs_blit_property);
3093 else
3094 declare_vs_specific_input_sgprs(ctx);
3095 } else {
3096 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
3097 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
3098 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
3099 /* Declare as many input SGPRs as the VS has. */
3100 }
3101
3102 if (ctx->type == PIPE_SHADER_VERTEX)
3103 declare_vb_descriptor_input_sgprs(ctx);
3104
3105 /* VGPRs (first GS, then VS/TES) */
3106 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx01_offset);
3107 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx23_offset);
3108 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
3109 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
3110 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx45_offset);
3111
3112 if (ctx->type == PIPE_SHADER_VERTEX) {
3113 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
3114 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
3115 declare_tes_input_vgprs(ctx);
3116 }
3117
3118 if (ctx->shader->key.as_es &&
3119 (ctx->type == PIPE_SHADER_VERTEX ||
3120 ctx->type == PIPE_SHADER_TESS_EVAL)) {
3121 unsigned num_user_sgprs;
3122
3123 if (ctx->type == PIPE_SHADER_VERTEX)
3124 num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR;
3125 else
3126 num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
3127
3128 /* ES return values are inputs to GS. */
3129 for (i = 0; i < 8 + num_user_sgprs; i++)
3130 returns[num_returns++] = ctx->i32; /* SGPRs */
3131 for (i = 0; i < 5; i++)
3132 returns[num_returns++] = ctx->f32; /* VGPRs */
3133 }
3134 break;
3135
3136 case PIPE_SHADER_TESS_EVAL:
3137 declare_global_desc_pointers(ctx);
3138 declare_per_stage_desc_pointers(ctx, true);
3139 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
3140 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
3141 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
3142
3143 if (shader->key.as_es) {
3144 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3145 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
3146 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->es2gs_offset);
3147 } else {
3148 declare_streamout_params(ctx, &shader->selector->so);
3149 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3150 }
3151
3152 /* VGPRs */
3153 declare_tes_input_vgprs(ctx);
3154 break;
3155
3156 case PIPE_SHADER_GEOMETRY:
3157 declare_global_desc_pointers(ctx);
3158 declare_per_stage_desc_pointers(ctx, true);
3159 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
3160 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_wave_id);
3161
3162 /* VGPRs */
3163 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[0]);
3164 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[1]);
3165 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
3166 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[2]);
3167 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[3]);
3168 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[4]);
3169 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[5]);
3170 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
3171 break;
3172
3173 case PIPE_SHADER_FRAGMENT:
3174 declare_global_desc_pointers(ctx);
3175 declare_per_stage_desc_pointers(ctx, true);
3176 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL,
3177 SI_PARAM_ALPHA_REF);
3178 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
3179 &ctx->args.prim_mask, SI_PARAM_PRIM_MASK);
3180
3181 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_sample,
3182 SI_PARAM_PERSP_SAMPLE);
3183 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3184 &ctx->args.persp_center, SI_PARAM_PERSP_CENTER);
3185 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3186 &ctx->args.persp_centroid, SI_PARAM_PERSP_CENTROID);
3187 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
3188 NULL, SI_PARAM_PERSP_PULL_MODEL);
3189 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3190 &ctx->args.linear_sample, SI_PARAM_LINEAR_SAMPLE);
3191 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3192 &ctx->args.linear_center, SI_PARAM_LINEAR_CENTER);
3193 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3194 &ctx->args.linear_centroid, SI_PARAM_LINEAR_CENTROID);
3195 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_FLOAT,
3196 NULL, SI_PARAM_LINE_STIPPLE_TEX);
3197 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3198 &ctx->args.frag_pos[0], SI_PARAM_POS_X_FLOAT);
3199 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3200 &ctx->args.frag_pos[1], SI_PARAM_POS_Y_FLOAT);
3201 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3202 &ctx->args.frag_pos[2], SI_PARAM_POS_Z_FLOAT);
3203 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3204 &ctx->args.frag_pos[3], SI_PARAM_POS_W_FLOAT);
3205 shader->info.face_vgpr_index = ctx->args.num_vgprs_used;
3206 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
3207 &ctx->args.front_face, SI_PARAM_FRONT_FACE);
3208 shader->info.ancillary_vgpr_index = ctx->args.num_vgprs_used;
3209 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
3210 &ctx->args.ancillary, SI_PARAM_ANCILLARY);
3211 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3212 &ctx->args.sample_coverage, SI_PARAM_SAMPLE_COVERAGE);
3213 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
3214 &ctx->pos_fixed_pt, SI_PARAM_POS_FIXED_PT);
3215
3216 /* Color inputs from the prolog. */
3217 if (shader->selector->info.colors_read) {
3218 unsigned num_color_elements =
3219 util_bitcount(shader->selector->info.colors_read);
3220
3221 for (i = 0; i < num_color_elements; i++)
3222 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, NULL);
3223
3224 num_prolog_vgprs += num_color_elements;
3225 }
3226
3227 /* Outputs for the epilog. */
3228 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
3229 num_returns =
3230 num_return_sgprs +
3231 util_bitcount(shader->selector->info.colors_written) * 4 +
3232 shader->selector->info.writes_z +
3233 shader->selector->info.writes_stencil +
3234 shader->selector->info.writes_samplemask +
3235 1 /* SampleMaskIn */;
3236
3237 num_returns = MAX2(num_returns,
3238 num_return_sgprs +
3239 PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
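/* Illustrative example: with two color outputs and only writes_z set, the
 * sum above yields num_return_sgprs + 2*4 + 1 + 0 + 0 + 1 return values;
 * the MAX2 then raises the count if needed so the epilog can always find
 * SampleMaskIn at its fixed minimum VGPR location, no matter how few
 * outputs the shader writes. */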
3240
3241 for (i = 0; i < num_return_sgprs; i++)
3242 returns[i] = ctx->i32;
3243 for (; i < num_returns; i++)
3244 returns[i] = ctx->f32;
3245 break;
3246
3247 case PIPE_SHADER_COMPUTE:
3248 declare_global_desc_pointers(ctx);
3249 declare_per_stage_desc_pointers(ctx, true);
3250 if (shader->selector->info.uses_grid_size)
3251 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT,
3252 &ctx->args.num_work_groups);
3253 if (shader->selector->info.uses_block_size &&
3254 shader->selector->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
3255 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT, &ctx->block_size);
3256
3257 unsigned cs_user_data_dwords =
3258 shader->selector->info.properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD];
3259 if (cs_user_data_dwords) {
3260 ac_add_arg(&ctx->args, AC_ARG_SGPR, cs_user_data_dwords, AC_ARG_INT,
3261 &ctx->cs_user_data);
3262 }
3263
3264 /* Hardware SGPRs. */
3265 for (i = 0; i < 3; i++) {
3266 if (shader->selector->info.uses_block_id[i]) {
3267 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
3268 &ctx->args.workgroup_ids[i]);
3269 }
3270 }
3271 if (shader->selector->info.uses_subgroup_info)
3272 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tg_size);
3273
3274 /* Hardware VGPRs. */
3275 ac_add_arg(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
3276 &ctx->args.local_invocation_ids);
3277 break;
3278 default:
3279 assert(0 && "unimplemented shader");
3280 return;
3281 }
3282
3283 si_llvm_create_func(ctx, "main", returns, num_returns,
3284 si_get_max_workgroup_size(shader));
3285
3286 /* Reserve register locations for VGPR inputs the PS prolog may need. */
3287 if (ctx->type == PIPE_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
3288 ac_llvm_add_target_dep_function_attr(ctx->main_fn,
3289 "InitialPSInputAddr",
3290 S_0286D0_PERSP_SAMPLE_ENA(1) |
3291 S_0286D0_PERSP_CENTER_ENA(1) |
3292 S_0286D0_PERSP_CENTROID_ENA(1) |
3293 S_0286D0_LINEAR_SAMPLE_ENA(1) |
3294 S_0286D0_LINEAR_CENTER_ENA(1) |
3295 S_0286D0_LINEAR_CENTROID_ENA(1) |
3296 S_0286D0_FRONT_FACE_ENA(1) |
3297 S_0286D0_ANCILLARY_ENA(1) |
3298 S_0286D0_POS_FIXED_PT_ENA(1));
3299 }
3300
3301 shader->info.num_input_sgprs = ctx->args.num_sgprs_used;
3302 shader->info.num_input_vgprs = ctx->args.num_vgprs_used;
3303
3304 assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
3305 shader->info.num_input_vgprs -= num_prolog_vgprs;
3306
3307 if (shader->key.as_ls || ctx->type == PIPE_SHADER_TESS_CTRL) {
3308 if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
3309 /* The LSHS size is not known until draw time, so we append it
3310 * at the end of whatever LDS use there may be in the rest of
3311 * the shader (currently none, unless LLVM decides to do its
3312 * own LDS-based lowering).
3313 */
3314 ctx->ac.lds = LLVMAddGlobalInAddressSpace(
3315 ctx->ac.module, LLVMArrayType(ctx->i32, 0),
3316 "__lds_end", AC_ADDR_SPACE_LDS);
3317 LLVMSetAlignment(ctx->ac.lds, 256);
3318 } else {
3319 ac_declare_lds_as_pointer(&ctx->ac);
3320 }
3321 }
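/* Note: "__lds_end" is a zero-sized array, so it reserves no LDS itself;
 * its 256-byte alignment merely pins the offset at which the linker may
 * start the dynamically-sized LSHS area that is appended at draw time. */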
3322
3323 /* Unlike radv, we override these arguments in the prolog, so they
3324 * appear to the API shader as normal arguments.
3325 */
3326 if (ctx->type == PIPE_SHADER_VERTEX) {
3327 ctx->abi.vertex_id = ac_get_arg(&ctx->ac, ctx->args.vertex_id);
3328 ctx->abi.instance_id = ac_get_arg(&ctx->ac, ctx->args.instance_id);
3329 } else if (ctx->type == PIPE_SHADER_FRAGMENT) {
3330 ctx->abi.persp_centroid = ac_get_arg(&ctx->ac, ctx->args.persp_centroid);
3331 ctx->abi.linear_centroid = ac_get_arg(&ctx->ac, ctx->args.linear_centroid);
3332 }
3333 }
3334
3335 /* Ensure that the esgs ring is declared.
3336 *
3337 * We declare it with 64KB alignment as a hint that the
3338 * pointer value will always be 0.
3339 */
3340 static void declare_esgs_ring(struct si_shader_context *ctx)
3341 {
3342 if (ctx->esgs_ring)
3343 return;
3344
3345 assert(!LLVMGetNamedGlobal(ctx->ac.module, "esgs_ring"));
3346
3347 ctx->esgs_ring = LLVMAddGlobalInAddressSpace(
3348 ctx->ac.module, LLVMArrayType(ctx->i32, 0),
3349 "esgs_ring",
3350 AC_ADDR_SPACE_LDS);
3351 LLVMSetLinkage(ctx->esgs_ring, LLVMExternalLinkage);
3352 LLVMSetAlignment(ctx->esgs_ring, 64 * 1024);
3353 }
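/* Why 64KB alignment implies a zero pointer: LDS allocations on these
 * chips never exceed 64KB per workgroup, so the only address that can
 * satisfy a 64KB alignment requirement is address 0. */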
3354
3355 /**
3356 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
3357 * for later use.
3358 */
3359 static void preload_ring_buffers(struct si_shader_context *ctx)
3360 {
3361 LLVMBuilderRef builder = ctx->ac.builder;
3362
3363 LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
3364
3365 if (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY) {
3366 if (ctx->screen->info.chip_class <= GFX8) {
3367 unsigned ring =
3368 ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
3369 : SI_ES_RING_ESGS;
3370 LLVMValueRef offset = LLVMConstInt(ctx->i32, ring, 0);
3371
3372 ctx->esgs_ring =
3373 ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
3374 } else {
3375 if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
3376 /* Declare the ESGS ring as an explicit LDS symbol. */
3377 declare_esgs_ring(ctx);
3378 } else {
3379 ac_declare_lds_as_pointer(&ctx->ac);
3380 ctx->esgs_ring = ctx->ac.lds;
3381 }
3382 }
3383 }
3384
3385 if (ctx->shader->is_gs_copy_shader) {
3386 LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
3387
3388 ctx->gsvs_ring[0] =
3389 ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
3390 } else if (ctx->type == PIPE_SHADER_GEOMETRY) {
3391 const struct si_shader_selector *sel = ctx->shader->selector;
3392 LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
3393 LLVMValueRef base_ring;
3394
3395 base_ring = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
3396
3397 /* The conceptual layout of the GSVS ring is
3398 * v0c0 .. vLc0 v0c1 .. vLc1 ..
3399 * but the real memory layout is swizzled across
3400 * threads:
3401 * t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
3402 * t16v0c0 ..
3403 * Override the buffer descriptor accordingly.
3404 */
3405 LLVMTypeRef v2i64 = LLVMVectorType(ctx->i64, 2);
3406 uint64_t stream_offset = 0;
3407
3408 for (unsigned stream = 0; stream < 4; ++stream) {
3409 unsigned num_components;
3410 unsigned stride;
3411 unsigned num_records;
3412 LLVMValueRef ring, tmp;
3413
3414 num_components = sel->info.num_stream_output_components[stream];
3415 if (!num_components)
3416 continue;
3417
3418 stride = 4 * num_components * sel->gs_max_out_vertices;
3419
3420 /* Limit on the stride field for <= GFX7. */
3421 assert(stride < (1 << 14));
3422
3423 num_records = ctx->ac.wave_size;
3424
3425 ring = LLVMBuildBitCast(builder, base_ring, v2i64, "");
3426 tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_0, "");
3427 tmp = LLVMBuildAdd(builder, tmp,
3428 LLVMConstInt(ctx->i64,
3429 stream_offset, 0), "");
3430 stream_offset += stride * ctx->ac.wave_size;
3431
3432 ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_0, "");
3433 ring = LLVMBuildBitCast(builder, ring, ctx->v4i32, "");
3434 tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_1, "");
3435 tmp = LLVMBuildOr(builder, tmp,
3436 LLVMConstInt(ctx->i32,
3437 S_008F04_STRIDE(stride) |
3438 S_008F04_SWIZZLE_ENABLE(1), 0), "");
3439 ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_1, "");
3440 ring = LLVMBuildInsertElement(builder, ring,
3441 LLVMConstInt(ctx->i32, num_records, 0),
3442 LLVMConstInt(ctx->i32, 2, 0), "");
3443
3444 uint32_t rsrc3 =
3445 S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
3446 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
3447 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
3448 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
3449 S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
3450 S_008F0C_ADD_TID_ENABLE(1);
3451
3452 if (ctx->ac.chip_class >= GFX10) {
3453 rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
3454 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) |
3455 S_008F0C_RESOURCE_LEVEL(1);
3456 } else {
3457 rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
3458 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
3459 S_008F0C_ELEMENT_SIZE(1); /* element_size = 4 (bytes) */
3460 }
3461
3462 ring = LLVMBuildInsertElement(builder, ring,
3463 LLVMConstInt(ctx->i32, rsrc3, false),
3464 LLVMConstInt(ctx->i32, 3, 0), "");
3465
3466 ctx->gsvs_ring[stream] = ring;
3467 }
3468 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
3469 ctx->tess_offchip_ring = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TES);
3470 }
3471 }
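/* Worked example for the ring setup above, with illustrative numbers:
 * a stream with 4 output components and gs_max_out_vertices = 16 gets
 * stride = 4 * 4 * 16 = 256 bytes per thread, and with a wave size of 64
 * the next stream's base starts 256 * 64 = 16384 bytes further into the
 * ring. */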
3472
3473 /* For the UMR disassembler. */
3474 #define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
3475 #define DEBUGGER_NUM_MARKERS 5
3476
3477 static bool si_shader_binary_open(struct si_screen *screen,
3478 struct si_shader *shader,
3479 struct ac_rtld_binary *rtld)
3480 {
3481 const struct si_shader_selector *sel = shader->selector;
3482 const char *part_elfs[5];
3483 size_t part_sizes[5];
3484 unsigned num_parts = 0;
3485
3486 #define add_part(shader_or_part) \
3487 if (shader_or_part) { \
3488 part_elfs[num_parts] = (shader_or_part)->binary.elf_buffer; \
3489 part_sizes[num_parts] = (shader_or_part)->binary.elf_size; \
3490 num_parts++; \
3491 }
3492
3493 add_part(shader->prolog);
3494 add_part(shader->previous_stage);
3495 add_part(shader->prolog2);
3496 add_part(shader);
3497 add_part(shader->epilog);
3498
3499 #undef add_part
3500
3501 struct ac_rtld_symbol lds_symbols[2];
3502 unsigned num_lds_symbols = 0;
3503
3504 if (sel && screen->info.chip_class >= GFX9 && !shader->is_gs_copy_shader &&
3505 (sel->type == PIPE_SHADER_GEOMETRY || shader->key.as_ngg)) {
3506 /* We add this symbol even on LLVM <= 8 to ensure that
3507 * shader->config.lds_size is set correctly below.
3508 */
3509 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
3510 sym->name = "esgs_ring";
3511 sym->size = shader->gs_info.esgs_ring_size;
3512 sym->align = 64 * 1024;
3513 }
3514
3515 if (shader->key.as_ngg && sel->type == PIPE_SHADER_GEOMETRY) {
3516 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
3517 sym->name = "ngg_emit";
3518 sym->size = shader->ngg.ngg_emit_size * 4;
3519 sym->align = 4;
3520 }
3521
3522 bool ok = ac_rtld_open(rtld, (struct ac_rtld_open_info){
3523 .info = &screen->info,
3524 .options = {
3525 .halt_at_entry = screen->options.halt_shaders,
3526 },
3527 .shader_type = tgsi_processor_to_shader_stage(sel->type),
3528 .wave_size = si_get_shader_wave_size(shader),
3529 .num_parts = num_parts,
3530 .elf_ptrs = part_elfs,
3531 .elf_sizes = part_sizes,
3532 .num_shared_lds_symbols = num_lds_symbols,
3533 .shared_lds_symbols = lds_symbols });
3534
3535 if (rtld->lds_size > 0) {
3536 unsigned alloc_granularity = screen->info.chip_class >= GFX7 ? 512 : 256;
3537 shader->config.lds_size =
3538 align(rtld->lds_size, alloc_granularity) / alloc_granularity;
3539 }
3540
3541 return ok;
3542 }
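/* Worked example: rtld reports LDS usage in bytes, while
 * shader->config.lds_size is in allocation units. With the GFX7+
 * granularity of 512 bytes, rtld->lds_size = 5000 becomes
 * align(5000, 512) / 512 = 5120 / 512 = 10 units. */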
3543
3544 static unsigned si_get_shader_binary_size(struct si_screen *screen, struct si_shader *shader)
3545 {
3546 struct ac_rtld_binary rtld;
3547 si_shader_binary_open(screen, shader, &rtld);
3548 unsigned size = rtld.exec_size;
ac_rtld_close(&rtld); /* close the binary to avoid leaking rtld allocations */
return size;
3549 }
3550
3551 static bool si_get_external_symbol(void *data, const char *name, uint64_t *value)
3552 {
3553 uint64_t *scratch_va = data;
3554
3555 if (!strcmp(scratch_rsrc_dword0_symbol, name)) {
3556 *value = (uint32_t)*scratch_va;
3557 return true;
3558 }
3559 if (!strcmp(scratch_rsrc_dword1_symbol, name)) {
3560 /* Enable scratch coalescing. */
3561 *value = S_008F04_BASE_ADDRESS_HI(*scratch_va >> 32) |
3562 S_008F04_SWIZZLE_ENABLE(1);
3563 return true;
3564 }
3565
3566 return false;
3567 }
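#if 0
/* Illustrative sketch (hypothetical helper, not driver code): the symbol
 * patching above is just a split of the 64-bit scratch VA, plus the
 * swizzle-enable bit in the high dword: */
static void split_scratch_va(uint64_t scratch_va,
                             uint32_t *rsrc_dword0, uint32_t *rsrc_dword1)
{
	*rsrc_dword0 = (uint32_t)scratch_va;           /* low 32 bits */
	*rsrc_dword1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
		       S_008F04_SWIZZLE_ENABLE(1);     /* high bits + swizzle */
}
#endif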
3568
3569 bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader,
3570 uint64_t scratch_va)
3571 {
3572 struct ac_rtld_binary binary;
3573 if (!si_shader_binary_open(sscreen, shader, &binary))
3574 return false;
3575
3576 si_resource_reference(&shader->bo, NULL);
3577 shader->bo = si_aligned_buffer_create(&sscreen->b,
3578 sscreen->info.cpdma_prefetch_writes_memory ?
3579 0 : SI_RESOURCE_FLAG_READ_ONLY,
3580 PIPE_USAGE_IMMUTABLE,
3581 align(binary.rx_size, SI_CPDMA_ALIGNMENT),
3582 256);
3583 if (!shader->bo)
3584 return false;
3585
3586 /* Upload. */
3587 struct ac_rtld_upload_info u = {};
3588 u.binary = &binary;
3589 u.get_external_symbol = si_get_external_symbol;
3590 u.cb_data = &scratch_va;
3591 u.rx_va = shader->bo->gpu_address;
3592 u.rx_ptr = sscreen->ws->buffer_map(shader->bo->buf, NULL,
3593 PIPE_TRANSFER_READ_WRITE |
3594 PIPE_TRANSFER_UNSYNCHRONIZED |
3595 RADEON_TRANSFER_TEMPORARY);
3596 if (!u.rx_ptr)
3597 return false;
3598
3599 bool ok = ac_rtld_upload(&u);
3600
3601 sscreen->ws->buffer_unmap(shader->bo->buf);
3602 ac_rtld_close(&binary);
3603
3604 return ok;
3605 }
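/* Note on the allocation above: the buffer size is padded to
 * SI_CPDMA_ALIGNMENT, and the READ_ONLY flag is only used when the CP DMA
 * prefetch doesn't write memory, presumably so that prefetching the shader
 * code through CP DMA remains safe on all chips. */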
3606
3607 static void si_shader_dump_disassembly(struct si_screen *screen,
3608 const struct si_shader_binary *binary,
3609 enum pipe_shader_type shader_type,
3610 unsigned wave_size,
3611 struct pipe_debug_callback *debug,
3612 const char *name, FILE *file)
3613 {
3614 struct ac_rtld_binary rtld_binary;
3615
3616 if (!ac_rtld_open(&rtld_binary, (struct ac_rtld_open_info){
3617 .info = &screen->info,
3618 .shader_type = tgsi_processor_to_shader_stage(shader_type),
3619 .wave_size = wave_size,
3620 .num_parts = 1,
3621 .elf_ptrs = &binary->elf_buffer,
3622 .elf_sizes = &binary->elf_size }))
3623 return;
3624
3625 const char *disasm;
3626 size_t nbytes;
3627
3628 if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm, &nbytes))
3629 goto out;
3630
3631 if (nbytes > INT_MAX)
3632 goto out;
3633
3634 if (debug && debug->debug_message) {
3635 /* Very long debug messages are cut off, so send the
3636 * disassembly one line at a time. This causes more
3637 * overhead, but on the plus side it simplifies
3638 * parsing of the resulting logs.
3639 */
3640 pipe_debug_message(debug, SHADER_INFO,
3641 "Shader Disassembly Begin");
3642
3643 uint64_t line = 0;
3644 while (line < nbytes) {
3645 int count = nbytes - line;
3646 const char *nl = memchr(disasm + line, '\n', nbytes - line);
3647 if (nl)
3648 count = nl - (disasm + line);
3649
3650 if (count) {
3651 pipe_debug_message(debug, SHADER_INFO,
3652 "%.*s", count, disasm + line);
3653 }
3654
3655 line += count + 1;
3656 }
3657
3658 pipe_debug_message(debug, SHADER_INFO,
3659 "Shader Disassembly End");
3660 }
3661
3662 if (file) {
3663 fprintf(file, "Shader %s disassembly:\n", name);
3664 fprintf(file, "%.*s", (int)nbytes, disasm); /* disasm is not NUL-terminated */
3665 }
3666
3667 out:
3668 ac_rtld_close(&rtld_binary);
3669 }
3670
3671 static void si_calculate_max_simd_waves(struct si_shader *shader)
3672 {
3673 struct si_screen *sscreen = shader->selector->screen;
3674 struct ac_shader_config *conf = &shader->config;
3675 unsigned num_inputs = shader->selector->info.num_inputs;
3676 unsigned lds_increment = sscreen->info.chip_class >= GFX7 ? 512 : 256;
3677 unsigned lds_per_wave = 0;
3678 unsigned max_simd_waves;
3679
3680 max_simd_waves = sscreen->info.max_wave64_per_simd;
3681
3682 /* Compute LDS usage per wave (PS and CS are the interesting cases). */
3683 switch (shader->selector->type) {
3684 case PIPE_SHADER_FRAGMENT:
3685 /* The minimum usage per wave is (num_inputs * 48). The maximum
3686 * usage is (num_inputs * 48 * 16).
3687 * We can get anything in between and it varies between waves.
3688 *
3689 * The 48 bytes per input for a single primitive is equal to
3690 * 4 bytes/component * 4 components/input * 3 points.
3691 *
3692 * Other stages either don't know the size at compile time or don't
3693 * allocate LDS per wave; they allocate it per thread group instead.
3694 */
3695 lds_per_wave = conf->lds_size * lds_increment +
3696 align(num_inputs * 48, lds_increment);
3697 break;
3698 case PIPE_SHADER_COMPUTE:
3699 if (shader->selector) {
3700 unsigned max_workgroup_size =
3701 si_get_max_workgroup_size(shader);
3702 lds_per_wave = (conf->lds_size * lds_increment) /
3703 DIV_ROUND_UP(max_workgroup_size,
3704 sscreen->compute_wave_size);
3705 }
3706 break;
3707 default:;
3708 }
3709
3710 /* Compute the per-SIMD wave counts. */
3711 if (conf->num_sgprs) {
3712 max_simd_waves =
3713 MIN2(max_simd_waves,
3714 sscreen->info.num_physical_sgprs_per_simd / conf->num_sgprs);
3715 }
3716
3717 if (conf->num_vgprs) {
3718 /* Always print wave limits as Wave64, so that we can compare
3719 * Wave32 and Wave64 with shader-db fairly. */
3720 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd;
3721 max_simd_waves = MIN2(max_simd_waves, max_vgprs / conf->num_vgprs);
3722 }
3723
3724 /* LDS is 64KB per CU (4 SIMDs) on GFX6-9, which is 16KB per SIMD (usage above
3725 * 16KB makes some SIMDs unoccupied).
3726 *
3727 * On GFX10, LDS is 128KB in WGP mode and 64KB in CU mode. Assume WGP mode is used.
3728 */
3729 unsigned max_lds_size = sscreen->info.chip_class >= GFX10 ? 128*1024 : 64*1024;
3730 unsigned max_lds_per_simd = max_lds_size / 4;
3731 if (lds_per_wave)
3732 max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);
3733
3734 shader->info.max_simd_waves = max_simd_waves;
3735 }
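/* Worked example with illustrative register counts: for conf->num_sgprs =
 * 40 and conf->num_vgprs = 64, with 800 physical SGPRs and 1024 physical
 * Wave64 VGPRs per SIMD, the limits are min(800/40, 1024/64) =
 * min(20, 16) = 16 waves; if a PS additionally needs 4096 bytes of LDS
 * per wave out of a 16KB per-SIMD budget, the final bound drops to
 * min(16, 16384 / 4096) = 4 waves. */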
3736
3737 void si_shader_dump_stats_for_shader_db(struct si_screen *screen,
3738 struct si_shader *shader,
3739 struct pipe_debug_callback *debug)
3740 {
3741 const struct ac_shader_config *conf = &shader->config;
3742
3743 if (screen->options.debug_disassembly)
3744 si_shader_dump_disassembly(screen, &shader->binary,
3745 shader->selector->type,
3746 si_get_shader_wave_size(shader),
3747 debug, "main", NULL);
3748
3749 pipe_debug_message(debug, SHADER_INFO,
3750 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
3751 "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
3752 "Spilled VGPRs: %d PrivMem VGPRs: %d",
3753 conf->num_sgprs, conf->num_vgprs,
3754 si_get_shader_binary_size(screen, shader),
3755 conf->lds_size, conf->scratch_bytes_per_wave,
3756 shader->info.max_simd_waves, conf->spilled_sgprs,
3757 conf->spilled_vgprs, shader->info.private_mem_vgprs);
3758 }
3759
3760 static void si_shader_dump_stats(struct si_screen *sscreen,
3761 struct si_shader *shader,
3762 FILE *file,
3763 bool check_debug_option)
3764 {
3765 const struct ac_shader_config *conf = &shader->config;
3766
3767 if (!check_debug_option ||
3768 si_can_dump_shader(sscreen, shader->selector->type)) {
3769 if (shader->selector->type == PIPE_SHADER_FRAGMENT) {
3770 fprintf(file, "*** SHADER CONFIG ***\n"
3771 "SPI_PS_INPUT_ADDR = 0x%04x\n"
3772 "SPI_PS_INPUT_ENA = 0x%04x\n",
3773 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
3774 }
3775
3776 fprintf(file, "*** SHADER STATS ***\n"
3777 "SGPRS: %d\n"
3778 "VGPRS: %d\n"
3779 "Spilled SGPRs: %d\n"
3780 "Spilled VGPRs: %d\n"
3781 "Private memory VGPRs: %d\n"
3782 "Code Size: %d bytes\n"
3783 "LDS: %d blocks\n"
3784 "Scratch: %d bytes per wave\n"
3785 "Max Waves: %d\n"
3786 "********************\n\n\n",
3787 conf->num_sgprs, conf->num_vgprs,
3788 conf->spilled_sgprs, conf->spilled_vgprs,
3789 shader->info.private_mem_vgprs,
3790 si_get_shader_binary_size(sscreen, shader),
3791 conf->lds_size, conf->scratch_bytes_per_wave,
3792 shader->info.max_simd_waves);
3793 }
3794 }
3795
3796 const char *si_get_shader_name(const struct si_shader *shader)
3797 {
3798 switch (shader->selector->type) {
3799 case PIPE_SHADER_VERTEX:
3800 if (shader->key.as_es)
3801 return "Vertex Shader as ES";
3802 else if (shader->key.as_ls)
3803 return "Vertex Shader as LS";
3804 else if (shader->key.opt.vs_as_prim_discard_cs)
3805 return "Vertex Shader as Primitive Discard CS";
3806 else if (shader->key.as_ngg)
3807 return "Vertex Shader as ESGS";
3808 else
3809 return "Vertex Shader as VS";
3810 case PIPE_SHADER_TESS_CTRL:
3811 return "Tessellation Control Shader";
3812 case PIPE_SHADER_TESS_EVAL:
3813 if (shader->key.as_es)
3814 return "Tessellation Evaluation Shader as ES";
3815 else if (shader->key.as_ngg)
3816 return "Tessellation Evaluation Shader as ESGS";
3817 else
3818 return "Tessellation Evaluation Shader as VS";
3819 case PIPE_SHADER_GEOMETRY:
3820 if (shader->is_gs_copy_shader)
3821 return "GS Copy Shader as VS";
3822 else
3823 return "Geometry Shader";
3824 case PIPE_SHADER_FRAGMENT:
3825 return "Pixel Shader";
3826 case PIPE_SHADER_COMPUTE:
3827 return "Compute Shader";
3828 default:
3829 return "Unknown Shader";
3830 }
3831 }
3832
3833 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
3834 struct pipe_debug_callback *debug,
3835 FILE *file, bool check_debug_option)
3836 {
3837 enum pipe_shader_type shader_type = shader->selector->type;
3838
3839 if (!check_debug_option ||
3840 si_can_dump_shader(sscreen, shader_type))
3841 si_dump_shader_key(shader, file);
3842
3843 if (!check_debug_option && shader->binary.llvm_ir_string) {
3844 if (shader->previous_stage &&
3845 shader->previous_stage->binary.llvm_ir_string) {
3846 fprintf(file, "\n%s - previous stage - LLVM IR:\n\n",
3847 si_get_shader_name(shader));
3848 fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
3849 }
3850
3851 fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
3852 si_get_shader_name(shader));
3853 fprintf(file, "%s\n", shader->binary.llvm_ir_string);
3854 }
3855
3856 if (!check_debug_option ||
3857 (si_can_dump_shader(sscreen, shader_type) &&
3858 !(sscreen->debug_flags & DBG(NO_ASM)))) {
3859 unsigned wave_size = si_get_shader_wave_size(shader);
3860
3861 fprintf(file, "\n%s:\n", si_get_shader_name(shader));
3862
3863 if (shader->prolog)
3864 si_shader_dump_disassembly(sscreen, &shader->prolog->binary,
3865 shader_type, wave_size, debug, "prolog", file);
3866 if (shader->previous_stage)
3867 si_shader_dump_disassembly(sscreen, &shader->previous_stage->binary,
3868 shader_type, wave_size, debug, "previous stage", file);
3869 if (shader->prolog2)
3870 si_shader_dump_disassembly(sscreen, &shader->prolog2->binary,
3871 shader_type, wave_size, debug, "prolog2", file);
3872
3873 si_shader_dump_disassembly(sscreen, &shader->binary, shader_type,
3874 wave_size, debug, "main", file);
3875
3876 if (shader->epilog)
3877 si_shader_dump_disassembly(sscreen, &shader->epilog->binary,
3878 shader_type, wave_size, debug, "epilog", file);
3879 fprintf(file, "\n");
3880 }
3881
3882 si_shader_dump_stats(sscreen, shader, file, check_debug_option);
3883 }
3884
3885 static int si_compile_llvm(struct si_screen *sscreen,
3886 struct si_shader_binary *binary,
3887 struct ac_shader_config *conf,
3888 struct ac_llvm_compiler *compiler,
3889 LLVMModuleRef mod,
3890 struct pipe_debug_callback *debug,
3891 enum pipe_shader_type shader_type,
3892 unsigned wave_size,
3893 const char *name,
3894 bool less_optimized)
3895 {
3896 unsigned count = p_atomic_inc_return(&sscreen->num_compilations);
3897
3898 if (si_can_dump_shader(sscreen, shader_type)) {
3899 fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
3900
3901 if (!(sscreen->debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
3902 fprintf(stderr, "%s LLVM IR:\n\n", name);
3903 ac_dump_module(mod);
3904 fprintf(stderr, "\n");
3905 }
3906 }
3907
3908 if (sscreen->record_llvm_ir) {
3909 char *ir = LLVMPrintModuleToString(mod);
3910 binary->llvm_ir_string = strdup(ir);
3911 LLVMDisposeMessage(ir);
3912 }
3913
3914 if (!si_replace_shader(count, binary)) {
3915 unsigned r = si_llvm_compile(mod, binary, compiler, debug,
3916 less_optimized, wave_size);
3917 if (r)
3918 return r;
3919 }
3920
3921 struct ac_rtld_binary rtld;
3922 if (!ac_rtld_open(&rtld, (struct ac_rtld_open_info){
3923 .info = &sscreen->info,
3924 .shader_type = tgsi_processor_to_shader_stage(shader_type),
3925 .wave_size = wave_size,
3926 .num_parts = 1,
3927 .elf_ptrs = &binary->elf_buffer,
3928 .elf_sizes = &binary->elf_size }))
3929 return -1;
3930
3931 bool ok = ac_rtld_read_config(&rtld, conf);
3932 ac_rtld_close(&rtld);
3933 if (!ok)
3934 return -1;
3935
3936 /* Enable 64-bit and 16-bit denormals, because there is no performance
3937 * cost.
3938 *
3939 * If denormals are enabled, all floating-point output modifiers are
3940 * ignored.
3941 *
3942 * Don't enable denormals for 32-bit floats, because:
3943 * - Floating-point output modifiers would be ignored by the hw.
3944 * - Some opcodes don't support denormals, such as v_mad_f32. We would
3945 * have to stop using those.
3946 * - GFX6 & GFX7 would be very slow.
3947 */
3948 conf->float_mode |= V_00B028_FP_64_DENORMS;
3949
3950 return 0;
3951 }
3952
3953 /* Generate code for the hardware VS shader stage to go with a geometry shader */
3954 struct si_shader *
3955 si_generate_gs_copy_shader(struct si_screen *sscreen,
3956 struct ac_llvm_compiler *compiler,
3957 struct si_shader_selector *gs_selector,
3958 struct pipe_debug_callback *debug)
3959 {
3960 struct si_shader_context ctx;
3961 struct si_shader *shader;
3962 LLVMBuilderRef builder;
3963 struct si_shader_output_values outputs[SI_MAX_VS_OUTPUTS];
3964 struct si_shader_info *gsinfo = &gs_selector->info;
3965 int i;
3966
3967
3968 shader = CALLOC_STRUCT(si_shader);
3969 if (!shader)
3970 return NULL;
3971
3972 /* We can leave the fence as permanently signaled because the GS copy
3973 * shader only becomes visible globally after it has been compiled. */
3974 util_queue_fence_init(&shader->ready);
3975
3976 shader->selector = gs_selector;
3977 shader->is_gs_copy_shader = true;
3978
3979 si_llvm_context_init(&ctx, sscreen, compiler,
3980 si_get_wave_size(sscreen, PIPE_SHADER_VERTEX, false, false));
3981 ctx.shader = shader;
3982 ctx.type = PIPE_SHADER_VERTEX;
3983
3984 builder = ctx.ac.builder;
3985
3986 create_function(&ctx);
3987 preload_ring_buffers(&ctx);
3988
3989 LLVMValueRef voffset =
3990 LLVMBuildMul(ctx.ac.builder, ctx.abi.vertex_id,
3991 LLVMConstInt(ctx.i32, 4, 0), "");
3992
3993 /* Fetch the vertex stream ID. */
3994 LLVMValueRef stream_id;
3995
3996 if (!sscreen->use_ngg_streamout && gs_selector->so.num_outputs)
3997 stream_id = si_unpack_param(&ctx, ctx.streamout_config, 24, 2);
3998 else
3999 stream_id = ctx.i32_0;
4000
4001 /* Fill in output information. */
4002 for (i = 0; i < gsinfo->num_outputs; ++i) {
4003 outputs[i].semantic_name = gsinfo->output_semantic_name[i];
4004 outputs[i].semantic_index = gsinfo->output_semantic_index[i];
4005
4006 for (int chan = 0; chan < 4; chan++) {
4007 outputs[i].vertex_stream[chan] =
4008 (gsinfo->output_streams[i] >> (2 * chan)) & 3;
4009 }
4010 }
4011
4012 LLVMBasicBlockRef end_bb;
4013 LLVMValueRef switch_inst;
4014
4015 end_bb = LLVMAppendBasicBlockInContext(ctx.ac.context, ctx.main_fn, "end");
4016 switch_inst = LLVMBuildSwitch(builder, stream_id, end_bb, 4);
4017
4018 for (int stream = 0; stream < 4; stream++) {
4019 LLVMBasicBlockRef bb;
4020 unsigned offset;
4021
4022 if (!gsinfo->num_stream_output_components[stream])
4023 continue;
4024
4025 if (stream > 0 && !gs_selector->so.num_outputs)
4026 continue;
4027
4028 bb = LLVMInsertBasicBlockInContext(ctx.ac.context, end_bb, "out");
4029 LLVMAddCase(switch_inst, LLVMConstInt(ctx.i32, stream, 0), bb);
4030 LLVMPositionBuilderAtEnd(builder, bb);
4031
4032 /* Fetch vertex data from GSVS ring */
4033 offset = 0;
4034 for (i = 0; i < gsinfo->num_outputs; ++i) {
4035 for (unsigned chan = 0; chan < 4; chan++) {
4036 if (!(gsinfo->output_usagemask[i] & (1 << chan)) ||
4037 outputs[i].vertex_stream[chan] != stream) {
4038 outputs[i].values[chan] = LLVMGetUndef(ctx.f32);
4039 continue;
4040 }
4041
4042 LLVMValueRef soffset = LLVMConstInt(ctx.i32,
4043 offset * gs_selector->gs_max_out_vertices * 16 * 4, 0);
4044 offset++;
4045
4046 outputs[i].values[chan] =
4047 ac_build_buffer_load(&ctx.ac,
4048 ctx.gsvs_ring[0], 1,
4049 ctx.i32_0, voffset,
4050 soffset, 0, ac_glc | ac_slc,
4051 true, false);
4052 }
4053 }
4054
4055 /* Streamout and exports. */
4056 if (!sscreen->use_ngg_streamout && gs_selector->so.num_outputs) {
4057 si_llvm_emit_streamout(&ctx, outputs,
4058 gsinfo->num_outputs,
4059 stream);
4060 }
4061
4062 if (stream == 0)
4063 si_llvm_export_vs(&ctx, outputs, gsinfo->num_outputs);
4064
4065 LLVMBuildBr(builder, end_bb);
4066 }
4067
4068 LLVMPositionBuilderAtEnd(builder, end_bb);
4069
4070 LLVMBuildRetVoid(ctx.ac.builder);
4071
4072 ctx.type = PIPE_SHADER_GEOMETRY; /* override for shader dumping */
4073 si_llvm_optimize_module(&ctx);
4074
4075 bool ok = false;
4076 if (si_compile_llvm(sscreen, &ctx.shader->binary,
4077 &ctx.shader->config, ctx.compiler,
4078 ctx.ac.module,
4079 debug, PIPE_SHADER_GEOMETRY, ctx.ac.wave_size,
4080 "GS Copy Shader", false) == 0) {
4081 if (si_can_dump_shader(sscreen, PIPE_SHADER_GEOMETRY))
4082 fprintf(stderr, "GS Copy Shader:\n");
4083 si_shader_dump(sscreen, ctx.shader, debug, stderr, true);
4084
4085 if (!ctx.shader->config.scratch_bytes_per_wave)
4086 ok = si_shader_binary_upload(sscreen, ctx.shader, 0);
4087 else
4088 ok = true;
4089 }
4090
4091 si_llvm_dispose(&ctx);
4092
4093 if (!ok) {
4094 FREE(shader);
4095 shader = NULL;
4096 } else {
4097 si_fix_resource_usage(sscreen, shader);
4098 }
4099 return shader;
4100 }
4101
4102 static void si_dump_shader_key_vs(const struct si_shader_key *key,
4103 const struct si_vs_prolog_bits *prolog,
4104 const char *prefix, FILE *f)
4105 {
4106 fprintf(f, " %s.instance_divisor_is_one = %u\n",
4107 prefix, prolog->instance_divisor_is_one);
4108 fprintf(f, " %s.instance_divisor_is_fetched = %u\n",
4109 prefix, prolog->instance_divisor_is_fetched);
4110 fprintf(f, " %s.unpack_instance_id_from_vertex_id = %u\n",
4111 prefix, prolog->unpack_instance_id_from_vertex_id);
4112 fprintf(f, " %s.ls_vgpr_fix = %u\n",
4113 prefix, prolog->ls_vgpr_fix);
4114
4115 fprintf(f, " mono.vs.fetch_opencode = %x\n", key->mono.vs_fetch_opencode);
4116 fprintf(f, " mono.vs.fix_fetch = {");
4117 for (int i = 0; i < SI_MAX_ATTRIBS; i++) {
4118 union si_vs_fix_fetch fix = key->mono.vs_fix_fetch[i];
4119 if (i)
4120 fprintf(f, ", ");
4121 if (!fix.bits)
4122 fprintf(f, "0");
4123 else
4124 fprintf(f, "%u.%u.%u.%u", fix.u.reverse, fix.u.log_size,
4125 fix.u.num_channels_m1, fix.u.format);
4126 }
4127 fprintf(f, "}\n");
4128 }
4129
4130 static void si_dump_shader_key(const struct si_shader *shader, FILE *f)
4131 {
4132 const struct si_shader_key *key = &shader->key;
4133 enum pipe_shader_type shader_type = shader->selector->type;
4134
4135 fprintf(f, "SHADER KEY\n");
4136
4137 switch (shader_type) {
4138 case PIPE_SHADER_VERTEX:
4139 si_dump_shader_key_vs(key, &key->part.vs.prolog,
4140 "part.vs.prolog", f);
4141 fprintf(f, " as_es = %u\n", key->as_es);
4142 fprintf(f, " as_ls = %u\n", key->as_ls);
4143 fprintf(f, " as_ngg = %u\n", key->as_ngg);
4144 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
4145 key->mono.u.vs_export_prim_id);
4146 fprintf(f, " opt.vs_as_prim_discard_cs = %u\n",
4147 key->opt.vs_as_prim_discard_cs);
4148 fprintf(f, " opt.cs_prim_type = %s\n",
4149 tgsi_primitive_names[key->opt.cs_prim_type]);
4150 fprintf(f, " opt.cs_indexed = %u\n",
4151 key->opt.cs_indexed);
4152 fprintf(f, " opt.cs_instancing = %u\n",
4153 key->opt.cs_instancing);
4154 fprintf(f, " opt.cs_primitive_restart = %u\n",
4155 key->opt.cs_primitive_restart);
4156 fprintf(f, " opt.cs_provoking_vertex_first = %u\n",
4157 key->opt.cs_provoking_vertex_first);
4158 fprintf(f, " opt.cs_need_correct_orientation = %u\n",
4159 key->opt.cs_need_correct_orientation);
4160 fprintf(f, " opt.cs_cull_front = %u\n",
4161 key->opt.cs_cull_front);
4162 fprintf(f, " opt.cs_cull_back = %u\n",
4163 key->opt.cs_cull_back);
4164 fprintf(f, " opt.cs_cull_z = %u\n",
4165 key->opt.cs_cull_z);
4166 fprintf(f, " opt.cs_halfz_clip_space = %u\n",
4167 key->opt.cs_halfz_clip_space);
4168 break;
4169
4170 case PIPE_SHADER_TESS_CTRL:
4171 if (shader->selector->screen->info.chip_class >= GFX9) {
4172 si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
4173 "part.tcs.ls_prolog", f);
4174 }
4175 fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
4176 fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
4177 break;
4178
4179 case PIPE_SHADER_TESS_EVAL:
4180 fprintf(f, " as_es = %u\n", key->as_es);
4181 fprintf(f, " as_ngg = %u\n", key->as_ngg);
4182 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
4183 key->mono.u.vs_export_prim_id);
4184 break;
4185
4186 case PIPE_SHADER_GEOMETRY:
4187 if (shader->is_gs_copy_shader)
4188 break;
4189
4190 if (shader->selector->screen->info.chip_class >= GFX9 &&
4191 key->part.gs.es->type == PIPE_SHADER_VERTEX) {
4192 si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
4193 "part.gs.vs_prolog", f);
4194 }
4195 fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
4196 fprintf(f, " part.gs.prolog.gfx9_prev_is_vs = %u\n", key->part.gs.prolog.gfx9_prev_is_vs);
4197 fprintf(f, " as_ngg = %u\n", key->as_ngg);
4198 break;
4199
4200 case PIPE_SHADER_COMPUTE:
4201 break;
4202
4203 case PIPE_SHADER_FRAGMENT:
4204 fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
4205 fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
4206 fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
4207 fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
4208 fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
4209 fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
4210 fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
4211 fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
4212 fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
4213 fprintf(f, " part.ps.prolog.samplemask_log_ps_iter = %u\n", key->part.ps.prolog.samplemask_log_ps_iter);
4214 fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
4215 fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
4216 fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
4217 fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
4218 fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
4219 fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
4220 fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
4221 fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
4222 fprintf(f, " mono.u.ps.interpolate_at_sample_force_center = %u\n", key->mono.u.ps.interpolate_at_sample_force_center);
4223 fprintf(f, " mono.u.ps.fbfetch_msaa = %u\n", key->mono.u.ps.fbfetch_msaa);
4224 fprintf(f, " mono.u.ps.fbfetch_is_1D = %u\n", key->mono.u.ps.fbfetch_is_1D);
4225 fprintf(f, " mono.u.ps.fbfetch_layered = %u\n", key->mono.u.ps.fbfetch_layered);
4226 break;
4227
4228 default:
4229 assert(0);
4230 }
4231
4232 if ((shader_type == PIPE_SHADER_GEOMETRY ||
4233 shader_type == PIPE_SHADER_TESS_EVAL ||
4234 shader_type == PIPE_SHADER_VERTEX) &&
4235 !key->as_es && !key->as_ls) {
4236 fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
4237 fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
4238 }
4239 }
4240
4241 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
4242 {
4243 struct si_shader *shader = ctx->shader;
4244 struct si_shader_info *info = &shader->selector->info;
4245
4246 if ((ctx->type != PIPE_SHADER_VERTEX &&
4247 ctx->type != PIPE_SHADER_TESS_EVAL) ||
4248 shader->key.as_ls ||
4249 shader->key.as_es)
4250 return;
4251
4252 ac_optimize_vs_outputs(&ctx->ac,
4253 ctx->main_fn,
4254 shader->info.vs_output_param_offset,
4255 info->num_outputs,
4256 &shader->info.nr_param_exports);
4257 }
4258
4259 static void si_init_exec_from_input(struct si_shader_context *ctx,
4260 struct ac_arg param, unsigned bitoffset)
4261 {
4262 LLVMValueRef args[] = {
4263 ac_get_arg(&ctx->ac, param),
4264 LLVMConstInt(ctx->i32, bitoffset, 0),
4265 };
4266 ac_build_intrinsic(&ctx->ac,
4267 "llvm.amdgcn.init.exec.from.input",
4268 ctx->voidt, args, 2, AC_FUNC_ATTR_CONVERGENT);
4269 }
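/* Note: the intrinsic above extracts a thread count from the given SGPR
 * (the driver masks the same field with 0x7f elsewhere) and initializes
 * EXEC to (1 << count) - 1, enabling exactly the threads that belong to
 * the first stage of a merged shader. */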
4270
4271 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
4272 const struct si_vs_prolog_bits *key)
4273 {
4274 /* VGPR initialization fixup for Vega10 and Raven is always done in the
4275 * VS prolog. */
4276 return sel->vs_needs_prolog ||
4277 key->ls_vgpr_fix ||
4278 key->unpack_instance_id_from_vertex_id;
4279 }
4280
4281 LLVMValueRef si_is_es_thread(struct si_shader_context *ctx)
4282 {
4283 /* Return true if the current thread should execute as an ES thread. */
4284 return LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
4285 ac_get_thread_id(&ctx->ac),
4286 si_unpack_param(ctx, ctx->merged_wave_info, 0, 8), "");
4287 }
4288
4289 LLVMValueRef si_is_gs_thread(struct si_shader_context *ctx)
4290 {
4291 /* Return true if the current thread should execute as a GS thread. */
4292 return LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
4293 ac_get_thread_id(&ctx->ac),
4294 si_unpack_param(ctx, ctx->merged_wave_info, 8, 8), "");
4295 }
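#if 0
/* Illustrative sketch (hypothetical helper): merged_wave_info packs
 * per-stage thread counts into 8-bit fields, with the ES count in bits
 * 0-7 and the GS count in bits 8-15 (matching the si_unpack_param calls
 * above). The scalar extraction that si_unpack_param emits as LLVM IR is
 * simply: */
static unsigned unpack_wave_info(unsigned wave_info, unsigned bit_offset,
                                 unsigned bit_width)
{
	return (wave_info >> bit_offset) & ((1u << bit_width) - 1);
}
#endif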
4296
4297 static bool si_build_main_function(struct si_shader_context *ctx,
4298 struct nir_shader *nir, bool free_nir)
4299 {
4300 struct si_shader *shader = ctx->shader;
4301 struct si_shader_selector *sel = shader->selector;
4302
4303 switch (ctx->type) {
4304 case PIPE_SHADER_VERTEX:
4305 if (shader->key.as_ls)
4306 ctx->abi.emit_outputs = si_llvm_emit_ls_epilogue;
4307 else if (shader->key.as_es)
4308 ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
4309 else if (shader->key.opt.vs_as_prim_discard_cs)
4310 ctx->abi.emit_outputs = si_llvm_emit_prim_discard_cs_epilogue;
4311 else if (shader->key.as_ngg)
4312 ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
4313 else
4314 ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
4315 ctx->abi.load_base_vertex = get_base_vertex;
4316 break;
4317 case PIPE_SHADER_TESS_CTRL:
4318 ctx->abi.load_tess_varyings = si_nir_load_tcs_varyings;
4319 ctx->abi.load_tess_level = si_load_tess_level;
4320 ctx->abi.store_tcs_outputs = si_nir_store_output_tcs;
4321 ctx->abi.emit_outputs = si_llvm_emit_tcs_epilogue;
4322 ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
4323 break;
4324 case PIPE_SHADER_TESS_EVAL:
4325 ctx->abi.load_tess_varyings = si_nir_load_input_tes;
4326 ctx->abi.load_tess_coord = si_load_tess_coord;
4327 ctx->abi.load_tess_level = si_load_tess_level;
4328 ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
4329 if (shader->key.as_es)
4330 ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
4331 else if (shader->key.as_ngg)
4332 ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
4333 else
4334 ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
4335 break;
4336 case PIPE_SHADER_GEOMETRY:
4337 ctx->abi.load_inputs = si_nir_load_input_gs;
4338 ctx->abi.emit_vertex = si_llvm_emit_vertex;
4339 ctx->abi.emit_primitive = si_llvm_emit_primitive;
4340 ctx->abi.emit_outputs = si_llvm_emit_gs_epilogue;
4341 break;
4342 case PIPE_SHADER_FRAGMENT:
4343 si_llvm_init_ps_callbacks(ctx);
4344 break;
4345 case PIPE_SHADER_COMPUTE:
4346 ctx->abi.load_local_group_size = get_block_size;
4347 break;
4348 default:
4349 assert(!"Unsupported shader type");
4350 return false;
4351 }
4352
4353 ctx->abi.load_ubo = load_ubo;
4354 ctx->abi.load_ssbo = load_ssbo;
4355
4356 create_function(ctx);
4357 preload_ring_buffers(ctx);
4358
4359 if (ctx->type == PIPE_SHADER_TESS_CTRL &&
4360 sel->info.tessfactors_are_def_in_all_invocs) {
4361 for (unsigned i = 0; i < 6; i++) {
4362 ctx->invoc0_tess_factors[i] =
4363 ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
4364 }
4365 }
4366
4367 if (ctx->type == PIPE_SHADER_GEOMETRY) {
4368 for (unsigned i = 0; i < 4; i++) {
4369 ctx->gs_next_vertex[i] =
4370 ac_build_alloca(&ctx->ac, ctx->i32, "");
4371 }
4372 if (shader->key.as_ngg) {
4373 for (unsigned i = 0; i < 4; ++i) {
4374 ctx->gs_curprim_verts[i] =
4375 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
4376 ctx->gs_generated_prims[i] =
4377 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
4378 }
4379
4380 unsigned scratch_size = 8;
4381 if (sel->so.num_outputs)
4382 scratch_size = 44;
4383
4384 LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, scratch_size);
4385 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
4386 ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
4387 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(ai32));
4388 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
4389
4390 ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(ctx->ac.module,
4391 LLVMArrayType(ctx->i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
4392 LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
4393 LLVMSetAlignment(ctx->gs_ngg_emit, 4);
4394 }
4395 }
4396
4397 if (ctx->type != PIPE_SHADER_GEOMETRY &&
4398 (shader->key.as_ngg && !shader->key.as_es)) {
4399 /* Unconditionally declare scratch space base for streamout and
4400 * vertex compaction. Whether space is actually allocated is
4401 * determined during linking / PM4 creation.
4402 *
4403 * Add an extra dword per vertex to ensure an odd stride, which
4404 * avoids bank conflicts for SoA accesses.
4405 */
4406 if (!gfx10_is_ngg_passthrough(shader))
4407 declare_esgs_ring(ctx);
4408
4409 /* This is really only needed when streamout and / or vertex
4410 * compaction is enabled.
4411 */
4412 if (sel->so.num_outputs && !ctx->gs_ngg_scratch) {
4413 LLVMTypeRef asi32 = LLVMArrayType(ctx->i32, 8);
4414 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
4415 asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
4416 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(asi32));
4417 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
4418 }
4419 }
4420
4421 /* For GFX9 merged shaders:
4422 * - Set EXEC for the first shader. If the prolog is present, set
4423 * EXEC there instead.
4424 * - Add a barrier before the second shader.
4425 * - In the second shader, reset EXEC to ~0 and wrap the main part in
4426 * an if-statement. This is required for correctness in geometry
4427 * shaders, to ensure that empty GS waves do not send GS_EMIT and
4428 * GS_CUT messages.
4429 *
4430 * For monolithic merged shaders, the first shader is wrapped in an
4431 * if-block together with its prolog in si_build_wrapper_function.
4432 *
4433 * NGG vertex and tess eval shaders running as the last
4434 * vertex/geometry stage handle execution explicitly using
4435 * if-statements.
4436 */
4437 if (ctx->screen->info.chip_class >= GFX9) {
4438 if (!shader->is_monolithic &&
4439 (shader->key.as_es || shader->key.as_ls) &&
4440 (ctx->type == PIPE_SHADER_TESS_EVAL ||
4441 (ctx->type == PIPE_SHADER_VERTEX &&
4442 !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog)))) {
4443 si_init_exec_from_input(ctx,
4444 ctx->merged_wave_info, 0);
4445 } else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
4446 ctx->type == PIPE_SHADER_GEOMETRY ||
4447 (shader->key.as_ngg && !shader->key.as_es)) {
4448 LLVMValueRef thread_enabled;
4449 bool nested_barrier;
4450
4451 if (!shader->is_monolithic ||
4452 (ctx->type == PIPE_SHADER_TESS_EVAL &&
4453 (shader->key.as_ngg && !shader->key.as_es)))
4454 ac_init_exec_full_mask(&ctx->ac);
4455
4456 if (ctx->type == PIPE_SHADER_TESS_CTRL ||
4457 ctx->type == PIPE_SHADER_GEOMETRY) {
4458 if (ctx->type == PIPE_SHADER_GEOMETRY && shader->key.as_ngg) {
4459 gfx10_ngg_gs_emit_prologue(ctx);
4460 nested_barrier = false;
4461 } else {
4462 nested_barrier = true;
4463 }
4464
4465 thread_enabled = si_is_gs_thread(ctx);
4466 } else {
4467 thread_enabled = si_is_es_thread(ctx);
4468 nested_barrier = false;
4469 }
4470
4471 ctx->merged_wrap_if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
4472 ctx->merged_wrap_if_label = 11500;
4473 ac_build_ifcc(&ctx->ac, thread_enabled, ctx->merged_wrap_if_label);
4474
4475 if (nested_barrier) {
4476 /* Execute a barrier before the second shader in
4477 * a merged shader.
4478 *
4479 * Execute the barrier inside the conditional block,
4480 * so that empty waves can jump directly to s_endpgm,
4481 * which will also signal the barrier.
4482 *
4483 * This is possible in gfx9, because an empty wave
4484 * for the second shader does not participate in
4485 * the epilogue. With NGG, empty waves may still
4486 * be required to export data (e.g. GS output vertices),
4487 * so we cannot let them exit early.
4488 *
4489 * If the shader is TCS and the TCS epilog is present
4490 * and contains a barrier, it will wait there and then
4491 * reach s_endpgm.
4492 */
4493 si_llvm_emit_barrier(ctx);
4494 }
4495 }
4496 }
4497
4498 if (sel->force_correct_derivs_after_kill) {
4499 ctx->postponed_kill = ac_build_alloca_undef(&ctx->ac, ctx->i1, "");
4500 /* true = don't kill. */
4501 LLVMBuildStore(ctx->ac.builder, ctx->i1true,
4502 ctx->postponed_kill);
4503 }
4504
4505 bool success = si_nir_build_llvm(ctx, nir);
4506 if (free_nir)
4507 ralloc_free(nir);
4508 if (!success) {
4509 fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
4510 return false;
4511 }
4512
4513 si_llvm_build_ret(ctx, ctx->return_value);
4514 return true;
4515 }
4516
4517 /**
4518 * Compute the VS prolog key, which contains all the information needed to
4519 * build the VS prolog function, and set shader->info bits where needed.
4520 *
4521 * \param info Shader info of the vertex shader.
4522 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
4523 * \param prolog_key Key of the VS prolog.
4524 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
4525 * \param key Output shader part key.
4526 */
4527 static void si_get_vs_prolog_key(const struct si_shader_info *info,
4528 unsigned num_input_sgprs,
4529 const struct si_vs_prolog_bits *prolog_key,
4530 struct si_shader *shader_out,
4531 union si_shader_part_key *key)
4532 {
4533 memset(key, 0, sizeof(*key));
4534 key->vs_prolog.states = *prolog_key;
4535 key->vs_prolog.num_input_sgprs = num_input_sgprs;
4536 key->vs_prolog.num_inputs = info->num_inputs;
4537 key->vs_prolog.as_ls = shader_out->key.as_ls;
4538 key->vs_prolog.as_es = shader_out->key.as_es;
4539 key->vs_prolog.as_ngg = shader_out->key.as_ngg;
4540
4541 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
4542 key->vs_prolog.as_ls = 1;
4543 key->vs_prolog.num_merged_next_stage_vgprs = 2;
4544 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
4545 key->vs_prolog.as_es = 1;
4546 key->vs_prolog.num_merged_next_stage_vgprs = 5;
4547 } else if (shader_out->key.as_ngg) {
4548 key->vs_prolog.num_merged_next_stage_vgprs = 5;
4549 }
4550
4551 /* Enable loading the InstanceID VGPR. */
4552 uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);
4553
4554 if ((key->vs_prolog.states.instance_divisor_is_one |
4555 key->vs_prolog.states.instance_divisor_is_fetched) & input_mask)
4556 shader_out->info.uses_instanceid = true;
4557 }
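/* Illustrative: u_bit_consecutive(0, n) is the mask (1 << n) - 1, so with
 * e.g. 5 vertex inputs the mask is 0x1f; ANDing it with the
 * instance-divisor bitfields checks whether any input that actually
 * exists needs InstanceID. */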
4558
4559 /**
4560 * Build the GS prolog function. Rotate the input vertices for triangle strips
4561 * with adjacency.
4562 */
4563 static void si_build_gs_prolog_function(struct si_shader_context *ctx,
4564 union si_shader_part_key *key)
4565 {
4566 unsigned num_sgprs, num_vgprs;
4567 LLVMBuilderRef builder = ctx->ac.builder;
4568 LLVMTypeRef returns[AC_MAX_ARGS];
4569 LLVMValueRef func, ret;
4570
4571 memset(&ctx->args, 0, sizeof(ctx->args));
4572
4573 if (ctx->screen->info.chip_class >= GFX9) {
4574 if (key->gs_prolog.states.gfx9_prev_is_vs)
4575 num_sgprs = 8 + GFX9_VSGS_NUM_USER_SGPR;
4576 else
4577 num_sgprs = 8 + GFX9_TESGS_NUM_USER_SGPR;
4578 num_vgprs = 5; /* ES inputs are not needed by GS */
4579 } else {
4580 num_sgprs = GFX6_GS_NUM_USER_SGPR + 2;
4581 num_vgprs = 8;
4582 }
4583
4584 for (unsigned i = 0; i < num_sgprs; ++i) {
4585 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
4586 returns[i] = ctx->i32;
4587 }
4588
4589 for (unsigned i = 0; i < num_vgprs; ++i) {
4590 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
4591 returns[num_sgprs + i] = ctx->f32;
4592 }
4593
4594 /* Create the function. */
4595 si_llvm_create_func(ctx, "gs_prolog", returns, num_sgprs + num_vgprs, 0);
4596 func = ctx->main_fn;
4597
4598 /* Set the full EXEC mask for the prolog, because we are only fiddling
4599 * with registers here. The main shader part will set the correct EXEC
4600 * mask.
4601 */
4602 if (ctx->screen->info.chip_class >= GFX9 && !key->gs_prolog.is_monolithic)
4603 ac_init_exec_full_mask(&ctx->ac);
4604
4605 /* Copy inputs to outputs. This should be a no-op, as the registers match,
4606 * but it will prevent the compiler from overwriting them unintentionally.
4607 */
4608 ret = ctx->return_value;
4609 for (unsigned i = 0; i < num_sgprs; i++) {
4610 LLVMValueRef p = LLVMGetParam(func, i);
4611 ret = LLVMBuildInsertValue(builder, ret, p, i, "");
4612 }
4613 for (unsigned i = 0; i < num_vgprs; i++) {
4614 LLVMValueRef p = LLVMGetParam(func, num_sgprs + i);
4615 p = ac_to_float(&ctx->ac, p);
4616 ret = LLVMBuildInsertValue(builder, ret, p, num_sgprs + i, "");
4617 }
4618
4619 if (key->gs_prolog.states.tri_strip_adj_fix) {
4620 /* Remap the input vertices for every other primitive. */
4621 const struct ac_arg gfx6_vtx_params[6] = {
4622 { .used = true, .arg_index = num_sgprs },
4623 { .used = true, .arg_index = num_sgprs + 1 },
4624 { .used = true, .arg_index = num_sgprs + 3 },
4625 { .used = true, .arg_index = num_sgprs + 4 },
4626 { .used = true, .arg_index = num_sgprs + 5 },
4627 { .used = true, .arg_index = num_sgprs + 6 },
4628 };
4629 const struct ac_arg gfx9_vtx_params[3] = {
4630 { .used = true, .arg_index = num_sgprs },
4631 { .used = true, .arg_index = num_sgprs + 1 },
4632 { .used = true, .arg_index = num_sgprs + 4 },
4633 };
4634 LLVMValueRef vtx_in[6], vtx_out[6];
4635 LLVMValueRef prim_id, rotate;
4636
4637 if (ctx->screen->info.chip_class >= GFX9) {
4638 for (unsigned i = 0; i < 3; i++) {
4639 vtx_in[i*2] = si_unpack_param(ctx, gfx9_vtx_params[i], 0, 16);
4640 vtx_in[i*2+1] = si_unpack_param(ctx, gfx9_vtx_params[i], 16, 16);
4641 }
4642 } else {
4643 for (unsigned i = 0; i < 6; i++)
4644 vtx_in[i] = ac_get_arg(&ctx->ac, gfx6_vtx_params[i]);
4645 }
4646
4647 prim_id = LLVMGetParam(func, num_sgprs + 2);
4648 rotate = LLVMBuildTrunc(builder, prim_id, ctx->i1, "");
4649
4650 for (unsigned i = 0; i < 6; ++i) {
4651 LLVMValueRef base, rotated;
4652 base = vtx_in[i];
4653 rotated = vtx_in[(i + 4) % 6];
4654 vtx_out[i] = LLVMBuildSelect(builder, rotate, rotated, base, "");
4655 }
4656
4657 if (ctx->screen->info.chip_class >= GFX9) {
4658 for (unsigned i = 0; i < 3; i++) {
4659 LLVMValueRef hi, out;
4660
4661 hi = LLVMBuildShl(builder, vtx_out[i*2+1],
4662 LLVMConstInt(ctx->i32, 16, 0), "");
4663 out = LLVMBuildOr(builder, vtx_out[i*2], hi, "");
4664 out = ac_to_float(&ctx->ac, out);
4665 ret = LLVMBuildInsertValue(builder, ret, out,
4666 gfx9_vtx_params[i].arg_index, "");
4667 }
4668 } else {
4669 for (unsigned i = 0; i < 6; i++) {
4670 LLVMValueRef out;
4671
4672 out = ac_to_float(&ctx->ac, vtx_out[i]);
4673 ret = LLVMBuildInsertValue(builder, ret, out,
4674 gfx6_vtx_params[i].arg_index, "");
4675 }
4676 }
4677 }
4678
4679 LLVMBuildRet(builder, ret);
4680 }
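#if 0
/* Illustrative sketch (hypothetical helper): for odd primitives, the
 * tri_strip_adj_fix remap above replaces vertex i with vertex
 * (i + 4) % 6. In plain C: */
static void rotate_tri_strip_adj_vertices(const unsigned in[6],
                                          unsigned out[6], bool odd_prim)
{
	for (unsigned i = 0; i < 6; i++)
		out[i] = odd_prim ? in[(i + 4) % 6] : in[i];
	/* Odd primitives thus see {v4, v5, v0, v1, v2, v3}. */
}
#endif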
4681
4682 /**
4683 * Given a list of shader part functions, build a wrapper function that
4684 * runs them in sequence to form a monolithic shader.
4685 */
4686 void si_build_wrapper_function(struct si_shader_context *ctx, LLVMValueRef *parts,
4687 unsigned num_parts, unsigned main_part,
4688 unsigned next_shader_first_part)
4689 {
4690 LLVMBuilderRef builder = ctx->ac.builder;
4691 /* PS epilog has one arg per color component; gfx9 merged shader
4692 * prologs need to forward 40 SGPRs.
4693 */
4694 LLVMValueRef initial[AC_MAX_ARGS], out[AC_MAX_ARGS];
4695 LLVMTypeRef function_type;
4696 unsigned num_first_params;
4697 unsigned num_out, initial_num_out;
4698 ASSERTED unsigned num_out_sgpr; /* used in debug checks */
4699 ASSERTED unsigned initial_num_out_sgpr; /* used in debug checks */
4700 unsigned num_sgprs, num_vgprs;
4701 unsigned gprs;
4702
4703 memset(&ctx->args, 0, sizeof(ctx->args));
4704
4705 for (unsigned i = 0; i < num_parts; ++i) {
4706 ac_add_function_attr(ctx->ac.context, parts[i], -1,
4707 AC_FUNC_ATTR_ALWAYSINLINE);
4708 LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
4709 }
4710
4711 /* The parameters of the wrapper function correspond to those of the
4712 * first part in terms of SGPRs and VGPRs, but we use the types of the
4713 * main part to get the right types. This is relevant for the
4714 * dereferenceable attribute on descriptor table pointers.
4715 */
4716 num_sgprs = 0;
4717 num_vgprs = 0;
4718
4719 function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
4720 num_first_params = LLVMCountParamTypes(function_type);
4721
4722 for (unsigned i = 0; i < num_first_params; ++i) {
4723 LLVMValueRef param = LLVMGetParam(parts[0], i);
4724
4725 if (ac_is_sgpr_param(param)) {
4726 assert(num_vgprs == 0);
4727 num_sgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
4728 } else {
4729 num_vgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
4730 }
4731 }
4732
4733 gprs = 0;
4734 while (gprs < num_sgprs + num_vgprs) {
4735 LLVMValueRef param = LLVMGetParam(parts[main_part], ctx->args.arg_count);
4736 LLVMTypeRef type = LLVMTypeOf(param);
4737 unsigned size = ac_get_type_size(type) / 4;
4738
4739 /* This is going to get cast anyway, so we don't have to
4740 * use the exact same type. But we do have to preserve the
4741 * pointer-ness so that LLVM knows about it.
4742 */
4743 enum ac_arg_type arg_type = AC_ARG_INT;
4744 if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) {
4745 arg_type = AC_ARG_CONST_PTR;
4746 }
4747
4748 ac_add_arg(&ctx->args, gprs < num_sgprs ? AC_ARG_SGPR : AC_ARG_VGPR,
4749 size, arg_type, NULL);
4750
4751 assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
4752 assert(gprs + size <= num_sgprs + num_vgprs &&
4753 (gprs >= num_sgprs || gprs + size <= num_sgprs));
4754
4755 gprs += size;
4756 }
4757
4758 /* Prepare the return type. */
4759 unsigned num_returns = 0;
4760 LLVMTypeRef returns[AC_MAX_ARGS], last_func_type, return_type;
4761
4762 last_func_type = LLVMGetElementType(LLVMTypeOf(parts[num_parts - 1]));
4763 return_type = LLVMGetReturnType(last_func_type);
4764
4765 switch (LLVMGetTypeKind(return_type)) {
4766 case LLVMStructTypeKind:
4767 num_returns = LLVMCountStructElementTypes(return_type);
4768 assert(num_returns <= ARRAY_SIZE(returns));
4769 LLVMGetStructElementTypes(return_type, returns);
4770 break;
4771 case LLVMVoidTypeKind:
4772 break;
4773 default:
4774 unreachable("unexpected type");
4775 }
4776
4777 si_llvm_create_func(ctx, "wrapper", returns, num_returns,
4778 si_get_max_workgroup_size(ctx->shader));
4779
4780 if (si_is_merged_shader(ctx))
4781 ac_init_exec_full_mask(&ctx->ac);
4782
4783 /* Record the arguments of the function as if they were an output of
4784 * a previous part.
4785 */
4786 num_out = 0;
4787 num_out_sgpr = 0;
4788
4789 for (unsigned i = 0; i < ctx->args.arg_count; ++i) {
4790 LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
4791 LLVMTypeRef param_type = LLVMTypeOf(param);
4792 LLVMTypeRef out_type = ctx->args.args[i].file == AC_ARG_SGPR ? ctx->i32 : ctx->f32;
4793 unsigned size = ac_get_type_size(param_type) / 4;
4794
4795 if (size == 1) {
4796 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
4797 param = LLVMBuildPtrToInt(builder, param, ctx->i32, "");
4798 param_type = ctx->i32;
4799 }
4800
4801 if (param_type != out_type)
4802 param = LLVMBuildBitCast(builder, param, out_type, "");
4803 out[num_out++] = param;
4804 } else {
4805 LLVMTypeRef vector_type = LLVMVectorType(out_type, size);
4806
4807 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
4808 param = LLVMBuildPtrToInt(builder, param, ctx->i64, "");
4809 param_type = ctx->i64;
4810 }
4811
4812 if (param_type != vector_type)
4813 param = LLVMBuildBitCast(builder, param, vector_type, "");
4814
4815 for (unsigned j = 0; j < size; ++j)
4816 out[num_out++] = LLVMBuildExtractElement(
4817 builder, param, LLVMConstInt(ctx->i32, j, 0), "");
4818 }
4819
4820 if (ctx->args.args[i].file == AC_ARG_SGPR)
4821 num_out_sgpr = num_out;
4822 }
4823
4824 memcpy(initial, out, sizeof(out));
4825 initial_num_out = num_out;
4826 initial_num_out_sgpr = num_out_sgpr;
4827
4828 /* Now chain the parts. */
4829 LLVMValueRef ret = NULL;
4830 for (unsigned part = 0; part < num_parts; ++part) {
4831 LLVMValueRef in[AC_MAX_ARGS];
4832 LLVMTypeRef ret_type;
4833 unsigned out_idx = 0;
4834 unsigned num_params = LLVMCountParams(parts[part]);
4835
4836 /* Merged shaders are executed conditionally depending
4837 * on the number of enabled threads passed in the input SGPRs. */
4838 if (is_multi_part_shader(ctx) && part == 0) {
4839 LLVMValueRef ena, count = initial[3];
4840
4841 count = LLVMBuildAnd(builder, count,
4842 LLVMConstInt(ctx->i32, 0x7f, 0), "");
4843 ena = LLVMBuildICmp(builder, LLVMIntULT,
4844 ac_get_thread_id(&ctx->ac), count, "");
4845 ac_build_ifcc(&ctx->ac, ena, 6506);
4846 }
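/* Sketch of the masking above, assuming a 64-lane wave: if the
 * merged wave info in initial[3] reports 13 enabled threads for
 * this half, the 0x7f mask extracts 13 and lanes with
 * thread_id >= 13 skip the part's call below. */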
4847
4848 /* Derive arguments for the next part from outputs of the
4849 * previous one.
4850 */
4851 for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
4852 LLVMValueRef param;
4853 LLVMTypeRef param_type;
4854 bool is_sgpr;
4855 unsigned param_size;
4856 LLVMValueRef arg = NULL;
4857
4858 param = LLVMGetParam(parts[part], param_idx);
4859 param_type = LLVMTypeOf(param);
4860 param_size = ac_get_type_size(param_type) / 4;
4861 is_sgpr = ac_is_sgpr_param(param);
4862
4863 if (is_sgpr) {
4864 ac_add_function_attr(ctx->ac.context, parts[part],
4865 param_idx + 1, AC_FUNC_ATTR_INREG);
4866 } else if (out_idx < num_out_sgpr) {
4867 /* Skip returned SGPRs that the current part
4868 * doesn't declare as inputs. */
4869 out_idx = num_out_sgpr;
4870 }
4871
4872 assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));
4873
4874 if (param_size == 1)
4875 arg = out[out_idx];
4876 else
4877 arg = ac_build_gather_values(&ctx->ac, &out[out_idx], param_size);
4878
4879 if (LLVMTypeOf(arg) != param_type) {
4880 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
4881 if (LLVMGetPointerAddressSpace(param_type) ==
4882 AC_ADDR_SPACE_CONST_32BIT) {
4883 arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");
4884 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
4885 } else {
4886 arg = LLVMBuildBitCast(builder, arg, ctx->i64, "");
4887 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
4888 }
4889 } else {
4890 arg = LLVMBuildBitCast(builder, arg, param_type, "");
4891 }
4892 }
4893
4894 in[param_idx] = arg;
4895 out_idx += param_size;
4896 }
4897
4898 ret = ac_build_call(&ctx->ac, parts[part], in, num_params);
4899
4900 if (is_multi_part_shader(ctx) &&
4901 part + 1 == next_shader_first_part) {
4902 ac_build_endif(&ctx->ac, 6506);
4903
4904 /* The second half of the merged shader should use
4905 * the inputs from the top-level (wrapper) function,
4906 * not the return value from the last call.
4907 *
4908 * That's because the last call was executed
4909 * conditionally, so we can't consume its return
4910 * value in the main block.
4911 */
4912 memcpy(out, initial, sizeof(initial));
4913 num_out = initial_num_out;
4914 num_out_sgpr = initial_num_out_sgpr;
4915 continue;
4916 }
4917
4918 /* Extract the returned GPRs. */
4919 ret_type = LLVMTypeOf(ret);
4920 num_out = 0;
4921 num_out_sgpr = 0;
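/* Per the convention used when recording outputs above, SGPRs are
 * returned as i32 and VGPRs as f32, so an i32 element identifies
 * an SGPR. */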
4922
4923 if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
4924 assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);
4925
4926 unsigned ret_size = LLVMCountStructElementTypes(ret_type);
4927
4928 for (unsigned i = 0; i < ret_size; ++i) {
4929 LLVMValueRef val =
4930 LLVMBuildExtractValue(builder, ret, i, "");
4931
4932 assert(num_out < ARRAY_SIZE(out));
4933 out[num_out++] = val;
4934
4935 if (LLVMTypeOf(val) == ctx->i32) {
4936 assert(num_out_sgpr + 1 == num_out);
4937 num_out_sgpr = num_out;
4938 }
4939 }
4940 }
4941 }
4942
4943 /* Return the value from the last part. */
4944 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
4945 LLVMBuildRetVoid(builder);
4946 else
4947 LLVMBuildRet(builder, ret);
4948 }
4949
4950 static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
4951 struct si_shader_selector *sel)
4952 {
4953 if (!compiler->low_opt_passes)
4954 return false;
4955
4956 /* Assume a slow CPU. */
4957 assert(!sel->screen->info.has_dedicated_vram &&
4958 sel->screen->info.chip_class <= GFX8);
4959
4960 /* This matters for a crazy dEQP test containing 2597 memory
4961 * opcodes, mostly buffer stores. */
4962 return sel->type == PIPE_SHADER_COMPUTE &&
4963 sel->info.num_memory_instructions > 1000;
4964 }
4965
4966 static struct nir_shader *get_nir_shader(struct si_shader_selector *sel,
4967 bool *free_nir)
4968 {
4969 *free_nir = false;
4970
4971 if (sel->nir) {
4972 return sel->nir;
4973 } else if (sel->nir_binary) {
4974 struct pipe_screen *screen = &sel->screen->b;
4975 const void *options =
4976 screen->get_compiler_options(screen, PIPE_SHADER_IR_NIR,
4977 sel->type);
4978
4979 struct blob_reader blob_reader;
4980 blob_reader_init(&blob_reader, sel->nir_binary, sel->nir_size);
4981 *free_nir = true;
4982 return nir_deserialize(NULL, options, &blob_reader);
4983 }
4984 return NULL;
4985 }
4986
4987 int si_compile_shader(struct si_screen *sscreen,
4988 struct ac_llvm_compiler *compiler,
4989 struct si_shader *shader,
4990 struct pipe_debug_callback *debug)
4991 {
4992 struct si_shader_selector *sel = shader->selector;
4993 struct si_shader_context ctx;
4994 bool free_nir;
4995 struct nir_shader *nir = get_nir_shader(sel, &free_nir);
4996 int r = -1;
4997
4998 /* Dump NIR before doing NIR->LLVM conversion in case the
4999 * conversion fails. */
5000 if (si_can_dump_shader(sscreen, sel->type) &&
5001 !(sscreen->debug_flags & DBG(NO_NIR))) {
5002 nir_print_shader(nir, stderr);
5003 si_dump_streamout(&sel->so);
5004 }
5005
5006 si_llvm_context_init(&ctx, sscreen, compiler, si_get_shader_wave_size(shader));
5007 si_llvm_context_set_ir(&ctx, shader);
5008
5009 memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
5010 sizeof(shader->info.vs_output_param_offset));
5011
5012 shader->info.uses_instanceid = sel->info.uses_instanceid;
5013
5014 if (!si_build_main_function(&ctx, nir, free_nir)) {
5015 si_llvm_dispose(&ctx);
5016 return -1;
5017 }
5018
5019 if (shader->is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
5020 LLVMValueRef parts[2];
5021 bool need_prolog = si_vs_needs_prolog(sel, &shader->key.part.vs.prolog);
5022
5023 parts[1] = ctx.main_fn;
5024
5025 if (need_prolog) {
5026 union si_shader_part_key prolog_key;
5027 si_get_vs_prolog_key(&sel->info,
5028 shader->info.num_input_sgprs,
5029 &shader->key.part.vs.prolog,
5030 shader, &prolog_key);
5031 prolog_key.vs_prolog.is_monolithic = true;
5032 si_build_vs_prolog_function(&ctx, &prolog_key);
5033 parts[0] = ctx.main_fn;
5034 }
5035
5036 si_build_wrapper_function(&ctx, parts + !need_prolog,
5037 1 + need_prolog, need_prolog, 0);
5038
5039 if (ctx.shader->key.opt.vs_as_prim_discard_cs)
5040 si_build_prim_discard_compute_shader(&ctx);
5041 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
5042 if (sscreen->info.chip_class >= GFX9) {
5043 struct si_shader_selector *ls = shader->key.part.tcs.ls;
5044 LLVMValueRef parts[4];
5045 bool vs_needs_prolog =
5046 si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog);
5047
5048 /* TCS main part */
5049 parts[2] = ctx.main_fn;
5050
5051 /* TCS epilog */
5052 union si_shader_part_key tcs_epilog_key;
5053 memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
5054 tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
5055 si_build_tcs_epilog_function(&ctx, &tcs_epilog_key);
5056 parts[3] = ctx.main_fn;
5057
5058 /* VS as LS main part */
5059 nir = get_nir_shader(ls, &free_nir);
5060 struct si_shader shader_ls = {};
5061 shader_ls.selector = ls;
5062 shader_ls.key.as_ls = 1;
5063 shader_ls.key.mono = shader->key.mono;
5064 shader_ls.key.opt = shader->key.opt;
5065 shader_ls.is_monolithic = true;
5066 si_llvm_context_set_ir(&ctx, &shader_ls);
5067
5068 if (!si_build_main_function(&ctx, nir, free_nir)) {
5069 si_llvm_dispose(&ctx);
5070 return -1;
5071 }
5072 shader->info.uses_instanceid |= ls->info.uses_instanceid;
5073 parts[1] = ctx.main_fn;
5074
5075 /* LS prolog */
5076 if (vs_needs_prolog) {
5077 union si_shader_part_key vs_prolog_key;
5078 si_get_vs_prolog_key(&ls->info,
5079 shader_ls.info.num_input_sgprs,
5080 &shader->key.part.tcs.ls_prolog,
5081 shader, &vs_prolog_key);
5082 vs_prolog_key.vs_prolog.is_monolithic = true;
5083 si_build_vs_prolog_function(&ctx, &vs_prolog_key);
5084 parts[0] = ctx.main_fn;
5085 }
5086
5087 /* Reset the shader context. */
5088 ctx.shader = shader;
5089 ctx.type = PIPE_SHADER_TESS_CTRL;
5090
5091 si_build_wrapper_function(&ctx,
5092 parts + !vs_needs_prolog,
5093 4 - !vs_needs_prolog, vs_needs_prolog,
5094 vs_needs_prolog ? 2 : 1);
5095 } else {
5096 LLVMValueRef parts[2];
5097 union si_shader_part_key epilog_key;
5098
5099 parts[0] = ctx.main_fn;
5100
5101 memset(&epilog_key, 0, sizeof(epilog_key));
5102 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
5103 si_build_tcs_epilog_function(&ctx, &epilog_key);
5104 parts[1] = ctx.main_fn;
5105
5106 si_build_wrapper_function(&ctx, parts, 2, 0, 0);
5107 }
5108 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
5109 if (ctx.screen->info.chip_class >= GFX9) {
5110 struct si_shader_selector *es = shader->key.part.gs.es;
5111 LLVMValueRef es_prolog = NULL;
5112 LLVMValueRef es_main = NULL;
5113 LLVMValueRef gs_prolog = NULL;
5114 LLVMValueRef gs_main = ctx.main_fn;
5115
5116 /* GS prolog */
5117 union si_shader_part_key gs_prolog_key;
5118 memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
5119 gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
5120 gs_prolog_key.gs_prolog.is_monolithic = true;
5121 gs_prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
5122 si_build_gs_prolog_function(&ctx, &gs_prolog_key);
5123 gs_prolog = ctx.main_fn;
5124
5125 /* ES main part */
5126 nir = get_nir_shader(es, &free_nir);
5127 struct si_shader shader_es = {};
5128 shader_es.selector = es;
5129 shader_es.key.as_es = 1;
5130 shader_es.key.as_ngg = shader->key.as_ngg;
5131 shader_es.key.mono = shader->key.mono;
5132 shader_es.key.opt = shader->key.opt;
5133 shader_es.is_monolithic = true;
5134 si_llvm_context_set_ir(&ctx, &shader_es);
5135
5136 if (!si_build_main_function(&ctx, nir, free_nir)) {
5137 si_llvm_dispose(&ctx);
5138 return -1;
5139 }
5140 shader->info.uses_instanceid |= es->info.uses_instanceid;
5141 es_main = ctx.main_fn;
5142
5143 /* ES prolog */
5144 if (es->type == PIPE_SHADER_VERTEX &&
5145 si_vs_needs_prolog(es, &shader->key.part.gs.vs_prolog)) {
5146 union si_shader_part_key vs_prolog_key;
5147 si_get_vs_prolog_key(&es->info,
5148 shader_es.info.num_input_sgprs,
5149 &shader->key.part.gs.vs_prolog,
5150 shader, &vs_prolog_key);
5151 vs_prolog_key.vs_prolog.is_monolithic = true;
5152 si_build_vs_prolog_function(&ctx, &vs_prolog_key);
5153 es_prolog = ctx.main_fn;
5154 }
5155
5156 /* Reset the shader context. */
5157 ctx.shader = shader;
5158 ctx.type = PIPE_SHADER_GEOMETRY;
5159
5160 /* Prepare the array of shader parts. */
5161 LLVMValueRef parts[4];
5162 unsigned num_parts = 0, main_part, next_first_part;
5163
5164 if (es_prolog)
5165 parts[num_parts++] = es_prolog;
5166
5167 parts[main_part = num_parts++] = es_main;
5168 parts[next_first_part = num_parts++] = gs_prolog;
5169 parts[num_parts++] = gs_main;
5170
5171 si_build_wrapper_function(&ctx, parts, num_parts,
5172 main_part, next_first_part);
5173 } else {
5174 LLVMValueRef parts[2];
5175 union si_shader_part_key prolog_key;
5176
5177 parts[1] = ctx.main_fn;
5178
5179 memset(&prolog_key, 0, sizeof(prolog_key));
5180 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
5181 si_build_gs_prolog_function(&ctx, &prolog_key);
5182 parts[0] = ctx.main_fn;
5183
5184 si_build_wrapper_function(&ctx, parts, 2, 1, 0);
5185 }
5186 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
5187 si_llvm_build_monolithic_ps(&ctx, shader);
5188 }
5189
5190 si_llvm_optimize_module(&ctx);
5191
5192 /* Post-optimization transformations and analysis. */
5193 si_optimize_vs_outputs(&ctx);
5194
5195 if ((debug && debug->debug_message) ||
5196 si_can_dump_shader(sscreen, ctx.type)) {
5197 ctx.shader->info.private_mem_vgprs =
5198 ac_count_scratch_private_memory(ctx.main_fn);
5199 }
5200
5201 /* Make sure the input is a pointer, not an integer followed by inttoptr. */
5202 assert(LLVMGetTypeKind(LLVMTypeOf(LLVMGetParam(ctx.main_fn, 0))) ==
5203 LLVMPointerTypeKind);
5204
5205 /* Compile to bytecode. */
5206 r = si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler,
5207 ctx.ac.module, debug, ctx.type, ctx.ac.wave_size,
5208 si_get_shader_name(shader),
5209 si_should_optimize_less(compiler, shader->selector));
5210 si_llvm_dispose(&ctx);
5211 if (r) {
5212 fprintf(stderr, "LLVM failed to compile shader\n");
5213 return r;
5214 }
5215
5216 /* Validate SGPR and VGPR usage for compute to detect compiler bugs
5217 * that exceed the hardware register limits; LLVM 3.9svn had such a bug.
5218 */
5219 if (sel->type == PIPE_SHADER_COMPUTE) {
5220 unsigned wave_size = sscreen->compute_wave_size;
5221 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd *
5222 (wave_size == 32 ? 2 : 1);
5223 unsigned max_sgprs = sscreen->info.num_physical_sgprs_per_simd;
5224 unsigned max_sgprs_per_wave = 128;
5225 unsigned simds_per_tg = 4; /* assuming WGP mode on gfx10 */
5226 unsigned threads_per_tg = si_get_max_workgroup_size(shader);
5227 unsigned waves_per_tg = DIV_ROUND_UP(threads_per_tg, wave_size);
5228 unsigned waves_per_simd = DIV_ROUND_UP(waves_per_tg, simds_per_tg);
5229
5230 max_vgprs = max_vgprs / waves_per_simd;
5231 max_sgprs = MIN2(max_sgprs / waves_per_simd, max_sgprs_per_wave);
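/* Worked example (a sketch; physical counts vary by chip): with
 * wave64, a 1024-thread workgroup and 4 SIMDs, waves_per_tg = 16
 * and waves_per_simd = 4, so a SIMD with 256 physical VGPRs
 * allows at most 64 VGPRs per wave. */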
5232
5233 if (shader->config.num_sgprs > max_sgprs ||
5234 shader->config.num_vgprs > max_vgprs) {
5235 fprintf(stderr, "LLVM failed to compile a shader correctly: "
5236 "SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
5237 shader->config.num_sgprs, shader->config.num_vgprs,
5238 max_sgprs, max_vgprs);
5239
5240 /* Just terminate the process, because dependent
5241 * shaders can hang due to bad input data, but use
5242 * the env var to allow shader-db to work.
5243 */
5244 if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
5245 abort();
5246 }
5247 }
5248
5249 /* Add the scratch offset to input SGPRs. */
5250 if (shader->config.scratch_bytes_per_wave && !si_is_merged_shader(&ctx))
5251 shader->info.num_input_sgprs += 1; /* scratch byte offset */
5252
5253 /* Calculate the number of fragment input VGPRs. */
5254 if (ctx.type == PIPE_SHADER_FRAGMENT) {
5255 shader->info.num_input_vgprs = ac_get_fs_input_vgpr_cnt(&shader->config,
5256 &shader->info.face_vgpr_index,
5257 &shader->info.ancillary_vgpr_index);
5258 }
5259
5260 si_calculate_max_simd_waves(shader);
5261 si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
5262 return 0;
5263 }
5264
5265 /**
5266 * Create, compile and return a shader part (prolog or epilog).
5267 *
5268 * \param sscreen screen
5269 * \param list list of shader parts of the same category
5270 * \param type shader type
5271 * \param prolog whether the part being requested is a prolog
5272 * \param key shader part key
5273 * \param compiler LLVM compiler
5274 * \param debug debug callback
5275 * \param build the callback that builds the shader part's function
* \param name human-readable part name for dumps and error messages
5276 * \return non-NULL on success
5277 */
5278 static struct si_shader_part *
5279 si_get_shader_part(struct si_screen *sscreen,
5280 struct si_shader_part **list,
5281 enum pipe_shader_type type,
5282 bool prolog,
5283 union si_shader_part_key *key,
5284 struct ac_llvm_compiler *compiler,
5285 struct pipe_debug_callback *debug,
5286 void (*build)(struct si_shader_context *,
5287 union si_shader_part_key *),
5288 const char *name)
5289 {
5290 struct si_shader_part *result;
5291
5292 simple_mtx_lock(&sscreen->shader_parts_mutex);
5293
5294 /* Find existing. */
5295 for (result = *list; result; result = result->next) {
5296 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
5297 simple_mtx_unlock(&sscreen->shader_parts_mutex);
5298 return result;
5299 }
5300 }
5301
5302 /* Compile a new one. */
5303 result = CALLOC_STRUCT(si_shader_part);
5304 result->key = *key;
5305
5306 struct si_shader shader = {};
5307
5308 switch (type) {
5309 case PIPE_SHADER_VERTEX:
5310 shader.key.as_ls = key->vs_prolog.as_ls;
5311 shader.key.as_es = key->vs_prolog.as_es;
5312 shader.key.as_ngg = key->vs_prolog.as_ngg;
5313 break;
5314 case PIPE_SHADER_TESS_CTRL:
5315 assert(!prolog);
5316 shader.key.part.tcs.epilog = key->tcs_epilog.states;
5317 break;
5318 case PIPE_SHADER_GEOMETRY:
5319 assert(prolog);
5320 shader.key.as_ngg = key->gs_prolog.as_ngg;
5321 break;
5322 case PIPE_SHADER_FRAGMENT:
5323 if (prolog)
5324 shader.key.part.ps.prolog = key->ps_prolog.states;
5325 else
5326 shader.key.part.ps.epilog = key->ps_epilog.states;
5327 break;
5328 default:
5329 unreachable("bad shader part");
5330 }
5331
5332 struct si_shader_context ctx;
5333 si_llvm_context_init(&ctx, sscreen, compiler,
5334 si_get_wave_size(sscreen, type, shader.key.as_ngg,
5335 shader.key.as_es));
5336 ctx.shader = &shader;
5337 ctx.type = type;
5338
5339 build(&ctx, key);
5340
5341 /* Compile. */
5342 si_llvm_optimize_module(&ctx);
5343
5344 if (si_compile_llvm(sscreen, &result->binary, &result->config, compiler,
5345 ctx.ac.module, debug, ctx.type, ctx.ac.wave_size,
5346 name, false)) {
5347 FREE(result);
5348 result = NULL;
5349 goto out;
5350 }
5351
5352 result->next = *list;
5353 *list = result;
5354
5355 out:
5356 si_llvm_dispose(&ctx);
5357 simple_mtx_unlock(&sscreen->shader_parts_mutex);
5358 return result;
5359 }
5360
5361 /**
5362 * Build the vertex shader prolog function.
5363 *
5364 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
5365 * All inputs are returned unmodified. The vertex load indices are
5366 * stored after them; the API VS uses them to fetch its inputs.
5367 *
5368 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
5369 * input_v0,
5370 * input_v1,
5371 * input_v2,
5372 * input_v3,
5373 * (VertexID + BaseVertex),
5374 * (InstanceID + StartInstance),
5375 * (InstanceID / 2 + StartInstance)
5376 */
5377 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
5378 union si_shader_part_key *key)
5379 {
5380 LLVMTypeRef *returns;
5381 LLVMValueRef ret, func;
5382 int num_returns, i;
5383 unsigned first_vs_vgpr = key->vs_prolog.num_merged_next_stage_vgprs;
5384 unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4;
5385 struct ac_arg input_sgpr_param[key->vs_prolog.num_input_sgprs];
5386 struct ac_arg input_vgpr_param[9];
5387 LLVMValueRef input_vgprs[9];
5388 unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs +
5389 num_input_vgprs;
5390 unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;
5391
5392 memset(&ctx->args, 0, sizeof(ctx->args));
5393
5394 /* 4 preloaded VGPRs + vertex load indices as prolog outputs */
5395 returns = alloca((num_all_input_regs + key->vs_prolog.num_inputs) *
5396 sizeof(LLVMTypeRef));
5397 num_returns = 0;
5398
5399 /* Declare input and output SGPRs. */
5400 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
5401 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5402 &input_sgpr_param[i]);
5403 returns[num_returns++] = ctx->i32;
5404 }
5405
5406 struct ac_arg merged_wave_info = input_sgpr_param[3];
5407
5408 /* Preloaded VGPRs (outputs must be floats) */
5409 for (i = 0; i < num_input_vgprs; i++) {
5410 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &input_vgpr_param[i]);
5411 returns[num_returns++] = ctx->f32;
5412 }
5413
5414 /* Vertex load indices. */
5415 for (i = 0; i < key->vs_prolog.num_inputs; i++)
5416 returns[num_returns++] = ctx->f32;
5417
5418 /* Create the function. */
5419 si_llvm_create_func(ctx, "vs_prolog", returns, num_returns, 0);
5420 func = ctx->main_fn;
5421
5422 for (i = 0; i < num_input_vgprs; i++) {
5423 input_vgprs[i] = ac_get_arg(&ctx->ac, input_vgpr_param[i]);
5424 }
5425
5426 if (key->vs_prolog.num_merged_next_stage_vgprs) {
5427 if (!key->vs_prolog.is_monolithic)
5428 si_init_exec_from_input(ctx, merged_wave_info, 0);
5429
5430 if (key->vs_prolog.as_ls &&
5431 ctx->screen->info.has_ls_vgpr_init_bug) {
5432 /* If there are no HS threads, SPI loads the LS VGPRs
5433 * starting at VGPR 0. Shift them back to where they
5434 * belong.
5435 */
5436 LLVMValueRef has_hs_threads =
5437 LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
5438 si_unpack_param(ctx, input_sgpr_param[3], 8, 8),
5439 ctx->i32_0, "");
5440
5441 for (i = 4; i > 0; --i) {
5442 input_vgprs[i + 1] =
5443 LLVMBuildSelect(ctx->ac.builder, has_hs_threads,
5444 input_vgprs[i + 1],
5445 input_vgprs[i - 1], "");
5446 }
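/* I.e. when there are no HS threads, the selects above move the
 * LS inputs that SPI loaded into VGPRs 0..3 up to VGPRs 2..5,
 * where the rest of the prolog expects them. */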
5447 }
5448 }
5449
5450 unsigned vertex_id_vgpr = first_vs_vgpr;
5451 unsigned instance_id_vgpr =
5452 ctx->screen->info.chip_class >= GFX10 ?
5453 first_vs_vgpr + 3 :
5454 first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1);
5455
5456 ctx->abi.vertex_id = input_vgprs[vertex_id_vgpr];
5457 ctx->abi.instance_id = input_vgprs[instance_id_vgpr];
5458
5459 /* InstanceID = VertexID >> 16;
5460 * VertexID = VertexID & 0xffff;
5461 */
5462 if (key->vs_prolog.states.unpack_instance_id_from_vertex_id) {
5463 ctx->abi.instance_id = LLVMBuildLShr(ctx->ac.builder, ctx->abi.vertex_id,
5464 LLVMConstInt(ctx->i32, 16, 0), "");
5465 ctx->abi.vertex_id = LLVMBuildAnd(ctx->ac.builder, ctx->abi.vertex_id,
5466 LLVMConstInt(ctx->i32, 0xffff, 0), "");
5467 }
5468
5469 /* Copy inputs to outputs. This should be a no-op, as the registers match,
5470 * but it prevents the compiler from overwriting them unintentionally.
5471 */
5472 ret = ctx->return_value;
5473 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
5474 LLVMValueRef p = LLVMGetParam(func, i);
5475 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
5476 }
5477 for (i = 0; i < num_input_vgprs; i++) {
5478 LLVMValueRef p = input_vgprs[i];
5479
5480 if (i == vertex_id_vgpr)
5481 p = ctx->abi.vertex_id;
5482 else if (i == instance_id_vgpr)
5483 p = ctx->abi.instance_id;
5484
5485 p = ac_to_float(&ctx->ac, p);
5486 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p,
5487 key->vs_prolog.num_input_sgprs + i, "");
5488 }
5489
5490 /* Compute vertex load indices from instance divisors. */
5491 LLVMValueRef instance_divisor_constbuf = NULL;
5492
5493 if (key->vs_prolog.states.instance_divisor_is_fetched) {
5494 LLVMValueRef list = si_prolog_get_rw_buffers(ctx);
5495 LLVMValueRef buf_index =
5496 LLVMConstInt(ctx->i32, SI_VS_CONST_INSTANCE_DIVISORS, 0);
5497 instance_divisor_constbuf =
5498 ac_build_load_to_sgpr(&ctx->ac, list, buf_index);
5499 }
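/* The constant buffer holds, per vertex element, the four
 * precomputed magic-division factors (multiplier and shift/add
 * terms; the exact encoding is an assumption here) consumed by
 * ac_build_fast_udiv_nuw() below. */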
5500
5501 for (i = 0; i < key->vs_prolog.num_inputs; i++) {
5502 bool divisor_is_one =
5503 key->vs_prolog.states.instance_divisor_is_one & (1u << i);
5504 bool divisor_is_fetched =
5505 key->vs_prolog.states.instance_divisor_is_fetched & (1u << i);
5506 LLVMValueRef index = NULL;
5507
5508 if (divisor_is_one) {
5509 index = ctx->abi.instance_id;
5510 } else if (divisor_is_fetched) {
5511 LLVMValueRef udiv_factors[4];
5512
5513 for (unsigned j = 0; j < 4; j++) {
5514 udiv_factors[j] =
5515 si_buffer_load_const(ctx, instance_divisor_constbuf,
5516 LLVMConstInt(ctx->i32, i*16 + j*4, 0));
5517 udiv_factors[j] = ac_to_integer(&ctx->ac, udiv_factors[j]);
5518 }
5519 /* The faster NUW version doesn't work when InstanceID == UINT_MAX.
5520 * Such InstanceID might not be achievable in a reasonable time though.
5521 */
5522 index = ac_build_fast_udiv_nuw(&ctx->ac, ctx->abi.instance_id,
5523 udiv_factors[0], udiv_factors[1],
5524 udiv_factors[2], udiv_factors[3]);
5525 }
5526
5527 if (divisor_is_one || divisor_is_fetched) {
5528 /* Add StartInstance. */
5529 index = LLVMBuildAdd(ctx->ac.builder, index,
5530 LLVMGetParam(ctx->main_fn, user_sgpr_base +
5531 SI_SGPR_START_INSTANCE), "");
5532 } else {
5533 /* VertexID + BaseVertex */
5534 index = LLVMBuildAdd(ctx->ac.builder,
5535 ctx->abi.vertex_id,
5536 LLVMGetParam(func, user_sgpr_base +
5537 SI_SGPR_BASE_VERTEX), "");
5538 }
5539
5540 index = ac_to_float(&ctx->ac, index);
5541 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, index,
5542 ctx->args.arg_count + i, "");
5543 }
5544
5545 si_llvm_build_ret(ctx, ret);
5546 }
5547
5548 static bool si_get_vs_prolog(struct si_screen *sscreen,
5549 struct ac_llvm_compiler *compiler,
5550 struct si_shader *shader,
5551 struct pipe_debug_callback *debug,
5552 struct si_shader *main_part,
5553 const struct si_vs_prolog_bits *key)
5554 {
5555 struct si_shader_selector *vs = main_part->selector;
5556
5557 if (!si_vs_needs_prolog(vs, key))
5558 return true;
5559
5560 /* Get the prolog. */
5561 union si_shader_part_key prolog_key;
5562 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
5563 key, shader, &prolog_key);
5564
5565 shader->prolog =
5566 si_get_shader_part(sscreen, &sscreen->vs_prologs,
5567 PIPE_SHADER_VERTEX, true, &prolog_key, compiler,
5568 debug, si_build_vs_prolog_function,
5569 "Vertex Shader Prolog");
5570 return shader->prolog != NULL;
5571 }
5572
5573 /**
5574 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
5575 */
5576 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
5577 struct ac_llvm_compiler *compiler,
5578 struct si_shader *shader,
5579 struct pipe_debug_callback *debug)
5580 {
5581 return si_get_vs_prolog(sscreen, compiler, shader, debug, shader,
5582 &shader->key.part.vs.prolog);
5583 }
5584
5585 /**
5586 * Compile the TCS epilog function. This writes tessellation factors to memory
5587 * based on the output primitive type of the tessellator (determined by TES).
5588 */
5589 static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
5590 union si_shader_part_key *key)
5591 {
5592 memset(&ctx->args, 0, sizeof(ctx->args));
5593
5594 if (ctx->screen->info.chip_class >= GFX9) {
5595 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5596 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5597 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5598 &ctx->tcs_offchip_offset);
5599 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* wave info */
5600 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5601 &ctx->tcs_factor_offset);
5602 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5603 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5604 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5605 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5606 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5607 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5608 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5609 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5610 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5611 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5612 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5613 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5614 &ctx->tcs_offchip_layout);
5615 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5616 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5617 &ctx->tcs_out_lds_layout);
5618 } else {
5619 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5620 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5621 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5622 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5623 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5624 &ctx->tcs_offchip_layout);
5625 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5626 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5627 &ctx->tcs_out_lds_layout);
5628 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5629 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5630 &ctx->tcs_offchip_offset);
5631 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5632 &ctx->tcs_factor_offset);
5633 }
5634
5635 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* VGPR gap */
5636 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* VGPR gap */
5637 struct ac_arg rel_patch_id; /* patch index within the wave (REL_PATCH_ID) */
5638 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &rel_patch_id);
5639 struct ac_arg invocation_id; /* invocation ID within the patch */
5640 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &invocation_id);
5641 struct ac_arg tcs_out_current_patch_data_offset; /* LDS offset where tess factors should be loaded from */
5642 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
5643 &tcs_out_current_patch_data_offset);
5644
5645 struct ac_arg tess_factors[6];
5646 for (unsigned i = 0; i < 6; i++)
5647 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &tess_factors[i]);
5648
5649 /* Create the function. */
5650 si_llvm_create_func(ctx, "tcs_epilog", NULL, 0,
5651 ctx->screen->info.chip_class >= GFX7 ? 128 : 0);
5652 ac_declare_lds_as_pointer(&ctx->ac);
5653
5654 LLVMValueRef invoc0_tess_factors[6];
5655 for (unsigned i = 0; i < 6; i++)
5656 invoc0_tess_factors[i] = ac_get_arg(&ctx->ac, tess_factors[i]);
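/* tess_factors[0..3] are the outer factors and [4..5] the inner
 * ones, hence the invoc0_tess_factors + 4 split below. */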
5657
5658 si_write_tess_factors(ctx,
5659 ac_get_arg(&ctx->ac, rel_patch_id),
5660 ac_get_arg(&ctx->ac, invocation_id),
5661 ac_get_arg(&ctx->ac, tcs_out_current_patch_data_offset),
5662 invoc0_tess_factors, invoc0_tess_factors + 4);
5663
5664 LLVMBuildRetVoid(ctx->ac.builder);
5665 }
5666
5667 /**
5668 * Select and compile (or reuse) TCS parts (epilog).
5669 */
5670 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
5671 struct ac_llvm_compiler *compiler,
5672 struct si_shader *shader,
5673 struct pipe_debug_callback *debug)
5674 {
5675 if (sscreen->info.chip_class >= GFX9) {
5676 struct si_shader *ls_main_part =
5677 shader->key.part.tcs.ls->main_shader_part_ls;
5678
5679 if (!si_get_vs_prolog(sscreen, compiler, shader, debug, ls_main_part,
5680 &shader->key.part.tcs.ls_prolog))
5681 return false;
5682
5683 shader->previous_stage = ls_main_part;
5684 }
5685
5686 /* Get the epilog. */
5687 union si_shader_part_key epilog_key;
5688 memset(&epilog_key, 0, sizeof(epilog_key));
5689 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
5690
5691 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
5692 PIPE_SHADER_TESS_CTRL, false,
5693 &epilog_key, compiler, debug,
5694 si_build_tcs_epilog_function,
5695 "Tessellation Control Shader Epilog");
5696 return shader->epilog != NULL;
5697 }
5698
5699 /**
5700 * Select and compile (or reuse) GS parts (prolog).
5701 */
5702 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
5703 struct ac_llvm_compiler *compiler,
5704 struct si_shader *shader,
5705 struct pipe_debug_callback *debug)
5706 {
5707 if (sscreen->info.chip_class >= GFX9) {
5708 struct si_shader *es_main_part;
5709 enum pipe_shader_type es_type = shader->key.part.gs.es->type;
5710
5711 if (shader->key.as_ngg)
5712 es_main_part = shader->key.part.gs.es->main_shader_part_ngg_es;
5713 else
5714 es_main_part = shader->key.part.gs.es->main_shader_part_es;
5715
5716 if (es_type == PIPE_SHADER_VERTEX &&
5717 !si_get_vs_prolog(sscreen, compiler, shader, debug, es_main_part,
5718 &shader->key.part.gs.vs_prolog))
5719 return false;
5720
5721 shader->previous_stage = es_main_part;
5722 }
5723
5724 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
5725 return true;
5726
5727 union si_shader_part_key prolog_key;
5728 memset(&prolog_key, 0, sizeof(prolog_key));
5729 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
5730 prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
5731
5732 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
5733 PIPE_SHADER_GEOMETRY, true,
5734 &prolog_key, compiler, debug,
5735 si_build_gs_prolog_function,
5736 "Geometry Shader Prolog");
5737 return shader->prolog2 != NULL;
5738 }
5739
5740 /**
5741 * Compute the PS prolog key, which contains all the information needed to
5742 * build the PS prolog function, and set related bits in shader->config.
5743 */
5744 void si_get_ps_prolog_key(struct si_shader *shader,
5745 union si_shader_part_key *key,
5746 bool separate_prolog)
5747 {
5748 struct si_shader_info *info = &shader->selector->info;
5749
5750 memset(key, 0, sizeof(*key));
5751 key->ps_prolog.states = shader->key.part.ps.prolog;
5752 key->ps_prolog.colors_read = info->colors_read;
5753 key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
5754 key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
5755 key->ps_prolog.wqm = info->uses_derivatives &&
5756 (key->ps_prolog.colors_read ||
5757 key->ps_prolog.states.force_persp_sample_interp ||
5758 key->ps_prolog.states.force_linear_sample_interp ||
5759 key->ps_prolog.states.force_persp_center_interp ||
5760 key->ps_prolog.states.force_linear_center_interp ||
5761 key->ps_prolog.states.bc_optimize_for_persp ||
5762 key->ps_prolog.states.bc_optimize_for_linear);
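/* WQM (whole quad mode) is needed when the prolog interpolates
 * colors in a shader that uses derivatives: helper lanes must
 * also produce valid interpolated values for the quad. */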
5763 key->ps_prolog.ancillary_vgpr_index = shader->info.ancillary_vgpr_index;
5764
5765 if (info->colors_read) {
5766 unsigned *color = shader->selector->color_attr_index;
5767
5768 if (shader->key.part.ps.prolog.color_two_side) {
5769 /* BCOLORs are stored after the last input. */
5770 key->ps_prolog.num_interp_inputs = info->num_inputs;
5771 key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
5772 if (separate_prolog)
5773 shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
5774 }
5775
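/* The switch below picks, per color, the prolog VGPR holding the
 * first of the two I/J barycentric coordinates for its
 * interpolation mode: persp sample/center/centroid at 0/2/4,
 * linear at 6/8/10 when the prolog is separate. */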
5776 for (unsigned i = 0; i < 2; i++) {
5777 unsigned interp = info->input_interpolate[color[i]];
5778 unsigned location = info->input_interpolate_loc[color[i]];
5779
5780 if (!(info->colors_read & (0xf << i*4)))
5781 continue;
5782
5783 key->ps_prolog.color_attr_index[i] = color[i];
5784
5785 if (shader->key.part.ps.prolog.flatshade_colors &&
5786 interp == TGSI_INTERPOLATE_COLOR)
5787 interp = TGSI_INTERPOLATE_CONSTANT;
5788
5789 switch (interp) {
5790 case TGSI_INTERPOLATE_CONSTANT:
5791 key->ps_prolog.color_interp_vgpr_index[i] = -1;
5792 break;
5793 case TGSI_INTERPOLATE_PERSPECTIVE:
5794 case TGSI_INTERPOLATE_COLOR:
5795 /* Force the interpolation location for colors here. */
5796 if (shader->key.part.ps.prolog.force_persp_sample_interp)
5797 location = TGSI_INTERPOLATE_LOC_SAMPLE;
5798 if (shader->key.part.ps.prolog.force_persp_center_interp)
5799 location = TGSI_INTERPOLATE_LOC_CENTER;
5800
5801 switch (location) {
5802 case TGSI_INTERPOLATE_LOC_SAMPLE:
5803 key->ps_prolog.color_interp_vgpr_index[i] = 0;
5804 if (separate_prolog) {
5805 shader->config.spi_ps_input_ena |=
5806 S_0286CC_PERSP_SAMPLE_ENA(1);
5807 }
5808 break;
5809 case TGSI_INTERPOLATE_LOC_CENTER:
5810 key->ps_prolog.color_interp_vgpr_index[i] = 2;
5811 if (separate_prolog) {
5812 shader->config.spi_ps_input_ena |=
5813 S_0286CC_PERSP_CENTER_ENA(1);
5814 }
5815 break;
5816 case TGSI_INTERPOLATE_LOC_CENTROID:
5817 key->ps_prolog.color_interp_vgpr_index[i] = 4;
5818 if (separate_prolog) {
5819 shader->config.spi_ps_input_ena |=
5820 S_0286CC_PERSP_CENTROID_ENA(1);
5821 }
5822 break;
5823 default:
5824 assert(0);
5825 }
5826 break;
5827 case TGSI_INTERPOLATE_LINEAR:
5828 /* Force the interpolation location for colors here. */
5829 if (shader->key.part.ps.prolog.force_linear_sample_interp)
5830 location = TGSI_INTERPOLATE_LOC_SAMPLE;
5831 if (shader->key.part.ps.prolog.force_linear_center_interp)
5832 location = TGSI_INTERPOLATE_LOC_CENTER;
5833
5834 /* The VGPR assignment for non-monolithic shaders
5835 * works because InitialPSInputAddr is set on the
5836 * main shader and PERSP_PULL_MODEL is never used.
5837 */
5838 switch (location) {
5839 case TGSI_INTERPOLATE_LOC_SAMPLE:
5840 key->ps_prolog.color_interp_vgpr_index[i] =
5841 separate_prolog ? 6 : 9;
5842 if (separate_prolog) {
5843 shader->config.spi_ps_input_ena |=
5844 S_0286CC_LINEAR_SAMPLE_ENA(1);
5845 }
5846 break;
5847 case TGSI_INTERPOLATE_LOC_CENTER:
5848 key->ps_prolog.color_interp_vgpr_index[i] =
5849 separate_prolog ? 8 : 11;
5850 if (separate_prolog) {
5851 shader->config.spi_ps_input_ena |=
5852 S_0286CC_LINEAR_CENTER_ENA(1);
5853 }
5854 break;
5855 case TGSI_INTERPOLATE_LOC_CENTROID:
5856 key->ps_prolog.color_interp_vgpr_index[i] =
5857 separate_prolog ? 10 : 13;
5858 if (separate_prolog) {
5859 shader->config.spi_ps_input_ena |=
5860 S_0286CC_LINEAR_CENTROID_ENA(1);
5861 }
5862 break;
5863 default:
5864 assert(0);
5865 }
5866 break;
5867 default:
5868 assert(0);
5869 }
5870 }
5871 }
5872 }
5873
5874 /**
5875 * Check whether a PS prolog is required based on the key.
5876 */
5877 bool si_need_ps_prolog(const union si_shader_part_key *key)
5878 {
5879 return key->ps_prolog.colors_read ||
5880 key->ps_prolog.states.force_persp_sample_interp ||
5881 key->ps_prolog.states.force_linear_sample_interp ||
5882 key->ps_prolog.states.force_persp_center_interp ||
5883 key->ps_prolog.states.force_linear_center_interp ||
5884 key->ps_prolog.states.bc_optimize_for_persp ||
5885 key->ps_prolog.states.bc_optimize_for_linear ||
5886 key->ps_prolog.states.poly_stipple ||
5887 key->ps_prolog.states.samplemask_log_ps_iter;
5888 }
5889
5890 /**
5891 * Compute the PS epilog key, which contains all the information needed to
5892 * build the PS epilog function.
5893 */
5894 void si_get_ps_epilog_key(struct si_shader *shader,
5895 union si_shader_part_key *key)
5896 {
5897 struct si_shader_info *info = &shader->selector->info;
5898 memset(key, 0, sizeof(*key));
5899 key->ps_epilog.colors_written = info->colors_written;
5900 key->ps_epilog.writes_z = info->writes_z;
5901 key->ps_epilog.writes_stencil = info->writes_stencil;
5902 key->ps_epilog.writes_samplemask = info->writes_samplemask;
5903 key->ps_epilog.states = shader->key.part.ps.epilog;
5904 }
5905
5906 /**
5907 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
5908 */
5909 static bool si_shader_select_ps_parts(struct si_screen *sscreen,
5910 struct ac_llvm_compiler *compiler,
5911 struct si_shader *shader,
5912 struct pipe_debug_callback *debug)
5913 {
5914 union si_shader_part_key prolog_key;
5915 union si_shader_part_key epilog_key;
5916
5917 /* Get the prolog. */
5918 si_get_ps_prolog_key(shader, &prolog_key, true);
5919
5920 /* The prolog is a no-op if none of the prolog key bits are set. */
5921 if (si_need_ps_prolog(&prolog_key)) {
5922 shader->prolog =
5923 si_get_shader_part(sscreen, &sscreen->ps_prologs,
5924 PIPE_SHADER_FRAGMENT, true,
5925 &prolog_key, compiler, debug,
5926 si_llvm_build_ps_prolog,
5927 "Fragment Shader Prolog");
5928 if (!shader->prolog)
5929 return false;
5930 }
5931
5932 /* Get the epilog. */
5933 si_get_ps_epilog_key(shader, &epilog_key);
5934
5935 shader->epilog =
5936 si_get_shader_part(sscreen, &sscreen->ps_epilogs,
5937 PIPE_SHADER_FRAGMENT, false,
5938 &epilog_key, compiler, debug,
5939 si_llvm_build_ps_epilog,
5940 "Fragment Shader Epilog");
5941 if (!shader->epilog)
5942 return false;
5943
5944 /* Enable POS_FIXED_PT if polygon stippling is enabled. */
5945 if (shader->key.part.ps.prolog.poly_stipple) {
5946 shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
5947 assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
5948 }
5949
5950 /* Set up the enable bits for per-sample shading if needed. */
5951 if (shader->key.part.ps.prolog.force_persp_sample_interp &&
5952 (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
5953 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
5954 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
5955 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
5956 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
5957 }
5958 if (shader->key.part.ps.prolog.force_linear_sample_interp &&
5959 (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
5960 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
5961 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
5962 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
5963 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
5964 }
5965 if (shader->key.part.ps.prolog.force_persp_center_interp &&
5966 (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
5967 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
5968 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
5969 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
5970 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
5971 }
5972 if (shader->key.part.ps.prolog.force_linear_center_interp &&
5973 (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
5974 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
5975 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
5976 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
5977 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
5978 }
5979
5980 /* POS_W_FLOAT requires that one of the perspective weights is enabled. */
5981 if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
5982 !(shader->config.spi_ps_input_ena & 0xf)) {
5983 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
5984 assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
5985 }
5986
5987 /* At least one pair of interpolation weights must be enabled. */
5988 if (!(shader->config.spi_ps_input_ena & 0x7f)) {
5989 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
5990 assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
5991 }
5992
5993 /* Samplemask fixup requires the sample ID. */
5994 if (shader->key.part.ps.prolog.samplemask_log_ps_iter) {
5995 shader->config.spi_ps_input_ena |= S_0286CC_ANCILLARY_ENA(1);
5996 assert(G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr));
5997 }
5998
5999 /* The sample mask input is always enabled, because the API shader always
6000 * passes it through to the epilog. Disable it here if it's unused.
6001 */
6002 if (!shader->key.part.ps.epilog.poly_line_smoothing &&
6003 !shader->selector->info.reads_samplemask)
6004 shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;
6005
6006 return true;
6007 }
6008
6009 void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
6010 unsigned *lds_size)
6011 {
6012 /* If tessellation is all offchip and on-chip GS isn't used, this
6013 * workaround is not needed, so return early; the code below is
6014 * intentionally unreachable and kept for reference. */
6015 return;
6016
6017 /* SPI barrier management bug:
6018 * Make sure we have at least 4k of LDS in use to avoid the bug.
6019 * It applies to workgroup sizes of more than one wavefront.
6020 */
6021 if (sscreen->info.family == CHIP_BONAIRE ||
6022 sscreen->info.family == CHIP_KABINI)
6023 *lds_size = MAX2(*lds_size, 8);
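/* 8 is in LDS allocation-granule units; assuming the 512-byte
 * granule of these chips, it matches the 4k minimum above. */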
6024 }
6025
6026 static void si_fix_resource_usage(struct si_screen *sscreen,
6027 struct si_shader *shader)
6028 {
6029 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
6030
6031 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
6032
6033 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
6034 si_get_max_workgroup_size(shader) > sscreen->compute_wave_size) {
6035 si_multiwave_lds_size_workaround(sscreen,
6036 &shader->config.lds_size);
6037 }
6038 }
6039
6040 bool si_create_shader_variant(struct si_screen *sscreen,
6041 struct ac_llvm_compiler *compiler,
6042 struct si_shader *shader,
6043 struct pipe_debug_callback *debug)
6044 {
6045 struct si_shader_selector *sel = shader->selector;
6046 struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
6047 int r;
6048
6049 /* LS, ES, VS are compiled on demand if the main part hasn't been
6050 * compiled for that stage.
6051 *
6052 * GS is compiled on demand if the main part hasn't been compiled
6053 * for the chosen NGG-ness.
6054 *
6055 * Vertex shaders are compiled on demand when a vertex fetch
6056 * workaround must be applied.
6057 */
6058 if (shader->is_monolithic) {
6059 /* Monolithic shader (compiled as a whole, has many variants,
6060 * may take a long time to compile).
6061 */
6062 r = si_compile_shader(sscreen, compiler, shader, debug);
6063 if (r)
6064 return false;
6065 } else {
6066 /* The shader consists of several parts:
6067 *
6068 * - the middle part is the user shader, it has 1 variant only
6069 * and it was compiled during the creation of the shader
6070 * selector
6071 * - the prolog part is inserted at the beginning
6072 * - the epilog part is inserted at the end
6073 *
6074 * The prolog and epilog have many (but simple) variants.
6075 *
6076 * Starting with gfx9, geometry and tessellation control
6077 * shaders also contain the prolog and user shader parts of
6078 * the previous shader stage.
6079 */
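/* E.g. a non-monolithic gfx9 TCS binary is laid out as:
 * [VS prolog] + VS-as-LS main (previous_stage) + TCS main +
 * TCS epilog, mirroring the part order used by the monolithic
 * path in si_compile_shader(). */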
6080
6081 if (!mainp)
6082 return false;
6083
6084 /* Copy the compiled shader data over. */
6085 shader->is_binary_shared = true;
6086 shader->binary = mainp->binary;
6087 shader->config = mainp->config;
6088 shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
6089 shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
6090 shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
6091 shader->info.ancillary_vgpr_index = mainp->info.ancillary_vgpr_index;
6092 memcpy(shader->info.vs_output_param_offset,
6093 mainp->info.vs_output_param_offset,
6094 sizeof(mainp->info.vs_output_param_offset));
6095 shader->info.uses_instanceid = mainp->info.uses_instanceid;
6096 shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
6097 shader->info.nr_param_exports = mainp->info.nr_param_exports;
6098
6099 /* Select prologs and/or epilogs. */
6100 switch (sel->type) {
6101 case PIPE_SHADER_VERTEX:
6102 if (!si_shader_select_vs_parts(sscreen, compiler, shader, debug))
6103 return false;
6104 break;
6105 case PIPE_SHADER_TESS_CTRL:
6106 if (!si_shader_select_tcs_parts(sscreen, compiler, shader, debug))
6107 return false;
6108 break;
6109 case PIPE_SHADER_TESS_EVAL:
6110 break;
6111 case PIPE_SHADER_GEOMETRY:
6112 if (!si_shader_select_gs_parts(sscreen, compiler, shader, debug))
6113 return false;
6114 break;
6115 case PIPE_SHADER_FRAGMENT:
6116 if (!si_shader_select_ps_parts(sscreen, compiler, shader, debug))
6117 return false;
6118
6119 /* Make sure we have at least as many VGPRs as there
6120 * are allocated inputs.
6121 */
6122 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
6123 shader->info.num_input_vgprs);
6124 break;
6125 default:;
6126 }
6127
6128 /* Update SGPR and VGPR counts. */
6129 if (shader->prolog) {
6130 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
6131 shader->prolog->config.num_sgprs);
6132 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
6133 shader->prolog->config.num_vgprs);
6134 }
6135 if (shader->previous_stage) {
6136 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
6137 shader->previous_stage->config.num_sgprs);
6138 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
6139 shader->previous_stage->config.num_vgprs);
6140 shader->config.spilled_sgprs =
6141 MAX2(shader->config.spilled_sgprs,
6142 shader->previous_stage->config.spilled_sgprs);
6143 shader->config.spilled_vgprs =
6144 MAX2(shader->config.spilled_vgprs,
6145 shader->previous_stage->config.spilled_vgprs);
6146 shader->info.private_mem_vgprs =
6147 MAX2(shader->info.private_mem_vgprs,
6148 shader->previous_stage->info.private_mem_vgprs);
6149 shader->config.scratch_bytes_per_wave =
6150 MAX2(shader->config.scratch_bytes_per_wave,
6151 shader->previous_stage->config.scratch_bytes_per_wave);
6152 shader->info.uses_instanceid |=
6153 shader->previous_stage->info.uses_instanceid;
6154 }
6155 if (shader->prolog2) {
6156 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
6157 shader->prolog2->config.num_sgprs);
6158 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
6159 shader->prolog2->config.num_vgprs);
6160 }
6161 if (shader->epilog) {
6162 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
6163 shader->epilog->config.num_sgprs);
6164 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
6165 shader->epilog->config.num_vgprs);
6166 }
6167 si_calculate_max_simd_waves(shader);
6168 }
6169
6170 if (shader->key.as_ngg) {
6171 assert(!shader->key.as_es && !shader->key.as_ls);
6172 gfx10_ngg_calculate_subgroup_info(shader);
6173 } else if (sscreen->info.chip_class >= GFX9 && sel->type == PIPE_SHADER_GEOMETRY) {
6174 gfx9_get_gs_info(shader->previous_stage_sel, sel, &shader->gs_info);
6175 }
6176
6177 si_fix_resource_usage(sscreen, shader);
6178 si_shader_dump(sscreen, shader, debug, stderr, true);
6179
6180 /* Upload. */
6181 if (!si_shader_binary_upload(sscreen, shader, 0)) {
6182 fprintf(stderr, "LLVM failed to upload shader\n");
6183 return false;
6184 }
6185
6186 return true;
6187 }
6188
6189 void si_shader_destroy(struct si_shader *shader)
6190 {
6191 if (shader->scratch_bo)
6192 si_resource_reference(&shader->scratch_bo, NULL);
6193
6194 si_resource_reference(&shader->bo, NULL);
6195
6196 if (!shader->is_binary_shared)
6197 si_shader_binary_clean(&shader->binary);
6198
6199 free(shader->shader_log);
6200 }