radeonsi: move VS_STATE.LS_OUT_PATCH_SIZE a few bits higher to make space there
src/gallium/drivers/radeonsi/si_shader.c (mesa.git)
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include <llvm/Config/llvm-config.h>
26
27 #include "util/u_memory.h"
28 #include "tgsi/tgsi_strings.h"
29 #include "tgsi/tgsi_from_mesa.h"
30
31 #include "ac_exp_param.h"
32 #include "ac_shader_util.h"
33 #include "ac_rtld.h"
34 #include "ac_llvm_util.h"
35 #include "si_shader_internal.h"
36 #include "si_pipe.h"
37 #include "sid.h"
38
39 #include "compiler/nir/nir.h"
40 #include "compiler/nir/nir_serialize.h"
41
42 static const char scratch_rsrc_dword0_symbol[] =
43 "SCRATCH_RSRC_DWORD0";
44
45 static const char scratch_rsrc_dword1_symbol[] =
46 "SCRATCH_RSRC_DWORD1";
47
48 static void si_llvm_emit_barrier(struct si_shader_context *ctx);
49
50 static void si_dump_shader_key(const struct si_shader *shader, FILE *f);
51
52 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
53 union si_shader_part_key *key);
54 static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
55 union si_shader_part_key *key);
56 static void si_fix_resource_usage(struct si_screen *sscreen,
57 struct si_shader *shader);
58
59 static bool llvm_type_is_64bit(struct si_shader_context *ctx,
60 LLVMTypeRef type)
61 {
62 if (type == ctx->ac.i64 || type == ctx->ac.f64)
63 return true;
64
65 return false;
66 }
67
68 /** Whether the shader runs as a combination of multiple API shaders */
69 static bool is_multi_part_shader(struct si_shader_context *ctx)
70 {
71 if (ctx->screen->info.chip_class <= GFX8)
72 return false;
73
74 return ctx->shader->key.as_ls ||
75 ctx->shader->key.as_es ||
76 ctx->type == PIPE_SHADER_TESS_CTRL ||
77 ctx->type == PIPE_SHADER_GEOMETRY;
78 }
79
80 /** Whether the shader runs on a merged HW stage (LSHS or ESGS) */
81 bool si_is_merged_shader(struct si_shader_context *ctx)
82 {
83 return ctx->shader->key.as_ngg || is_multi_part_shader(ctx);
84 }
85
86 /**
87 * Returns a unique index for a per-patch semantic name and index. The index
88 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
89 * can be calculated.
90 */
91 unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
92 {
93 switch (semantic_name) {
94 case TGSI_SEMANTIC_TESSOUTER:
95 return 0;
96 case TGSI_SEMANTIC_TESSINNER:
97 return 1;
98 case TGSI_SEMANTIC_PATCH:
99 assert(index < 30);
100 return 2 + index;
101
102 default:
103 assert(!"invalid semantic name");
104 return 0;
105 }
106 }
107
108 /**
109 * Returns a unique index for a semantic name and index. The index must be
110 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
111 * calculated.
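 *
 * A sketch of the resulting layout, in increasing order:
 *   POSITION                      -> 0
 *   GENERIC[n]                    -> 1 + n  (n < SI_MAX_IO_GENERIC)
 *   FOG                           -> SI_MAX_IO_GENERIC + 1
 *   COLOR[0..1]                   -> SI_MAX_IO_GENERIC + 2..3
 *   BCOLOR[0..1]                  -> SI_MAX_IO_GENERIC + 4..5  (aliases COLOR
 *                                    when counted as a varying)
 *   TEXCOORD[0..7]                -> SI_MAX_IO_GENERIC + 6..13
 *   CLIPDIST, CLIPVERTEX, PSIZE   -> SI_MAX_IO_GENERIC + 14..17
 *   LAYER, VIEWPORT_INDEX, PRIMID -> SI_MAX_IO_GENERIC + 18..20 (<= 63)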
112 */
113 unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index,
114 unsigned is_varying)
115 {
116 switch (semantic_name) {
117 case TGSI_SEMANTIC_POSITION:
118 return 0;
119 case TGSI_SEMANTIC_GENERIC:
120 /* Since some shader stages use the highest used IO index
121 * to determine the size to allocate for inputs/outputs
122 * (in LDS, tess and GS rings), GENERIC should be placed right
123 * after POSITION to make that size as small as possible.
124 */
125 if (index < SI_MAX_IO_GENERIC)
126 return 1 + index;
127
128 assert(!"invalid generic index");
129 return 0;
130 case TGSI_SEMANTIC_FOG:
131 return SI_MAX_IO_GENERIC + 1;
132 case TGSI_SEMANTIC_COLOR:
133 assert(index < 2);
134 return SI_MAX_IO_GENERIC + 2 + index;
135 case TGSI_SEMANTIC_BCOLOR:
136 assert(index < 2);
137 /* If it's a varying, COLOR and BCOLOR alias. */
138 if (is_varying)
139 return SI_MAX_IO_GENERIC + 2 + index;
140 else
141 return SI_MAX_IO_GENERIC + 4 + index;
142 case TGSI_SEMANTIC_TEXCOORD:
143 assert(index < 8);
144 return SI_MAX_IO_GENERIC + 6 + index;
145
146 /* These are rarely used between LS and HS or ES and GS. */
147 case TGSI_SEMANTIC_CLIPDIST:
148 assert(index < 2);
149 return SI_MAX_IO_GENERIC + 6 + 8 + index;
150 case TGSI_SEMANTIC_CLIPVERTEX:
151 return SI_MAX_IO_GENERIC + 6 + 8 + 2;
152 case TGSI_SEMANTIC_PSIZE:
153 return SI_MAX_IO_GENERIC + 6 + 8 + 3;
154
155 /* These can't be written by LS, HS, or ES. */
156 case TGSI_SEMANTIC_LAYER:
157 return SI_MAX_IO_GENERIC + 6 + 8 + 4;
158 case TGSI_SEMANTIC_VIEWPORT_INDEX:
159 return SI_MAX_IO_GENERIC + 6 + 8 + 5;
160 case TGSI_SEMANTIC_PRIMID:
161 STATIC_ASSERT(SI_MAX_IO_GENERIC + 6 + 8 + 6 <= 63);
162 return SI_MAX_IO_GENERIC + 6 + 8 + 6;
163 default:
164 fprintf(stderr, "invalid semantic name = %u\n", semantic_name);
165 assert(!"invalid semantic name");
166 return 0;
167 }
168 }
169
170 /**
171 * Get the value of a shader input parameter and extract a bitfield.
172 */
173 static LLVMValueRef unpack_llvm_param(struct si_shader_context *ctx,
174 LLVMValueRef value, unsigned rshift,
175 unsigned bitwidth)
176 {
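/* A worked example: rshift = 11, bitwidth = 13 yields
 * (value >> 11) & 0x1fff, i.e. bits [11:23] of the input. */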
177 if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
178 value = ac_to_integer(&ctx->ac, value);
179
180 if (rshift)
181 value = LLVMBuildLShr(ctx->ac.builder, value,
182 LLVMConstInt(ctx->i32, rshift, 0), "");
183
184 if (rshift + bitwidth < 32) {
185 unsigned mask = (1 << bitwidth) - 1;
186 value = LLVMBuildAnd(ctx->ac.builder, value,
187 LLVMConstInt(ctx->i32, mask, 0), "");
188 }
189
190 return value;
191 }
192
193 LLVMValueRef si_unpack_param(struct si_shader_context *ctx,
194 struct ac_arg param, unsigned rshift,
195 unsigned bitwidth)
196 {
197 LLVMValueRef value = ac_get_arg(&ctx->ac, param);
198
199 return unpack_llvm_param(ctx, value, rshift, bitwidth);
200 }
201
202 static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
203 {
204 switch (ctx->type) {
205 case PIPE_SHADER_TESS_CTRL:
206 return si_unpack_param(ctx, ctx->args.tcs_rel_ids, 0, 8);
207
208 case PIPE_SHADER_TESS_EVAL:
209 return ac_get_arg(&ctx->ac, ctx->tes_rel_patch_id);
210
211 default:
212 assert(0);
213 return NULL;
214 }
215 }
216
217 /* Tessellation shaders pass outputs to the next shader using LDS.
218 *
219 * LS outputs = TCS inputs
220 * TCS outputs = TES inputs
221 *
222 * The LDS layout is:
223 * - TCS inputs for patch 0
224 * - TCS inputs for patch 1
225 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
226 * - ...
227 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
228 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
229 * - TCS outputs for patch 1
230 * - Per-patch TCS outputs for patch 1
231 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
232 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
233 * - ...
234 *
235 * All three shaders VS(LS), TCS, TES share the same LDS space.
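 *
 * For example (made-up sizes): with 4 TCS output vertices per patch,
 * 8 per-vertex output slots and 2 per-patch output slots, the output
 * patch stride is 4 * (8 * 4) + 2 * 4 = 136 dwords, so the outputs of
 * patch N start N * 136 dwords after get_tcs_out_patch0_offset().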
236 */
237
238 static LLVMValueRef
239 get_tcs_in_patch_stride(struct si_shader_context *ctx)
240 {
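/* This is VS_STATE.LS_OUT_PATCH_SIZE (the input patch stride in
 * dwords), which this commit moves up to bits [11:23]. */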
241 return si_unpack_param(ctx, ctx->vs_state_bits, 11, 13);
242 }
243
244 static unsigned get_tcs_out_vertex_dw_stride_constant(struct si_shader_context *ctx)
245 {
246 assert(ctx->type == PIPE_SHADER_TESS_CTRL);
247
248 if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
249 return util_last_bit64(ctx->shader->key.mono.u.ff_tcs_inputs_to_copy) * 4;
250
251 return util_last_bit64(ctx->shader->selector->outputs_written) * 4;
252 }
253
254 static LLVMValueRef get_tcs_out_vertex_dw_stride(struct si_shader_context *ctx)
255 {
256 unsigned stride = get_tcs_out_vertex_dw_stride_constant(ctx);
257
258 return LLVMConstInt(ctx->i32, stride, 0);
259 }
260
261 static LLVMValueRef get_tcs_out_patch_stride(struct si_shader_context *ctx)
262 {
263 if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
264 return si_unpack_param(ctx, ctx->tcs_out_lds_layout, 0, 13);
265
266 const struct si_shader_info *info = &ctx->shader->selector->info;
267 unsigned tcs_out_vertices = info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
268 unsigned vertex_dw_stride = get_tcs_out_vertex_dw_stride_constant(ctx);
269 unsigned num_patch_outputs = util_last_bit64(ctx->shader->selector->patch_outputs_written);
270 unsigned patch_dw_stride = tcs_out_vertices * vertex_dw_stride +
271 num_patch_outputs * 4;
272 return LLVMConstInt(ctx->i32, patch_dw_stride, 0);
273 }
274
275 static LLVMValueRef
276 get_tcs_out_patch0_offset(struct si_shader_context *ctx)
277 {
278 return LLVMBuildMul(ctx->ac.builder,
279 si_unpack_param(ctx, ctx->tcs_out_lds_offsets, 0, 16),
280 LLVMConstInt(ctx->i32, 4, 0), "");
281 }
282
283 static LLVMValueRef
284 get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
285 {
286 return LLVMBuildMul(ctx->ac.builder,
287 si_unpack_param(ctx, ctx->tcs_out_lds_offsets, 16, 16),
288 LLVMConstInt(ctx->i32, 4, 0), "");
289 }
290
291 static LLVMValueRef
292 get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
293 {
294 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
295 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
296
297 return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
298 }
299
300 static LLVMValueRef
301 get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
302 {
303 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
304 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
305 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
306
307 return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id, patch0_offset);
308 }
309
310 static LLVMValueRef
311 get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
312 {
313 LLVMValueRef patch0_patch_data_offset =
314 get_tcs_out_patch0_patch_data_offset(ctx);
315 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
316 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
317
318 return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id, patch0_patch_data_offset);
319 }
320
321 static LLVMValueRef get_num_tcs_out_vertices(struct si_shader_context *ctx)
322 {
323 unsigned tcs_out_vertices =
324 ctx->shader->selector ?
325 ctx->shader->selector->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] : 0;
326
327 /* If !tcs_out_vertices, it's either the fixed-func TCS or the TCS epilog. */
328 if (ctx->type == PIPE_SHADER_TESS_CTRL && tcs_out_vertices)
329 return LLVMConstInt(ctx->i32, tcs_out_vertices, 0);
330
331 return si_unpack_param(ctx, ctx->tcs_offchip_layout, 6, 6);
332 }
333
334 static LLVMValueRef get_tcs_in_vertex_dw_stride(struct si_shader_context *ctx)
335 {
336 unsigned stride;
337
338 switch (ctx->type) {
339 case PIPE_SHADER_VERTEX:
340 stride = ctx->shader->selector->lshs_vertex_stride / 4;
341 return LLVMConstInt(ctx->i32, stride, 0);
342
343 case PIPE_SHADER_TESS_CTRL:
344 if (ctx->screen->info.chip_class >= GFX9 &&
345 ctx->shader->is_monolithic) {
346 stride = ctx->shader->key.part.tcs.ls->lshs_vertex_stride / 4;
347 return LLVMConstInt(ctx->i32, stride, 0);
348 }
349 return si_unpack_param(ctx, ctx->vs_state_bits, 24, 8);
350
351 default:
352 assert(0);
353 return NULL;
354 }
355 }
356
357 static LLVMValueRef unpack_sint16(struct si_shader_context *ctx,
358 LLVMValueRef i32, unsigned index)
359 {
360 assert(index <= 1);
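/* Index 1 takes the high 16 bits, index 0 sign-extends the low 16.
 * E.g. unpack_sint16(0xfff38000, 0) = -32768 and
 * unpack_sint16(0xfff38000, 1) = -13. */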
361
362 if (index == 1)
363 return LLVMBuildAShr(ctx->ac.builder, i32,
364 LLVMConstInt(ctx->i32, 16, 0), "");
365
366 return LLVMBuildSExt(ctx->ac.builder,
367 LLVMBuildTrunc(ctx->ac.builder, i32,
368 ctx->ac.i16, ""),
369 ctx->i32, "");
370 }
371
372 void si_llvm_load_input_vs(
373 struct si_shader_context *ctx,
374 unsigned input_index,
375 LLVMValueRef out[4])
376 {
377 const struct si_shader_info *info = &ctx->shader->selector->info;
378 unsigned vs_blit_property = info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
379
380 if (vs_blit_property) {
381 LLVMValueRef vertex_id = ctx->abi.vertex_id;
382 LLVMValueRef sel_x1 = LLVMBuildICmp(ctx->ac.builder,
383 LLVMIntULE, vertex_id,
384 ctx->i32_1, "");
385 /* Use LLVMIntNE, because we have 3 vertices and only
386 * the middle one should use y2.
387 */
388 LLVMValueRef sel_y1 = LLVMBuildICmp(ctx->ac.builder,
389 LLVMIntNE, vertex_id,
390 ctx->i32_1, "");
391
392 unsigned param_vs_blit_inputs = ctx->vs_blit_inputs.arg_index;
393 if (input_index == 0) {
394 /* Position: */
395 LLVMValueRef x1y1 = LLVMGetParam(ctx->main_fn,
396 param_vs_blit_inputs);
397 LLVMValueRef x2y2 = LLVMGetParam(ctx->main_fn,
398 param_vs_blit_inputs + 1);
399
400 LLVMValueRef x1 = unpack_sint16(ctx, x1y1, 0);
401 LLVMValueRef y1 = unpack_sint16(ctx, x1y1, 1);
402 LLVMValueRef x2 = unpack_sint16(ctx, x2y2, 0);
403 LLVMValueRef y2 = unpack_sint16(ctx, x2y2, 1);
404
405 LLVMValueRef x = LLVMBuildSelect(ctx->ac.builder, sel_x1,
406 x1, x2, "");
407 LLVMValueRef y = LLVMBuildSelect(ctx->ac.builder, sel_y1,
408 y1, y2, "");
409
410 out[0] = LLVMBuildSIToFP(ctx->ac.builder, x, ctx->f32, "");
411 out[1] = LLVMBuildSIToFP(ctx->ac.builder, y, ctx->f32, "");
412 out[2] = LLVMGetParam(ctx->main_fn,
413 param_vs_blit_inputs + 2);
414 out[3] = ctx->ac.f32_1;
415 return;
416 }
417
418 /* Color or texture coordinates: */
419 assert(input_index == 1);
420
421 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
422 for (int i = 0; i < 4; i++) {
423 out[i] = LLVMGetParam(ctx->main_fn,
424 param_vs_blit_inputs + 3 + i);
425 }
426 } else {
427 assert(vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD);
428 LLVMValueRef x1 = LLVMGetParam(ctx->main_fn,
429 param_vs_blit_inputs + 3);
430 LLVMValueRef y1 = LLVMGetParam(ctx->main_fn,
431 param_vs_blit_inputs + 4);
432 LLVMValueRef x2 = LLVMGetParam(ctx->main_fn,
433 param_vs_blit_inputs + 5);
434 LLVMValueRef y2 = LLVMGetParam(ctx->main_fn,
435 param_vs_blit_inputs + 6);
436
437 out[0] = LLVMBuildSelect(ctx->ac.builder, sel_x1,
438 x1, x2, "");
439 out[1] = LLVMBuildSelect(ctx->ac.builder, sel_y1,
440 y1, y2, "");
441 out[2] = LLVMGetParam(ctx->main_fn,
442 param_vs_blit_inputs + 7);
443 out[3] = LLVMGetParam(ctx->main_fn,
444 param_vs_blit_inputs + 8);
445 }
446 return;
447 }
448
449 unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
450 union si_vs_fix_fetch fix_fetch;
451 LLVMValueRef vb_desc;
452 LLVMValueRef vertex_index;
453 LLVMValueRef tmp;
454
455 if (input_index < num_vbos_in_user_sgprs) {
456 vb_desc = ac_get_arg(&ctx->ac, ctx->vb_descriptors[input_index]);
457 } else {
458 unsigned index = input_index - num_vbos_in_user_sgprs;
459 vb_desc = ac_build_load_to_sgpr(&ctx->ac,
460 ac_get_arg(&ctx->ac, ctx->vertex_buffers),
461 LLVMConstInt(ctx->i32, index, 0));
462 }
463
464 vertex_index = LLVMGetParam(ctx->main_fn,
465 ctx->vertex_index0.arg_index +
466 input_index);
467
468 /* Use the open-coded implementation for all loads of doubles and
469 * of dword-sized data that needs fixups. We need to insert conversion
470 * code anyway, and the amd/common code does it for us.
471 *
472 * Note: On LLVM <= 8, we can only open-code formats with
473 * channel size >= 4 bytes.
474 */
475 bool opencode = ctx->shader->key.mono.vs_fetch_opencode & (1 << input_index);
476 fix_fetch.bits = ctx->shader->key.mono.vs_fix_fetch[input_index].bits;
477 if (opencode ||
478 (fix_fetch.u.log_size == 3 && fix_fetch.u.format == AC_FETCH_FORMAT_FLOAT) ||
479 (fix_fetch.u.log_size == 2)) {
480 tmp = ac_build_opencoded_load_format(
481 &ctx->ac, fix_fetch.u.log_size, fix_fetch.u.num_channels_m1 + 1,
482 fix_fetch.u.format, fix_fetch.u.reverse, !opencode,
483 vb_desc, vertex_index, ctx->ac.i32_0, ctx->ac.i32_0, 0, true);
484 for (unsigned i = 0; i < 4; ++i)
485 out[i] = LLVMBuildExtractElement(ctx->ac.builder, tmp, LLVMConstInt(ctx->i32, i, false), "");
486 return;
487 }
488
489 /* Do multiple loads for special formats. */
490 unsigned required_channels = util_last_bit(info->input_usage_mask[input_index]);
491 LLVMValueRef fetches[4];
492 unsigned num_fetches;
493 unsigned fetch_stride;
494 unsigned channels_per_fetch;
495
496 if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2) {
497 num_fetches = MIN2(required_channels, 3);
498 fetch_stride = 1 << fix_fetch.u.log_size;
499 channels_per_fetch = 1;
500 } else {
501 num_fetches = 1;
502 fetch_stride = 0;
503 channels_per_fetch = required_channels;
504 }
505
506 for (unsigned i = 0; i < num_fetches; ++i) {
507 LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);
508 fetches[i] = ac_build_buffer_load_format(&ctx->ac, vb_desc, vertex_index, voffset,
509 channels_per_fetch, 0, true);
510 }
511
512 if (num_fetches == 1 && channels_per_fetch > 1) {
513 LLVMValueRef fetch = fetches[0];
514 for (unsigned i = 0; i < channels_per_fetch; ++i) {
515 tmp = LLVMConstInt(ctx->i32, i, false);
516 fetches[i] = LLVMBuildExtractElement(
517 ctx->ac.builder, fetch, tmp, "");
518 }
519 num_fetches = channels_per_fetch;
520 channels_per_fetch = 1;
521 }
522
523 for (unsigned i = num_fetches; i < 4; ++i)
524 fetches[i] = LLVMGetUndef(ctx->f32);
525
526 if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2 &&
527 required_channels == 4) {
528 if (fix_fetch.u.format == AC_FETCH_FORMAT_UINT || fix_fetch.u.format == AC_FETCH_FORMAT_SINT)
529 fetches[3] = ctx->ac.i32_1;
530 else
531 fetches[3] = ctx->ac.f32_1;
532 } else if (fix_fetch.u.log_size == 3 &&
533 (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ||
534 fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED ||
535 fix_fetch.u.format == AC_FETCH_FORMAT_SINT) &&
536 required_channels == 4) {
537 /* For 2_10_10_10, the hardware returns an unsigned value;
538 * convert it to a signed one.
539 */
540 LLVMValueRef tmp = fetches[3];
541 LLVMValueRef c30 = LLVMConstInt(ctx->i32, 30, 0);
542
543 /* First, recover the sign-extended signed integer value. */
544 if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED)
545 tmp = LLVMBuildFPToUI(ctx->ac.builder, tmp, ctx->i32, "");
546 else
547 tmp = ac_to_integer(&ctx->ac, tmp);
548
549 /* For the integer-like cases, do a natural sign extension.
550 *
551 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
552 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
553 * exponent.
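 *
 * E.g. for SINT, alpha bits 0b11 give (0b11 << 30) >> 30 = -1 with an
 * arithmetic shift. For SNORM, the shift by 7 first moves the two
 * exponent LSBs (bits 23:24) up into bits 30:31.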
554 */
555 tmp = LLVMBuildShl(ctx->ac.builder, tmp,
556 fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ?
557 LLVMConstInt(ctx->i32, 7, 0) : c30, "");
558 tmp = LLVMBuildAShr(ctx->ac.builder, tmp, c30, "");
559
560 /* Convert back to the right type. */
561 if (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM) {
562 LLVMValueRef clamp;
563 LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
564 tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
565 clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, tmp, neg_one, "");
566 tmp = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, tmp, "");
567 } else if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED) {
568 tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
569 }
570
571 fetches[3] = tmp;
572 }
573
574 for (unsigned i = 0; i < 4; ++i)
575 out[i] = ac_to_float(&ctx->ac, fetches[i]);
576 }
577
578 LLVMValueRef si_get_primitive_id(struct si_shader_context *ctx,
579 unsigned swizzle)
580 {
581 if (swizzle > 0)
582 return ctx->i32_0;
583
584 switch (ctx->type) {
585 case PIPE_SHADER_VERTEX:
586 return ac_get_arg(&ctx->ac, ctx->vs_prim_id);
587 case PIPE_SHADER_TESS_CTRL:
588 return ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id);
589 case PIPE_SHADER_TESS_EVAL:
590 return ac_get_arg(&ctx->ac, ctx->args.tes_patch_id);
591 case PIPE_SHADER_GEOMETRY:
592 return ac_get_arg(&ctx->ac, ctx->args.gs_prim_id);
593 default:
594 assert(0);
595 return ctx->i32_0;
596 }
597 }
598
599 static LLVMValueRef get_dw_address_from_generic_indices(struct si_shader_context *ctx,
600 LLVMValueRef vertex_dw_stride,
601 LLVMValueRef base_addr,
602 LLVMValueRef vertex_index,
603 LLVMValueRef param_index,
604 ubyte name, ubyte index)
605 {
606 if (vertex_dw_stride) {
607 base_addr = ac_build_imad(&ctx->ac, vertex_index,
608 vertex_dw_stride, base_addr);
609 }
610
611 if (param_index) {
612 base_addr = ac_build_imad(&ctx->ac, param_index,
613 LLVMConstInt(ctx->i32, 4, 0), base_addr);
614 }
615
616 int param = name == TGSI_SEMANTIC_PATCH ||
617 name == TGSI_SEMANTIC_TESSINNER ||
618 name == TGSI_SEMANTIC_TESSOUTER ?
619 si_shader_io_get_unique_index_patch(name, index) :
620 si_shader_io_get_unique_index(name, index, false);
621
622 /* Add the base address of the element. */
623 return LLVMBuildAdd(ctx->ac.builder, base_addr,
624 LLVMConstInt(ctx->i32, param * 4, 0), "");
625 }
626
627 /* The offchip buffer layout for TCS->TES is
628 *
629 * - attribute 0 of patch 0 vertex 0
630 * - attribute 0 of patch 0 vertex 1
631 * - attribute 0 of patch 0 vertex 2
632 * ...
633 * - attribute 0 of patch 1 vertex 0
634 * - attribute 0 of patch 1 vertex 1
635 * ...
636 * - attribute 1 of patch 0 vertex 0
637 * - attribute 1 of patch 0 vertex 1
638 * ...
639 * - per patch attribute 0 of patch 0
640 * - per patch attribute 0 of patch 1
641 * ...
642 *
643 * Note that every attribute has 4 components.
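 *
 * A sketch of the address math this implies (see
 * get_tcs_tes_buffer_address): a per-vertex attribute lives at byte offset
 *   16 * (attr * vertices_per_patch * num_patches +
 *         rel_patch_id * vertices_per_patch + vertex_index),
 * while per-patch attributes use num_patches as the attribute stride and
 * start at the per-patch data offset from tcs_offchip_layout.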
644 */
645 static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
646 LLVMValueRef rel_patch_id,
647 LLVMValueRef vertex_index,
648 LLVMValueRef param_index)
649 {
650 LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
651 LLVMValueRef param_stride, constant16;
652
653 vertices_per_patch = get_num_tcs_out_vertices(ctx);
654 num_patches = si_unpack_param(ctx, ctx->tcs_offchip_layout, 0, 6);
655 total_vertices = LLVMBuildMul(ctx->ac.builder, vertices_per_patch,
656 num_patches, "");
657
658 constant16 = LLVMConstInt(ctx->i32, 16, 0);
659 if (vertex_index) {
660 base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
661 vertices_per_patch, vertex_index);
662 param_stride = total_vertices;
663 } else {
664 base_addr = rel_patch_id;
665 param_stride = num_patches;
666 }
667
668 base_addr = ac_build_imad(&ctx->ac, param_index, param_stride, base_addr);
669 base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");
670
671 if (!vertex_index) {
672 LLVMValueRef patch_data_offset =
673 si_unpack_param(ctx, ctx->tcs_offchip_layout, 12, 20);
674
675 base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
676 patch_data_offset, "");
677 }
678 return base_addr;
679 }
680
681 static LLVMValueRef get_tcs_tes_buffer_address_from_generic_indices(
682 struct si_shader_context *ctx,
683 LLVMValueRef vertex_index,
684 LLVMValueRef param_index,
685 ubyte name, ubyte index)
686 {
687 unsigned param_index_base;
688
689 param_index_base = name == TGSI_SEMANTIC_PATCH ||
690 name == TGSI_SEMANTIC_TESSINNER ||
691 name == TGSI_SEMANTIC_TESSOUTER ?
692 si_shader_io_get_unique_index_patch(name, index) :
693 si_shader_io_get_unique_index(name, index, false);
694
695 if (param_index) {
696 param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
697 LLVMConstInt(ctx->i32, param_index_base, 0),
698 "");
699 } else {
700 param_index = LLVMConstInt(ctx->i32, param_index_base, 0);
701 }
702
703 return get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx),
704 vertex_index, param_index);
705 }
706
707 static LLVMValueRef si_build_gather_64bit(struct si_shader_context *ctx,
708 LLVMTypeRef type,
709 LLVMValueRef val1,
710 LLVMValueRef val2)
711 {
712 LLVMValueRef values[2] = {
713 ac_to_integer(&ctx->ac, val1),
714 ac_to_integer(&ctx->ac, val2),
715 };
716 LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, 2);
717 return LLVMBuildBitCast(ctx->ac.builder, result, type, "");
718 }
719
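/**
 * Load from a memory ring (here the tess offchip ring) via a buffer
 * descriptor.
 *
 * \param swizzle	component to load; ~0 loads a whole vec4; 64-bit
 *			components are assembled from two dword loads
 */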
720 static LLVMValueRef buffer_load(struct si_shader_context *ctx,
721 LLVMTypeRef type, unsigned swizzle,
722 LLVMValueRef buffer, LLVMValueRef offset,
723 LLVMValueRef base, bool can_speculate)
724 {
725 LLVMValueRef value, value2;
726 LLVMTypeRef vec_type = LLVMVectorType(type, 4);
727
728 if (swizzle == ~0) {
729 value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
730 0, ac_glc, can_speculate, false);
731
732 return LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
733 }
734
735 if (!llvm_type_is_64bit(ctx, type)) {
736 value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
737 0, ac_glc, can_speculate, false);
738
739 value = LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
740 return LLVMBuildExtractElement(ctx->ac.builder, value,
741 LLVMConstInt(ctx->i32, swizzle, 0), "");
742 }
743
744 value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
745 swizzle * 4, ac_glc, can_speculate, false);
746
747 value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
748 swizzle * 4 + 4, ac_glc, can_speculate, false);
749
750 return si_build_gather_64bit(ctx, type, value, value2);
751 }
752
753 /**
754 * Load from LSHS LDS storage.
755 *
756 * \param type output value type
757 * \param swizzle offset (typically 0..3); it can be ~0, which loads a vec4
758 * \param dw_addr address in dwords
759 */
760 static LLVMValueRef lshs_lds_load(struct si_shader_context *ctx,
761 LLVMTypeRef type, unsigned swizzle,
762 LLVMValueRef dw_addr)
763 {
764 LLVMValueRef value;
765
766 if (swizzle == ~0) {
767 LLVMValueRef values[4];
768
769 for (unsigned chan = 0; chan < 4; chan++)
770 values[chan] = lshs_lds_load(ctx, type, chan, dw_addr);
771
772 return ac_build_gather_values(&ctx->ac, values, 4);
773 }
774
775 /* Split 64-bit loads. */
776 if (llvm_type_is_64bit(ctx, type)) {
777 LLVMValueRef lo, hi;
778
779 lo = lshs_lds_load(ctx, ctx->i32, swizzle, dw_addr);
780 hi = lshs_lds_load(ctx, ctx->i32, swizzle + 1, dw_addr);
781 return si_build_gather_64bit(ctx, type, lo, hi);
782 }
783
784 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
785 LLVMConstInt(ctx->i32, swizzle, 0), "");
786
787 value = ac_lds_load(&ctx->ac, dw_addr);
788
789 return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
790 }
791
792 /**
793 * Store to LSHS LDS storage.
794 *
795 * \param dw_offset_imm	dword offset (typically 0..3)
796 * \param dw_addr address in dwords
797 * \param value value to store
798 */
799 static void lshs_lds_store(struct si_shader_context *ctx,
800 unsigned dw_offset_imm, LLVMValueRef dw_addr,
801 LLVMValueRef value)
802 {
803 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
804 LLVMConstInt(ctx->i32, dw_offset_imm, 0), "");
805
806 ac_lds_store(&ctx->ac, dw_addr, value);
807 }
808
809 enum si_tess_ring {
810 TCS_FACTOR_RING,
811 TESS_OFFCHIP_RING_TCS,
812 TESS_OFFCHIP_RING_TES,
813 };
814
815 static LLVMValueRef get_tess_ring_descriptor(struct si_shader_context *ctx,
816 enum si_tess_ring ring)
817 {
818 LLVMBuilderRef builder = ctx->ac.builder;
819 LLVMValueRef addr = ac_get_arg(&ctx->ac,
820 ring == TESS_OFFCHIP_RING_TES ?
821 ctx->tes_offchip_addr :
822 ctx->tcs_out_lds_layout);
823
824 /* TCS only receives high 13 bits of the address. */
825 if (ring == TESS_OFFCHIP_RING_TCS || ring == TCS_FACTOR_RING) {
826 addr = LLVMBuildAnd(builder, addr,
827 LLVMConstInt(ctx->i32, 0xfff80000, 0), "");
828 }
829
830 if (ring == TCS_FACTOR_RING) {
831 unsigned tf_offset = ctx->screen->tess_offchip_ring_size;
832 addr = LLVMBuildAdd(builder, addr,
833 LLVMConstInt(ctx->i32, tf_offset, 0), "");
834 }
835
836 uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
837 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
838 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
839 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
840
841 if (ctx->screen->info.chip_class >= GFX10)
842 rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
843 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
844 S_008F0C_RESOURCE_LEVEL(1);
845 else
846 rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
847 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
848
849 LLVMValueRef desc[4];
850 desc[0] = addr;
851 desc[1] = LLVMConstInt(ctx->i32,
852 S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);
853 desc[2] = LLVMConstInt(ctx->i32, 0xffffffff, 0);
854 desc[3] = LLVMConstInt(ctx->i32, rsrc3, false);
855
856 return ac_build_gather_values(&ctx->ac, desc, 4);
857 }
858
859 static LLVMValueRef si_nir_load_tcs_varyings(struct ac_shader_abi *abi,
860 LLVMTypeRef type,
861 LLVMValueRef vertex_index,
862 LLVMValueRef param_index,
863 unsigned const_index,
864 unsigned location,
865 unsigned driver_location,
866 unsigned component,
867 unsigned num_components,
868 bool is_patch,
869 bool is_compact,
870 bool load_input)
871 {
872 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
873 struct si_shader_info *info = &ctx->shader->selector->info;
874 LLVMValueRef dw_addr, stride;
875 ubyte name, index;
876
877 driver_location = driver_location / 4;
878
879 if (load_input) {
880 name = info->input_semantic_name[driver_location];
881 index = info->input_semantic_index[driver_location];
882 } else {
883 name = info->output_semantic_name[driver_location];
884 index = info->output_semantic_index[driver_location];
885 }
886
887 assert((name == TGSI_SEMANTIC_PATCH ||
888 name == TGSI_SEMANTIC_TESSINNER ||
889 name == TGSI_SEMANTIC_TESSOUTER) == is_patch);
890
891 if (load_input) {
892 stride = get_tcs_in_vertex_dw_stride(ctx);
893 dw_addr = get_tcs_in_current_patch_offset(ctx);
894 } else {
895 if (is_patch) {
896 stride = NULL;
897 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
898 } else {
899 stride = get_tcs_out_vertex_dw_stride(ctx);
900 dw_addr = get_tcs_out_current_patch_offset(ctx);
901 }
902 }
903
904 if (!param_index) {
905 param_index = LLVMConstInt(ctx->i32, const_index, 0);
906 }
907
908 dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
909 vertex_index, param_index,
910 name, index);
911
912 LLVMValueRef value[4];
913 for (unsigned i = 0; i < num_components; i++) {
914 unsigned offset = i;
915 if (llvm_type_is_64bit(ctx, type))
916 offset *= 2;
917
918 offset += component;
919 value[i + component] = lshs_lds_load(ctx, type, offset, dw_addr);
920 }
921
922 return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
923 }
924
925 LLVMValueRef si_nir_load_input_tes(struct ac_shader_abi *abi,
926 LLVMTypeRef type,
927 LLVMValueRef vertex_index,
928 LLVMValueRef param_index,
929 unsigned const_index,
930 unsigned location,
931 unsigned driver_location,
932 unsigned component,
933 unsigned num_components,
934 bool is_patch,
935 bool is_compact,
936 bool load_input)
937 {
938 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
939 struct si_shader_info *info = &ctx->shader->selector->info;
940 LLVMValueRef base, addr;
941
942 driver_location = driver_location / 4;
943 ubyte name = info->input_semantic_name[driver_location];
944 ubyte index = info->input_semantic_index[driver_location];
945
946 assert((name == TGSI_SEMANTIC_PATCH ||
947 name == TGSI_SEMANTIC_TESSINNER ||
948 name == TGSI_SEMANTIC_TESSOUTER) == is_patch);
949
950 base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
951
952 if (!param_index) {
953 param_index = LLVMConstInt(ctx->i32, const_index, 0);
954 }
955
956 addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
957 param_index,
958 name, index);
959
960 /* TODO: This will generate rather ordinary llvm code, although it
961 * should be easy for the optimiser to fix up. In future we might want
962 * to refactor buffer_load().
963 */
964 LLVMValueRef value[4];
965 for (unsigned i = 0; i < num_components; i++) {
966 unsigned offset = i;
967 if (llvm_type_is_64bit(ctx, type)) {
968 offset *= 2;
969 if (offset == 4) {
970 ubyte name = info->input_semantic_name[driver_location + 1];
971 ubyte index = info->input_semantic_index[driver_location + 1];
972 addr = get_tcs_tes_buffer_address_from_generic_indices(ctx,
973 vertex_index,
974 param_index,
975 name, index);
976 }
977
978 offset = offset % 4;
979 }
980
981 offset += component;
982 value[i + component] = buffer_load(ctx, type, offset,
983 ctx->tess_offchip_ring, base, addr, true);
984 }
985
986 return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
987 }
988
989 static void si_nir_store_output_tcs(struct ac_shader_abi *abi,
990 const struct nir_variable *var,
991 LLVMValueRef vertex_index,
992 LLVMValueRef param_index,
993 unsigned const_index,
994 LLVMValueRef src,
995 unsigned writemask)
996 {
997 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
998 struct si_shader_info *info = &ctx->shader->selector->info;
999 const unsigned component = var->data.location_frac;
1000 unsigned driver_location = var->data.driver_location;
1001 LLVMValueRef dw_addr, stride;
1002 LLVMValueRef buffer, base, addr;
1003 LLVMValueRef values[8];
1004 bool skip_lds_store;
1005 bool is_tess_factor = false, is_tess_inner = false;
1006
1007 driver_location = driver_location / 4;
1008 ubyte name = info->output_semantic_name[driver_location];
1009 ubyte index = info->output_semantic_index[driver_location];
1010
1011 bool is_const = !param_index;
1012 if (!param_index)
1013 param_index = LLVMConstInt(ctx->i32, const_index, 0);
1014
1015 const bool is_patch = var->data.patch ||
1016 var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
1017 var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;
1018
1019 assert((name == TGSI_SEMANTIC_PATCH ||
1020 name == TGSI_SEMANTIC_TESSINNER ||
1021 name == TGSI_SEMANTIC_TESSOUTER) == is_patch);
1022
1023 if (!is_patch) {
1024 stride = get_tcs_out_vertex_dw_stride(ctx);
1025 dw_addr = get_tcs_out_current_patch_offset(ctx);
1026 dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
1027 vertex_index, param_index,
1028 name, index);
1029
1030 skip_lds_store = !info->reads_pervertex_outputs;
1031 } else {
1032 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1033 dw_addr = get_dw_address_from_generic_indices(ctx, NULL, dw_addr,
1034 vertex_index, param_index,
1035 name, index);
1036
1037 skip_lds_store = !info->reads_perpatch_outputs;
1038
1039 if (is_const && const_index == 0) {
1040 int name = info->output_semantic_name[driver_location];
1041
1042 /* Always write tess factors into LDS for the TCS epilog. */
1043 if (name == TGSI_SEMANTIC_TESSINNER ||
1044 name == TGSI_SEMANTIC_TESSOUTER) {
1045 /* The epilog doesn't read LDS if invocation 0 defines tess factors. */
1046 skip_lds_store = !info->reads_tessfactor_outputs &&
1047 ctx->shader->selector->info.tessfactors_are_def_in_all_invocs;
1048 is_tess_factor = true;
1049 is_tess_inner = name == TGSI_SEMANTIC_TESSINNER;
1050 }
1051 }
1052 }
1053
1054 buffer = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
1055
1056 base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
1057
1058 addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
1059 param_index, name, index);
1060
1061 for (unsigned chan = component; chan < 8; chan++) {
1062 if (!(writemask & (1 << chan)))
1063 continue;
1064 LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
1065
1066 unsigned buffer_store_offset = chan % 4;
1067 if (chan == 4) {
1068 ubyte name = info->output_semantic_name[driver_location + 1];
1069 ubyte index = info->output_semantic_index[driver_location + 1];
1070 addr = get_tcs_tes_buffer_address_from_generic_indices(ctx,
1071 vertex_index,
1072 param_index,
1073 name, index);
1074 }
1075
1076 /* Skip LDS stores if there is no LDS read of this output. */
1077 if (!skip_lds_store)
1078 lshs_lds_store(ctx, chan, dw_addr, value);
1079
1080 value = ac_to_integer(&ctx->ac, value);
1081 values[chan] = value;
1082
1083 if (writemask != 0xF && !is_tess_factor) {
1084 ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
1085 addr, base,
1086 4 * buffer_store_offset,
1087 ac_glc);
1088 }
1089
1090 /* Write tess factors into VGPRs for the epilog. */
1091 if (is_tess_factor &&
1092 ctx->shader->selector->info.tessfactors_are_def_in_all_invocs) {
1093 if (!is_tess_inner) {
1094 LLVMBuildStore(ctx->ac.builder, value, /* outer */
1095 ctx->invoc0_tess_factors[chan]);
1096 } else if (chan < 2) {
1097 LLVMBuildStore(ctx->ac.builder, value, /* inner */
1098 ctx->invoc0_tess_factors[4 + chan]);
1099 }
1100 }
1101 }
1102
1103 if (writemask == 0xF && !is_tess_factor) {
1104 LLVMValueRef value = ac_build_gather_values(&ctx->ac,
1105 values, 4);
1106 ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, addr,
1107 base, 0, ac_glc);
1108 }
1109 }
1110
1111 static LLVMValueRef si_llvm_load_input_gs(struct ac_shader_abi *abi,
1112 unsigned input_index,
1113 unsigned vtx_offset_param,
1114 LLVMTypeRef type,
1115 unsigned swizzle)
1116 {
1117 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1118 struct si_shader *shader = ctx->shader;
1119 LLVMValueRef vtx_offset, soffset;
1120 struct si_shader_info *info = &shader->selector->info;
1121 unsigned semantic_name = info->input_semantic_name[input_index];
1122 unsigned semantic_index = info->input_semantic_index[input_index];
1123 unsigned param;
1124 LLVMValueRef value;
1125
1126 param = si_shader_io_get_unique_index(semantic_name, semantic_index, false);
1127
1128 /* GFX9 has the ESGS ring in LDS. */
1129 if (ctx->screen->info.chip_class >= GFX9) {
1130 unsigned index = vtx_offset_param;
1131
1132 switch (index / 2) {
1133 case 0:
1134 vtx_offset = si_unpack_param(ctx, ctx->gs_vtx01_offset,
1135 index % 2 ? 16 : 0, 16);
1136 break;
1137 case 1:
1138 vtx_offset = si_unpack_param(ctx, ctx->gs_vtx23_offset,
1139 index % 2 ? 16 : 0, 16);
1140 break;
1141 case 2:
1142 vtx_offset = si_unpack_param(ctx, ctx->gs_vtx45_offset,
1143 index % 2 ? 16 : 0, 16);
1144 break;
1145 default:
1146 assert(0);
1147 return NULL;
1148 }
1149
1150 unsigned offset = param * 4 + swizzle;
1151 vtx_offset = LLVMBuildAdd(ctx->ac.builder, vtx_offset,
1152 LLVMConstInt(ctx->i32, offset, false), "");
1153
1154 LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->esgs_ring, vtx_offset);
1155 LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, ptr, "");
1156 if (llvm_type_is_64bit(ctx, type)) {
1157 ptr = LLVMBuildGEP(ctx->ac.builder, ptr,
1158 &ctx->ac.i32_1, 1, "");
1159 LLVMValueRef values[2] = {
1160 value,
1161 LLVMBuildLoad(ctx->ac.builder, ptr, "")
1162 };
1163 value = ac_build_gather_values(&ctx->ac, values, 2);
1164 }
1165 return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
1166 }
1167
1168 /* GFX6: input load from the ESGS ring in memory. */
1169 if (swizzle == ~0) {
1170 LLVMValueRef values[4];
1171 unsigned chan;
1172 for (chan = 0; chan < 4; chan++) {
1173 values[chan] = si_llvm_load_input_gs(abi, input_index, vtx_offset_param,
1174 type, chan);
1175 }
1176 return ac_build_gather_values(&ctx->ac, values, 4);
1177 }
1178
1179 /* Get the vertex offset parameter on GFX6. */
1180 LLVMValueRef gs_vtx_offset = ac_get_arg(&ctx->ac,
1181 ctx->gs_vtx_offset[vtx_offset_param]);
1182
1183 vtx_offset = LLVMBuildMul(ctx->ac.builder, gs_vtx_offset,
1184 LLVMConstInt(ctx->i32, 4, 0), "");
1185
1186 soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle) * 256, 0);
1187
1188 value = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1, ctx->i32_0,
1189 vtx_offset, soffset, 0, ac_glc, true, false);
1190 if (llvm_type_is_64bit(ctx, type)) {
1191 LLVMValueRef value2;
1192 soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle + 1) * 256, 0);
1193
1194 value2 = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1,
1195 ctx->i32_0, vtx_offset, soffset,
1196 0, ac_glc, true, false);
1197 return si_build_gather_64bit(ctx, type, value, value2);
1198 }
1199 return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
1200 }
1201
1202 static LLVMValueRef si_nir_load_input_gs(struct ac_shader_abi *abi,
1203 unsigned location,
1204 unsigned driver_location,
1205 unsigned component,
1206 unsigned num_components,
1207 unsigned vertex_index,
1208 unsigned const_index,
1209 LLVMTypeRef type)
1210 {
1211 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1212
1213 LLVMValueRef value[4];
1214 for (unsigned i = 0; i < num_components; i++) {
1215 unsigned offset = i;
1216 if (llvm_type_is_64bit(ctx, type))
1217 offset *= 2;
1218
1219 offset += component;
1220 value[i + component] = si_llvm_load_input_gs(&ctx->abi, driver_location / 4 + const_index,
1221 vertex_index, type, offset);
1222 }
1223
1224 return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
1225 }
1226
1227 static LLVMValueRef get_base_vertex(struct ac_shader_abi *abi)
1228 {
1229 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1230
1231 /* For non-indexed draws, the base vertex set by the driver
1232 * (for direct draws) or the CP (for indirect draws) is the
1233 * first vertex ID, but GLSL expects 0 to be returned.
1234 */
1235 LLVMValueRef vs_state = ac_get_arg(&ctx->ac,
1236 ctx->vs_state_bits);
1237 LLVMValueRef indexed;
1238
1239 indexed = LLVMBuildLShr(ctx->ac.builder, vs_state, ctx->i32_1, "");
1240 indexed = LLVMBuildTrunc(ctx->ac.builder, indexed, ctx->i1, "");
1241
1242 return LLVMBuildSelect(ctx->ac.builder, indexed,
1243 ac_get_arg(&ctx->ac, ctx->args.base_vertex),
1244 ctx->i32_0, "");
1245 }
1246
1247 static LLVMValueRef get_block_size(struct ac_shader_abi *abi)
1248 {
1249 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1250
1251 LLVMValueRef values[3];
1252 LLVMValueRef result;
1253 unsigned i;
1254 unsigned *properties = ctx->shader->selector->info.properties;
1255
1256 if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
1257 unsigned sizes[3] = {
1258 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
1259 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
1260 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
1261 };
1262
1263 for (i = 0; i < 3; ++i)
1264 values[i] = LLVMConstInt(ctx->i32, sizes[i], 0);
1265
1266 result = ac_build_gather_values(&ctx->ac, values, 3);
1267 } else {
1268 result = ac_get_arg(&ctx->ac, ctx->block_size);
1269 }
1270
1271 return result;
1272 }
1273
1274 static LLVMValueRef si_load_tess_coord(struct ac_shader_abi *abi)
1275 {
1276 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1277 LLVMValueRef coord[4] = {
1278 ac_get_arg(&ctx->ac, ctx->tes_u),
1279 ac_get_arg(&ctx->ac, ctx->tes_v),
1280 ctx->ac.f32_0,
1281 ctx->ac.f32_0
1282 };
1283
1284 /* For triangles, the vector should be (u, v, 1-u-v). */
1285 if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
1286 PIPE_PRIM_TRIANGLES) {
1287 coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
1288 LLVMBuildFAdd(ctx->ac.builder,
1289 coord[0], coord[1], ""), "");
1290 }
1291 return ac_build_gather_values(&ctx->ac, coord, 4);
1292 }
1293
1294 static LLVMValueRef load_tess_level(struct si_shader_context *ctx,
1295 unsigned semantic_name)
1296 {
1297 LLVMValueRef base, addr;
1298
1299 int param = si_shader_io_get_unique_index_patch(semantic_name, 0);
1300
1301 base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
1302 addr = get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), NULL,
1303 LLVMConstInt(ctx->i32, param, 0));
1304
1305 return buffer_load(ctx, ctx->f32,
1306 ~0, ctx->tess_offchip_ring, base, addr, true);
1307
1308 }
1309
1310 static LLVMValueRef load_tess_level_default(struct si_shader_context *ctx,
1311 unsigned semantic_name)
1312 {
1313 LLVMValueRef buf, slot, val[4];
1314 int i, offset;
1315
1316 slot = LLVMConstInt(ctx->i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
1317 buf = ac_get_arg(&ctx->ac, ctx->rw_buffers);
1318 buf = ac_build_load_to_sgpr(&ctx->ac, buf, slot);
1319 offset = semantic_name == TGSI_SEMANTIC_TESS_DEFAULT_INNER_LEVEL ? 4 : 0;
1320
1321 for (i = 0; i < 4; i++)
1322 val[i] = si_buffer_load_const(ctx, buf,
1323 LLVMConstInt(ctx->i32, (offset + i) * 4, 0));
1324 return ac_build_gather_values(&ctx->ac, val, 4);
1325 }
1326
1327 static LLVMValueRef si_load_tess_level(struct ac_shader_abi *abi,
1328 unsigned varying_id,
1329 bool load_default_state)
1330 {
1331 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1332 unsigned semantic_name;
1333
1334 if (load_default_state) {
1335 switch (varying_id) {
1336 case VARYING_SLOT_TESS_LEVEL_INNER:
1337 semantic_name = TGSI_SEMANTIC_TESS_DEFAULT_INNER_LEVEL;
1338 break;
1339 case VARYING_SLOT_TESS_LEVEL_OUTER:
1340 semantic_name = TGSI_SEMANTIC_TESS_DEFAULT_OUTER_LEVEL;
1341 break;
1342 default:
1343 unreachable("unknown tess level");
1344 }
1345 return load_tess_level_default(ctx, semantic_name);
1346 }
1347
1348 switch (varying_id) {
1349 case VARYING_SLOT_TESS_LEVEL_INNER:
1350 semantic_name = TGSI_SEMANTIC_TESSINNER;
1351 break;
1352 case VARYING_SLOT_TESS_LEVEL_OUTER:
1353 semantic_name = TGSI_SEMANTIC_TESSOUTER;
1354 break;
1355 default:
1356 unreachable("unknown tess level");
1357 }
1358
1359 return load_tess_level(ctx, semantic_name);
1360
1361 }
1362
1363 static LLVMValueRef si_load_patch_vertices_in(struct ac_shader_abi *abi)
1364 {
1365 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1366 if (ctx->type == PIPE_SHADER_TESS_CTRL)
1367 return si_unpack_param(ctx, ctx->tcs_out_lds_layout, 13, 6);
1368 else if (ctx->type == PIPE_SHADER_TESS_EVAL)
1369 return get_num_tcs_out_vertices(ctx);
1370 else
1371 unreachable("invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
1372 }
1373
1374 void si_declare_compute_memory(struct si_shader_context *ctx)
1375 {
1376 struct si_shader_selector *sel = ctx->shader->selector;
1377 unsigned lds_size = sel->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE];
1378
1379 LLVMTypeRef i8p = LLVMPointerType(ctx->i8, AC_ADDR_SPACE_LDS);
1380 LLVMValueRef var;
1381
1382 assert(!ctx->ac.lds);
1383
1384 var = LLVMAddGlobalInAddressSpace(ctx->ac.module,
1385 LLVMArrayType(ctx->i8, lds_size),
1386 "compute_lds",
1387 AC_ADDR_SPACE_LDS);
1388 LLVMSetAlignment(var, 64 * 1024);
1389
1390 ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, var, i8p, "");
1391 }
1392
1393 static LLVMValueRef load_const_buffer_desc_fast_path(struct si_shader_context *ctx)
1394 {
1395 LLVMValueRef ptr =
1396 ac_get_arg(&ctx->ac, ctx->const_and_shader_buffers);
1397 struct si_shader_selector *sel = ctx->shader->selector;
1398
1399 /* Do the bounds checking with a descriptor, because
1400 * doing computation and manual bounds checking of 64-bit
1401 * addresses generates horrible VALU code with very high
1402 * VGPR usage and very low SIMD occupancy.
1403 */
1404 ptr = LLVMBuildPtrToInt(ctx->ac.builder, ptr, ctx->ac.intptr, "");
1405
1406 LLVMValueRef desc0, desc1;
1407 desc0 = ptr;
1408 desc1 = LLVMConstInt(ctx->i32,
1409 S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);
1410
1411 uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1412 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1413 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1414 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
1415
1416 if (ctx->screen->info.chip_class >= GFX10)
1417 rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
1418 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
1419 S_008F0C_RESOURCE_LEVEL(1);
1420 else
1421 rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1422 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1423
1424 LLVMValueRef desc_elems[] = {
1425 desc0,
1426 desc1,
1427 LLVMConstInt(ctx->i32, sel->info.constbuf0_num_slots * 16, 0),
1428 LLVMConstInt(ctx->i32, rsrc3, false)
1429 };
1430
1431 return ac_build_gather_values(&ctx->ac, desc_elems, 4);
1432 }
1433
1434 static LLVMValueRef load_ubo(struct ac_shader_abi *abi, LLVMValueRef index)
1435 {
1436 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1437 struct si_shader_selector *sel = ctx->shader->selector;
1438
1439 LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->const_and_shader_buffers);
1440
1441 if (sel->info.const_buffers_declared == 1 &&
1442 sel->info.shader_buffers_declared == 0) {
1443 return load_const_buffer_desc_fast_path(ctx);
1444 }
1445
1446 index = si_llvm_bound_index(ctx, index, ctx->num_const_buffers);
1447 index = LLVMBuildAdd(ctx->ac.builder, index,
1448 LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");
1449
1450 return ac_build_load_to_sgpr(&ctx->ac, ptr, index);
1451 }
1452
1453 static LLVMValueRef
1454 load_ssbo(struct ac_shader_abi *abi, LLVMValueRef index, bool write)
1455 {
1456 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
1457 LLVMValueRef rsrc_ptr = ac_get_arg(&ctx->ac,
1458 ctx->const_and_shader_buffers);
1459
1460 index = si_llvm_bound_index(ctx, index, ctx->num_shader_buffers);
1461 index = LLVMBuildSub(ctx->ac.builder,
1462 LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS - 1, 0),
1463 index, "");
1464
1465 return ac_build_load_to_sgpr(&ctx->ac, rsrc_ptr, index);
1466 }
1467
1468 /* Initialize arguments for the shader export intrinsic */
1469 static void si_llvm_init_vs_export_args(struct si_shader_context *ctx,
1470 LLVMValueRef *values,
1471 unsigned target,
1472 struct ac_export_args *args)
1473 {
1474 args->enabled_channels = 0xf; /* writemask - default is 0xf */
1475 args->valid_mask = 0; /* Specify whether the EXEC mask represents the valid mask */
1476 args->done = 0; /* Specify whether this is the last export */
1477 args->target = target; /* Specify the target we are exporting */
1478 args->compr = false;
1479
1480 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
1481 }
1482
1483 static void si_llvm_emit_clipvertex(struct si_shader_context *ctx,
1484 struct ac_export_args *pos, LLVMValueRef *out_elts)
1485 {
1486 unsigned reg_index;
1487 unsigned chan;
1488 unsigned const_chan;
1489 LLVMValueRef base_elt;
1490 LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
1491 LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
1492 SI_VS_CONST_CLIP_PLANES, 0);
1493 LLVMValueRef const_resource = ac_build_load_to_sgpr(&ctx->ac, ptr, constbuf_index);
1494
1495 for (reg_index = 0; reg_index < 2; reg_index ++) {
1496 struct ac_export_args *args = &pos[2 + reg_index];
1497
1498 args->out[0] =
1499 args->out[1] =
1500 args->out[2] =
1501 args->out[3] = LLVMConstReal(ctx->f32, 0.0f);
1502
1503 /* Compute dot products of position and user clip plane vectors */
1504 for (chan = 0; chan < 4; chan++) {
1505 for (const_chan = 0; const_chan < 4; const_chan++) {
1506 LLVMValueRef addr =
1507 LLVMConstInt(ctx->i32, ((reg_index * 4 + chan) * 4 +
1508 const_chan) * 4, 0);
1509 base_elt = si_buffer_load_const(ctx, const_resource,
1510 addr);
1511 args->out[chan] = ac_build_fmad(&ctx->ac, base_elt,
1512 out_elts[const_chan], args->out[chan]);
1513 }
1514 }
1515
1516 args->enabled_channels = 0xf;
1517 args->valid_mask = 0;
1518 args->done = 0;
1519 args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
1520 args->compr = 0;
1521 }
1522 }
1523
1524 static void si_dump_streamout(struct pipe_stream_output_info *so)
1525 {
1526 unsigned i;
1527
1528 if (so->num_outputs)
1529 fprintf(stderr, "STREAMOUT\n");
1530
1531 for (i = 0; i < so->num_outputs; i++) {
1532 unsigned mask = ((1 << so->output[i].num_components) - 1) <<
1533 so->output[i].start_component;
1534 fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
1535 i, so->output[i].output_buffer,
1536 so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
1537 so->output[i].register_index,
1538 mask & 1 ? "x" : "",
1539 mask & 2 ? "y" : "",
1540 mask & 4 ? "z" : "",
1541 mask & 8 ? "w" : "");
1542 }
1543 }
1544
1545 void si_emit_streamout_output(struct si_shader_context *ctx,
1546 LLVMValueRef const *so_buffers,
1547 LLVMValueRef const *so_write_offsets,
1548 struct pipe_stream_output *stream_out,
1549 struct si_shader_output_values *shader_out)
1550 {
1551 unsigned buf_idx = stream_out->output_buffer;
1552 unsigned start = stream_out->start_component;
1553 unsigned num_comps = stream_out->num_components;
1554 LLVMValueRef out[4];
1555
1556 assert(num_comps && num_comps <= 4);
1557 if (!num_comps || num_comps > 4)
1558 return;
1559
1560 /* Load the output as int. */
1561 for (int j = 0; j < num_comps; j++) {
1562 assert(stream_out->stream == shader_out->vertex_stream[start + j]);
1563
1564 out[j] = ac_to_integer(&ctx->ac, shader_out->values[start + j]);
1565 }
1566
1567 /* Pack the output. */
1568 LLVMValueRef vdata = NULL;
1569
1570 switch (num_comps) {
1571 case 1: /* as i32 */
1572 vdata = out[0];
1573 break;
1574 case 2: /* as v2i32 */
1575 case 3: /* as v3i32 */
1576 if (ac_has_vec3_support(ctx->screen->info.chip_class, false)) {
1577 vdata = ac_build_gather_values(&ctx->ac, out, num_comps);
1578 break;
1579 }
1580 /* as v4i32 (aligned to 4) */
1581 out[3] = LLVMGetUndef(ctx->i32);
1582 /* fall through */
1583 case 4: /* as v4i32 */
1584 vdata = ac_build_gather_values(&ctx->ac, out, util_next_power_of_two(num_comps));
1585 break;
1586 }
1587
1588 ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
1589 vdata, num_comps,
1590 so_write_offsets[buf_idx],
1591 ctx->i32_0,
1592 stream_out->dst_offset * 4, ac_glc | ac_slc);
1593 }
1594
1595 /**
1596 * Write streamout data to buffers for vertex stream @p stream (different
1597 * vertex streams can occur for GS copy shaders).
1598 */
1599 static void si_llvm_emit_streamout(struct si_shader_context *ctx,
1600 struct si_shader_output_values *outputs,
1601 unsigned noutput, unsigned stream)
1602 {
1603 struct si_shader_selector *sel = ctx->shader->selector;
1604 struct pipe_stream_output_info *so = &sel->so;
1605 LLVMBuilderRef builder = ctx->ac.builder;
1606 int i;
1607
1608 /* Get bits [22:16], i.e. (so_param >> 16) & 127; */
1609 LLVMValueRef so_vtx_count =
1610 si_unpack_param(ctx, ctx->streamout_config, 16, 7);
1611
1612 LLVMValueRef tid = ac_get_thread_id(&ctx->ac);
1613
1614 /* can_emit = tid < so_vtx_count; */
1615 LLVMValueRef can_emit =
1616 LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");
1617
1618 /* Emit the streamout code conditionally. This actually avoids
1619 * out-of-bounds buffer access. The hw tells us via the SGPR
1620 * (so_vtx_count) which threads are allowed to emit streamout data. */
1621 ac_build_ifcc(&ctx->ac, can_emit, 6501);
1622 {
1623 /* The buffer offset is computed as follows:
1624 * ByteOffset = streamout_offset[buffer_id]*4 +
1625 * (streamout_write_index + thread_id)*stride[buffer_id] +
1626 * attrib_offset
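 *
 * For example (made-up numbers): stride = 16 bytes,
 * streamout_offset = 4, write_index = 100, thread_id = 3 and
 * attrib_offset = 8 bytes gives 4*4 + (100 + 3)*16 + 8 = 1672 bytes.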
1627 */
1628
1629 LLVMValueRef so_write_index =
1630 ac_get_arg(&ctx->ac,
1631 ctx->streamout_write_index);
1632
1633 /* Compute (streamout_write_index + thread_id). */
1634 so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");
1635
1636 /* Load the descriptor and compute the write offset for each
1637 * enabled buffer. */
1638 LLVMValueRef so_write_offset[4] = {};
1639 LLVMValueRef so_buffers[4];
1640 LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac,
1641 ctx->rw_buffers);
1642
1643 for (i = 0; i < 4; i++) {
1644 if (!so->stride[i])
1645 continue;
1646
1647 LLVMValueRef offset = LLVMConstInt(ctx->i32,
1648 SI_VS_STREAMOUT_BUF0 + i, 0);
1649
1650 so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
1651
1652 LLVMValueRef so_offset = ac_get_arg(&ctx->ac,
1653 ctx->streamout_offset[i]);
1654 so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");
1655
1656 so_write_offset[i] = ac_build_imad(&ctx->ac, so_write_index,
1657 LLVMConstInt(ctx->i32, so->stride[i]*4, 0),
1658 so_offset);
1659 }
1660
1661 /* Write streamout data. */
1662 for (i = 0; i < so->num_outputs; i++) {
1663 unsigned reg = so->output[i].register_index;
1664
1665 if (reg >= noutput)
1666 continue;
1667
1668 if (stream != so->output[i].stream)
1669 continue;
1670
1671 si_emit_streamout_output(ctx, so_buffers, so_write_offset,
1672 &so->output[i], &outputs[reg]);
1673 }
1674 }
1675 ac_build_endif(&ctx->ac, 6501);
1676 }
1677
1678 static void si_export_param(struct si_shader_context *ctx, unsigned index,
1679 LLVMValueRef *values)
1680 {
1681 struct ac_export_args args;
1682
1683 si_llvm_init_vs_export_args(ctx, values,
1684 V_008DFC_SQ_EXP_PARAM + index, &args);
1685 ac_build_export(&ctx->ac, &args);
1686 }
1687
1688 static void si_build_param_exports(struct si_shader_context *ctx,
1689 struct si_shader_output_values *outputs,
1690 unsigned noutput)
1691 {
1692 struct si_shader *shader = ctx->shader;
1693 unsigned param_count = 0;
1694
1695 for (unsigned i = 0; i < noutput; i++) {
1696 unsigned semantic_name = outputs[i].semantic_name;
1697 unsigned semantic_index = outputs[i].semantic_index;
1698
1699 if (outputs[i].vertex_stream[0] != 0 &&
1700 outputs[i].vertex_stream[1] != 0 &&
1701 outputs[i].vertex_stream[2] != 0 &&
1702 outputs[i].vertex_stream[3] != 0)
1703 continue;
1704
1705 switch (semantic_name) {
1706 case TGSI_SEMANTIC_LAYER:
1707 case TGSI_SEMANTIC_VIEWPORT_INDEX:
1708 case TGSI_SEMANTIC_CLIPDIST:
1709 case TGSI_SEMANTIC_COLOR:
1710 case TGSI_SEMANTIC_BCOLOR:
1711 case TGSI_SEMANTIC_PRIMID:
1712 case TGSI_SEMANTIC_FOG:
1713 case TGSI_SEMANTIC_TEXCOORD:
1714 case TGSI_SEMANTIC_GENERIC:
1715 break;
1716 default:
1717 continue;
1718 }
1719
1720 if ((semantic_name != TGSI_SEMANTIC_GENERIC ||
1721 semantic_index < SI_MAX_IO_GENERIC) &&
1722 shader->key.opt.kill_outputs &
1723 (1ull << si_shader_io_get_unique_index(semantic_name,
1724 semantic_index, true)))
1725 continue;
1726
1727 si_export_param(ctx, param_count, outputs[i].values);
1728
1729 assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
1730 shader->info.vs_output_param_offset[i] = param_count++;
1731 }
1732
1733 shader->info.nr_param_exports = param_count;
1734 }
1735
1736 /**
1737 * Vertex color clamping.
1738 *
1739 * The clamping state is a constant loaded from a user data SGPR, and
1740 * an IF statement clamps all colors when the constant
1741 * is true.
1742 */
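/* A rough sketch of what this generates (pseudocode, not emitted verbatim;
 * ac_build_clamp() clamps each channel to [0, 1]):
 *
 *   if (vs_state_bits & 1) {
 *      for each COLOR/BCOLOR output o:
 *         o.xyzw = clamp(o.xyzw, 0.0, 1.0);
 *   }
 */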
1743 static void si_vertex_color_clamping(struct si_shader_context *ctx,
1744 struct si_shader_output_values *outputs,
1745 unsigned noutput)
1746 {
1747 LLVMValueRef addr[SI_MAX_VS_OUTPUTS][4];
1748 bool has_colors = false;
1749
1750 /* Store original colors to alloca variables. */
1751 for (unsigned i = 0; i < noutput; i++) {
1752 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
1753 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
1754 continue;
1755
1756 for (unsigned j = 0; j < 4; j++) {
1757 addr[i][j] = ac_build_alloca_undef(&ctx->ac, ctx->f32, "");
1758 LLVMBuildStore(ctx->ac.builder, outputs[i].values[j], addr[i][j]);
1759 }
1760 has_colors = true;
1761 }
1762
1763 if (!has_colors)
1764 return;
1765
1766 /* The state is in the first bit of the user SGPR. */
1767 LLVMValueRef cond = ac_get_arg(&ctx->ac, ctx->vs_state_bits);
1768 cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->i1, "");
1769
1770 ac_build_ifcc(&ctx->ac, cond, 6502);
1771
1772 /* Store clamped colors to alloca variables within the conditional block. */
1773 for (unsigned i = 0; i < noutput; i++) {
1774 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
1775 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
1776 continue;
1777
1778 for (unsigned j = 0; j < 4; j++) {
1779 LLVMBuildStore(ctx->ac.builder,
1780 ac_build_clamp(&ctx->ac, outputs[i].values[j]),
1781 addr[i][j]);
1782 }
1783 }
1784 ac_build_endif(&ctx->ac, 6502);
1785
1786 /* Load clamped colors */
1787 for (unsigned i = 0; i < noutput; i++) {
1788 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
1789 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
1790 continue;
1791
1792 for (unsigned j = 0; j < 4; j++) {
1793 outputs[i].values[j] =
1794 LLVMBuildLoad(ctx->ac.builder, addr[i][j], "");
1795 }
1796 }
1797 }
1798
1799 /* Generate export instructions for hardware VS shader stage or NGG GS stage
1800 * (position and parameter data only).
1801 */
1802 void si_llvm_export_vs(struct si_shader_context *ctx,
1803 struct si_shader_output_values *outputs,
1804 unsigned noutput)
1805 {
1806 struct si_shader *shader = ctx->shader;
1807 struct ac_export_args pos_args[4] = {};
1808 LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
1809 unsigned pos_idx;
1810 int i;
1811
1812 si_vertex_color_clamping(ctx, outputs, noutput);
1813
1814 /* Build position exports. */
1815 for (i = 0; i < noutput; i++) {
1816 switch (outputs[i].semantic_name) {
1817 case TGSI_SEMANTIC_POSITION:
1818 si_llvm_init_vs_export_args(ctx, outputs[i].values,
1819 V_008DFC_SQ_EXP_POS, &pos_args[0]);
1820 break;
1821 case TGSI_SEMANTIC_PSIZE:
1822 psize_value = outputs[i].values[0];
1823 break;
1824 case TGSI_SEMANTIC_LAYER:
1825 layer_value = outputs[i].values[0];
1826 break;
1827 case TGSI_SEMANTIC_VIEWPORT_INDEX:
1828 viewport_index_value = outputs[i].values[0];
1829 break;
1830 case TGSI_SEMANTIC_EDGEFLAG:
1831 edgeflag_value = outputs[i].values[0];
1832 break;
1833 case TGSI_SEMANTIC_CLIPDIST:
1834 if (!shader->key.opt.clip_disable) {
1835 unsigned index = 2 + outputs[i].semantic_index;
1836 si_llvm_init_vs_export_args(ctx, outputs[i].values,
1837 V_008DFC_SQ_EXP_POS + index,
1838 &pos_args[index]);
1839 }
1840 break;
1841 case TGSI_SEMANTIC_CLIPVERTEX:
1842 if (!shader->key.opt.clip_disable) {
1843 si_llvm_emit_clipvertex(ctx, pos_args,
1844 outputs[i].values);
1845 }
1846 break;
1847 }
1848 }
1849
1850 /* We need to add the position output manually if it's missing. */
1851 if (!pos_args[0].out[0]) {
1852 pos_args[0].enabled_channels = 0xf; /* writemask */
1853 pos_args[0].valid_mask = 0; /* EXEC mask */
1854 pos_args[0].done = 0; /* last export? */
1855 pos_args[0].target = V_008DFC_SQ_EXP_POS;
1856 pos_args[0].compr = 0; /* COMPR flag */
1857 pos_args[0].out[0] = ctx->ac.f32_0; /* X */
1858 pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
1859 pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
1860 pos_args[0].out[3] = ctx->ac.f32_1; /* W */
1861 }
1862
1863 bool pos_writes_edgeflag = shader->selector->info.writes_edgeflag &&
1864 !shader->key.as_ngg;
1865
1866 /* Write the misc vector (point size, edgeflag, layer, viewport). */
1867 if (shader->selector->info.writes_psize ||
1868 pos_writes_edgeflag ||
1869 shader->selector->info.writes_viewport_index ||
1870 shader->selector->info.writes_layer) {
1871 pos_args[1].enabled_channels = shader->selector->info.writes_psize |
1872 (pos_writes_edgeflag << 1) |
1873 (shader->selector->info.writes_layer << 2);
1874
1875 pos_args[1].valid_mask = 0; /* EXEC mask */
1876 pos_args[1].done = 0; /* last export? */
1877 pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
1878 pos_args[1].compr = 0; /* COMPR flag */
1879 pos_args[1].out[0] = ctx->ac.f32_0; /* X */
1880 pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
1881 pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
1882 pos_args[1].out[3] = ctx->ac.f32_0; /* W */
1883
1884 if (shader->selector->info.writes_psize)
1885 pos_args[1].out[0] = psize_value;
1886
1887 if (pos_writes_edgeflag) {
1888 /* The output is a float, but the hw expects an integer
1889 * with the first bit containing the edge flag. */
1890 edgeflag_value = LLVMBuildFPToUI(ctx->ac.builder,
1891 edgeflag_value,
1892 ctx->i32, "");
1893 edgeflag_value = ac_build_umin(&ctx->ac,
1894 edgeflag_value,
1895 ctx->i32_1);
1896
1897 /* The LLVM intrinsic expects a float. */
1898 pos_args[1].out[1] = ac_to_float(&ctx->ac, edgeflag_value);
1899 }
1900
1901 if (ctx->screen->info.chip_class >= GFX9) {
1902 /* GFX9 has the layer in out.z[10:0] and the viewport
1903 * index in out.z[19:16].
1904 */
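/* Illustrative packing: layer = 2 with viewport index = 3 would yield
 * out.z = 0x00030002. */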
1905 if (shader->selector->info.writes_layer)
1906 pos_args[1].out[2] = layer_value;
1907
1908 if (shader->selector->info.writes_viewport_index) {
1909 LLVMValueRef v = viewport_index_value;
1910
1911 v = ac_to_integer(&ctx->ac, v);
1912 v = LLVMBuildShl(ctx->ac.builder, v,
1913 LLVMConstInt(ctx->i32, 16, 0), "");
1914 v = LLVMBuildOr(ctx->ac.builder, v,
1915 ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
1916 pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
1917 pos_args[1].enabled_channels |= 1 << 2;
1918 }
1919 } else {
1920 if (shader->selector->info.writes_layer)
1921 pos_args[1].out[2] = layer_value;
1922
1923 if (shader->selector->info.writes_viewport_index) {
1924 pos_args[1].out[3] = viewport_index_value;
1925 pos_args[1].enabled_channels |= 1 << 3;
1926 }
1927 }
1928 }
1929
1930 for (i = 0; i < 4; i++)
1931 if (pos_args[i].out[0])
1932 shader->info.nr_pos_exports++;
1933
1934 /* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
1935 * Setting valid_mask=1 prevents it and has no other effect.
1936 */
1937 if (ctx->screen->info.family == CHIP_NAVI10 ||
1938 ctx->screen->info.family == CHIP_NAVI12 ||
1939 ctx->screen->info.family == CHIP_NAVI14)
1940 pos_args[0].valid_mask = 1;
1941
1942 pos_idx = 0;
1943 for (i = 0; i < 4; i++) {
1944 if (!pos_args[i].out[0])
1945 continue;
1946
1947 /* Specify the target we are exporting */
1948 pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
1949
1950 if (pos_idx == shader->info.nr_pos_exports)
1951 /* Specify that this is the last export */
1952 pos_args[i].done = 1;
1953
1954 ac_build_export(&ctx->ac, &pos_args[i]);
1955 }
1956
1957 /* Build parameter exports. */
1958 si_build_param_exports(ctx, outputs, noutput);
1959 }
1960
1961 /**
1962 * Forward all outputs from the vertex shader to the TES. This is only used
1963 * for the fixed function TCS.
1964 */
1965 static void si_copy_tcs_inputs(struct si_shader_context *ctx)
1966 {
1967 LLVMValueRef invocation_id, buffer, buffer_offset;
1968 LLVMValueRef lds_vertex_stride, lds_base;
1969 uint64_t inputs;
1970
1971 invocation_id = si_unpack_param(ctx, ctx->args.tcs_rel_ids, 8, 5);
1972 buffer = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
1973 buffer_offset = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
1974
1975 lds_vertex_stride = get_tcs_in_vertex_dw_stride(ctx);
1976 lds_base = get_tcs_in_current_patch_offset(ctx);
1977 lds_base = ac_build_imad(&ctx->ac, invocation_id, lds_vertex_stride,
1978 lds_base);
1979
1980 inputs = ctx->shader->key.mono.u.ff_tcs_inputs_to_copy;
1981 while (inputs) {
1982 unsigned i = u_bit_scan64(&inputs);
1983
1984 LLVMValueRef lds_ptr = LLVMBuildAdd(ctx->ac.builder, lds_base,
1985 LLVMConstInt(ctx->i32, 4 * i, 0),
1986 "");
1987
1988 LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
1989 get_rel_patch_id(ctx),
1990 invocation_id,
1991 LLVMConstInt(ctx->i32, i, 0));
1992
1993 LLVMValueRef value = lshs_lds_load(ctx, ctx->ac.i32, ~0, lds_ptr);
1994
1995 ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buffer_addr,
1996 buffer_offset, 0, ac_glc);
1997 }
1998 }
1999
2000 static void si_write_tess_factors(struct si_shader_context *ctx,
2001 LLVMValueRef rel_patch_id,
2002 LLVMValueRef invocation_id,
2003 LLVMValueRef tcs_out_current_patch_data_offset,
2004 LLVMValueRef invoc0_tf_outer[4],
2005 LLVMValueRef invoc0_tf_inner[2])
2006 {
2007 struct si_shader *shader = ctx->shader;
2008 unsigned tess_inner_index, tess_outer_index;
2009 LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
2010 LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
2011 unsigned stride, outer_comps, inner_comps, i, offset;
2012
2013 /* Add a barrier before loading tess factors from LDS. */
2014 if (!shader->key.part.tcs.epilog.invoc0_tess_factors_are_def)
2015 si_llvm_emit_barrier(ctx);
2016
2017 /* Do this only for invocation 0, because the tess levels are per-patch,
2018 * not per-vertex.
2019 *
2020 * The hardware can't jump over this block, because invocation 0 always
2021 * executes it; it should at least mask out the loads and stores for other invocations.
2022 */
2023 ac_build_ifcc(&ctx->ac,
2024 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2025 invocation_id, ctx->i32_0, ""), 6503);
2026
2027 /* Determine the layout of one tess factor element in the buffer. */
2028 switch (shader->key.part.tcs.epilog.prim_mode) {
2029 case PIPE_PRIM_LINES:
2030 stride = 2; /* 2 dwords, 1 vec2 store */
2031 outer_comps = 2;
2032 inner_comps = 0;
2033 break;
2034 case PIPE_PRIM_TRIANGLES:
2035 stride = 4; /* 4 dwords, 1 vec4 store */
2036 outer_comps = 3;
2037 inner_comps = 1;
2038 break;
2039 case PIPE_PRIM_QUADS:
2040 stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
2041 outer_comps = 4;
2042 inner_comps = 2;
2043 break;
2044 default:
2045 assert(0);
2046 return;
2047 }
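/* The resulting per-patch layout stores the outer factors first, then the
 * inner ones; e.g. for quads, dwords [0..3] hold outer[0..3] and dwords
 * [4..5] hold inner[0..1], giving the 24-byte stride used below. */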
2048
2049 for (i = 0; i < 4; i++) {
2050 inner[i] = LLVMGetUndef(ctx->i32);
2051 outer[i] = LLVMGetUndef(ctx->i32);
2052 }
2053
2054 if (shader->key.part.tcs.epilog.invoc0_tess_factors_are_def) {
2055 /* Tess factors are in VGPRs. */
2056 for (i = 0; i < outer_comps; i++)
2057 outer[i] = out[i] = invoc0_tf_outer[i];
2058 for (i = 0; i < inner_comps; i++)
2059 inner[i] = out[outer_comps+i] = invoc0_tf_inner[i];
2060 } else {
2061 /* Load tess_inner and tess_outer from LDS.
2062 * Any invocation can write them, so we can't get them from a temporary.
2063 */
2064 tess_inner_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0);
2065 tess_outer_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0);
2066
2067 lds_base = tcs_out_current_patch_data_offset;
2068 lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
2069 LLVMConstInt(ctx->i32,
2070 tess_inner_index * 4, 0), "");
2071 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
2072 LLVMConstInt(ctx->i32,
2073 tess_outer_index * 4, 0), "");
2074
2075 for (i = 0; i < outer_comps; i++) {
2076 outer[i] = out[i] =
2077 lshs_lds_load(ctx, ctx->ac.i32, i, lds_outer);
2078 }
2079 for (i = 0; i < inner_comps; i++) {
2080 inner[i] = out[outer_comps+i] =
2081 lshs_lds_load(ctx, ctx->ac.i32, i, lds_inner);
2082 }
2083 }
2084
2085 if (shader->key.part.tcs.epilog.prim_mode == PIPE_PRIM_LINES) {
2086 /* For isolines, the hardware expects tess factors in the
2087 * reverse order from what NIR specifies.
2088 */
2089 LLVMValueRef tmp = out[0];
2090 out[0] = out[1];
2091 out[1] = tmp;
2092 }
2093
2094 /* Convert the outputs to vectors for stores. */
2095 vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
2096 vec1 = NULL;
2097
2098 if (stride > 4)
2099 vec1 = ac_build_gather_values(&ctx->ac, out+4, stride - 4);
2100
2101 /* Get the buffer. */
2102 buffer = get_tess_ring_descriptor(ctx, TCS_FACTOR_RING);
2103
2104 /* Get the offset. */
2105 tf_base = ac_get_arg(&ctx->ac,
2106 ctx->tcs_factor_offset);
2107 byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
2108 LLVMConstInt(ctx->i32, 4 * stride, 0), "");
2109
2110 ac_build_ifcc(&ctx->ac,
2111 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2112 rel_patch_id, ctx->i32_0, ""), 6504);
2113
2114 /* Store the dynamic HS control word. */
2115 offset = 0;
2116 if (ctx->screen->info.chip_class <= GFX8) {
2117 ac_build_buffer_store_dword(&ctx->ac, buffer,
2118 LLVMConstInt(ctx->i32, 0x80000000, 0),
2119 1, ctx->i32_0, tf_base,
2120 offset, ac_glc);
2121 offset += 4;
2122 }
2123
2124 ac_build_endif(&ctx->ac, 6504);
2125
2126 /* Store the tessellation factors. */
2127 ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
2128 MIN2(stride, 4), byteoffset, tf_base,
2129 offset, ac_glc);
2130 offset += 16;
2131 if (vec1)
2132 ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
2133 stride - 4, byteoffset, tf_base,
2134 offset, ac_glc);
2135
2136 /* Store the tess factors into the offchip buffer if TES reads them. */
2137 if (shader->key.part.tcs.epilog.tes_reads_tess_factors) {
2138 LLVMValueRef buf, base, inner_vec, outer_vec, tf_outer_offset;
2139 LLVMValueRef tf_inner_offset;
2140 unsigned param_outer, param_inner;
2141
2142 buf = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
2143 base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
2144
2145 param_outer = si_shader_io_get_unique_index_patch(
2146 TGSI_SEMANTIC_TESSOUTER, 0);
2147 tf_outer_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
2148 LLVMConstInt(ctx->i32, param_outer, 0));
2149
2150 unsigned outer_vec_size =
2151 ac_has_vec3_support(ctx->screen->info.chip_class, false) ?
2152 outer_comps : util_next_power_of_two(outer_comps);
2153 outer_vec = ac_build_gather_values(&ctx->ac, outer, outer_vec_size);
2154
2155 ac_build_buffer_store_dword(&ctx->ac, buf, outer_vec,
2156 outer_comps, tf_outer_offset,
2157 base, 0, ac_glc);
2158 if (inner_comps) {
2159 param_inner = si_shader_io_get_unique_index_patch(
2160 TGSI_SEMANTIC_TESSINNER, 0);
2161 tf_inner_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
2162 LLVMConstInt(ctx->i32, param_inner, 0));
2163
2164 inner_vec = inner_comps == 1 ? inner[0] :
2165 ac_build_gather_values(&ctx->ac, inner, inner_comps);
2166 ac_build_buffer_store_dword(&ctx->ac, buf, inner_vec,
2167 inner_comps, tf_inner_offset,
2168 base, 0, ac_glc);
2169 }
2170 }
2171
2172 ac_build_endif(&ctx->ac, 6503);
2173 }
2174
2175 LLVMValueRef si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
2176 struct ac_arg param, unsigned return_index)
2177 {
2178 return LLVMBuildInsertValue(ctx->ac.builder, ret,
2179 ac_get_arg(&ctx->ac, param),
2180 return_index, "");
2181 }
2182
2183 LLVMValueRef si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
2184 struct ac_arg param, unsigned return_index)
2185 {
2186 LLVMBuilderRef builder = ctx->ac.builder;
2187 LLVMValueRef p = ac_get_arg(&ctx->ac, param);
2188
2189 return LLVMBuildInsertValue(builder, ret,
2190 ac_to_float(&ctx->ac, p),
2191 return_index, "");
2192 }
2193
2194 LLVMValueRef si_insert_input_ptr(struct si_shader_context *ctx, LLVMValueRef ret,
2195 struct ac_arg param, unsigned return_index)
2196 {
2197 LLVMBuilderRef builder = ctx->ac.builder;
2198 LLVMValueRef ptr = ac_get_arg(&ctx->ac, param);
2199 ptr = LLVMBuildPtrToInt(builder, ptr, ctx->i32, "");
2200 return LLVMBuildInsertValue(builder, ret, ptr, return_index, "");
2201 }
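/* These helpers forward an input argument into the return value of a merged
 * shader part at a fixed slot. For example,
 *
 *   ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 2);
 *
 * copies the tcs_offchip_offset SGPR into return slot 2, so the next shader
 * part receives it unchanged.
 */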
2202
2203 /* This only writes the tessellation factor levels. */
2204 static void si_llvm_emit_tcs_epilogue(struct ac_shader_abi *abi,
2205 unsigned max_outputs,
2206 LLVMValueRef *addrs)
2207 {
2208 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2209 LLVMBuilderRef builder = ctx->ac.builder;
2210 LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;
2211
2212 si_copy_tcs_inputs(ctx);
2213
2214 rel_patch_id = get_rel_patch_id(ctx);
2215 invocation_id = si_unpack_param(ctx, ctx->args.tcs_rel_ids, 8, 5);
2216 tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);
2217
2218 if (ctx->screen->info.chip_class >= GFX9) {
2219 LLVMBasicBlockRef blocks[2] = {
2220 LLVMGetInsertBlock(builder),
2221 ctx->merged_wrap_if_entry_block
2222 };
2223 LLVMValueRef values[2];
2224
2225 ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
2226
2227 values[0] = rel_patch_id;
2228 values[1] = LLVMGetUndef(ctx->i32);
2229 rel_patch_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
2230
2231 values[0] = tf_lds_offset;
2232 values[1] = LLVMGetUndef(ctx->i32);
2233 tf_lds_offset = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
2234
2235 values[0] = invocation_id;
2236 values[1] = ctx->i32_1; /* cause the epilog to skip threads */
2237 invocation_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
2238 }
2239
2240 /* Return epilog parameters from this function. */
2241 LLVMValueRef ret = ctx->return_value;
2242 unsigned vgpr;
2243
2244 if (ctx->screen->info.chip_class >= GFX9) {
2245 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout,
2246 8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
2247 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout,
2248 8 + GFX9_SGPR_TCS_OUT_LAYOUT);
2249 /* Tess offchip and tess factor offsets are at the beginning. */
2250 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 2);
2251 ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset, 4);
2252 vgpr = 8 + GFX9_SGPR_TCS_OUT_LAYOUT + 1;
2253 } else {
2254 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout,
2255 GFX6_SGPR_TCS_OFFCHIP_LAYOUT);
2256 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout,
2257 GFX6_SGPR_TCS_OUT_LAYOUT);
2258 /* Tess offchip and tess factor offsets are after user SGPRs. */
2259 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset,
2260 GFX6_TCS_NUM_USER_SGPR);
2261 ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset,
2262 GFX6_TCS_NUM_USER_SGPR + 1);
2263 vgpr = GFX6_TCS_NUM_USER_SGPR + 2;
2264 }
2265
2266 /* VGPRs */
2267 rel_patch_id = ac_to_float(&ctx->ac, rel_patch_id);
2268 invocation_id = ac_to_float(&ctx->ac, invocation_id);
2269 tf_lds_offset = ac_to_float(&ctx->ac, tf_lds_offset);
2270
2271 /* Leave a hole corresponding to the two input VGPRs. This ensures that
2272 * the invocation_id output does not alias the tcs_rel_ids input,
2273 * which saves a V_MOV on GFX9.
2274 */
2275 vgpr += 2;
2276
2277 ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
2278 ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");
2279
2280 if (ctx->shader->selector->info.tessfactors_are_def_in_all_invocs) {
2281 vgpr++; /* skip the tess factor LDS offset */
2282 for (unsigned i = 0; i < 6; i++) {
2283 LLVMValueRef value =
2284 LLVMBuildLoad(builder, ctx->invoc0_tess_factors[i], "");
2285 value = ac_to_float(&ctx->ac, value);
2286 ret = LLVMBuildInsertValue(builder, ret, value, vgpr++, "");
2287 }
2288 } else {
2289 ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
2290 }
2291 ctx->return_value = ret;
2292 }
2293
2294 /* Pass TCS inputs from LS to TCS on GFX9. */
2295 static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
2296 {
2297 LLVMValueRef ret = ctx->return_value;
2298
2299 ret = si_insert_input_ptr(ctx, ret, ctx->other_const_and_shader_buffers, 0);
2300 ret = si_insert_input_ptr(ctx, ret, ctx->other_samplers_and_images, 1);
2301 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 2);
2302 ret = si_insert_input_ret(ctx, ret, ctx->merged_wave_info, 3);
2303 ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset, 4);
2304 ret = si_insert_input_ret(ctx, ret, ctx->merged_scratch_offset, 5);
2305
2306 ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers,
2307 8 + SI_SGPR_RW_BUFFERS);
2308 ret = si_insert_input_ptr(ctx, ret,
2309 ctx->bindless_samplers_and_images,
2310 8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
2311
2312 ret = si_insert_input_ret(ctx, ret, ctx->vs_state_bits,
2313 8 + SI_SGPR_VS_STATE_BITS);
2314
2315 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout,
2316 8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
2317 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_offsets,
2318 8 + GFX9_SGPR_TCS_OUT_OFFSETS);
2319 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout,
2320 8 + GFX9_SGPR_TCS_OUT_LAYOUT);
2321
2322 unsigned vgpr = 8 + GFX9_TCS_NUM_USER_SGPR;
2323 ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
2324 ac_to_float(&ctx->ac,
2325 ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id)),
2326 vgpr++, "");
2327 ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
2328 ac_to_float(&ctx->ac,
2329 ac_get_arg(&ctx->ac, ctx->args.tcs_rel_ids)),
2330 vgpr++, "");
2331 ctx->return_value = ret;
2332 }
2333
2334 /* Pass GS inputs from ES to GS on GFX9. */
2335 static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
2336 {
2337 LLVMValueRef ret = ctx->return_value;
2338
2339 ret = si_insert_input_ptr(ctx, ret, ctx->other_const_and_shader_buffers, 0);
2340 ret = si_insert_input_ptr(ctx, ret, ctx->other_samplers_and_images, 1);
2341 if (ctx->shader->key.as_ngg)
2342 ret = si_insert_input_ptr(ctx, ret, ctx->gs_tg_info, 2);
2343 else
2344 ret = si_insert_input_ret(ctx, ret, ctx->gs2vs_offset, 2);
2345 ret = si_insert_input_ret(ctx, ret, ctx->merged_wave_info, 3);
2346 ret = si_insert_input_ret(ctx, ret, ctx->merged_scratch_offset, 5);
2347
2348 ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers,
2349 8 + SI_SGPR_RW_BUFFERS);
2350 ret = si_insert_input_ptr(ctx, ret,
2351 ctx->bindless_samplers_and_images,
2352 8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
2353 if (ctx->screen->use_ngg) {
2354 ret = si_insert_input_ptr(ctx, ret, ctx->vs_state_bits,
2355 8 + SI_SGPR_VS_STATE_BITS);
2356 }
2357
2358 unsigned vgpr;
2359 if (ctx->type == PIPE_SHADER_VERTEX)
2360 vgpr = 8 + GFX9_VSGS_NUM_USER_SGPR;
2361 else
2362 vgpr = 8 + GFX9_TESGS_NUM_USER_SGPR;
2363
2364 ret = si_insert_input_ret_float(ctx, ret, ctx->gs_vtx01_offset, vgpr++);
2365 ret = si_insert_input_ret_float(ctx, ret, ctx->gs_vtx23_offset, vgpr++);
2366 ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_prim_id, vgpr++);
2367 ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_invocation_id, vgpr++);
2368 ret = si_insert_input_ret_float(ctx, ret, ctx->gs_vtx45_offset, vgpr++);
2369 ctx->return_value = ret;
2370 }
2371
2372 static void si_llvm_emit_ls_epilogue(struct ac_shader_abi *abi,
2373 unsigned max_outputs,
2374 LLVMValueRef *addrs)
2375 {
2376 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2377 struct si_shader *shader = ctx->shader;
2378 struct si_shader_info *info = &shader->selector->info;
2379 unsigned i, chan;
2380 LLVMValueRef vertex_id = ac_get_arg(&ctx->ac, ctx->rel_auto_id);
2381 LLVMValueRef vertex_dw_stride = get_tcs_in_vertex_dw_stride(ctx);
2382 LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
2383 vertex_dw_stride, "");
2384
2385 /* Write outputs to LDS. The next shader (TCS aka HS) will read
2386 * its inputs from it. */
2387 for (i = 0; i < info->num_outputs; i++) {
2388 unsigned name = info->output_semantic_name[i];
2389 unsigned index = info->output_semantic_index[i];
2390
2391 /* The ARB_shader_viewport_layer_array spec contains the
2392 * following issue:
2393 *
2394 * 2) What happens if gl_ViewportIndex or gl_Layer is
2395 * written in the vertex shader and a geometry shader is
2396 * present?
2397 *
2398 * RESOLVED: The value written by the last vertex processing
2399 * stage is used. If the last vertex processing stage
2400 * (vertex, tessellation evaluation or geometry) does not
2401 * statically assign to gl_ViewportIndex or gl_Layer, index
2402 * or layer zero is assumed.
2403 *
2404 * So writes to those outputs in VS-as-LS are simply ignored.
2405 */
2406 if (name == TGSI_SEMANTIC_LAYER ||
2407 name == TGSI_SEMANTIC_VIEWPORT_INDEX)
2408 continue;
2409
2410 int param = si_shader_io_get_unique_index(name, index, false);
2411 LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
2412 LLVMConstInt(ctx->i32, param * 4, 0), "");
2413
2414 for (chan = 0; chan < 4; chan++) {
2415 if (!(info->output_usagemask[i] & (1 << chan)))
2416 continue;
2417
2418 lshs_lds_store(ctx, chan, dw_addr,
2419 LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], ""));
2420 }
2421 }
2422
2423 if (ctx->screen->info.chip_class >= GFX9)
2424 si_set_ls_return_value_for_tcs(ctx);
2425 }
2426
2427 static void si_llvm_emit_es_epilogue(struct ac_shader_abi *abi,
2428 unsigned max_outputs,
2429 LLVMValueRef *addrs)
2430 {
2431 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2432 struct si_shader *es = ctx->shader;
2433 struct si_shader_info *info = &es->selector->info;
2434 LLVMValueRef lds_base = NULL;
2435 unsigned chan;
2436 int i;
2437
2438 if (ctx->screen->info.chip_class >= GFX9 && info->num_outputs) {
2439 unsigned itemsize_dw = es->selector->esgs_itemsize / 4;
2440 LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
2441 LLVMValueRef wave_idx = si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
2442 vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
2443 LLVMBuildMul(ctx->ac.builder, wave_idx,
2444 LLVMConstInt(ctx->i32, ctx->ac.wave_size, false), ""), "");
2445 lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
2446 LLVMConstInt(ctx->i32, itemsize_dw, 0), "");
2447 }
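/* Example with hypothetical values: wave_size = 64, wave_idx = 1 and
 * lane 5 give vertex_idx = 5 | (1 * 64) = 69; with an ESGS itemsize of
 * 16 dwords, lds_base = 69 * 16 = 1104 (a dword index). */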
2448
2449 for (i = 0; i < info->num_outputs; i++) {
2450 int param;
2451
2452 if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
2453 info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
2454 continue;
2455
2456 param = si_shader_io_get_unique_index(info->output_semantic_name[i],
2457 info->output_semantic_index[i], false);
2458
2459 for (chan = 0; chan < 4; chan++) {
2460 if (!(info->output_usagemask[i] & (1 << chan)))
2461 continue;
2462
2463 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
2464 out_val = ac_to_integer(&ctx->ac, out_val);
2465
2466 /* GFX9 has the ESGS ring in LDS. */
2467 if (ctx->screen->info.chip_class >= GFX9) {
2468 LLVMValueRef idx = LLVMConstInt(ctx->i32, param * 4 + chan, false);
2469 idx = LLVMBuildAdd(ctx->ac.builder, lds_base, idx, "");
2470 ac_build_indexed_store(&ctx->ac, ctx->esgs_ring, idx, out_val);
2471 continue;
2472 }
2473
2474 ac_build_buffer_store_dword(&ctx->ac,
2475 ctx->esgs_ring,
2476 out_val, 1, NULL,
2477 ac_get_arg(&ctx->ac, ctx->es2gs_offset),
2478 (4 * param + chan) * 4,
2479 ac_glc | ac_slc | ac_swizzled);
2480 }
2481 }
2482
2483 if (ctx->screen->info.chip_class >= GFX9)
2484 si_set_es_return_value_for_gs(ctx);
2485 }
2486
2487 static LLVMValueRef si_get_gs_wave_id(struct si_shader_context *ctx)
2488 {
2489 if (ctx->screen->info.chip_class >= GFX9)
2490 return si_unpack_param(ctx, ctx->merged_wave_info, 16, 8);
2491 else
2492 return ac_get_arg(&ctx->ac, ctx->gs_wave_id);
2493 }
2494
2495 static void emit_gs_epilogue(struct si_shader_context *ctx)
2496 {
2497 if (ctx->shader->key.as_ngg) {
2498 gfx10_ngg_gs_emit_epilogue(ctx);
2499 return;
2500 }
2501
2502 if (ctx->screen->info.chip_class >= GFX10)
2503 LLVMBuildFence(ctx->ac.builder, LLVMAtomicOrderingRelease, false, "");
2504
2505 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
2506 si_get_gs_wave_id(ctx));
2507
2508 if (ctx->screen->info.chip_class >= GFX9)
2509 ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
2510 }
2511
2512 static void si_llvm_emit_gs_epilogue(struct ac_shader_abi *abi,
2513 unsigned max_outputs,
2514 LLVMValueRef *addrs)
2515 {
2516 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2517 struct si_shader_info UNUSED *info = &ctx->shader->selector->info;
2518
2519 assert(info->num_outputs <= max_outputs);
2520
2521 emit_gs_epilogue(ctx);
2522 }
2523
2524 static void si_llvm_emit_vs_epilogue(struct ac_shader_abi *abi,
2525 unsigned max_outputs,
2526 LLVMValueRef *addrs)
2527 {
2528 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2529 struct si_shader_info *info = &ctx->shader->selector->info;
2530 struct si_shader_output_values *outputs = NULL;
2531 int i, j;
2532
2533 assert(!ctx->shader->is_gs_copy_shader);
2534 assert(info->num_outputs <= max_outputs);
2535
2536 outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));
2537
2538 for (i = 0; i < info->num_outputs; i++) {
2539 outputs[i].semantic_name = info->output_semantic_name[i];
2540 outputs[i].semantic_index = info->output_semantic_index[i];
2541
2542 for (j = 0; j < 4; j++) {
2543 outputs[i].values[j] =
2544 LLVMBuildLoad(ctx->ac.builder,
2545 addrs[4 * i + j],
2546 "");
2547 outputs[i].vertex_stream[j] =
2548 (info->output_streams[i] >> (2 * j)) & 3;
2549 }
2550 }
2551
2552 if (!ctx->screen->use_ngg_streamout &&
2553 ctx->shader->selector->so.num_outputs)
2554 si_llvm_emit_streamout(ctx, outputs, i, 0);
2555
2556 /* Export PrimitiveID. */
2557 if (ctx->shader->key.mono.u.vs_export_prim_id) {
2558 outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
2559 outputs[i].semantic_index = 0;
2560 outputs[i].values[0] = ac_to_float(&ctx->ac, si_get_primitive_id(ctx, 0));
2561 for (j = 1; j < 4; j++)
2562 outputs[i].values[j] = LLVMConstReal(ctx->f32, 0);
2563
2564 memset(outputs[i].vertex_stream, 0,
2565 sizeof(outputs[i].vertex_stream));
2566 i++;
2567 }
2568
2569 si_llvm_export_vs(ctx, outputs, i);
2570 FREE(outputs);
2571 }
2572
2573 static void si_llvm_emit_prim_discard_cs_epilogue(struct ac_shader_abi *abi,
2574 unsigned max_outputs,
2575 LLVMValueRef *addrs)
2576 {
2577 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2578 struct si_shader_info *info = &ctx->shader->selector->info;
2579 LLVMValueRef pos[4] = {};
2580
2581 assert(info->num_outputs <= max_outputs);
2582
2583 for (unsigned i = 0; i < info->num_outputs; i++) {
2584 if (info->output_semantic_name[i] != TGSI_SEMANTIC_POSITION)
2585 continue;
2586
2587 for (unsigned chan = 0; chan < 4; chan++)
2588 pos[chan] = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
2589 break;
2590 }
2591 assert(pos[0] != NULL);
2592
2593 /* Return the position output. */
2594 LLVMValueRef ret = ctx->return_value;
2595 for (unsigned chan = 0; chan < 4; chan++)
2596 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, pos[chan], chan, "");
2597 ctx->return_value = ret;
2598 }
2599
2600 /* Emit one vertex from the geometry shader */
2601 static void si_llvm_emit_vertex(struct ac_shader_abi *abi,
2602 unsigned stream,
2603 LLVMValueRef *addrs)
2604 {
2605 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2606
2607 if (ctx->shader->key.as_ngg) {
2608 gfx10_ngg_gs_emit_vertex(ctx, stream, addrs);
2609 return;
2610 }
2611
2612 struct si_shader_info *info = &ctx->shader->selector->info;
2613 struct si_shader *shader = ctx->shader;
2614 LLVMValueRef soffset = ac_get_arg(&ctx->ac, ctx->gs2vs_offset);
2615 LLVMValueRef gs_next_vertex;
2616 LLVMValueRef can_emit;
2617 unsigned chan, offset;
2618 int i;
2619
2620 /* Write vertex attribute values to GSVS ring */
2621 gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
2622 ctx->gs_next_vertex[stream],
2623 "");
2624
2625 /* If this thread has already emitted the declared maximum number of
2626 * vertices, skip the write: excessive vertex emissions are not
2627 * supposed to have any effect.
2628 *
2629 * If the shader has no writes to memory, kill it instead. This skips
2630 * further memory loads and may allow LLVM to skip to the end
2631 * altogether.
2632 */
2633 can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
2634 LLVMConstInt(ctx->i32,
2635 shader->selector->gs_max_out_vertices, 0), "");
2636
2637 bool use_kill = !info->writes_memory;
2638 if (use_kill) {
2639 ac_build_kill_if_false(&ctx->ac, can_emit);
2640 } else {
2641 ac_build_ifcc(&ctx->ac, can_emit, 6505);
2642 }
2643
2644 offset = 0;
2645 for (i = 0; i < info->num_outputs; i++) {
2646 for (chan = 0; chan < 4; chan++) {
2647 if (!(info->output_usagemask[i] & (1 << chan)) ||
2648 ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
2649 continue;
2650
2651 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
2652 LLVMValueRef voffset =
2653 LLVMConstInt(ctx->i32, offset *
2654 shader->selector->gs_max_out_vertices, 0);
2655 offset++;
2656
2657 voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
2658 voffset = LLVMBuildMul(ctx->ac.builder, voffset,
2659 LLVMConstInt(ctx->i32, 4, 0), "");
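/* Illustrative: the GSVS ring is attribute-major, so for output slot 2
 * with gs_max_out_vertices = 4 and gs_next_vertex = 1, this yields
 * voffset = (2 * 4 + 1) * 4 = 36 bytes (hypothetical values). */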
2660
2661 out_val = ac_to_integer(&ctx->ac, out_val);
2662
2663 ac_build_buffer_store_dword(&ctx->ac,
2664 ctx->gsvs_ring[stream],
2665 out_val, 1,
2666 voffset, soffset, 0,
2667 ac_glc | ac_slc | ac_swizzled);
2668 }
2669 }
2670
2671 gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex, ctx->i32_1, "");
2672 LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);
2673
2674 /* Signal vertex emission if vertex data was written. */
2675 if (offset) {
2676 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
2677 si_get_gs_wave_id(ctx));
2678 }
2679
2680 if (!use_kill)
2681 ac_build_endif(&ctx->ac, 6505);
2682 }
2683
2684 /* Cut one primitive from the geometry shader */
2685 static void si_llvm_emit_primitive(struct ac_shader_abi *abi,
2686 unsigned stream)
2687 {
2688 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2689
2690 if (ctx->shader->key.as_ngg) {
2691 LLVMBuildStore(ctx->ac.builder, ctx->ac.i32_0, ctx->gs_curprim_verts[stream]);
2692 return;
2693 }
2694
2695 /* Signal primitive cut */
2696 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
2697 si_get_gs_wave_id(ctx));
2698 }
2699
2700 static void si_llvm_emit_barrier(struct si_shader_context *ctx)
2701 {
2702 /* GFX6 only (thanks to a hw bug workaround):
2703 * The real barrier instruction isn't needed, because an entire patch
2704 * always fits into a single wave.
2705 */
2706 if (ctx->screen->info.chip_class == GFX6 &&
2707 ctx->type == PIPE_SHADER_TESS_CTRL) {
2708 ac_build_waitcnt(&ctx->ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
2709 return;
2710 }
2711
2712 ac_build_s_barrier(&ctx->ac);
2713 }
2714
2715 static void declare_streamout_params(struct si_shader_context *ctx,
2716 struct pipe_stream_output_info *so)
2717 {
2718 if (ctx->screen->use_ngg_streamout) {
2719 if (ctx->type == PIPE_SHADER_TESS_EVAL)
2720 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
2721 return;
2722 }
2723
2724 /* Streamout SGPRs. */
2725 if (so->num_outputs) {
2726 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_config);
2727 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_write_index);
2728 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
2729 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
2730 }
2731
2732 /* A streamout buffer offset is loaded if the stride is non-zero. */
2733 for (int i = 0; i < 4; i++) {
2734 if (!so->stride[i])
2735 continue;
2736
2737 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_offset[i]);
2738 }
2739 }
2740
2741 static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
2742 {
2743 switch (shader->selector->type) {
2744 case PIPE_SHADER_VERTEX:
2745 case PIPE_SHADER_TESS_EVAL:
2746 return shader->key.as_ngg ? 128 : 0;
2747
2748 case PIPE_SHADER_TESS_CTRL:
2749 /* Return this so that LLVM doesn't remove s_barrier
2750 * instructions on chips where we use s_barrier. */
2751 return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 0;
2752
2753 case PIPE_SHADER_GEOMETRY:
2754 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 0;
2755
2756 case PIPE_SHADER_COMPUTE:
2757 break; /* see below */
2758
2759 default:
2760 return 0;
2761 }
2762
2763 const unsigned *properties = shader->selector->info.properties;
2764 unsigned max_work_group_size =
2765 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
2766 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
2767 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
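/* e.g. a fixed block size of 8 x 8 x 1 yields max_work_group_size = 64. */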
2768
2769 if (!max_work_group_size) {
2770 /* This is a variable group size compute shader,
2771 * compile it for the maximum possible group size.
2772 */
2773 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
2774 }
2775 return max_work_group_size;
2776 }
2777
2778 static void declare_const_and_shader_buffers(struct si_shader_context *ctx,
2779 bool assign_params)
2780 {
2781 enum ac_arg_type const_shader_buf_type;
2782
2783 if (ctx->shader->selector->info.const_buffers_declared == 1 &&
2784 ctx->shader->selector->info.shader_buffers_declared == 0)
2785 const_shader_buf_type = AC_ARG_CONST_FLOAT_PTR;
2786 else
2787 const_shader_buf_type = AC_ARG_CONST_DESC_PTR;
2788
2789 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, const_shader_buf_type,
2790 assign_params ? &ctx->const_and_shader_buffers :
2791 &ctx->other_const_and_shader_buffers);
2792 }
2793
2794 static void declare_samplers_and_images(struct si_shader_context *ctx,
2795 bool assign_params)
2796 {
2797 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
2798 assign_params ? &ctx->samplers_and_images :
2799 &ctx->other_samplers_and_images);
2800 }
2801
2802 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
2803 bool assign_params)
2804 {
2805 declare_const_and_shader_buffers(ctx, assign_params);
2806 declare_samplers_and_images(ctx, assign_params);
2807 }
2808
2809 static void declare_global_desc_pointers(struct si_shader_context *ctx)
2810 {
2811 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
2812 &ctx->rw_buffers);
2813 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
2814 &ctx->bindless_samplers_and_images);
2815 }
2816
2817 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx)
2818 {
2819 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
2820 if (!ctx->shader->is_gs_copy_shader) {
2821 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.base_vertex);
2822 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.start_instance);
2823 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.draw_id);
2824 }
2825 }
2826
2827 static void declare_vb_descriptor_input_sgprs(struct si_shader_context *ctx)
2828 {
2829 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->vertex_buffers);
2830
2831 unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
2832 if (num_vbos_in_user_sgprs) {
2833 unsigned user_sgprs = ctx->args.num_sgprs_used;
2834
2835 if (si_is_merged_shader(ctx))
2836 user_sgprs -= 8;
2837 assert(user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
2838
2839 /* Declare unused SGPRs to align VB descriptors to 4 SGPRs (hw requirement). */
2840 for (unsigned i = user_sgprs; i < SI_SGPR_VS_VB_DESCRIPTOR_FIRST; i++)
2841 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
2842
2843 assert(num_vbos_in_user_sgprs <= ARRAY_SIZE(ctx->vb_descriptors));
2844 for (unsigned i = 0; i < num_vbos_in_user_sgprs; i++)
2845 ac_add_arg(&ctx->args, AC_ARG_SGPR, 4, AC_ARG_INT, &ctx->vb_descriptors[i]);
2846 }
2847 }
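/* Sketch of the alignment above with hypothetical numbers: if 10 user SGPRs
 * are in use (after subtracting the 8 merged-shader system SGPRs) and
 * SI_SGPR_VS_VB_DESCRIPTOR_FIRST is 12, two unused SGPRs are declared so
 * that the first 4-SGPR VB descriptor starts at SGPR 12. */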
2848
2849 static void declare_vs_input_vgprs(struct si_shader_context *ctx,
2850 unsigned *num_prolog_vgprs)
2851 {
2852 struct si_shader *shader = ctx->shader;
2853
2854 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.vertex_id);
2855 if (shader->key.as_ls) {
2856 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->rel_auto_id);
2857 if (ctx->screen->info.chip_class >= GFX10) {
2858 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
2859 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
2860 } else {
2861 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
2862 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
2863 }
2864 } else if (ctx->screen->info.chip_class >= GFX10) {
2865 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
2866 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
2867 &ctx->vs_prim_id); /* user vgpr or PrimID (legacy) */
2868 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
2869 } else {
2870 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
2871 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->vs_prim_id);
2872 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
2873 }
2874
2875 if (!shader->is_gs_copy_shader) {
2876 /* Vertex load indices. */
2877 if (shader->selector->info.num_inputs) {
2878 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
2879 &ctx->vertex_index0);
2880 for (unsigned i = 1; i < shader->selector->info.num_inputs; i++)
2881 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
2882 }
2883 *num_prolog_vgprs += shader->selector->info.num_inputs;
2884 }
2885 }
2886
2887 static void declare_vs_blit_inputs(struct si_shader_context *ctx,
2888 unsigned vs_blit_property)
2889 {
2890 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
2891 &ctx->vs_blit_inputs); /* i16 x1, y1 */
2892 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* i16 x2, y2 */
2893 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* depth */
2894
2895 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
2896 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color0 */
2897 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color1 */
2898 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color2 */
2899 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color3 */
2900 } else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
2901 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x1 */
2902 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y1 */
2903 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x2 */
2904 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y2 */
2905 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.z */
2906 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.w */
2907 }
2908 }
2909
2910 static void declare_tes_input_vgprs(struct si_shader_context *ctx)
2911 {
2912 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_u);
2913 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_v);
2914 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->tes_rel_patch_id);
2915 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tes_patch_id);
2916 }
2917
2918 enum {
2919 /* Convenient merged shader definitions. */
2920 SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
2921 SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
2922 };
2923
2924 void si_add_arg_checked(struct ac_shader_args *args,
2925 enum ac_arg_regfile file,
2926 unsigned registers, enum ac_arg_type type,
2927 struct ac_arg *arg,
2928 unsigned idx)
2929 {
2930 assert(args->arg_count == idx);
2931 ac_add_arg(args, file, registers, type, arg);
2932 }
2933
2934 static void create_function(struct si_shader_context *ctx)
2935 {
2936 struct si_shader *shader = ctx->shader;
2937 LLVMTypeRef returns[AC_MAX_ARGS];
2938 unsigned i, num_return_sgprs;
2939 unsigned num_returns = 0;
2940 unsigned num_prolog_vgprs = 0;
2941 unsigned type = ctx->type;
2942 unsigned vs_blit_property =
2943 shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
2944
2945 memset(&ctx->args, 0, sizeof(ctx->args));
2946
2947 /* Set MERGED shaders. */
2948 if (ctx->screen->info.chip_class >= GFX9) {
2949 if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
2950 type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
2951 else if (shader->key.as_es || shader->key.as_ngg || type == PIPE_SHADER_GEOMETRY)
2952 type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
2953 }
2954
2955 switch (type) {
2956 case PIPE_SHADER_VERTEX:
2957 declare_global_desc_pointers(ctx);
2958
2959 if (vs_blit_property) {
2960 declare_vs_blit_inputs(ctx, vs_blit_property);
2961
2962 /* VGPRs */
2963 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
2964 break;
2965 }
2966
2967 declare_per_stage_desc_pointers(ctx, true);
2968 declare_vs_specific_input_sgprs(ctx);
2969 if (!shader->is_gs_copy_shader)
2970 declare_vb_descriptor_input_sgprs(ctx);
2971
2972 if (shader->key.as_es) {
2973 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
2974 &ctx->es2gs_offset);
2975 } else if (shader->key.as_ls) {
2976 /* no extra parameters */
2977 } else {
2978 /* The locations of the other parameters are assigned dynamically. */
2979 declare_streamout_params(ctx, &shader->selector->so);
2980 }
2981
2982 /* VGPRs */
2983 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
2984
2985 /* Return values */
2986 if (shader->key.opt.vs_as_prim_discard_cs) {
2987 for (i = 0; i < 4; i++)
2988 returns[num_returns++] = ctx->f32; /* VGPRs */
2989 }
2990 break;
2991
2992 case PIPE_SHADER_TESS_CTRL: /* GFX6-GFX8 */
2993 declare_global_desc_pointers(ctx);
2994 declare_per_stage_desc_pointers(ctx, true);
2995 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
2996 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
2997 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
2998 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
2999 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3000 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
3001
3002 /* VGPRs */
3003 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
3004 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
3005
3006 /* param_tcs_offchip_offset and param_tcs_factor_offset are
3007 * placed after the user SGPRs.
3008 */
3009 for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
3010 returns[num_returns++] = ctx->i32; /* SGPRs */
3011 for (i = 0; i < 11; i++)
3012 returns[num_returns++] = ctx->f32; /* VGPRs */
3013 break;
3014
3015 case SI_SHADER_MERGED_VERTEX_TESSCTRL:
3016 /* Merged stages have 8 system SGPRs at the beginning. */
3017 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_HS */
3018 declare_per_stage_desc_pointers(ctx,
3019 ctx->type == PIPE_SHADER_TESS_CTRL);
3020 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3021 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
3022 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
3023 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
3024 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
3025 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
3026
3027 declare_global_desc_pointers(ctx);
3028 declare_per_stage_desc_pointers(ctx,
3029 ctx->type == PIPE_SHADER_VERTEX);
3030 declare_vs_specific_input_sgprs(ctx);
3031
3032 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
3033 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
3034 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
3035 declare_vb_descriptor_input_sgprs(ctx);
3036
3037 /* VGPRs (first TCS, then VS) */
3038 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
3039 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
3040
3041 if (ctx->type == PIPE_SHADER_VERTEX) {
3042 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
3043
3044 /* LS return values are inputs to the TCS main shader part. */
3045 for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
3046 returns[num_returns++] = ctx->i32; /* SGPRs */
3047 for (i = 0; i < 2; i++)
3048 returns[num_returns++] = ctx->f32; /* VGPRs */
3049 } else {
3050 /* TCS return values are inputs to the TCS epilog.
3051 *
3052 * param_tcs_offchip_offset, param_tcs_factor_offset,
3053 * param_tcs_offchip_layout, and param_rw_buffers
3054 * should be passed to the epilog.
3055 */
3056 for (i = 0; i <= 8 + GFX9_SGPR_TCS_OUT_LAYOUT; i++)
3057 returns[num_returns++] = ctx->i32; /* SGPRs */
3058 for (i = 0; i < 11; i++)
3059 returns[num_returns++] = ctx->f32; /* VGPRs */
3060 }
3061 break;
3062
3063 case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
3064 /* Merged stages have 8 system SGPRs at the beginning. */
3065 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_GS */
3066 declare_per_stage_desc_pointers(ctx,
3067 ctx->type == PIPE_SHADER_GEOMETRY);
3068
3069 if (ctx->shader->key.as_ngg)
3070 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_tg_info);
3071 else
3072 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
3073
3074 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
3075 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3076 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
3077 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */
3078 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
3079
3080 declare_global_desc_pointers(ctx);
3081 if (ctx->type != PIPE_SHADER_VERTEX || !vs_blit_property) {
3082 declare_per_stage_desc_pointers(ctx,
3083 (ctx->type == PIPE_SHADER_VERTEX ||
3084 ctx->type == PIPE_SHADER_TESS_EVAL));
3085 }
3086
3087 if (ctx->type == PIPE_SHADER_VERTEX) {
3088 if (vs_blit_property)
3089 declare_vs_blit_inputs(ctx, vs_blit_property);
3090 else
3091 declare_vs_specific_input_sgprs(ctx);
3092 } else {
3093 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
3094 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
3095 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
3096 /* Declare as many input SGPRs as the VS has. */
3097 }
3098
3099 if (ctx->type == PIPE_SHADER_VERTEX)
3100 declare_vb_descriptor_input_sgprs(ctx);
3101
3102 /* VGPRs (first GS, then VS/TES) */
3103 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx01_offset);
3104 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx23_offset);
3105 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
3106 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
3107 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx45_offset);
3108
3109 if (ctx->type == PIPE_SHADER_VERTEX) {
3110 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
3111 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
3112 declare_tes_input_vgprs(ctx);
3113 }
3114
3115 if (ctx->shader->key.as_es &&
3116 (ctx->type == PIPE_SHADER_VERTEX ||
3117 ctx->type == PIPE_SHADER_TESS_EVAL)) {
3118 unsigned num_user_sgprs;
3119
3120 if (ctx->type == PIPE_SHADER_VERTEX)
3121 num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR;
3122 else
3123 num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
3124
3125 /* ES return values are inputs to GS. */
3126 for (i = 0; i < 8 + num_user_sgprs; i++)
3127 returns[num_returns++] = ctx->i32; /* SGPRs */
3128 for (i = 0; i < 5; i++)
3129 returns[num_returns++] = ctx->f32; /* VGPRs */
3130 }
3131 break;
3132
3133 case PIPE_SHADER_TESS_EVAL:
3134 declare_global_desc_pointers(ctx);
3135 declare_per_stage_desc_pointers(ctx, true);
3136 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
3137 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
3138 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
3139
3140 if (shader->key.as_es) {
3141 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3142 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
3143 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->es2gs_offset);
3144 } else {
3145 declare_streamout_params(ctx, &shader->selector->so);
3146 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3147 }
3148
3149 /* VGPRs */
3150 declare_tes_input_vgprs(ctx);
3151 break;
3152
3153 case PIPE_SHADER_GEOMETRY:
3154 declare_global_desc_pointers(ctx);
3155 declare_per_stage_desc_pointers(ctx, true);
3156 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
3157 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_wave_id);
3158
3159 /* VGPRs */
3160 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[0]);
3161 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[1]);
3162 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
3163 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[2]);
3164 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[3]);
3165 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[4]);
3166 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[5]);
3167 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
3168 break;
3169
3170 case PIPE_SHADER_FRAGMENT:
3171 declare_global_desc_pointers(ctx);
3172 declare_per_stage_desc_pointers(ctx, true);
3173 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL,
3174 SI_PARAM_ALPHA_REF);
3175 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
3176 &ctx->args.prim_mask, SI_PARAM_PRIM_MASK);
3177
3178 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_sample,
3179 SI_PARAM_PERSP_SAMPLE);
3180 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3181 &ctx->args.persp_center, SI_PARAM_PERSP_CENTER);
3182 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3183 &ctx->args.persp_centroid, SI_PARAM_PERSP_CENTROID);
3184 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
3185 NULL, SI_PARAM_PERSP_PULL_MODEL);
3186 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3187 &ctx->args.linear_sample, SI_PARAM_LINEAR_SAMPLE);
3188 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3189 &ctx->args.linear_center, SI_PARAM_LINEAR_CENTER);
3190 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3191 &ctx->args.linear_centroid, SI_PARAM_LINEAR_CENTROID);
3192 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_FLOAT,
3193 NULL, SI_PARAM_LINE_STIPPLE_TEX);
3194 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3195 &ctx->args.frag_pos[0], SI_PARAM_POS_X_FLOAT);
3196 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3197 &ctx->args.frag_pos[1], SI_PARAM_POS_Y_FLOAT);
3198 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3199 &ctx->args.frag_pos[2], SI_PARAM_POS_Z_FLOAT);
3200 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3201 &ctx->args.frag_pos[3], SI_PARAM_POS_W_FLOAT);
3202 shader->info.face_vgpr_index = ctx->args.num_vgprs_used;
3203 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
3204 &ctx->args.front_face, SI_PARAM_FRONT_FACE);
3205 shader->info.ancillary_vgpr_index = ctx->args.num_vgprs_used;
3206 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
3207 &ctx->args.ancillary, SI_PARAM_ANCILLARY);
3208 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3209 &ctx->args.sample_coverage, SI_PARAM_SAMPLE_COVERAGE);
3210 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
3211 &ctx->pos_fixed_pt, SI_PARAM_POS_FIXED_PT);
3212
3213 /* Color inputs from the prolog. */
3214 if (shader->selector->info.colors_read) {
3215 unsigned num_color_elements =
3216 util_bitcount(shader->selector->info.colors_read);
3217
3218 for (i = 0; i < num_color_elements; i++)
3219 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, NULL);
3220
3221 num_prolog_vgprs += num_color_elements;
3222 }
3223
3224 /* Outputs for the epilog. */
3225 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
3226 num_returns =
3227 num_return_sgprs +
3228 util_bitcount(shader->selector->info.colors_written) * 4 +
3229 shader->selector->info.writes_z +
3230 shader->selector->info.writes_stencil +
3231 shader->selector->info.writes_samplemask +
3232 1 /* SampleMaskIn */;
3233
3234 num_returns = MAX2(num_returns,
3235 num_return_sgprs +
3236 PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
3237
3238 for (i = 0; i < num_return_sgprs; i++)
3239 returns[i] = ctx->i32;
3240 for (; i < num_returns; i++)
3241 returns[i] = ctx->f32;
3242 break;
3243
3244 case PIPE_SHADER_COMPUTE:
3245 declare_global_desc_pointers(ctx);
3246 declare_per_stage_desc_pointers(ctx, true);
3247 if (shader->selector->info.uses_grid_size)
3248 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT,
3249 &ctx->args.num_work_groups);
3250 if (shader->selector->info.uses_block_size &&
3251 shader->selector->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
3252 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT, &ctx->block_size);
3253
3254 unsigned cs_user_data_dwords =
3255 shader->selector->info.properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD];
3256 if (cs_user_data_dwords) {
3257 ac_add_arg(&ctx->args, AC_ARG_SGPR, cs_user_data_dwords, AC_ARG_INT,
3258 &ctx->cs_user_data);
3259 }
3260
3261 /* Hardware SGPRs. */
3262 for (i = 0; i < 3; i++) {
3263 if (shader->selector->info.uses_block_id[i]) {
3264 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
3265 &ctx->args.workgroup_ids[i]);
3266 }
3267 }
3268 if (shader->selector->info.uses_subgroup_info)
3269 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tg_size);
3270
3271 /* Hardware VGPRs. */
3272 ac_add_arg(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
3273 &ctx->args.local_invocation_ids);
3274 break;
3275 default:
3276 assert(0 && "unimplemented shader");
3277 return;
3278 }
3279
3280 si_llvm_create_func(ctx, "main", returns, num_returns,
3281 si_get_max_workgroup_size(shader));
3282
3283 /* Reserve register locations for VGPR inputs the PS prolog may need. */
3284 if (ctx->type == PIPE_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
3285 ac_llvm_add_target_dep_function_attr(ctx->main_fn,
3286 "InitialPSInputAddr",
3287 S_0286D0_PERSP_SAMPLE_ENA(1) |
3288 S_0286D0_PERSP_CENTER_ENA(1) |
3289 S_0286D0_PERSP_CENTROID_ENA(1) |
3290 S_0286D0_LINEAR_SAMPLE_ENA(1) |
3291 S_0286D0_LINEAR_CENTER_ENA(1) |
3292 S_0286D0_LINEAR_CENTROID_ENA(1) |
3293 S_0286D0_FRONT_FACE_ENA(1) |
3294 S_0286D0_ANCILLARY_ENA(1) |
3295 S_0286D0_POS_FIXED_PT_ENA(1));
3296 }
3297
3298 shader->info.num_input_sgprs = ctx->args.num_sgprs_used;
3299 shader->info.num_input_vgprs = ctx->args.num_vgprs_used;
3300
3301 assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
3302 shader->info.num_input_vgprs -= num_prolog_vgprs;
3303
3304 if (shader->key.as_ls || ctx->type == PIPE_SHADER_TESS_CTRL) {
3305 if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
3306 /* The LSHS size is not known until draw time, so we append it
3307 * at the end of whatever LDS use there may be in the rest of
3308 * the shader (currently none, unless LLVM decides to do its
3309 * own LDS-based lowering).
3310 */
3311 ctx->ac.lds = LLVMAddGlobalInAddressSpace(
3312 ctx->ac.module, LLVMArrayType(ctx->i32, 0),
3313 "__lds_end", AC_ADDR_SPACE_LDS);
3314 LLVMSetAlignment(ctx->ac.lds, 256);
3315 } else {
3316 ac_declare_lds_as_pointer(&ctx->ac);
3317 }
3318 }
3319
3320 /* Unlike radv, we override these arguments in the prolog, so they
3321 * appear to the API shader as normal arguments.
3322 */
3323 if (ctx->type == PIPE_SHADER_VERTEX) {
3324 ctx->abi.vertex_id = ac_get_arg(&ctx->ac, ctx->args.vertex_id);
3325 ctx->abi.instance_id = ac_get_arg(&ctx->ac, ctx->args.instance_id);
3326 } else if (ctx->type == PIPE_SHADER_FRAGMENT) {
3327 ctx->abi.persp_centroid = ac_get_arg(&ctx->ac, ctx->args.persp_centroid);
3328 ctx->abi.linear_centroid = ac_get_arg(&ctx->ac, ctx->args.linear_centroid);
3329 }
3330 }
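
/* Editor's note: a minimal standalone sketch of how the fragment-shader
 * return-slot count in create_function() above adds up. The function and
 * parameter names are hypothetical; num_return_sgprs stands in for
 * SI_SGPR_ALPHA_REF + 1 and samplemask_min_loc for
 * PS_EPILOG_SAMPLEMASK_MIN_LOC. Only the counting logic mirrors the code.
 */
static unsigned example_ps_num_returns(unsigned num_return_sgprs,
                                       unsigned colors_written,
                                       unsigned writes_z,
                                       unsigned writes_stencil,
                                       unsigned writes_samplemask,
                                       unsigned samplemask_min_loc)
{
   unsigned num_colors = 0;
   for (unsigned mask = colors_written; mask; mask >>= 1)
      num_colors += mask & 1; /* util_bitcount equivalent */

   unsigned n = num_return_sgprs +
                num_colors * 4 + /* 4 channels per written color */
                writes_z + writes_stencil + writes_samplemask +
                1; /* SampleMaskIn */

   /* The epilog reads the sample mask from a fixed slot, so never return
    * fewer values than that slot requires (the MAX2 above). */
   unsigned min_returns = num_return_sgprs + samplemask_min_loc + 1;
   return n > min_returns ? n : min_returns;
}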
3331
3332 /* Ensure that the ESGS ring is declared.
3333 *
3334 * We declare it with 64KB alignment as a hint that the
3335 * pointer value will always be 0.
3336 */
3337 static void declare_esgs_ring(struct si_shader_context *ctx)
3338 {
3339 if (ctx->esgs_ring)
3340 return;
3341
3342 assert(!LLVMGetNamedGlobal(ctx->ac.module, "esgs_ring"));
3343
3344 ctx->esgs_ring = LLVMAddGlobalInAddressSpace(
3345 ctx->ac.module, LLVMArrayType(ctx->i32, 0),
3346 "esgs_ring",
3347 AC_ADDR_SPACE_LDS);
3348 LLVMSetLinkage(ctx->esgs_ring, LLVMExternalLinkage);
3349 LLVMSetAlignment(ctx->esgs_ring, 64 * 1024);
3350 }
3351
3352 /**
3353 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
3354 * for later use.
3355 */
3356 static void preload_ring_buffers(struct si_shader_context *ctx)
3357 {
3358 LLVMBuilderRef builder = ctx->ac.builder;
3359
3360 LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
3361
3362 if (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY) {
3363 if (ctx->screen->info.chip_class <= GFX8) {
3364 unsigned ring =
3365 ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
3366 : SI_ES_RING_ESGS;
3367 LLVMValueRef offset = LLVMConstInt(ctx->i32, ring, 0);
3368
3369 ctx->esgs_ring =
3370 ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
3371 } else {
3372 if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
3373 /* Declare the ESGS ring as an explicit LDS symbol. */
3374 declare_esgs_ring(ctx);
3375 } else {
3376 ac_declare_lds_as_pointer(&ctx->ac);
3377 ctx->esgs_ring = ctx->ac.lds;
3378 }
3379 }
3380 }
3381
3382 if (ctx->shader->is_gs_copy_shader) {
3383 LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
3384
3385 ctx->gsvs_ring[0] =
3386 ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
3387 } else if (ctx->type == PIPE_SHADER_GEOMETRY) {
3388 const struct si_shader_selector *sel = ctx->shader->selector;
3389 LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
3390 LLVMValueRef base_ring;
3391
3392 base_ring = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
3393
3394 /* The conceptual layout of the GSVS ring is
3395 * v0c0 .. vLc0 v0c1 .. vLc1 ..
3396 * but the real memory layout is swizzled across
3397 * threads:
3398 * t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
3399 * t16v0c0 ..
3400 * Override the buffer descriptor accordingly.
3401 */
3402 LLVMTypeRef v2i64 = LLVMVectorType(ctx->i64, 2);
3403 uint64_t stream_offset = 0;
3404
3405 for (unsigned stream = 0; stream < 4; ++stream) {
3406 unsigned num_components;
3407 unsigned stride;
3408 unsigned num_records;
3409 LLVMValueRef ring, tmp;
3410
3411 num_components = sel->info.num_stream_output_components[stream];
3412 if (!num_components)
3413 continue;
3414
3415 stride = 4 * num_components * sel->gs_max_out_vertices;
3416
3417 /* Limit on the stride field for <= GFX7. */
3418 assert(stride < (1 << 14));
3419
3420 num_records = ctx->ac.wave_size;
3421
3422 ring = LLVMBuildBitCast(builder, base_ring, v2i64, "");
3423 tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_0, "");
3424 tmp = LLVMBuildAdd(builder, tmp,
3425 LLVMConstInt(ctx->i64,
3426 stream_offset, 0), "");
3427 stream_offset += stride * ctx->ac.wave_size;
3428
3429 ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_0, "");
3430 ring = LLVMBuildBitCast(builder, ring, ctx->v4i32, "");
3431 tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_1, "");
3432 tmp = LLVMBuildOr(builder, tmp,
3433 LLVMConstInt(ctx->i32,
3434 S_008F04_STRIDE(stride) |
3435 S_008F04_SWIZZLE_ENABLE(1), 0), "");
3436 ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_1, "");
3437 ring = LLVMBuildInsertElement(builder, ring,
3438 LLVMConstInt(ctx->i32, num_records, 0),
3439 LLVMConstInt(ctx->i32, 2, 0), "");
3440
3441 uint32_t rsrc3 =
3442 S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
3443 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
3444 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
3445 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
3446 S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
3447 S_008F0C_ADD_TID_ENABLE(1);
3448
3449 if (ctx->ac.chip_class >= GFX10) {
3450 rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
3451 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) |
3452 S_008F0C_RESOURCE_LEVEL(1);
3453 } else {
3454 rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
3455 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
3456 S_008F0C_ELEMENT_SIZE(1); /* element_size = 4 (bytes) */
3457 }
3458
3459 ring = LLVMBuildInsertElement(builder, ring,
3460 LLVMConstInt(ctx->i32, rsrc3, false),
3461 LLVMConstInt(ctx->i32, 3, 0), "");
3462
3463 ctx->gsvs_ring[stream] = ring;
3464 }
3465 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
3466 ctx->tess_offchip_ring = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TES);
3467 }
3468 }
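
/* Editor's note: a standalone sketch of the per-stream GSVS layout that the
 * descriptor surgery above encodes. All names are hypothetical; the
 * arithmetic (stride, per-stream base offset, one record per lane) mirrors
 * the loop above.
 */
static void example_gsvs_stream_layout(const unsigned num_components[4],
                                       unsigned gs_max_out_vertices,
                                       unsigned wave_size,
                                       unsigned long long base_va,
                                       unsigned long long stream_va[4])
{
   unsigned long long stream_offset = 0;

   for (unsigned stream = 0; stream < 4; stream++) {
      stream_va[stream] = 0;
      if (!num_components[stream])
         continue;

      /* 4 bytes/component * components * vertices, per thread. */
      unsigned stride = 4 * num_components[stream] * gs_max_out_vertices;

      /* Each stream's window starts where the previous one ended;
       * num_records is the wave size, as in the code above. */
      stream_va[stream] = base_va + stream_offset;
      stream_offset += (unsigned long long)stride * wave_size;
   }
}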
3469
3470 /* For the UMR disassembler. */
3471 #define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
3472 #define DEBUGGER_NUM_MARKERS 5
3473
3474 static bool si_shader_binary_open(struct si_screen *screen,
3475 struct si_shader *shader,
3476 struct ac_rtld_binary *rtld)
3477 {
3478 const struct si_shader_selector *sel = shader->selector;
3479 const char *part_elfs[5];
3480 size_t part_sizes[5];
3481 unsigned num_parts = 0;
3482
3483 #define add_part(shader_or_part) \
3484 if (shader_or_part) { \
3485 part_elfs[num_parts] = (shader_or_part)->binary.elf_buffer; \
3486 part_sizes[num_parts] = (shader_or_part)->binary.elf_size; \
3487 num_parts++; \
3488 }
3489
3490 add_part(shader->prolog);
3491 add_part(shader->previous_stage);
3492 add_part(shader->prolog2);
3493 add_part(shader);
3494 add_part(shader->epilog);
3495
3496 #undef add_part
3497
3498 struct ac_rtld_symbol lds_symbols[2];
3499 unsigned num_lds_symbols = 0;
3500
3501 if (sel && screen->info.chip_class >= GFX9 && !shader->is_gs_copy_shader &&
3502 (sel->type == PIPE_SHADER_GEOMETRY || shader->key.as_ngg)) {
3503 /* We add this symbol even on LLVM <= 8 to ensure that
3504 * shader->config.lds_size is set correctly below.
3505 */
3506 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
3507 sym->name = "esgs_ring";
3508 sym->size = shader->gs_info.esgs_ring_size;
3509 sym->align = 64 * 1024;
3510 }
3511
3512 if (shader->key.as_ngg && sel->type == PIPE_SHADER_GEOMETRY) {
3513 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
3514 sym->name = "ngg_emit";
3515 sym->size = shader->ngg.ngg_emit_size * 4;
3516 sym->align = 4;
3517 }
3518
3519 bool ok = ac_rtld_open(rtld, (struct ac_rtld_open_info){
3520 .info = &screen->info,
3521 .options = {
3522 .halt_at_entry = screen->options.halt_shaders,
3523 },
3524 .shader_type = tgsi_processor_to_shader_stage(sel->type),
3525 .wave_size = si_get_shader_wave_size(shader),
3526 .num_parts = num_parts,
3527 .elf_ptrs = part_elfs,
3528 .elf_sizes = part_sizes,
3529 .num_shared_lds_symbols = num_lds_symbols,
3530 .shared_lds_symbols = lds_symbols });
3531
3532 if (rtld->lds_size > 0) {
3533 unsigned alloc_granularity = screen->info.chip_class >= GFX7 ? 512 : 256;
3534 shader->config.lds_size =
3535 align(rtld->lds_size, alloc_granularity) / alloc_granularity;
3536 }
3537
3538 return ok;
3539 }
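
/* Editor's note: a tiny sketch of the lds_size conversion above. The config
 * field counts allocation granules, not bytes: e.g. 5000 bytes of LDS on
 * GFX7+ (512-byte granules) yields align(5000, 512) / 512 = 10.
 */
static unsigned example_lds_granules(unsigned lds_size_bytes, int gfx7_plus)
{
   unsigned granule = gfx7_plus ? 512 : 256;
   return (lds_size_bytes + granule - 1) / granule; /* align() / granule */
}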
3540
3541 static unsigned si_get_shader_binary_size(struct si_screen *screen, struct si_shader *shader)
3542 {
3543 struct ac_rtld_binary rtld;
3544 si_shader_binary_open(screen, shader, &rtld);
3545 return rtld.exec_size;
3546 }
3547
3548 static bool si_get_external_symbol(void *data, const char *name, uint64_t *value)
3549 {
3550 uint64_t *scratch_va = data;
3551
3552 if (!strcmp(scratch_rsrc_dword0_symbol, name)) {
3553 *value = (uint32_t)*scratch_va;
3554 return true;
3555 }
3556 if (!strcmp(scratch_rsrc_dword1_symbol, name)) {
3557 /* Enable scratch coalescing. */
3558 *value = S_008F04_BASE_ADDRESS_HI(*scratch_va >> 32) |
3559 S_008F04_SWIZZLE_ENABLE(1);
3560 return true;
3561 }
3562
3563 return false;
3564 }
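
/* Editor's note: a standalone sketch of how the two relocated symbols above
 * encode the scratch buffer descriptor. The SWIZZLE_ENABLE bit position is
 * the editor's reading of the GCN buffer-resource word1 layout, not taken
 * from this file; the authoritative values come from the S_008F04_* macros.
 */
static void example_scratch_rsrc_dwords(unsigned long long scratch_va,
                                        unsigned *dword0, unsigned *dword1)
{
   *dword0 = (unsigned)scratch_va;          /* low 32 bits of the VA */
   *dword1 = (unsigned)(scratch_va >> 32) | /* BASE_ADDRESS_HI */
             (1u << 31);                    /* assumed SWIZZLE_ENABLE bit */
}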
3565
3566 bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader,
3567 uint64_t scratch_va)
3568 {
3569 struct ac_rtld_binary binary;
3570 if (!si_shader_binary_open(sscreen, shader, &binary))
3571 return false;
3572
3573 si_resource_reference(&shader->bo, NULL);
3574 shader->bo = si_aligned_buffer_create(&sscreen->b,
3575 sscreen->info.cpdma_prefetch_writes_memory ?
3576 0 : SI_RESOURCE_FLAG_READ_ONLY,
3577 PIPE_USAGE_IMMUTABLE,
3578 align(binary.rx_size, SI_CPDMA_ALIGNMENT),
3579 256);
3580 if (!shader->bo)
3581 return false;
3582
3583 /* Upload. */
3584 struct ac_rtld_upload_info u = {};
3585 u.binary = &binary;
3586 u.get_external_symbol = si_get_external_symbol;
3587 u.cb_data = &scratch_va;
3588 u.rx_va = shader->bo->gpu_address;
3589 u.rx_ptr = sscreen->ws->buffer_map(shader->bo->buf, NULL,
3590 PIPE_TRANSFER_READ_WRITE |
3591 PIPE_TRANSFER_UNSYNCHRONIZED |
3592 RADEON_TRANSFER_TEMPORARY);
3593 if (!u.rx_ptr)
3594 return false;
3595
3596 bool ok = ac_rtld_upload(&u);
3597
3598 sscreen->ws->buffer_unmap(shader->bo->buf);
3599 ac_rtld_close(&binary);
3600
3601 return ok;
3602 }
3603
3604 static void si_shader_dump_disassembly(struct si_screen *screen,
3605 const struct si_shader_binary *binary,
3606 enum pipe_shader_type shader_type,
3607 unsigned wave_size,
3608 struct pipe_debug_callback *debug,
3609 const char *name, FILE *file)
3610 {
3611 struct ac_rtld_binary rtld_binary;
3612
3613 if (!ac_rtld_open(&rtld_binary, (struct ac_rtld_open_info){
3614 .info = &screen->info,
3615 .shader_type = tgsi_processor_to_shader_stage(shader_type),
3616 .wave_size = wave_size,
3617 .num_parts = 1,
3618 .elf_ptrs = &binary->elf_buffer,
3619 .elf_sizes = &binary->elf_size }))
3620 return;
3621
3622 const char *disasm;
3623 size_t nbytes;
3624
3625 if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm, &nbytes))
3626 goto out;
3627
3628 if (nbytes > INT_MAX)
3629 goto out;
3630
3631 if (debug && debug->debug_message) {
3632 /* Very long debug messages are cut off, so send the
3633 * disassembly one line at a time. This causes more
3634 * overhead, but on the plus side it simplifies
3635 * parsing of resulting logs.
3636 */
3637 pipe_debug_message(debug, SHADER_INFO,
3638 "Shader Disassembly Begin");
3639
3640 uint64_t line = 0;
3641 while (line < nbytes) {
3642 int count = nbytes - line;
3643 const char *nl = memchr(disasm + line, '\n', nbytes - line);
3644 if (nl)
3645 count = nl - (disasm + line);
3646
3647 if (count) {
3648 pipe_debug_message(debug, SHADER_INFO,
3649 "%.*s", count, disasm + line);
3650 }
3651
3652 line += count + 1;
3653 }
3654
3655 pipe_debug_message(debug, SHADER_INFO,
3656 "Shader Disassembly End");
3657 }
3658
3659 if (file) {
3660 fprintf(file, "Shader %s disassembly:\n", name);
3661 fprintf(file, "%*s", (int)nbytes, disasm);
3662 }
3663
3664 out:
3665 ac_rtld_close(&rtld_binary);
3666 }
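
/* Editor's note: a self-contained sketch of the line-at-a-time emission
 * above, for a buffer that is not necessarily NUL-terminated. emit() is a
 * hypothetical stand-in for pipe_debug_message().
 */
static void example_emit_lines(const char *buf, unsigned nbytes,
                               void (*emit)(const char *line, int len))
{
   unsigned pos = 0;

   while (pos < nbytes) {
      unsigned end = pos;
      while (end < nbytes && buf[end] != '\n')
         end++;
      if (end > pos)
         emit(buf + pos, (int)(end - pos));
      pos = end + 1; /* skip the newline */
   }
}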
3667
3668 static void si_calculate_max_simd_waves(struct si_shader *shader)
3669 {
3670 struct si_screen *sscreen = shader->selector->screen;
3671 struct ac_shader_config *conf = &shader->config;
3672 unsigned num_inputs = shader->selector->info.num_inputs;
3673 unsigned lds_increment = sscreen->info.chip_class >= GFX7 ? 512 : 256;
3674 unsigned lds_per_wave = 0;
3675 unsigned max_simd_waves;
3676
3677 max_simd_waves = sscreen->info.max_wave64_per_simd;
3678
3679 /* Compute LDS usage for PS. */
3680 switch (shader->selector->type) {
3681 case PIPE_SHADER_FRAGMENT:
3682 /* The minimum usage per wave is (num_inputs * 48). The maximum
3683 * usage is (num_inputs * 48 * 16).
3684 * We can get anything in between and it varies between waves.
3685 *
3686 * The 48 bytes per input for a single primitive is equal to
3687 * 4 bytes/component * 4 components/input * 3 points.
3688 *
3689 * Other stages don't know the size at compile time or don't
3690 * allocate LDS per wave, but instead they do it per thread group.
3691 */
3692 lds_per_wave = conf->lds_size * lds_increment +
3693 align(num_inputs * 48, lds_increment);
3694 break;
3695 case PIPE_SHADER_COMPUTE:
3696 if (shader->selector) {
3697 unsigned max_workgroup_size =
3698 si_get_max_workgroup_size(shader);
3699 lds_per_wave = (conf->lds_size * lds_increment) /
3700 DIV_ROUND_UP(max_workgroup_size,
3701 sscreen->compute_wave_size);
3702 }
3703 break;
3704 default:;
3705 }
3706
3707 /* Compute the per-SIMD wave counts. */
3708 if (conf->num_sgprs) {
3709 max_simd_waves =
3710 MIN2(max_simd_waves,
3711 sscreen->info.num_physical_sgprs_per_simd / conf->num_sgprs);
3712 }
3713
3714 if (conf->num_vgprs) {
3715 /* Always print wave limits as Wave64, so that we can compare
3716 * Wave32 and Wave64 with shader-db fairly. */
3717 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd;
3718 max_simd_waves = MIN2(max_simd_waves, max_vgprs / conf->num_vgprs);
3719 }
3720
3721 /* LDS is 64KB per CU (4 SIMDs) on GFX6-9, which is 16KB per SIMD (usage above
3722 * 16KB makes some SIMDs unoccupied).
3723 *
3724 * On GFX10, LDS is 128KB in WGP mode and 64KB in CU mode. Assume the WGP mode is used.
3725 */
3726 unsigned max_lds_size = sscreen->info.chip_class >= GFX10 ? 128*1024 : 64*1024;
3727 unsigned max_lds_per_simd = max_lds_size / 4;
3728 if (lds_per_wave)
3729 max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);
3730
3731 shader->info.max_simd_waves = max_simd_waves;
3732 }
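
/* Editor's note: a condensed sketch of the occupancy computation above.
 * Waves per SIMD is the hardware maximum clamped by the SGPR, VGPR and LDS
 * budgets; all parameter names are hypothetical placeholders.
 */
static unsigned example_max_waves_per_simd(unsigned hw_max_waves,
                                           unsigned phys_sgprs, unsigned wave_sgprs,
                                           unsigned phys_vgprs, unsigned wave_vgprs,
                                           unsigned simd_lds, unsigned wave_lds)
{
   unsigned waves = hw_max_waves;

   if (wave_sgprs)
      waves = MIN2(waves, phys_sgprs / wave_sgprs);
   if (wave_vgprs)
      waves = MIN2(waves, phys_vgprs / wave_vgprs);
   if (wave_lds)
      waves = MIN2(waves, simd_lds / wave_lds);
   return waves;
}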
3733
3734 void si_shader_dump_stats_for_shader_db(struct si_screen *screen,
3735 struct si_shader *shader,
3736 struct pipe_debug_callback *debug)
3737 {
3738 const struct ac_shader_config *conf = &shader->config;
3739
3740 if (screen->options.debug_disassembly)
3741 si_shader_dump_disassembly(screen, &shader->binary,
3742 shader->selector->type,
3743 si_get_shader_wave_size(shader),
3744 debug, "main", NULL);
3745
3746 pipe_debug_message(debug, SHADER_INFO,
3747 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
3748 "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
3749 "Spilled VGPRs: %d PrivMem VGPRs: %d",
3750 conf->num_sgprs, conf->num_vgprs,
3751 si_get_shader_binary_size(screen, shader),
3752 conf->lds_size, conf->scratch_bytes_per_wave,
3753 shader->info.max_simd_waves, conf->spilled_sgprs,
3754 conf->spilled_vgprs, shader->info.private_mem_vgprs);
3755 }
3756
3757 static void si_shader_dump_stats(struct si_screen *sscreen,
3758 struct si_shader *shader,
3759 FILE *file,
3760 bool check_debug_option)
3761 {
3762 const struct ac_shader_config *conf = &shader->config;
3763
3764 if (!check_debug_option ||
3765 si_can_dump_shader(sscreen, shader->selector->type)) {
3766 if (shader->selector->type == PIPE_SHADER_FRAGMENT) {
3767 fprintf(file, "*** SHADER CONFIG ***\n"
3768 "SPI_PS_INPUT_ADDR = 0x%04x\n"
3769 "SPI_PS_INPUT_ENA = 0x%04x\n",
3770 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
3771 }
3772
3773 fprintf(file, "*** SHADER STATS ***\n"
3774 "SGPRS: %d\n"
3775 "VGPRS: %d\n"
3776 "Spilled SGPRs: %d\n"
3777 "Spilled VGPRs: %d\n"
3778 "Private memory VGPRs: %d\n"
3779 "Code Size: %d bytes\n"
3780 "LDS: %d blocks\n"
3781 "Scratch: %d bytes per wave\n"
3782 "Max Waves: %d\n"
3783 "********************\n\n\n",
3784 conf->num_sgprs, conf->num_vgprs,
3785 conf->spilled_sgprs, conf->spilled_vgprs,
3786 shader->info.private_mem_vgprs,
3787 si_get_shader_binary_size(sscreen, shader),
3788 conf->lds_size, conf->scratch_bytes_per_wave,
3789 shader->info.max_simd_waves);
3790 }
3791 }
3792
3793 const char *si_get_shader_name(const struct si_shader *shader)
3794 {
3795 switch (shader->selector->type) {
3796 case PIPE_SHADER_VERTEX:
3797 if (shader->key.as_es)
3798 return "Vertex Shader as ES";
3799 else if (shader->key.as_ls)
3800 return "Vertex Shader as LS";
3801 else if (shader->key.opt.vs_as_prim_discard_cs)
3802 return "Vertex Shader as Primitive Discard CS";
3803 else if (shader->key.as_ngg)
3804 return "Vertex Shader as ESGS";
3805 else
3806 return "Vertex Shader as VS";
3807 case PIPE_SHADER_TESS_CTRL:
3808 return "Tessellation Control Shader";
3809 case PIPE_SHADER_TESS_EVAL:
3810 if (shader->key.as_es)
3811 return "Tessellation Evaluation Shader as ES";
3812 else if (shader->key.as_ngg)
3813 return "Tessellation Evaluation Shader as ESGS";
3814 else
3815 return "Tessellation Evaluation Shader as VS";
3816 case PIPE_SHADER_GEOMETRY:
3817 if (shader->is_gs_copy_shader)
3818 return "GS Copy Shader as VS";
3819 else
3820 return "Geometry Shader";
3821 case PIPE_SHADER_FRAGMENT:
3822 return "Pixel Shader";
3823 case PIPE_SHADER_COMPUTE:
3824 return "Compute Shader";
3825 default:
3826 return "Unknown Shader";
3827 }
3828 }
3829
3830 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
3831 struct pipe_debug_callback *debug,
3832 FILE *file, bool check_debug_option)
3833 {
3834 enum pipe_shader_type shader_type = shader->selector->type;
3835
3836 if (!check_debug_option ||
3837 si_can_dump_shader(sscreen, shader_type))
3838 si_dump_shader_key(shader, file);
3839
3840 if (!check_debug_option && shader->binary.llvm_ir_string) {
3841 if (shader->previous_stage &&
3842 shader->previous_stage->binary.llvm_ir_string) {
3843 fprintf(file, "\n%s - previous stage - LLVM IR:\n\n",
3844 si_get_shader_name(shader));
3845 fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
3846 }
3847
3848 fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
3849 si_get_shader_name(shader));
3850 fprintf(file, "%s\n", shader->binary.llvm_ir_string);
3851 }
3852
3853 if (!check_debug_option ||
3854 (si_can_dump_shader(sscreen, shader_type) &&
3855 !(sscreen->debug_flags & DBG(NO_ASM)))) {
3856 unsigned wave_size = si_get_shader_wave_size(shader);
3857
3858 fprintf(file, "\n%s:\n", si_get_shader_name(shader));
3859
3860 if (shader->prolog)
3861 si_shader_dump_disassembly(sscreen, &shader->prolog->binary,
3862 shader_type, wave_size, debug, "prolog", file);
3863 if (shader->previous_stage)
3864 si_shader_dump_disassembly(sscreen, &shader->previous_stage->binary,
3865 shader_type, wave_size, debug, "previous stage", file);
3866 if (shader->prolog2)
3867 si_shader_dump_disassembly(sscreen, &shader->prolog2->binary,
3868 shader_type, wave_size, debug, "prolog2", file);
3869
3870 si_shader_dump_disassembly(sscreen, &shader->binary, shader_type,
3871 wave_size, debug, "main", file);
3872
3873 if (shader->epilog)
3874 si_shader_dump_disassembly(sscreen, &shader->epilog->binary,
3875 shader_type, wave_size, debug, "epilog", file);
3876 fprintf(file, "\n");
3877 }
3878
3879 si_shader_dump_stats(sscreen, shader, file, check_debug_option);
3880 }
3881
3882 static int si_compile_llvm(struct si_screen *sscreen,
3883 struct si_shader_binary *binary,
3884 struct ac_shader_config *conf,
3885 struct ac_llvm_compiler *compiler,
3886 LLVMModuleRef mod,
3887 struct pipe_debug_callback *debug,
3888 enum pipe_shader_type shader_type,
3889 unsigned wave_size,
3890 const char *name,
3891 bool less_optimized)
3892 {
3893 unsigned count = p_atomic_inc_return(&sscreen->num_compilations);
3894
3895 if (si_can_dump_shader(sscreen, shader_type)) {
3896 fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
3897
3898 if (!(sscreen->debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
3899 fprintf(stderr, "%s LLVM IR:\n\n", name);
3900 ac_dump_module(mod);
3901 fprintf(stderr, "\n");
3902 }
3903 }
3904
3905 if (sscreen->record_llvm_ir) {
3906 char *ir = LLVMPrintModuleToString(mod);
3907 binary->llvm_ir_string = strdup(ir);
3908 LLVMDisposeMessage(ir);
3909 }
3910
3911 if (!si_replace_shader(count, binary)) {
3912 unsigned r = si_llvm_compile(mod, binary, compiler, debug,
3913 less_optimized, wave_size);
3914 if (r)
3915 return r;
3916 }
3917
3918 struct ac_rtld_binary rtld;
3919 if (!ac_rtld_open(&rtld, (struct ac_rtld_open_info){
3920 .info = &sscreen->info,
3921 .shader_type = tgsi_processor_to_shader_stage(shader_type),
3922 .wave_size = wave_size,
3923 .num_parts = 1,
3924 .elf_ptrs = &binary->elf_buffer,
3925 .elf_sizes = &binary->elf_size }))
3926 return -1;
3927
3928 bool ok = ac_rtld_read_config(&rtld, conf);
3929 ac_rtld_close(&rtld);
3930 if (!ok)
3931 return -1;
3932
3933 /* Enable 64-bit and 16-bit denormals, because there is no performance
3934 * cost.
3935 *
3936 * If denormals are enabled, all floating-point output modifiers are
3937 * ignored.
3938 *
3939 * Don't enable denormals for 32-bit floats, because:
3940 * - Floating-point output modifiers would be ignored by the hw.
3941 * - Some opcodes don't support denormals, such as v_mad_f32. We would
3942 * have to stop using those.
3943 * - GFX6 & GFX7 would be very slow.
3944 */
3945 conf->float_mode |= V_00B028_FP_64_DENORMS;
3946
3947 return 0;
3948 }
3949
3950 /* Generate code for the hardware VS shader stage to go with a geometry shader */
3951 struct si_shader *
3952 si_generate_gs_copy_shader(struct si_screen *sscreen,
3953 struct ac_llvm_compiler *compiler,
3954 struct si_shader_selector *gs_selector,
3955 struct pipe_debug_callback *debug)
3956 {
3957 struct si_shader_context ctx;
3958 struct si_shader *shader;
3959 LLVMBuilderRef builder;
3960 struct si_shader_output_values outputs[SI_MAX_VS_OUTPUTS];
3961 struct si_shader_info *gsinfo = &gs_selector->info;
3962 int i;
3963
3965 shader = CALLOC_STRUCT(si_shader);
3966 if (!shader)
3967 return NULL;
3968
3969 /* We can leave the fence as permanently signaled because the GS copy
3970 * shader only becomes visible globally after it has been compiled. */
3971 util_queue_fence_init(&shader->ready);
3972
3973 shader->selector = gs_selector;
3974 shader->is_gs_copy_shader = true;
3975
3976 si_llvm_context_init(&ctx, sscreen, compiler,
3977 si_get_wave_size(sscreen, PIPE_SHADER_VERTEX, false, false));
3978 ctx.shader = shader;
3979 ctx.type = PIPE_SHADER_VERTEX;
3980
3981 builder = ctx.ac.builder;
3982
3983 create_function(&ctx);
3984 preload_ring_buffers(&ctx);
3985
3986 LLVMValueRef voffset =
3987 LLVMBuildMul(ctx.ac.builder, ctx.abi.vertex_id,
3988 LLVMConstInt(ctx.i32, 4, 0), "");
3989
3990 /* Fetch the vertex stream ID. */
3991 LLVMValueRef stream_id;
3992
3993 if (!sscreen->use_ngg_streamout && gs_selector->so.num_outputs)
3994 stream_id = si_unpack_param(&ctx, ctx.streamout_config, 24, 2);
3995 else
3996 stream_id = ctx.i32_0;
3997
3998 /* Fill in output information. */
3999 for (i = 0; i < gsinfo->num_outputs; ++i) {
4000 outputs[i].semantic_name = gsinfo->output_semantic_name[i];
4001 outputs[i].semantic_index = gsinfo->output_semantic_index[i];
4002
4003 for (int chan = 0; chan < 4; chan++) {
4004 outputs[i].vertex_stream[chan] =
4005 (gsinfo->output_streams[i] >> (2 * chan)) & 3;
4006 }
4007 }
4008
4009 LLVMBasicBlockRef end_bb;
4010 LLVMValueRef switch_inst;
4011
4012 end_bb = LLVMAppendBasicBlockInContext(ctx.ac.context, ctx.main_fn, "end");
4013 switch_inst = LLVMBuildSwitch(builder, stream_id, end_bb, 4);
4014
4015 for (int stream = 0; stream < 4; stream++) {
4016 LLVMBasicBlockRef bb;
4017 unsigned offset;
4018
4019 if (!gsinfo->num_stream_output_components[stream])
4020 continue;
4021
4022 if (stream > 0 && !gs_selector->so.num_outputs)
4023 continue;
4024
4025 bb = LLVMInsertBasicBlockInContext(ctx.ac.context, end_bb, "out");
4026 LLVMAddCase(switch_inst, LLVMConstInt(ctx.i32, stream, 0), bb);
4027 LLVMPositionBuilderAtEnd(builder, bb);
4028
4029 /* Fetch vertex data from GSVS ring */
4030 offset = 0;
4031 for (i = 0; i < gsinfo->num_outputs; ++i) {
4032 for (unsigned chan = 0; chan < 4; chan++) {
4033 if (!(gsinfo->output_usagemask[i] & (1 << chan)) ||
4034 outputs[i].vertex_stream[chan] != stream) {
4035 outputs[i].values[chan] = LLVMGetUndef(ctx.f32);
4036 continue;
4037 }
4038
4039 LLVMValueRef soffset = LLVMConstInt(ctx.i32,
4040 offset * gs_selector->gs_max_out_vertices * 16 * 4, 0);
4041 offset++;
4042
4043 outputs[i].values[chan] =
4044 ac_build_buffer_load(&ctx.ac,
4045 ctx.gsvs_ring[0], 1,
4046 ctx.i32_0, voffset,
4047 soffset, 0, ac_glc | ac_slc,
4048 true, false);
4049 }
4050 }
4051
4052 /* Streamout and exports. */
4053 if (!sscreen->use_ngg_streamout && gs_selector->so.num_outputs) {
4054 si_llvm_emit_streamout(&ctx, outputs,
4055 gsinfo->num_outputs,
4056 stream);
4057 }
4058
4059 if (stream == 0)
4060 si_llvm_export_vs(&ctx, outputs, gsinfo->num_outputs);
4061
4062 LLVMBuildBr(builder, end_bb);
4063 }
4064
4065 LLVMPositionBuilderAtEnd(builder, end_bb);
4066
4067 LLVMBuildRetVoid(ctx.ac.builder);
4068
4069 ctx.type = PIPE_SHADER_GEOMETRY; /* override for shader dumping */
4070 si_llvm_optimize_module(&ctx);
4071
4072 bool ok = false;
4073 if (si_compile_llvm(sscreen, &ctx.shader->binary,
4074 &ctx.shader->config, ctx.compiler,
4075 ctx.ac.module,
4076 debug, PIPE_SHADER_GEOMETRY, ctx.ac.wave_size,
4077 "GS Copy Shader", false) == 0) {
4078 if (si_can_dump_shader(sscreen, PIPE_SHADER_GEOMETRY))
4079 fprintf(stderr, "GS Copy Shader:\n");
4080 si_shader_dump(sscreen, ctx.shader, debug, stderr, true);
4081
4082 if (!ctx.shader->config.scratch_bytes_per_wave)
4083 ok = si_shader_binary_upload(sscreen, ctx.shader, 0);
4084 else
4085 ok = true;
4086 }
4087
4088 si_llvm_dispose(&ctx);
4089
4090 if (!ok) {
4091 FREE(shader);
4092 shader = NULL;
4093 } else {
4094 si_fix_resource_usage(sscreen, shader);
4095 }
4096 return shader;
4097 }
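
/* Editor's note: a sketch of the GSVS ring addressing used by the copy
 * shader above. Every (output, channel) pair belonging to the current
 * stream takes the next slot of gs_max_out_vertices * 16 * 4 bytes; the
 * per-vertex voffset is vertex_id * 4 and the swizzled descriptor spreads
 * the lanes. Hypothetical standalone helper.
 */
static unsigned example_gsvs_read_soffset(unsigned slot,
                                          unsigned gs_max_out_vertices)
{
   return slot * gs_max_out_vertices * 16 * 4;
}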
4098
4099 static void si_dump_shader_key_vs(const struct si_shader_key *key,
4100 const struct si_vs_prolog_bits *prolog,
4101 const char *prefix, FILE *f)
4102 {
4103 fprintf(f, " %s.instance_divisor_is_one = %u\n",
4104 prefix, prolog->instance_divisor_is_one);
4105 fprintf(f, " %s.instance_divisor_is_fetched = %u\n",
4106 prefix, prolog->instance_divisor_is_fetched);
4107 fprintf(f, " %s.unpack_instance_id_from_vertex_id = %u\n",
4108 prefix, prolog->unpack_instance_id_from_vertex_id);
4109 fprintf(f, " %s.ls_vgpr_fix = %u\n",
4110 prefix, prolog->ls_vgpr_fix);
4111
4112 fprintf(f, " mono.vs.fetch_opencode = %x\n", key->mono.vs_fetch_opencode);
4113 fprintf(f, " mono.vs.fix_fetch = {");
4114 for (int i = 0; i < SI_MAX_ATTRIBS; i++) {
4115 union si_vs_fix_fetch fix = key->mono.vs_fix_fetch[i];
4116 if (i)
4117 fprintf(f, ", ");
4118 if (!fix.bits)
4119 fprintf(f, "0");
4120 else
4121 fprintf(f, "%u.%u.%u.%u", fix.u.reverse, fix.u.log_size,
4122 fix.u.num_channels_m1, fix.u.format);
4123 }
4124 fprintf(f, "}\n");
4125 }
4126
4127 static void si_dump_shader_key(const struct si_shader *shader, FILE *f)
4128 {
4129 const struct si_shader_key *key = &shader->key;
4130 enum pipe_shader_type shader_type = shader->selector->type;
4131
4132 fprintf(f, "SHADER KEY\n");
4133
4134 switch (shader_type) {
4135 case PIPE_SHADER_VERTEX:
4136 si_dump_shader_key_vs(key, &key->part.vs.prolog,
4137 "part.vs.prolog", f);
4138 fprintf(f, " as_es = %u\n", key->as_es);
4139 fprintf(f, " as_ls = %u\n", key->as_ls);
4140 fprintf(f, " as_ngg = %u\n", key->as_ngg);
4141 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
4142 key->mono.u.vs_export_prim_id);
4143 fprintf(f, " opt.vs_as_prim_discard_cs = %u\n",
4144 key->opt.vs_as_prim_discard_cs);
4145 fprintf(f, " opt.cs_prim_type = %s\n",
4146 tgsi_primitive_names[key->opt.cs_prim_type]);
4147 fprintf(f, " opt.cs_indexed = %u\n",
4148 key->opt.cs_indexed);
4149 fprintf(f, " opt.cs_instancing = %u\n",
4150 key->opt.cs_instancing);
4151 fprintf(f, " opt.cs_primitive_restart = %u\n",
4152 key->opt.cs_primitive_restart);
4153 fprintf(f, " opt.cs_provoking_vertex_first = %u\n",
4154 key->opt.cs_provoking_vertex_first);
4155 fprintf(f, " opt.cs_need_correct_orientation = %u\n",
4156 key->opt.cs_need_correct_orientation);
4157 fprintf(f, " opt.cs_cull_front = %u\n",
4158 key->opt.cs_cull_front);
4159 fprintf(f, " opt.cs_cull_back = %u\n",
4160 key->opt.cs_cull_back);
4161 fprintf(f, " opt.cs_cull_z = %u\n",
4162 key->opt.cs_cull_z);
4163 fprintf(f, " opt.cs_halfz_clip_space = %u\n",
4164 key->opt.cs_halfz_clip_space);
4165 break;
4166
4167 case PIPE_SHADER_TESS_CTRL:
4168 if (shader->selector->screen->info.chip_class >= GFX9) {
4169 si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
4170 "part.tcs.ls_prolog", f);
4171 }
4172 fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
4173 fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
4174 break;
4175
4176 case PIPE_SHADER_TESS_EVAL:
4177 fprintf(f, " as_es = %u\n", key->as_es);
4178 fprintf(f, " as_ngg = %u\n", key->as_ngg);
4179 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
4180 key->mono.u.vs_export_prim_id);
4181 break;
4182
4183 case PIPE_SHADER_GEOMETRY:
4184 if (shader->is_gs_copy_shader)
4185 break;
4186
4187 if (shader->selector->screen->info.chip_class >= GFX9 &&
4188 key->part.gs.es->type == PIPE_SHADER_VERTEX) {
4189 si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
4190 "part.gs.vs_prolog", f);
4191 }
4192 fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
4193 fprintf(f, " part.gs.prolog.gfx9_prev_is_vs = %u\n", key->part.gs.prolog.gfx9_prev_is_vs);
4194 fprintf(f, " as_ngg = %u\n", key->as_ngg);
4195 break;
4196
4197 case PIPE_SHADER_COMPUTE:
4198 break;
4199
4200 case PIPE_SHADER_FRAGMENT:
4201 fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
4202 fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
4203 fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
4204 fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
4205 fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
4206 fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
4207 fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
4208 fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
4209 fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
4210 fprintf(f, " part.ps.prolog.samplemask_log_ps_iter = %u\n", key->part.ps.prolog.samplemask_log_ps_iter);
4211 fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
4212 fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
4213 fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
4214 fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
4215 fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
4216 fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
4217 fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
4218 fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
4219 fprintf(f, " mono.u.ps.interpolate_at_sample_force_center = %u\n", key->mono.u.ps.interpolate_at_sample_force_center);
4220 fprintf(f, " mono.u.ps.fbfetch_msaa = %u\n", key->mono.u.ps.fbfetch_msaa);
4221 fprintf(f, " mono.u.ps.fbfetch_is_1D = %u\n", key->mono.u.ps.fbfetch_is_1D);
4222 fprintf(f, " mono.u.ps.fbfetch_layered = %u\n", key->mono.u.ps.fbfetch_layered);
4223 break;
4224
4225 default:
4226 assert(0);
4227 }
4228
4229 if ((shader_type == PIPE_SHADER_GEOMETRY ||
4230 shader_type == PIPE_SHADER_TESS_EVAL ||
4231 shader_type == PIPE_SHADER_VERTEX) &&
4232 !key->as_es && !key->as_ls) {
4233 fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
4234 fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
4235 }
4236 }
4237
4238 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
4239 {
4240 struct si_shader *shader = ctx->shader;
4241 struct si_shader_info *info = &shader->selector->info;
4242
4243 if ((ctx->type != PIPE_SHADER_VERTEX &&
4244 ctx->type != PIPE_SHADER_TESS_EVAL) ||
4245 shader->key.as_ls ||
4246 shader->key.as_es)
4247 return;
4248
4249 ac_optimize_vs_outputs(&ctx->ac,
4250 ctx->main_fn,
4251 shader->info.vs_output_param_offset,
4252 info->num_outputs,
4253 &shader->info.nr_param_exports);
4254 }
4255
4256 static void si_init_exec_from_input(struct si_shader_context *ctx,
4257 struct ac_arg param, unsigned bitoffset)
4258 {
4259 LLVMValueRef args[] = {
4260 ac_get_arg(&ctx->ac, param),
4261 LLVMConstInt(ctx->i32, bitoffset, 0),
4262 };
4263 ac_build_intrinsic(&ctx->ac,
4264 "llvm.amdgcn.init.exec.from.input",
4265 ctx->voidt, args, 2, AC_FUNC_ATTR_CONVERGENT);
4266 }
4267
4268 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
4269 const struct si_vs_prolog_bits *key)
4270 {
4271 /* VGPR initialization fixup for Vega10 and Raven is always done in the
4272 * VS prolog. */
4273 return sel->vs_needs_prolog ||
4274 key->ls_vgpr_fix ||
4275 key->unpack_instance_id_from_vertex_id;
4276 }
4277
4278 LLVMValueRef si_is_es_thread(struct si_shader_context *ctx)
4279 {
4280 /* Return true if the current thread should run as an ES thread. */
4281 return LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
4282 ac_get_thread_id(&ctx->ac),
4283 si_unpack_param(ctx, ctx->merged_wave_info, 0, 8), "");
4284 }
4285
4286 LLVMValueRef si_is_gs_thread(struct si_shader_context *ctx)
4287 {
4288 /* Return true if the current thread should run as a GS thread. */
4289 return LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
4290 ac_get_thread_id(&ctx->ac),
4291 si_unpack_param(ctx, ctx->merged_wave_info, 8, 8), "");
4292 }
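
/* Editor's note: as the two helpers above read it, merged_wave_info packs
 * per-stage thread counts into bytes: bits [7:0] hold the ES count and bits
 * [15:8] the GS count, and a lane runs a stage iff its thread id is below
 * that count. A scalar sketch of the unpacking:
 */
static unsigned example_num_es_threads(unsigned merged_wave_info)
{
   return merged_wave_info & 0xff; /* si_unpack_param(ctx, .., 0, 8) */
}

static unsigned example_num_gs_threads(unsigned merged_wave_info)
{
   return (merged_wave_info >> 8) & 0xff; /* si_unpack_param(ctx, .., 8, 8) */
}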
4293
4294 static bool si_build_main_function(struct si_shader_context *ctx,
4295 struct nir_shader *nir, bool free_nir)
4296 {
4297 struct si_shader *shader = ctx->shader;
4298 struct si_shader_selector *sel = shader->selector;
4299
4300 switch (ctx->type) {
4301 case PIPE_SHADER_VERTEX:
4302 if (shader->key.as_ls)
4303 ctx->abi.emit_outputs = si_llvm_emit_ls_epilogue;
4304 else if (shader->key.as_es)
4305 ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
4306 else if (shader->key.opt.vs_as_prim_discard_cs)
4307 ctx->abi.emit_outputs = si_llvm_emit_prim_discard_cs_epilogue;
4308 else if (shader->key.as_ngg)
4309 ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
4310 else
4311 ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
4312 ctx->abi.load_base_vertex = get_base_vertex;
4313 break;
4314 case PIPE_SHADER_TESS_CTRL:
4315 ctx->abi.load_tess_varyings = si_nir_load_tcs_varyings;
4316 ctx->abi.load_tess_level = si_load_tess_level;
4317 ctx->abi.store_tcs_outputs = si_nir_store_output_tcs;
4318 ctx->abi.emit_outputs = si_llvm_emit_tcs_epilogue;
4319 ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
4320 break;
4321 case PIPE_SHADER_TESS_EVAL:
4322 ctx->abi.load_tess_varyings = si_nir_load_input_tes;
4323 ctx->abi.load_tess_coord = si_load_tess_coord;
4324 ctx->abi.load_tess_level = si_load_tess_level;
4325 ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
4326 if (shader->key.as_es)
4327 ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
4328 else if (shader->key.as_ngg)
4329 ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
4330 else
4331 ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
4332 break;
4333 case PIPE_SHADER_GEOMETRY:
4334 ctx->abi.load_inputs = si_nir_load_input_gs;
4335 ctx->abi.emit_vertex = si_llvm_emit_vertex;
4336 ctx->abi.emit_primitive = si_llvm_emit_primitive;
4337 ctx->abi.emit_outputs = si_llvm_emit_gs_epilogue;
4338 break;
4339 case PIPE_SHADER_FRAGMENT:
4340 si_llvm_init_ps_callbacks(ctx);
4341 break;
4342 case PIPE_SHADER_COMPUTE:
4343 ctx->abi.load_local_group_size = get_block_size;
4344 break;
4345 default:
4346 assert(!"Unsupported shader type");
4347 return false;
4348 }
4349
4350 ctx->abi.load_ubo = load_ubo;
4351 ctx->abi.load_ssbo = load_ssbo;
4352
4353 create_function(ctx);
4354 preload_ring_buffers(ctx);
4355
4356 if (ctx->type == PIPE_SHADER_TESS_CTRL &&
4357 sel->info.tessfactors_are_def_in_all_invocs) {
4358 for (unsigned i = 0; i < 6; i++) {
4359 ctx->invoc0_tess_factors[i] =
4360 ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
4361 }
4362 }
4363
4364 if (ctx->type == PIPE_SHADER_GEOMETRY) {
4365 for (unsigned i = 0; i < 4; i++) {
4366 ctx->gs_next_vertex[i] =
4367 ac_build_alloca(&ctx->ac, ctx->i32, "");
4368 }
4369 if (shader->key.as_ngg) {
4370 for (unsigned i = 0; i < 4; ++i) {
4371 ctx->gs_curprim_verts[i] =
4372 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
4373 ctx->gs_generated_prims[i] =
4374 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
4375 }
4376
4377 unsigned scratch_size = 8;
4378 if (sel->so.num_outputs)
4379 scratch_size = 44;
4380
4381 LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, scratch_size);
4382 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
4383 ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
4384 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(ai32));
4385 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
4386
4387 ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(ctx->ac.module,
4388 LLVMArrayType(ctx->i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
4389 LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
4390 LLVMSetAlignment(ctx->gs_ngg_emit, 4);
4391 }
4392 }
4393
4394 if (ctx->type != PIPE_SHADER_GEOMETRY &&
4395 (shader->key.as_ngg && !shader->key.as_es)) {
4396 /* Unconditionally declare scratch space base for streamout and
4397 * vertex compaction. Whether space is actually allocated is
4398 * determined during linking / PM4 creation.
4399 *
4400 * Add an extra dword per vertex to ensure an odd stride, which
4401 * avoids bank conflicts for SoA accesses.
4402 */
4403 if (!gfx10_is_ngg_passthrough(shader))
4404 declare_esgs_ring(ctx);
4405
4406 /* This is really only needed when streamout and/or vertex
4407 * compaction is enabled.
4408 */
4409 if (sel->so.num_outputs && !ctx->gs_ngg_scratch) {
4410 LLVMTypeRef asi32 = LLVMArrayType(ctx->i32, 8);
4411 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
4412 asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
4413 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(asi32));
4414 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
4415 }
4416 }
4417
4418 /* For GFX9 merged shaders:
4419 * - Set EXEC for the first shader. If the prolog is present, set
4420 * EXEC there instead.
4421 * - Add a barrier before the second shader.
4422 * - In the second shader, reset EXEC to ~0 and wrap the main part in
4423 * an if-statement. This is required for correctness in geometry
4424 * shaders, to ensure that empty GS waves do not send GS_EMIT and
4425 * GS_CUT messages.
4426 *
4427 * For monolithic merged shaders, the first shader is wrapped in an
4428 * if-block together with its prolog in si_build_wrapper_function.
4429 *
4430 * NGG vertex and tess eval shaders running as the last
4431 * vertex/geometry stage handle execution explicitly using
4432 * if-statements.
4433 */
4434 if (ctx->screen->info.chip_class >= GFX9) {
4435 if (!shader->is_monolithic &&
4436 (shader->key.as_es || shader->key.as_ls) &&
4437 (ctx->type == PIPE_SHADER_TESS_EVAL ||
4438 (ctx->type == PIPE_SHADER_VERTEX &&
4439 !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog)))) {
4440 si_init_exec_from_input(ctx,
4441 ctx->merged_wave_info, 0);
4442 } else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
4443 ctx->type == PIPE_SHADER_GEOMETRY ||
4444 (shader->key.as_ngg && !shader->key.as_es)) {
4445 LLVMValueRef thread_enabled;
4446 bool nested_barrier;
4447
4448 if (!shader->is_monolithic ||
4449 (ctx->type == PIPE_SHADER_TESS_EVAL &&
4450 (shader->key.as_ngg && !shader->key.as_es)))
4451 ac_init_exec_full_mask(&ctx->ac);
4452
4453 if (ctx->type == PIPE_SHADER_TESS_CTRL ||
4454 ctx->type == PIPE_SHADER_GEOMETRY) {
4455 if (ctx->type == PIPE_SHADER_GEOMETRY && shader->key.as_ngg) {
4456 gfx10_ngg_gs_emit_prologue(ctx);
4457 nested_barrier = false;
4458 } else {
4459 nested_barrier = true;
4460 }
4461
4462 thread_enabled = si_is_gs_thread(ctx);
4463 } else {
4464 thread_enabled = si_is_es_thread(ctx);
4465 nested_barrier = false;
4466 }
4467
4468 ctx->merged_wrap_if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
4469 ctx->merged_wrap_if_label = 11500;
4470 ac_build_ifcc(&ctx->ac, thread_enabled, ctx->merged_wrap_if_label);
4471
4472 if (nested_barrier) {
4473 /* Execute a barrier before the second shader in
4474 * a merged shader.
4475 *
4476 * Execute the barrier inside the conditional block,
4477 * so that empty waves can jump directly to s_endpgm,
4478 * which will also signal the barrier.
4479 *
4480 * This is possible on GFX9, because an empty wave
4481 * for the second shader does not participate in
4482 * the epilogue. With NGG, empty waves may still
4483 * be required to export data (e.g. GS output vertices),
4484 * so we cannot let them exit early.
4485 *
4486 * If the shader is TCS and the TCS epilog is present
4487 * and contains a barrier, it will wait there and then
4488 * reach s_endpgm.
4489 */
4490 si_llvm_emit_barrier(ctx);
4491 }
4492 }
4493 }
4494
4495 if (sel->force_correct_derivs_after_kill) {
4496 ctx->postponed_kill = ac_build_alloca_undef(&ctx->ac, ctx->i1, "");
4497 /* true = don't kill. */
4498 LLVMBuildStore(ctx->ac.builder, ctx->i1true,
4499 ctx->postponed_kill);
4500 }
4501
4502 bool success = si_nir_build_llvm(ctx, nir);
4503 if (free_nir)
4504 ralloc_free(nir);
4505 if (!success) {
4506 fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
4507 return false;
4508 }
4509
4510 si_llvm_build_ret(ctx, ctx->return_value);
4511 return true;
4512 }
4513
4514 /**
4515 * Compute the VS prolog key, which contains all the information needed to
4516 * build the VS prolog function, and set shader->info bits where needed.
4517 *
4518 * \param info Shader info of the vertex shader.
4519 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
4520 * \param prolog_key Key of the VS prolog
4521 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
4522 * \param key Output shader part key.
4523 */
4524 static void si_get_vs_prolog_key(const struct si_shader_info *info,
4525 unsigned num_input_sgprs,
4526 const struct si_vs_prolog_bits *prolog_key,
4527 struct si_shader *shader_out,
4528 union si_shader_part_key *key)
4529 {
4530 memset(key, 0, sizeof(*key));
4531 key->vs_prolog.states = *prolog_key;
4532 key->vs_prolog.num_input_sgprs = num_input_sgprs;
4533 key->vs_prolog.num_inputs = info->num_inputs;
4534 key->vs_prolog.as_ls = shader_out->key.as_ls;
4535 key->vs_prolog.as_es = shader_out->key.as_es;
4536 key->vs_prolog.as_ngg = shader_out->key.as_ngg;
4537
4538 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
4539 key->vs_prolog.as_ls = 1;
4540 key->vs_prolog.num_merged_next_stage_vgprs = 2;
4541 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
4542 key->vs_prolog.as_es = 1;
4543 key->vs_prolog.num_merged_next_stage_vgprs = 5;
4544 } else if (shader_out->key.as_ngg) {
4545 key->vs_prolog.num_merged_next_stage_vgprs = 5;
4546 }
4547
4548 /* Enable loading the InstanceID VGPR. */
4549 uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);
4550
4551 if ((key->vs_prolog.states.instance_divisor_is_one |
4552 key->vs_prolog.states.instance_divisor_is_fetched) & input_mask)
4553 shader_out->info.uses_instanceid = true;
4554 }
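
/* Editor's note: a scalar sketch of the uses_instanceid test above.
 * input_mask has one bit per vertex input (u_bit_consecutive); if any input
 * uses an instance divisor, the prolog must load InstanceID. Names are
 * hypothetical.
 */
static int example_needs_instance_id(unsigned num_inputs,
                                     unsigned divisor_is_one_mask,
                                     unsigned divisor_is_fetched_mask)
{
   unsigned input_mask =
      num_inputs >= 32 ? ~0u : (1u << num_inputs) - 1;

   return ((divisor_is_one_mask | divisor_is_fetched_mask) & input_mask) != 0;
}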
4555
4556 /**
4557 * Build the GS prolog function. Rotate the input vertices for triangle strips
4558 * with adjacency.
4559 */
4560 static void si_build_gs_prolog_function(struct si_shader_context *ctx,
4561 union si_shader_part_key *key)
4562 {
4563 unsigned num_sgprs, num_vgprs;
4564 LLVMBuilderRef builder = ctx->ac.builder;
4565 LLVMTypeRef returns[AC_MAX_ARGS];
4566 LLVMValueRef func, ret;
4567
4568 memset(&ctx->args, 0, sizeof(ctx->args));
4569
4570 if (ctx->screen->info.chip_class >= GFX9) {
4571 if (key->gs_prolog.states.gfx9_prev_is_vs)
4572 num_sgprs = 8 + GFX9_VSGS_NUM_USER_SGPR;
4573 else
4574 num_sgprs = 8 + GFX9_TESGS_NUM_USER_SGPR;
4575 num_vgprs = 5; /* ES inputs are not needed by GS */
4576 } else {
4577 num_sgprs = GFX6_GS_NUM_USER_SGPR + 2;
4578 num_vgprs = 8;
4579 }
4580
4581 for (unsigned i = 0; i < num_sgprs; ++i) {
4582 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
4583 returns[i] = ctx->i32;
4584 }
4585
4586 for (unsigned i = 0; i < num_vgprs; ++i) {
4587 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
4588 returns[num_sgprs + i] = ctx->f32;
4589 }
4590
4591 /* Create the function. */
4592 si_llvm_create_func(ctx, "gs_prolog", returns, num_sgprs + num_vgprs, 0);
4593 func = ctx->main_fn;
4594
4595 /* Set the full EXEC mask for the prolog, because we are only fiddling
4596 * with registers here. The main shader part will set the correct EXEC
4597 * mask.
4598 */
4599 if (ctx->screen->info.chip_class >= GFX9 && !key->gs_prolog.is_monolithic)
4600 ac_init_exec_full_mask(&ctx->ac);
4601
4602 /* Copy inputs to outputs. This should be a no-op, as the registers match,
4603 * but it will prevent the compiler from overwriting them unintentionally.
4604 */
4605 ret = ctx->return_value;
4606 for (unsigned i = 0; i < num_sgprs; i++) {
4607 LLVMValueRef p = LLVMGetParam(func, i);
4608 ret = LLVMBuildInsertValue(builder, ret, p, i, "");
4609 }
4610 for (unsigned i = 0; i < num_vgprs; i++) {
4611 LLVMValueRef p = LLVMGetParam(func, num_sgprs + i);
4612 p = ac_to_float(&ctx->ac, p);
4613 ret = LLVMBuildInsertValue(builder, ret, p, num_sgprs + i, "");
4614 }
4615
4616 if (key->gs_prolog.states.tri_strip_adj_fix) {
4617 /* Remap the input vertices for every other primitive. */
4618 const struct ac_arg gfx6_vtx_params[6] = {
4619 { .used = true, .arg_index = num_sgprs },
4620 { .used = true, .arg_index = num_sgprs + 1 },
4621 { .used = true, .arg_index = num_sgprs + 3 },
4622 { .used = true, .arg_index = num_sgprs + 4 },
4623 { .used = true, .arg_index = num_sgprs + 5 },
4624 { .used = true, .arg_index = num_sgprs + 6 },
4625 };
4626 const struct ac_arg gfx9_vtx_params[3] = {
4627 { .used = true, .arg_index = num_sgprs },
4628 { .used = true, .arg_index = num_sgprs + 1 },
4629 { .used = true, .arg_index = num_sgprs + 4 },
4630 };
4631 LLVMValueRef vtx_in[6], vtx_out[6];
4632 LLVMValueRef prim_id, rotate;
4633
4634 if (ctx->screen->info.chip_class >= GFX9) {
4635 for (unsigned i = 0; i < 3; i++) {
4636 vtx_in[i*2] = si_unpack_param(ctx, gfx9_vtx_params[i], 0, 16);
4637 vtx_in[i*2+1] = si_unpack_param(ctx, gfx9_vtx_params[i], 16, 16);
4638 }
4639 } else {
4640 for (unsigned i = 0; i < 6; i++)
4641 vtx_in[i] = ac_get_arg(&ctx->ac, gfx6_vtx_params[i]);
4642 }
4643
4644 prim_id = LLVMGetParam(func, num_sgprs + 2);
4645 rotate = LLVMBuildTrunc(builder, prim_id, ctx->i1, "");
4646
4647 for (unsigned i = 0; i < 6; ++i) {
4648 LLVMValueRef base, rotated;
4649 base = vtx_in[i];
4650 rotated = vtx_in[(i + 4) % 6];
4651 vtx_out[i] = LLVMBuildSelect(builder, rotate, rotated, base, "");
4652 }
4653
4654 if (ctx->screen->info.chip_class >= GFX9) {
4655 for (unsigned i = 0; i < 3; i++) {
4656 LLVMValueRef hi, out;
4657
4658 hi = LLVMBuildShl(builder, vtx_out[i*2+1],
4659 LLVMConstInt(ctx->i32, 16, 0), "");
4660 out = LLVMBuildOr(builder, vtx_out[i*2], hi, "");
4661 out = ac_to_float(&ctx->ac, out);
4662 ret = LLVMBuildInsertValue(builder, ret, out,
4663 gfx9_vtx_params[i].arg_index, "");
4664 }
4665 } else {
4666 for (unsigned i = 0; i < 6; i++) {
4667 LLVMValueRef out;
4668
4669 out = ac_to_float(&ctx->ac, vtx_out[i]);
4670 ret = LLVMBuildInsertValue(builder, ret, out,
4671 gfx6_vtx_params[i].arg_index, "");
4672 }
4673 }
4674 }
4675
4676 LLVMBuildRet(builder, ret);
4677 }
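
/* Editor's note: a scalar sketch of the adjacency fix above. For every
 * other primitive (odd PrimitiveID) the six incoming vertex indices are
 * rotated by four positions so they arrive in the order the GS expects.
 */
static void example_rotate_tri_strip_adj(const unsigned vtx_in[6],
                                         unsigned vtx_out[6],
                                         unsigned prim_id)
{
   unsigned rotate = prim_id & 1; /* the trunc-to-i1 above */

   for (unsigned i = 0; i < 6; i++)
      vtx_out[i] = rotate ? vtx_in[(i + 4) % 6] : vtx_in[i];
}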
4678
4679 /**
4680 * Given a list of shader part functions, build a wrapper function that
4681 * runs them in sequence to form a monolithic shader.
4682 */
4683 void si_build_wrapper_function(struct si_shader_context *ctx, LLVMValueRef *parts,
4684 unsigned num_parts, unsigned main_part,
4685 unsigned next_shader_first_part)
4686 {
4687 LLVMBuilderRef builder = ctx->ac.builder;
4688 /* PS epilog has one arg per color component; GFX9 merged shader
4689 * prologs need to forward 40 SGPRs.
4690 */
4691 LLVMValueRef initial[AC_MAX_ARGS], out[AC_MAX_ARGS];
4692 LLVMTypeRef function_type;
4693 unsigned num_first_params;
4694 unsigned num_out, initial_num_out;
4695 ASSERTED unsigned num_out_sgpr; /* used in debug checks */
4696 ASSERTED unsigned initial_num_out_sgpr; /* used in debug checks */
4697 unsigned num_sgprs, num_vgprs;
4698 unsigned gprs;
4699
4700 memset(&ctx->args, 0, sizeof(ctx->args));
4701
4702 for (unsigned i = 0; i < num_parts; ++i) {
4703 ac_add_function_attr(ctx->ac.context, parts[i], -1,
4704 AC_FUNC_ATTR_ALWAYSINLINE);
4705 LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
4706 }
4707
4708 /* The parameters of the wrapper function correspond to those of the
4709 * first part in terms of SGPRs and VGPRs, but we use the types of the
4710 * main part to get the right types. This is relevant for the
4711 * dereferenceable attribute on descriptor table pointers.
4712 */
4713 num_sgprs = 0;
4714 num_vgprs = 0;
4715
4716 function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
4717 num_first_params = LLVMCountParamTypes(function_type);
4718
4719 for (unsigned i = 0; i < num_first_params; ++i) {
4720 LLVMValueRef param = LLVMGetParam(parts[0], i);
4721
4722 if (ac_is_sgpr_param(param)) {
4723 assert(num_vgprs == 0);
4724 num_sgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
4725 } else {
4726 num_vgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
4727 }
4728 }
4729
4730 gprs = 0;
4731 while (gprs < num_sgprs + num_vgprs) {
4732 LLVMValueRef param = LLVMGetParam(parts[main_part], ctx->args.arg_count);
4733 LLVMTypeRef type = LLVMTypeOf(param);
4734 unsigned size = ac_get_type_size(type) / 4;
4735
4736 /* This is going to get cast anyway, so we don't have to
4737 * have the exact same type. But we do have to preserve the
4738 * pointer-ness so that LLVM knows about it.
4739 */
4740 enum ac_arg_type arg_type = AC_ARG_INT;
4741 if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) {
4742 arg_type = AC_ARG_CONST_PTR;
4743 }
4744
4745 ac_add_arg(&ctx->args, gprs < num_sgprs ? AC_ARG_SGPR : AC_ARG_VGPR,
4746 size, arg_type, NULL);
4747
4748 assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
4749 assert(gprs + size <= num_sgprs + num_vgprs &&
4750 (gprs >= num_sgprs || gprs + size <= num_sgprs));
4751
4752 gprs += size;
4753 }
4754
4755 /* Prepare the return type. */
4756 unsigned num_returns = 0;
4757 LLVMTypeRef returns[AC_MAX_ARGS], last_func_type, return_type;
4758
4759 last_func_type = LLVMGetElementType(LLVMTypeOf(parts[num_parts - 1]));
4760 return_type = LLVMGetReturnType(last_func_type);
4761
4762 switch (LLVMGetTypeKind(return_type)) {
4763 case LLVMStructTypeKind:
4764 num_returns = LLVMCountStructElementTypes(return_type);
4765 assert(num_returns <= ARRAY_SIZE(returns));
4766 LLVMGetStructElementTypes(return_type, returns);
4767 break;
4768 case LLVMVoidTypeKind:
4769 break;
4770 default:
4771 unreachable("unexpected type");
4772 }
4773
4774 si_llvm_create_func(ctx, "wrapper", returns, num_returns,
4775 si_get_max_workgroup_size(ctx->shader));
4776
4777 if (si_is_merged_shader(ctx))
4778 ac_init_exec_full_mask(&ctx->ac);
4779
4780 /* Record the arguments of the function as if they were an output of
4781 * a previous part.
4782 */
4783 num_out = 0;
4784 num_out_sgpr = 0;
4785
4786 for (unsigned i = 0; i < ctx->args.arg_count; ++i) {
4787 LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
4788 LLVMTypeRef param_type = LLVMTypeOf(param);
4789 LLVMTypeRef out_type = ctx->args.args[i].file == AC_ARG_SGPR ? ctx->i32 : ctx->f32;
4790 unsigned size = ac_get_type_size(param_type) / 4;
4791
4792 if (size == 1) {
4793 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
4794 param = LLVMBuildPtrToInt(builder, param, ctx->i32, "");
4795 param_type = ctx->i32;
4796 }
4797
4798 if (param_type != out_type)
4799 param = LLVMBuildBitCast(builder, param, out_type, "");
4800 out[num_out++] = param;
4801 } else {
4802 LLVMTypeRef vector_type = LLVMVectorType(out_type, size);
4803
4804 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
4805 param = LLVMBuildPtrToInt(builder, param, ctx->i64, "");
4806 param_type = ctx->i64;
4807 }
4808
4809 if (param_type != vector_type)
4810 param = LLVMBuildBitCast(builder, param, vector_type, "");
4811
4812 for (unsigned j = 0; j < size; ++j)
4813 out[num_out++] = LLVMBuildExtractElement(
4814 builder, param, LLVMConstInt(ctx->i32, j, 0), "");
4815 }
4816
4817 if (ctx->args.args[i].file == AC_ARG_SGPR)
4818 num_out_sgpr = num_out;
4819 }
4820
4821 memcpy(initial, out, sizeof(out));
4822 initial_num_out = num_out;
4823 initial_num_out_sgpr = num_out_sgpr;
4824
4825 /* Now chain the parts. */
4826 LLVMValueRef ret = NULL;
4827 for (unsigned part = 0; part < num_parts; ++part) {
4828 LLVMValueRef in[AC_MAX_ARGS];
4829 LLVMTypeRef ret_type;
4830 unsigned out_idx = 0;
4831 unsigned num_params = LLVMCountParams(parts[part]);
4832
4833 /* Merged shaders are executed conditionally depending
4834 * on the number of enabled threads passed in the input SGPRs. */
4835 if (is_multi_part_shader(ctx) && part == 0) {
4836 LLVMValueRef ena, count = initial[3];
4837
4838 count = LLVMBuildAnd(builder, count,
4839 LLVMConstInt(ctx->i32, 0x7f, 0), "");
4840 ena = LLVMBuildICmp(builder, LLVMIntULT,
4841 ac_get_thread_id(&ctx->ac), count, "");
4842 ac_build_ifcc(&ctx->ac, ena, 6506);
4843 }
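/* Roughly, for the first half of a merged shader the block above emits:
 *   if (thread_id < (merged_wave_info & 0x7f)) { first_part(); }
 * The matching endif for label 6506 is emitted after the call below. */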
4844
4845 /* Derive arguments for the next part from outputs of the
4846 * previous one.
4847 */
4848 for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
4849 LLVMValueRef param;
4850 LLVMTypeRef param_type;
4851 bool is_sgpr;
4852 unsigned param_size;
4853 LLVMValueRef arg = NULL;
4854
4855 param = LLVMGetParam(parts[part], param_idx);
4856 param_type = LLVMTypeOf(param);
4857 param_size = ac_get_type_size(param_type) / 4;
4858 is_sgpr = ac_is_sgpr_param(param);
4859
4860 if (is_sgpr) {
4861 ac_add_function_attr(ctx->ac.context, parts[part],
4862 param_idx + 1, AC_FUNC_ATTR_INREG);
4863 } else if (out_idx < num_out_sgpr) {
4864 /* Skip returned SGPRs that the current part doesn't
4865 * declare as inputs. */
4866 out_idx = num_out_sgpr;
4867 }
4868
4869 assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));
4870
4871 if (param_size == 1)
4872 arg = out[out_idx];
4873 else
4874 arg = ac_build_gather_values(&ctx->ac, &out[out_idx], param_size);
4875
4876 if (LLVMTypeOf(arg) != param_type) {
4877 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
4878 if (LLVMGetPointerAddressSpace(param_type) ==
4879 AC_ADDR_SPACE_CONST_32BIT) {
4880 arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");
4881 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
4882 } else {
4883 arg = LLVMBuildBitCast(builder, arg, ctx->i64, "");
4884 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
4885 }
4886 } else {
4887 arg = LLVMBuildBitCast(builder, arg, param_type, "");
4888 }
4889 }
4890
4891 in[param_idx] = arg;
4892 out_idx += param_size;
4893 }
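/* Note that pointer params were rebuilt from the flat i32/i64 outputs via
 * inttoptr above; 32-bit const pointers consume only one dword. */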
4894
4895 ret = ac_build_call(&ctx->ac, parts[part], in, num_params);
4896
4897 if (is_multi_part_shader(ctx) &&
4898 part + 1 == next_shader_first_part) {
4899 ac_build_endif(&ctx->ac, 6506);
4900
4901 /* The second half of the merged shader should use
4902 * the inputs from the top-level (wrapper) function,
4903 * not the return value from the last call.
4904 *
4905 * That's because the last call was executed
4906 * conditionally, so we can't consume its result in
4907 * the main block.
4908 */
4909 memcpy(out, initial, sizeof(initial));
4910 num_out = initial_num_out;
4911 num_out_sgpr = initial_num_out_sgpr;
4912 continue;
4913 }
4914
4915 /* Extract the returned GPRs. */
4916 ret_type = LLVMTypeOf(ret);
4917 num_out = 0;
4918 num_out_sgpr = 0;
4919
4920 if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
4921 assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);
4922
4923 unsigned ret_size = LLVMCountStructElementTypes(ret_type);
4924
4925 for (unsigned i = 0; i < ret_size; ++i) {
4926 LLVMValueRef val =
4927 LLVMBuildExtractValue(builder, ret, i, "");
4928
4929 assert(num_out < ARRAY_SIZE(out));
4930 out[num_out++] = val;
4931
4932 if (LLVMTypeOf(val) == ctx->i32) {
4933 assert(num_out_sgpr + 1 == num_out);
4934 num_out_sgpr = num_out;
4935 }
4936 }
4937 }
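/* Returned SGPRs are i32 and always precede the f32 VGPRs, which is why
 * seeing an i32 element here bumps num_out_sgpr. */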
4938 }
4939
4940 /* Return the value from the last part. */
4941 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
4942 LLVMBuildRetVoid(builder);
4943 else
4944 LLVMBuildRet(builder, ret);
4945 }
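/* Usage sketch (see si_compile_shader below): for a monolithic GFX9 TCS,
 * parts[] = { vs_prolog (optional), ls_main, tcs_main, tcs_epilog }, with
 * main_part pointing at ls_main and next_shader_first_part at tcs_main. */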
4946
4947 static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
4948 struct si_shader_selector *sel)
4949 {
4950 if (!compiler->low_opt_passes)
4951 return false;
4952
4953 /* Assume a slow CPU. */
4954 assert(!sel->screen->info.has_dedicated_vram &&
4955 sel->screen->info.chip_class <= GFX8);
4956
4957 /* For a crazy dEQP test containing 2597 memory opcodes, mostly
4958 * buffer stores. */
4959 return sel->type == PIPE_SHADER_COMPUTE &&
4960 sel->info.num_memory_instructions > 1000;
4961 }
4962
4963 static struct nir_shader *get_nir_shader(struct si_shader_selector *sel,
4964 bool *free_nir)
4965 {
4966 *free_nir = false;
4967
4968 if (sel->nir) {
4969 return sel->nir;
4970 } else if (sel->nir_binary) {
4971 struct pipe_screen *screen = &sel->screen->b;
4972 const void *options =
4973 screen->get_compiler_options(screen, PIPE_SHADER_IR_NIR,
4974 sel->type);
4975
4976 struct blob_reader blob_reader;
4977 blob_reader_init(&blob_reader, sel->nir_binary, sel->nir_size);
4978 *free_nir = true;
4979 return nir_deserialize(NULL, options, &blob_reader);
4980 }
4981 return NULL;
4982 }
4983
4984 int si_compile_shader(struct si_screen *sscreen,
4985 struct ac_llvm_compiler *compiler,
4986 struct si_shader *shader,
4987 struct pipe_debug_callback *debug)
4988 {
4989 struct si_shader_selector *sel = shader->selector;
4990 struct si_shader_context ctx;
4991 bool free_nir;
4992 struct nir_shader *nir = get_nir_shader(sel, &free_nir);
4993 int r = -1;
4994
4995 /* Dump NIR before doing NIR->LLVM conversion in case the
4996 * conversion fails. */
4997 if (si_can_dump_shader(sscreen, sel->type) &&
4998 !(sscreen->debug_flags & DBG(NO_NIR))) {
4999 nir_print_shader(nir, stderr);
5000 si_dump_streamout(&sel->so);
5001 }
5002
5003 si_llvm_context_init(&ctx, sscreen, compiler, si_get_shader_wave_size(shader));
5004 si_llvm_context_set_ir(&ctx, shader);
5005
5006 memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
5007 sizeof(shader->info.vs_output_param_offset));
5008
5009 shader->info.uses_instanceid = sel->info.uses_instanceid;
5010
5011 if (!si_build_main_function(&ctx, nir, free_nir)) {
5012 si_llvm_dispose(&ctx);
5013 return -1;
5014 }
5015
5016 if (shader->is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
5017 LLVMValueRef parts[2];
5018 bool need_prolog = si_vs_needs_prolog(sel, &shader->key.part.vs.prolog);
5019
5020 parts[1] = ctx.main_fn;
5021
5022 if (need_prolog) {
5023 union si_shader_part_key prolog_key;
5024 si_get_vs_prolog_key(&sel->info,
5025 shader->info.num_input_sgprs,
5026 &shader->key.part.vs.prolog,
5027 shader, &prolog_key);
5028 prolog_key.vs_prolog.is_monolithic = true;
5029 si_build_vs_prolog_function(&ctx, &prolog_key);
5030 parts[0] = ctx.main_fn;
5031 }
5032
5033 si_build_wrapper_function(&ctx, parts + !need_prolog,
5034 1 + need_prolog, need_prolog, 0);
5035
5036 if (ctx.shader->key.opt.vs_as_prim_discard_cs)
5037 si_build_prim_discard_compute_shader(&ctx);
5038 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
5039 if (sscreen->info.chip_class >= GFX9) {
5040 struct si_shader_selector *ls = shader->key.part.tcs.ls;
5041 LLVMValueRef parts[4];
5042 bool vs_needs_prolog =
5043 si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog);
5044
5045 /* TCS main part */
5046 parts[2] = ctx.main_fn;
5047
5048 /* TCS epilog */
5049 union si_shader_part_key tcs_epilog_key;
5050 memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
5051 tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
5052 si_build_tcs_epilog_function(&ctx, &tcs_epilog_key);
5053 parts[3] = ctx.main_fn;
5054
5055 /* VS as LS main part */
5056 nir = get_nir_shader(ls, &free_nir);
5057 struct si_shader shader_ls = {};
5058 shader_ls.selector = ls;
5059 shader_ls.key.as_ls = 1;
5060 shader_ls.key.mono = shader->key.mono;
5061 shader_ls.key.opt = shader->key.opt;
5062 shader_ls.is_monolithic = true;
5063 si_llvm_context_set_ir(&ctx, &shader_ls);
5064
5065 if (!si_build_main_function(&ctx, nir, free_nir)) {
5066 si_llvm_dispose(&ctx);
5067 return -1;
5068 }
5069 shader->info.uses_instanceid |= ls->info.uses_instanceid;
5070 parts[1] = ctx.main_fn;
5071
5072 /* LS prolog */
5073 if (vs_needs_prolog) {
5074 union si_shader_part_key vs_prolog_key;
5075 si_get_vs_prolog_key(&ls->info,
5076 shader_ls.info.num_input_sgprs,
5077 &shader->key.part.tcs.ls_prolog,
5078 shader, &vs_prolog_key);
5079 vs_prolog_key.vs_prolog.is_monolithic = true;
5080 si_build_vs_prolog_function(&ctx, &vs_prolog_key);
5081 parts[0] = ctx.main_fn;
5082 }
5083
5084 /* Reset the shader context. */
5085 ctx.shader = shader;
5086 ctx.type = PIPE_SHADER_TESS_CTRL;
5087
5088 si_build_wrapper_function(&ctx,
5089 parts + !vs_needs_prolog,
5090 4 - !vs_needs_prolog, vs_needs_prolog,
5091 vs_needs_prolog ? 2 : 1);
5092 } else {
5093 LLVMValueRef parts[2];
5094 union si_shader_part_key epilog_key;
5095
5096 parts[0] = ctx.main_fn;
5097
5098 memset(&epilog_key, 0, sizeof(epilog_key));
5099 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
5100 si_build_tcs_epilog_function(&ctx, &epilog_key);
5101 parts[1] = ctx.main_fn;
5102
5103 si_build_wrapper_function(&ctx, parts, 2, 0, 0);
5104 }
5105 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
5106 if (ctx.screen->info.chip_class >= GFX9) {
5107 struct si_shader_selector *es = shader->key.part.gs.es;
5108 LLVMValueRef es_prolog = NULL;
5109 LLVMValueRef es_main = NULL;
5110 LLVMValueRef gs_prolog = NULL;
5111 LLVMValueRef gs_main = ctx.main_fn;
5112
5113 /* GS prolog */
5114 union si_shader_part_key gs_prolog_key;
5115 memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
5116 gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
5117 gs_prolog_key.gs_prolog.is_monolithic = true;
5118 gs_prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
5119 si_build_gs_prolog_function(&ctx, &gs_prolog_key);
5120 gs_prolog = ctx.main_fn;
5121
5122 /* ES main part */
5123 nir = get_nir_shader(es, &free_nir);
5124 struct si_shader shader_es = {};
5125 shader_es.selector = es;
5126 shader_es.key.as_es = 1;
5127 shader_es.key.as_ngg = shader->key.as_ngg;
5128 shader_es.key.mono = shader->key.mono;
5129 shader_es.key.opt = shader->key.opt;
5130 shader_es.is_monolithic = true;
5131 si_llvm_context_set_ir(&ctx, &shader_es);
5132
5133 if (!si_build_main_function(&ctx, nir, free_nir)) {
5134 si_llvm_dispose(&ctx);
5135 return -1;
5136 }
5137 shader->info.uses_instanceid |= es->info.uses_instanceid;
5138 es_main = ctx.main_fn;
5139
5140 /* ES prolog */
5141 if (es->type == PIPE_SHADER_VERTEX &&
5142 si_vs_needs_prolog(es, &shader->key.part.gs.vs_prolog)) {
5143 union si_shader_part_key vs_prolog_key;
5144 si_get_vs_prolog_key(&es->info,
5145 shader_es.info.num_input_sgprs,
5146 &shader->key.part.gs.vs_prolog,
5147 shader, &vs_prolog_key);
5148 vs_prolog_key.vs_prolog.is_monolithic = true;
5149 si_build_vs_prolog_function(&ctx, &vs_prolog_key);
5150 es_prolog = ctx.main_fn;
5151 }
5152
5153 /* Reset the shader context. */
5154 ctx.shader = shader;
5155 ctx.type = PIPE_SHADER_GEOMETRY;
5156
5157 /* Prepare the array of shader parts. */
5158 LLVMValueRef parts[4];
5159 unsigned num_parts = 0, main_part, next_first_part;
5160
5161 if (es_prolog)
5162 parts[num_parts++] = es_prolog;
5163
5164 parts[main_part = num_parts++] = es_main;
5165 parts[next_first_part = num_parts++] = gs_prolog;
5166 parts[num_parts++] = gs_main;
5167
5168 si_build_wrapper_function(&ctx, parts, num_parts,
5169 main_part, next_first_part);
5170 } else {
5171 LLVMValueRef parts[2];
5172 union si_shader_part_key prolog_key;
5173
5174 parts[1] = ctx.main_fn;
5175
5176 memset(&prolog_key, 0, sizeof(prolog_key));
5177 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
5178 si_build_gs_prolog_function(&ctx, &prolog_key);
5179 parts[0] = ctx.main_fn;
5180
5181 si_build_wrapper_function(&ctx, parts, 2, 1, 0);
5182 }
5183 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
5184 si_llvm_build_monolithic_ps(&ctx, shader);
5185 }
5186
5187 si_llvm_optimize_module(&ctx);
5188
5189 /* Post-optimization transformations and analysis. */
5190 si_optimize_vs_outputs(&ctx);
5191
5192 if ((debug && debug->debug_message) ||
5193 si_can_dump_shader(sscreen, ctx.type)) {
5194 ctx.shader->info.private_mem_vgprs =
5195 ac_count_scratch_private_memory(ctx.main_fn);
5196 }
5197
5198 /* Make sure the input is a pointer and not an integer followed by inttoptr. */
5199 assert(LLVMGetTypeKind(LLVMTypeOf(LLVMGetParam(ctx.main_fn, 0))) ==
5200 LLVMPointerTypeKind);
5201
5202 /* Compile to bytecode. */
5203 r = si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler,
5204 ctx.ac.module, debug, ctx.type, ctx.ac.wave_size,
5205 si_get_shader_name(shader),
5206 si_should_optimize_less(compiler, shader->selector));
5207 si_llvm_dispose(&ctx);
5208 if (r) {
5209 fprintf(stderr, "LLVM failed to compile shader\n");
5210 return r;
5211 }
5212
5213 /* Validate SGPR and VGPR usage for compute shaders to detect compiler
5214 * bugs. LLVM 3.9svn was known to have such a bug.
5215 */
5216 if (sel->type == PIPE_SHADER_COMPUTE) {
5217 unsigned wave_size = sscreen->compute_wave_size;
5218 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd *
5219 (wave_size == 32 ? 2 : 1);
5220 unsigned max_sgprs = sscreen->info.num_physical_sgprs_per_simd;
5221 unsigned max_sgprs_per_wave = 128;
5222 unsigned simds_per_tg = 4; /* assuming WGP mode on gfx10 */
5223 unsigned threads_per_tg = si_get_max_workgroup_size(shader);
5224 unsigned waves_per_tg = DIV_ROUND_UP(threads_per_tg, wave_size);
5225 unsigned waves_per_simd = DIV_ROUND_UP(waves_per_tg, simds_per_tg);
5226
5227 max_vgprs = max_vgprs / waves_per_simd;
5228 max_sgprs = MIN2(max_sgprs / waves_per_simd, max_sgprs_per_wave);
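/* Worked example with hypothetical numbers: wave_size = 64 and
 * threads_per_tg = 1024 give waves_per_tg = 16 and waves_per_simd = 4;
 * assuming 256 physical wave64 VGPRs per SIMD, max_vgprs = 64 per wave. */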
5229
5230 if (shader->config.num_sgprs > max_sgprs ||
5231 shader->config.num_vgprs > max_vgprs) {
5232 fprintf(stderr, "LLVM failed to compile a shader correctly: "
5233 "SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
5234 shader->config.num_sgprs, shader->config.num_vgprs,
5235 max_sgprs, max_vgprs);
5236
5237 /* Just terminate the process, because dependent
5238 * shaders can hang due to bad input data, but use
5239 * the env var to allow shader-db to work.
5240 */
5241 if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
5242 abort();
5243 }
5244 }
5245
5246 /* Add the scratch offset to input SGPRs. */
5247 if (shader->config.scratch_bytes_per_wave && !si_is_merged_shader(&ctx))
5248 shader->info.num_input_sgprs += 1; /* scratch byte offset */
5249
5250 /* Calculate the number of fragment input VGPRs. */
5251 if (ctx.type == PIPE_SHADER_FRAGMENT) {
5252 shader->info.num_input_vgprs = ac_get_fs_input_vgpr_cnt(&shader->config,
5253 &shader->info.face_vgpr_index,
5254 &shader->info.ancillary_vgpr_index);
5255 }
5256
5257 si_calculate_max_simd_waves(shader);
5258 si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
5259 return 0;
5260 }
5261
5262 /**
5263 * Create, compile and return a shader part (prolog or epilog).
5264 *
5265 * \param sscreen screen
5266 * \param list list of shader parts of the same category
5267 * \param type shader type
5268 * \param key shader part key
5269 * \param prolog whether the part being requested is a prolog
5270 * \param tm LLVM target machine
5271 * \param debug debug callback
5272 * \param build the callback responsible for building the main function
5273 * \return non-NULL on success
5274 */
5275 static struct si_shader_part *
5276 si_get_shader_part(struct si_screen *sscreen,
5277 struct si_shader_part **list,
5278 enum pipe_shader_type type,
5279 bool prolog,
5280 union si_shader_part_key *key,
5281 struct ac_llvm_compiler *compiler,
5282 struct pipe_debug_callback *debug,
5283 void (*build)(struct si_shader_context *,
5284 union si_shader_part_key *),
5285 const char *name)
5286 {
5287 struct si_shader_part *result;
5288
5289 simple_mtx_lock(&sscreen->shader_parts_mutex);
5290
5291 /* Find existing. */
5292 for (result = *list; result; result = result->next) {
5293 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
5294 simple_mtx_unlock(&sscreen->shader_parts_mutex);
5295 return result;
5296 }
5297 }
5298
5299 /* Compile a new one. */
5300 result = CALLOC_STRUCT(si_shader_part);
5301 result->key = *key;
5302
5303 struct si_shader shader = {};
5304
5305 switch (type) {
5306 case PIPE_SHADER_VERTEX:
5307 shader.key.as_ls = key->vs_prolog.as_ls;
5308 shader.key.as_es = key->vs_prolog.as_es;
5309 shader.key.as_ngg = key->vs_prolog.as_ngg;
5310 break;
5311 case PIPE_SHADER_TESS_CTRL:
5312 assert(!prolog);
5313 shader.key.part.tcs.epilog = key->tcs_epilog.states;
5314 break;
5315 case PIPE_SHADER_GEOMETRY:
5316 assert(prolog);
5317 shader.key.as_ngg = key->gs_prolog.as_ngg;
5318 break;
5319 case PIPE_SHADER_FRAGMENT:
5320 if (prolog)
5321 shader.key.part.ps.prolog = key->ps_prolog.states;
5322 else
5323 shader.key.part.ps.epilog = key->ps_epilog.states;
5324 break;
5325 default:
5326 unreachable("bad shader part");
5327 }
5328
5329 struct si_shader_context ctx;
5330 si_llvm_context_init(&ctx, sscreen, compiler,
5331 si_get_wave_size(sscreen, type, shader.key.as_ngg,
5332 shader.key.as_es));
5333 ctx.shader = &shader;
5334 ctx.type = type;
5335
5336 build(&ctx, key);
5337
5338 /* Compile. */
5339 si_llvm_optimize_module(&ctx);
5340
5341 if (si_compile_llvm(sscreen, &result->binary, &result->config, compiler,
5342 ctx.ac.module, debug, ctx.type, ctx.ac.wave_size,
5343 name, false)) {
5344 FREE(result);
5345 result = NULL;
5346 goto out;
5347 }
5348
5349 result->next = *list;
5350 *list = result;
5351
5352 out:
5353 si_llvm_dispose(&ctx);
5354 simple_mtx_unlock(&sscreen->shader_parts_mutex);
5355 return result;
5356 }
5357
5358 /**
5359 * Build the vertex shader prolog function.
5360 *
5361 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
5362 * All inputs are returned unmodified. The vertex load indices are
5363 * stored after them; the API VS uses them for fetching inputs.
5364 *
5365 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
5366 * input_v0,
5367 * input_v1,
5368 * input_v2,
5369 * input_v3,
5370 * (VertexID + BaseVertex),
5371 * (InstanceID + StartInstance),
5372 * (InstanceID / 2 + StartInstance)
5373 */
5374 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
5375 union si_shader_part_key *key)
5376 {
5377 LLVMTypeRef *returns;
5378 LLVMValueRef ret, func;
5379 int num_returns, i;
5380 unsigned first_vs_vgpr = key->vs_prolog.num_merged_next_stage_vgprs;
5381 unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4;
5382 struct ac_arg input_sgpr_param[key->vs_prolog.num_input_sgprs];
5383 struct ac_arg input_vgpr_param[9];
5384 LLVMValueRef input_vgprs[9];
5385 unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs +
5386 num_input_vgprs;
5387 unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;
5388
5389 memset(&ctx->args, 0, sizeof(ctx->args));
5390
5391 /* 4 preloaded VGPRs + vertex load indices as prolog outputs */
5392 returns = alloca((num_all_input_regs + key->vs_prolog.num_inputs) *
5393 sizeof(LLVMTypeRef));
5394 num_returns = 0;
5395
5396 /* Declare input and output SGPRs. */
5397 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
5398 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5399 &input_sgpr_param[i]);
5400 returns[num_returns++] = ctx->i32;
5401 }
5402
5403 struct ac_arg merged_wave_info = input_sgpr_param[3];
5404
5405 /* Preloaded VGPRs (outputs must be floats) */
5406 for (i = 0; i < num_input_vgprs; i++) {
5407 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &input_vgpr_param[i]);
5408 returns[num_returns++] = ctx->f32;
5409 }
5410
5411 /* Vertex load indices. */
5412 for (i = 0; i < key->vs_prolog.num_inputs; i++)
5413 returns[num_returns++] = ctx->f32;
5414
5415 /* Create the function. */
5416 si_llvm_create_func(ctx, "vs_prolog", returns, num_returns, 0);
5417 func = ctx->main_fn;
5418
5419 for (i = 0; i < num_input_vgprs; i++) {
5420 input_vgprs[i] = ac_get_arg(&ctx->ac, input_vgpr_param[i]);
5421 }
5422
5423 if (key->vs_prolog.num_merged_next_stage_vgprs) {
5424 if (!key->vs_prolog.is_monolithic)
5425 si_init_exec_from_input(ctx, merged_wave_info, 0);
5426
5427 if (key->vs_prolog.as_ls &&
5428 ctx->screen->info.has_ls_vgpr_init_bug) {
5429 /* If there are no HS threads, SPI loads the LS VGPRs
5430 * starting at VGPR 0. Shift them back to where they
5431 * belong.
5432 */
5433 LLVMValueRef has_hs_threads =
5434 LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
5435 si_unpack_param(ctx, input_sgpr_param[3], 8, 8),
5436 ctx->i32_0, "");
5437
5438 for (i = 4; i > 0; --i) {
5439 input_vgprs[i + 1] =
5440 LLVMBuildSelect(ctx->ac.builder, has_hs_threads,
5441 input_vgprs[i + 1],
5442 input_vgprs[i - 1], "");
5443 }
5444 }
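/* The loop above shifts the LS VGPRs from slots 0..3 up to slots 2..5
 * when there are no HS threads, and leaves them in place otherwise. */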
5445 }
5446
5447 unsigned vertex_id_vgpr = first_vs_vgpr;
5448 unsigned instance_id_vgpr =
5449 ctx->screen->info.chip_class >= GFX10 ?
5450 first_vs_vgpr + 3 :
5451 first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1);
5452
5453 ctx->abi.vertex_id = input_vgprs[vertex_id_vgpr];
5454 ctx->abi.instance_id = input_vgprs[instance_id_vgpr];
5455
5456 /* InstanceID = VertexID >> 16;
5457 * VertexID = VertexID & 0xffff;
5458 */
5459 if (key->vs_prolog.states.unpack_instance_id_from_vertex_id) {
5460 ctx->abi.instance_id = LLVMBuildLShr(ctx->ac.builder, ctx->abi.vertex_id,
5461 LLVMConstInt(ctx->i32, 16, 0), "");
5462 ctx->abi.vertex_id = LLVMBuildAnd(ctx->ac.builder, ctx->abi.vertex_id,
5463 LLVMConstInt(ctx->i32, 0xffff, 0), "");
5464 }
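/* Example: a packed VertexID of 0x00020001 yields InstanceID = 2 and
 * VertexID = 1. */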
5465
5466 /* Copy inputs to outputs. This should be a no-op, as the registers match,
5467 * but it prevents the compiler from overwriting them unintentionally.
5468 */
5469 ret = ctx->return_value;
5470 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
5471 LLVMValueRef p = LLVMGetParam(func, i);
5472 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
5473 }
5474 for (i = 0; i < num_input_vgprs; i++) {
5475 LLVMValueRef p = input_vgprs[i];
5476
5477 if (i == vertex_id_vgpr)
5478 p = ctx->abi.vertex_id;
5479 else if (i == instance_id_vgpr)
5480 p = ctx->abi.instance_id;
5481
5482 p = ac_to_float(&ctx->ac, p);
5483 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p,
5484 key->vs_prolog.num_input_sgprs + i, "");
5485 }
5486
5487 /* Compute vertex load indices from instance divisors. */
5488 LLVMValueRef instance_divisor_constbuf = NULL;
5489
5490 if (key->vs_prolog.states.instance_divisor_is_fetched) {
5491 LLVMValueRef list = si_prolog_get_rw_buffers(ctx);
5492 LLVMValueRef buf_index =
5493 LLVMConstInt(ctx->i32, SI_VS_CONST_INSTANCE_DIVISORS, 0);
5494 instance_divisor_constbuf =
5495 ac_build_load_to_sgpr(&ctx->ac, list, buf_index);
5496 }
5497
5498 for (i = 0; i < key->vs_prolog.num_inputs; i++) {
5499 bool divisor_is_one =
5500 key->vs_prolog.states.instance_divisor_is_one & (1u << i);
5501 bool divisor_is_fetched =
5502 key->vs_prolog.states.instance_divisor_is_fetched & (1u << i);
5503 LLVMValueRef index = NULL;
5504
5505 if (divisor_is_one) {
5506 index = ctx->abi.instance_id;
5507 } else if (divisor_is_fetched) {
5508 LLVMValueRef udiv_factors[4];
5509
5510 for (unsigned j = 0; j < 4; j++) {
5511 udiv_factors[j] =
5512 si_buffer_load_const(ctx, instance_divisor_constbuf,
5513 LLVMConstInt(ctx->i32, i*16 + j*4, 0));
5514 udiv_factors[j] = ac_to_integer(&ctx->ac, udiv_factors[j]);
5515 }
5516 /* The faster NUW version doesn't work when InstanceID == UINT_MAX.
5517 * Such InstanceID might not be achievable in a reasonable time though.
5518 */
5519 index = ac_build_fast_udiv_nuw(&ctx->ac, ctx->abi.instance_id,
5520 udiv_factors[0], udiv_factors[1],
5521 udiv_factors[2], udiv_factors[3]);
5522 }
5523
5524 if (divisor_is_one || divisor_is_fetched) {
5525 /* Add StartInstance. */
5526 index = LLVMBuildAdd(ctx->ac.builder, index,
5527 LLVMGetParam(ctx->main_fn, user_sgpr_base +
5528 SI_SGPR_START_INSTANCE), "");
5529 } else {
5530 /* VertexID + BaseVertex */
5531 index = LLVMBuildAdd(ctx->ac.builder,
5532 ctx->abi.vertex_id,
5533 LLVMGetParam(func, user_sgpr_base +
5534 SI_SGPR_BASE_VERTEX), "");
5535 }
5536
5537 index = ac_to_float(&ctx->ac, index);
5538 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, index,
5539 ctx->args.arg_count + i, "");
5540 }
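/* To summarize the index computed above (cf. the function comment):
 *   divisor == 0: VertexID + BaseVertex
 *   divisor == 1: InstanceID + StartInstance
 *   divisor  > 1: InstanceID / divisor + StartInstance (fetched udiv factors)
 */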
5541
5542 si_llvm_build_ret(ctx, ret);
5543 }
5544
5545 static bool si_get_vs_prolog(struct si_screen *sscreen,
5546 struct ac_llvm_compiler *compiler,
5547 struct si_shader *shader,
5548 struct pipe_debug_callback *debug,
5549 struct si_shader *main_part,
5550 const struct si_vs_prolog_bits *key)
5551 {
5552 struct si_shader_selector *vs = main_part->selector;
5553
5554 if (!si_vs_needs_prolog(vs, key))
5555 return true;
5556
5557 /* Get the prolog. */
5558 union si_shader_part_key prolog_key;
5559 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
5560 key, shader, &prolog_key);
5561
5562 shader->prolog =
5563 si_get_shader_part(sscreen, &sscreen->vs_prologs,
5564 PIPE_SHADER_VERTEX, true, &prolog_key, compiler,
5565 debug, si_build_vs_prolog_function,
5566 "Vertex Shader Prolog");
5567 return shader->prolog != NULL;
5568 }
5569
5570 /**
5571 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
5572 */
5573 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
5574 struct ac_llvm_compiler *compiler,
5575 struct si_shader *shader,
5576 struct pipe_debug_callback *debug)
5577 {
5578 return si_get_vs_prolog(sscreen, compiler, shader, debug, shader,
5579 &shader->key.part.vs.prolog);
5580 }
5581
5582 /**
5583 * Compile the TCS epilog function. This writes tessellation factors to memory
5584 * based on the output primitive type of the tessellator (determined by TES).
5585 */
5586 static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
5587 union si_shader_part_key *key)
5588 {
5589 memset(&ctx->args, 0, sizeof(ctx->args));
5590
5591 if (ctx->screen->info.chip_class >= GFX9) {
5592 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5593 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5594 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5595 &ctx->tcs_offchip_offset);
5596 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* wave info */
5597 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5598 &ctx->tcs_factor_offset);
5599 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5600 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5601 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5602 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5603 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5604 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5605 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5606 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5607 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5608 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5609 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5610 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5611 &ctx->tcs_offchip_layout);
5612 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5613 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5614 &ctx->tcs_out_lds_layout);
5615 } else {
5616 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5617 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5618 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5619 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5620 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5621 &ctx->tcs_offchip_layout);
5622 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5623 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5624 &ctx->tcs_out_lds_layout);
5625 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5626 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5627 &ctx->tcs_offchip_offset);
5628 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5629 &ctx->tcs_factor_offset);
5630 }
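/* The two argument layouts above mirror the merged (GFX9+) and unmerged
 * SGPR orders of the TCS main part; only the four named SGPRs are used
 * here, the NULL args merely keep the registers in their places. */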
5631
5632 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* VGPR gap */
5633 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* VGPR gap */
5634 struct ac_arg rel_patch_id; /* patch index within the wave (REL_PATCH_ID) */
5635 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &rel_patch_id);
5636 struct ac_arg invocation_id; /* invocation ID within the patch */
5637 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &invocation_id);
5638 struct ac_arg tcs_out_current_patch_data_offset; /* LDS offset where tess factors should be loaded from */
5639 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
5640 &tcs_out_current_patch_data_offset);
5641
5642 struct ac_arg tess_factors[6];
5643 for (unsigned i = 0; i < 6; i++)
5644 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &tess_factors[i]);
5645
5646 /* Create the function. */
5647 si_llvm_create_func(ctx, "tcs_epilog", NULL, 0,
5648 ctx->screen->info.chip_class >= GFX7 ? 128 : 0);
5649 ac_declare_lds_as_pointer(&ctx->ac);
5650
5651 LLVMValueRef invoc0_tess_factors[6];
5652 for (unsigned i = 0; i < 6; i++)
5653 invoc0_tess_factors[i] = ac_get_arg(&ctx->ac, tess_factors[i]);
5654
5655 si_write_tess_factors(ctx,
5656 ac_get_arg(&ctx->ac, rel_patch_id),
5657 ac_get_arg(&ctx->ac, invocation_id),
5658 ac_get_arg(&ctx->ac, tcs_out_current_patch_data_offset),
5659 invoc0_tess_factors, invoc0_tess_factors + 4);
5660
5661 LLVMBuildRetVoid(ctx->ac.builder);
5662 }
5663
5664 /**
5665 * Select and compile (or reuse) TCS parts (epilog).
5666 */
5667 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
5668 struct ac_llvm_compiler *compiler,
5669 struct si_shader *shader,
5670 struct pipe_debug_callback *debug)
5671 {
5672 if (sscreen->info.chip_class >= GFX9) {
5673 struct si_shader *ls_main_part =
5674 shader->key.part.tcs.ls->main_shader_part_ls;
5675
5676 if (!si_get_vs_prolog(sscreen, compiler, shader, debug, ls_main_part,
5677 &shader->key.part.tcs.ls_prolog))
5678 return false;
5679
5680 shader->previous_stage = ls_main_part;
5681 }
5682
5683 /* Get the epilog. */
5684 union si_shader_part_key epilog_key;
5685 memset(&epilog_key, 0, sizeof(epilog_key));
5686 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
5687
5688 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
5689 PIPE_SHADER_TESS_CTRL, false,
5690 &epilog_key, compiler, debug,
5691 si_build_tcs_epilog_function,
5692 "Tessellation Control Shader Epilog");
5693 return shader->epilog != NULL;
5694 }
5695
5696 /**
5697 * Select and compile (or reuse) GS parts (prolog).
5698 */
5699 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
5700 struct ac_llvm_compiler *compiler,
5701 struct si_shader *shader,
5702 struct pipe_debug_callback *debug)
5703 {
5704 if (sscreen->info.chip_class >= GFX9) {
5705 struct si_shader *es_main_part;
5706 enum pipe_shader_type es_type = shader->key.part.gs.es->type;
5707
5708 if (shader->key.as_ngg)
5709 es_main_part = shader->key.part.gs.es->main_shader_part_ngg_es;
5710 else
5711 es_main_part = shader->key.part.gs.es->main_shader_part_es;
5712
5713 if (es_type == PIPE_SHADER_VERTEX &&
5714 !si_get_vs_prolog(sscreen, compiler, shader, debug, es_main_part,
5715 &shader->key.part.gs.vs_prolog))
5716 return false;
5717
5718 shader->previous_stage = es_main_part;
5719 }
5720
5721 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
5722 return true;
5723
5724 union si_shader_part_key prolog_key;
5725 memset(&prolog_key, 0, sizeof(prolog_key));
5726 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
5727 prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
5728
5729 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
5730 PIPE_SHADER_GEOMETRY, true,
5731 &prolog_key, compiler, debug,
5732 si_build_gs_prolog_function,
5733 "Geometry Shader Prolog");
5734 return shader->prolog2 != NULL;
5735 }
5736
5737 /**
5738 * Compute the PS prolog key, which contains all the information needed to
5739 * build the PS prolog function, and set related bits in shader->config.
5740 */
5741 void si_get_ps_prolog_key(struct si_shader *shader,
5742 union si_shader_part_key *key,
5743 bool separate_prolog)
5744 {
5745 struct si_shader_info *info = &shader->selector->info;
5746
5747 memset(key, 0, sizeof(*key));
5748 key->ps_prolog.states = shader->key.part.ps.prolog;
5749 key->ps_prolog.colors_read = info->colors_read;
5750 key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
5751 key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
5752 key->ps_prolog.wqm = info->uses_derivatives &&
5753 (key->ps_prolog.colors_read ||
5754 key->ps_prolog.states.force_persp_sample_interp ||
5755 key->ps_prolog.states.force_linear_sample_interp ||
5756 key->ps_prolog.states.force_persp_center_interp ||
5757 key->ps_prolog.states.force_linear_center_interp ||
5758 key->ps_prolog.states.bc_optimize_for_persp ||
5759 key->ps_prolog.states.bc_optimize_for_linear);
5760 key->ps_prolog.ancillary_vgpr_index = shader->info.ancillary_vgpr_index;
5761
5762 if (info->colors_read) {
5763 unsigned *color = shader->selector->color_attr_index;
5764
5765 if (shader->key.part.ps.prolog.color_two_side) {
5766 /* BCOLORs are stored after the last input. */
5767 key->ps_prolog.num_interp_inputs = info->num_inputs;
5768 key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
5769 if (separate_prolog)
5770 shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
5771 }
5772
5773 for (unsigned i = 0; i < 2; i++) {
5774 unsigned interp = info->input_interpolate[color[i]];
5775 unsigned location = info->input_interpolate_loc[color[i]];
5776
5777 if (!(info->colors_read & (0xf << i*4)))
5778 continue;
5779
5780 key->ps_prolog.color_attr_index[i] = color[i];
5781
5782 if (shader->key.part.ps.prolog.flatshade_colors &&
5783 interp == TGSI_INTERPOLATE_COLOR)
5784 interp = TGSI_INTERPOLATE_CONSTANT;
5785
5786 switch (interp) {
5787 case TGSI_INTERPOLATE_CONSTANT:
5788 key->ps_prolog.color_interp_vgpr_index[i] = -1;
5789 break;
5790 case TGSI_INTERPOLATE_PERSPECTIVE:
5791 case TGSI_INTERPOLATE_COLOR:
5792 /* Force the interpolation location for colors here. */
5793 if (shader->key.part.ps.prolog.force_persp_sample_interp)
5794 location = TGSI_INTERPOLATE_LOC_SAMPLE;
5795 if (shader->key.part.ps.prolog.force_persp_center_interp)
5796 location = TGSI_INTERPOLATE_LOC_CENTER;
5797
5798 switch (location) {
5799 case TGSI_INTERPOLATE_LOC_SAMPLE:
5800 key->ps_prolog.color_interp_vgpr_index[i] = 0;
5801 if (separate_prolog) {
5802 shader->config.spi_ps_input_ena |=
5803 S_0286CC_PERSP_SAMPLE_ENA(1);
5804 }
5805 break;
5806 case TGSI_INTERPOLATE_LOC_CENTER:
5807 key->ps_prolog.color_interp_vgpr_index[i] = 2;
5808 if (separate_prolog) {
5809 shader->config.spi_ps_input_ena |=
5810 S_0286CC_PERSP_CENTER_ENA(1);
5811 }
5812 break;
5813 case TGSI_INTERPOLATE_LOC_CENTROID:
5814 key->ps_prolog.color_interp_vgpr_index[i] = 4;
5815 if (separate_prolog) {
5816 shader->config.spi_ps_input_ena |=
5817 S_0286CC_PERSP_CENTROID_ENA(1);
5818 }
5819 break;
5820 default:
5821 assert(0);
5822 }
5823 break;
5824 case TGSI_INTERPOLATE_LINEAR:
5825 /* Force the interpolation location for colors here. */
5826 if (shader->key.part.ps.prolog.force_linear_sample_interp)
5827 location = TGSI_INTERPOLATE_LOC_SAMPLE;
5828 if (shader->key.part.ps.prolog.force_linear_center_interp)
5829 location = TGSI_INTERPOLATE_LOC_CENTER;
5830
5831 /* The VGPR assignment for non-monolithic shaders
5832 * works because InitialPSInputAddr is set on the
5833 * main shader and PERSP_PULL_MODEL is never used.
5834 */
5835 switch (location) {
5836 case TGSI_INTERPOLATE_LOC_SAMPLE:
5837 key->ps_prolog.color_interp_vgpr_index[i] =
5838 separate_prolog ? 6 : 9;
5839 if (separate_prolog) {
5840 shader->config.spi_ps_input_ena |=
5841 S_0286CC_LINEAR_SAMPLE_ENA(1);
5842 }
5843 break;
5844 case TGSI_INTERPOLATE_LOC_CENTER:
5845 key->ps_prolog.color_interp_vgpr_index[i] =
5846 separate_prolog ? 8 : 11;
5847 if (separate_prolog) {
5848 shader->config.spi_ps_input_ena |=
5849 S_0286CC_LINEAR_CENTER_ENA(1);
5850 }
5851 break;
5852 case TGSI_INTERPOLATE_LOC_CENTROID:
5853 key->ps_prolog.color_interp_vgpr_index[i] =
5854 separate_prolog ? 10 : 13;
5855 if (separate_prolog) {
5856 shader->config.spi_ps_input_ena |=
5857 S_0286CC_LINEAR_CENTROID_ENA(1);
5858 }
5859 break;
5860 default:
5861 assert(0);
5862 }
5863 break;
5864 default:
5865 assert(0);
5866 }
5867 }
5868 }
5869 }
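/* For instance, a flat-shaded color ends up with TGSI_INTERPOLATE_CONSTANT
 * above and thus color_interp_vgpr_index = -1: constant interpolation
 * needs no barycentric VGPRs. */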
5870
5871 /**
5872 * Check whether a PS prolog is required based on the key.
5873 */
5874 bool si_need_ps_prolog(const union si_shader_part_key *key)
5875 {
5876 return key->ps_prolog.colors_read ||
5877 key->ps_prolog.states.force_persp_sample_interp ||
5878 key->ps_prolog.states.force_linear_sample_interp ||
5879 key->ps_prolog.states.force_persp_center_interp ||
5880 key->ps_prolog.states.force_linear_center_interp ||
5881 key->ps_prolog.states.bc_optimize_for_persp ||
5882 key->ps_prolog.states.bc_optimize_for_linear ||
5883 key->ps_prolog.states.poly_stipple ||
5884 key->ps_prolog.states.samplemask_log_ps_iter;
5885 }
5886
5887 /**
5888 * Compute the PS epilog key, which contains all the information needed to
5889 * build the PS epilog function.
5890 */
5891 void si_get_ps_epilog_key(struct si_shader *shader,
5892 union si_shader_part_key *key)
5893 {
5894 struct si_shader_info *info = &shader->selector->info;
5895 memset(key, 0, sizeof(*key));
5896 key->ps_epilog.colors_written = info->colors_written;
5897 key->ps_epilog.writes_z = info->writes_z;
5898 key->ps_epilog.writes_stencil = info->writes_stencil;
5899 key->ps_epilog.writes_samplemask = info->writes_samplemask;
5900 key->ps_epilog.states = shader->key.part.ps.epilog;
5901 }
5902
5903 /**
5904 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
5905 */
5906 static bool si_shader_select_ps_parts(struct si_screen *sscreen,
5907 struct ac_llvm_compiler *compiler,
5908 struct si_shader *shader,
5909 struct pipe_debug_callback *debug)
5910 {
5911 union si_shader_part_key prolog_key;
5912 union si_shader_part_key epilog_key;
5913
5914 /* Get the prolog. */
5915 si_get_ps_prolog_key(shader, &prolog_key, true);
5916
5917 /* The prolog is a no-op if these aren't set. */
5918 if (si_need_ps_prolog(&prolog_key)) {
5919 shader->prolog =
5920 si_get_shader_part(sscreen, &sscreen->ps_prologs,
5921 PIPE_SHADER_FRAGMENT, true,
5922 &prolog_key, compiler, debug,
5923 si_llvm_build_ps_prolog,
5924 "Fragment Shader Prolog");
5925 if (!shader->prolog)
5926 return false;
5927 }
5928
5929 /* Get the epilog. */
5930 si_get_ps_epilog_key(shader, &epilog_key);
5931
5932 shader->epilog =
5933 si_get_shader_part(sscreen, &sscreen->ps_epilogs,
5934 PIPE_SHADER_FRAGMENT, false,
5935 &epilog_key, compiler, debug,
5936 si_llvm_build_ps_epilog,
5937 "Fragment Shader Epilog");
5938 if (!shader->epilog)
5939 return false;
5940
5941 /* Enable POS_FIXED_PT if polygon stippling is enabled. */
5942 if (shader->key.part.ps.prolog.poly_stipple) {
5943 shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
5944 assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
5945 }
5946
5947 /* Set up the enable bits for per-sample shading if needed. */
5948 if (shader->key.part.ps.prolog.force_persp_sample_interp &&
5949 (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
5950 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
5951 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
5952 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
5953 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
5954 }
5955 if (shader->key.part.ps.prolog.force_linear_sample_interp &&
5956 (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
5957 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
5958 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
5959 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
5960 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
5961 }
5962 if (shader->key.part.ps.prolog.force_persp_center_interp &&
5963 (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
5964 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
5965 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
5966 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
5967 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
5968 }
5969 if (shader->key.part.ps.prolog.force_linear_center_interp &&
5970 (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
5971 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
5972 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
5973 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
5974 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
5975 }
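/* Each block above clears the center/centroid enable bits so that the
 * forced sample (or center) barycentrics are the only ones requested
 * from the SPI, matching the interpolation the prolog forces. */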
5976
5977 /* POS_W_FLOAT requires that one of the perspective weights is enabled. */
5978 if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
5979 !(shader->config.spi_ps_input_ena & 0xf)) {
5980 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
5981 assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
5982 }
5983
5984 /* At least one pair of interpolation weights must be enabled. */
5985 if (!(shader->config.spi_ps_input_ena & 0x7f)) {
5986 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
5987 assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
5988 }
5989
5990 /* Samplemask fixup requires the sample ID. */
5991 if (shader->key.part.ps.prolog.samplemask_log_ps_iter) {
5992 shader->config.spi_ps_input_ena |= S_0286CC_ANCILLARY_ENA(1);
5993 assert(G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr));
5994 }
5995
5996 /* The sample mask input is always enabled, because the API shader always
5997 * passes it through to the epilog. Disable it here if it's unused.
5998 */
5999 if (!shader->key.part.ps.epilog.poly_line_smoothing &&
6000 !shader->selector->info.reads_samplemask)
6001 shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;
6002
6003 return true;
6004 }
6005
6006 void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
6007 unsigned *lds_size)
6008 {
6009 /* If tessellation is all offchip and on-chip GS isn't used, this
6010 * workaround is not needed, so we return early on purpose; the code
6011 * below is intentionally unreachable and kept for reference. */
6012 return;
6013
6014 /* SPI barrier management bug:
6015 * Make sure we have at least 4k of LDS in use to avoid the bug.
6016 * It applies to workgroup sizes of more than one wavefront.
6017 */
6018 if (sscreen->info.family == CHIP_BONAIRE ||
6019 sscreen->info.family == CHIP_KABINI)
6020 *lds_size = MAX2(*lds_size, 8);
6021 }
6022
6023 static void si_fix_resource_usage(struct si_screen *sscreen,
6024 struct si_shader *shader)
6025 {
6026 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
6027
6028 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
6029
6030 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
6031 si_get_max_workgroup_size(shader) > sscreen->compute_wave_size) {
6032 si_multiwave_lds_size_workaround(sscreen,
6033 &shader->config.lds_size);
6034 }
6035 }
6036
6037 bool si_create_shader_variant(struct si_screen *sscreen,
6038 struct ac_llvm_compiler *compiler,
6039 struct si_shader *shader,
6040 struct pipe_debug_callback *debug)
6041 {
6042 struct si_shader_selector *sel = shader->selector;
6043 struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
6044 int r;
6045
6046 /* LS, ES, VS are compiled on demand if the main part hasn't been
6047 * compiled for that stage.
6048 *
6049 * GS is compiled on demand if the main part hasn't been compiled
6050 * for the chosen NGG mode.
6051 *
6052 * Vertex shaders are compiled on demand when a vertex fetch
6053 * workaround must be applied.
6054 */
6055 if (shader->is_monolithic) {
6056 /* Monolithic shader (compiled as a whole, has many variants,
6057 * may take a long time to compile).
6058 */
6059 r = si_compile_shader(sscreen, compiler, shader, debug);
6060 if (r)
6061 return false;
6062 } else {
6063 /* The shader consists of several parts:
6064 *
6065 * - the middle part is the user shader, it has 1 variant only
6066 * and it was compiled during the creation of the shader
6067 * selector
6068 * - the prolog part is inserted at the beginning
6069 * - the epilog part is inserted at the end
6070 *
6071 * The prolog and epilog have many (but simple) variants.
6072 *
6073 * Starting with gfx9, geometry and tessellation control
6074 * shaders also contain the prolog and user shader parts of
6075 * the previous shader stage.
6076 */
6077
6078 if (!mainp)
6079 return false;
6080
6081 /* Copy the compiled shader data over. */
6082 shader->is_binary_shared = true;
6083 shader->binary = mainp->binary;
6084 shader->config = mainp->config;
6085 shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
6086 shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
6087 shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
6088 shader->info.ancillary_vgpr_index = mainp->info.ancillary_vgpr_index;
6089 memcpy(shader->info.vs_output_param_offset,
6090 mainp->info.vs_output_param_offset,
6091 sizeof(mainp->info.vs_output_param_offset));
6092 shader->info.uses_instanceid = mainp->info.uses_instanceid;
6093 shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
6094 shader->info.nr_param_exports = mainp->info.nr_param_exports;
6095
6096 /* Select prologs and/or epilogs. */
6097 switch (sel->type) {
6098 case PIPE_SHADER_VERTEX:
6099 if (!si_shader_select_vs_parts(sscreen, compiler, shader, debug))
6100 return false;
6101 break;
6102 case PIPE_SHADER_TESS_CTRL:
6103 if (!si_shader_select_tcs_parts(sscreen, compiler, shader, debug))
6104 return false;
6105 break;
6106 case PIPE_SHADER_TESS_EVAL:
6107 break;
6108 case PIPE_SHADER_GEOMETRY:
6109 if (!si_shader_select_gs_parts(sscreen, compiler, shader, debug))
6110 return false;
6111 break;
6112 case PIPE_SHADER_FRAGMENT:
6113 if (!si_shader_select_ps_parts(sscreen, compiler, shader, debug))
6114 return false;
6115
6116 /* Make sure we have at least as many VGPRs as there
6117 * are allocated inputs.
6118 */
6119 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
6120 shader->info.num_input_vgprs);
6121 break;
6122 default:;
6123 }
6124
6125 /* Update SGPR and VGPR counts. */
6126 if (shader->prolog) {
6127 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
6128 shader->prolog->config.num_sgprs);
6129 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
6130 shader->prolog->config.num_vgprs);
6131 }
6132 if (shader->previous_stage) {
6133 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
6134 shader->previous_stage->config.num_sgprs);
6135 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
6136 shader->previous_stage->config.num_vgprs);
6137 shader->config.spilled_sgprs =
6138 MAX2(shader->config.spilled_sgprs,
6139 shader->previous_stage->config.spilled_sgprs);
6140 shader->config.spilled_vgprs =
6141 MAX2(shader->config.spilled_vgprs,
6142 shader->previous_stage->config.spilled_vgprs);
6143 shader->info.private_mem_vgprs =
6144 MAX2(shader->info.private_mem_vgprs,
6145 shader->previous_stage->info.private_mem_vgprs);
6146 shader->config.scratch_bytes_per_wave =
6147 MAX2(shader->config.scratch_bytes_per_wave,
6148 shader->previous_stage->config.scratch_bytes_per_wave);
6149 shader->info.uses_instanceid |=
6150 shader->previous_stage->info.uses_instanceid;
6151 }
6152 if (shader->prolog2) {
6153 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
6154 shader->prolog2->config.num_sgprs);
6155 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
6156 shader->prolog2->config.num_vgprs);
6157 }
6158 if (shader->epilog) {
6159 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
6160 shader->epilog->config.num_sgprs);
6161 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
6162 shader->epilog->config.num_vgprs);
6163 }
6164 si_calculate_max_simd_waves(shader);
6165 }
6166
6167 if (shader->key.as_ngg) {
6168 assert(!shader->key.as_es && !shader->key.as_ls);
6169 gfx10_ngg_calculate_subgroup_info(shader);
6170 } else if (sscreen->info.chip_class >= GFX9 && sel->type == PIPE_SHADER_GEOMETRY) {
6171 gfx9_get_gs_info(shader->previous_stage_sel, sel, &shader->gs_info);
6172 }
6173
6174 si_fix_resource_usage(sscreen, shader);
6175 si_shader_dump(sscreen, shader, debug, stderr, true);
6176
6177 /* Upload. */
6178 if (!si_shader_binary_upload(sscreen, shader, 0)) {
6179 fprintf(stderr, "LLVM failed to upload shader\n");
6180 return false;
6181 }
6182
6183 return true;
6184 }
6185
6186 void si_shader_destroy(struct si_shader *shader)
6187 {
6188 if (shader->scratch_bo)
6189 si_resource_reference(&shader->scratch_bo, NULL);
6190
6191 si_resource_reference(&shader->bo, NULL);
6192
6193 if (!shader->is_binary_shared)
6194 si_shader_binary_clean(&shader->binary);
6195
6196 free(shader->shader_log);
6197 }