radeonsi: move si_insert_input_* functions
[mesa.git] / src / gallium / drivers / radeonsi / si_shader.c
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <llvm/Config/llvm-config.h>

#include "util/u_memory.h"
#include "tgsi/tgsi_strings.h"
#include "tgsi/tgsi_from_mesa.h"

#include "ac_exp_param.h"
#include "ac_shader_util.h"
#include "ac_rtld.h"
#include "ac_llvm_util.h"
#include "si_shader_internal.h"
#include "si_pipe.h"
#include "sid.h"

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_serialize.h"

static const char scratch_rsrc_dword0_symbol[] =
	"SCRATCH_RSRC_DWORD0";

static const char scratch_rsrc_dword1_symbol[] =
	"SCRATCH_RSRC_DWORD1";

static void si_llvm_emit_barrier(struct si_shader_context *ctx);

static void si_dump_shader_key(const struct si_shader *shader, FILE *f);

static void si_build_vs_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);
static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
					 union si_shader_part_key *key);
static void si_fix_resource_usage(struct si_screen *sscreen,
				  struct si_shader *shader);

static bool llvm_type_is_64bit(struct si_shader_context *ctx,
			       LLVMTypeRef type)
{
	if (type == ctx->ac.i64 || type == ctx->ac.f64)
		return true;

	return false;
}

/** Whether the shader runs as a combination of multiple API shaders */
static bool is_multi_part_shader(struct si_shader_context *ctx)
{
	if (ctx->screen->info.chip_class <= GFX8)
		return false;

	return ctx->shader->key.as_ls ||
	       ctx->shader->key.as_es ||
	       ctx->type == PIPE_SHADER_TESS_CTRL ||
	       ctx->type == PIPE_SHADER_GEOMETRY;
}

/** Whether the shader runs on a merged HW stage (LSHS or ESGS) */
bool si_is_merged_shader(struct si_shader_context *ctx)
{
	return ctx->shader->key.as_ngg || is_multi_part_shader(ctx);
}

/**
 * Returns a unique index for a per-patch semantic name and index. The index
 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
 * can be calculated.
 */
unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
{
	switch (semantic_name) {
	case TGSI_SEMANTIC_TESSOUTER:
		return 0;
	case TGSI_SEMANTIC_TESSINNER:
		return 1;
	case TGSI_SEMANTIC_PATCH:
		assert(index < 30);
		return 2 + index;

	default:
		assert(!"invalid semantic name");
		return 0;
	}
}

/**
 * Returns a unique index for a semantic name and index. The index must be
 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
 * calculated.
 */
unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index,
				       unsigned is_varying)
{
	switch (semantic_name) {
	case TGSI_SEMANTIC_POSITION:
		return 0;
	case TGSI_SEMANTIC_GENERIC:
		/* Since some shader stages use the highest used IO index
		 * to determine the size to allocate for inputs/outputs
		 * (in LDS, tess and GS rings), GENERIC should be placed right
		 * after POSITION to make that size as small as possible.
		 */
		if (index < SI_MAX_IO_GENERIC)
			return 1 + index;

		assert(!"invalid generic index");
		return 0;
	case TGSI_SEMANTIC_FOG:
		return SI_MAX_IO_GENERIC + 1;
	case TGSI_SEMANTIC_COLOR:
		assert(index < 2);
		return SI_MAX_IO_GENERIC + 2 + index;
	case TGSI_SEMANTIC_BCOLOR:
		assert(index < 2);
		/* If it's a varying, COLOR and BCOLOR alias. */
		if (is_varying)
			return SI_MAX_IO_GENERIC + 2 + index;
		else
			return SI_MAX_IO_GENERIC + 4 + index;
	case TGSI_SEMANTIC_TEXCOORD:
		assert(index < 8);
		return SI_MAX_IO_GENERIC + 6 + index;

	/* These are rarely used between LS and HS or ES and GS. */
	case TGSI_SEMANTIC_CLIPDIST:
		assert(index < 2);
		return SI_MAX_IO_GENERIC + 6 + 8 + index;
	case TGSI_SEMANTIC_CLIPVERTEX:
		return SI_MAX_IO_GENERIC + 6 + 8 + 2;
	case TGSI_SEMANTIC_PSIZE:
		return SI_MAX_IO_GENERIC + 6 + 8 + 3;

	/* These can't be written by LS, HS, and ES. */
	case TGSI_SEMANTIC_LAYER:
		return SI_MAX_IO_GENERIC + 6 + 8 + 4;
	case TGSI_SEMANTIC_VIEWPORT_INDEX:
		return SI_MAX_IO_GENERIC + 6 + 8 + 5;
	case TGSI_SEMANTIC_PRIMID:
		STATIC_ASSERT(SI_MAX_IO_GENERIC + 6 + 8 + 6 <= 63);
		return SI_MAX_IO_GENERIC + 6 + 8 + 6;
	default:
		fprintf(stderr, "invalid semantic name = %u\n", semantic_name);
		assert(!"invalid semantic name");
		return 0;
	}
}

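/* For example (an illustrative sketch, not code from this file), the
 * 64-bit usage masks used elsewhere in the driver are built from these
 * indices roughly as:
 *
 *   outputs_written |= 1ull << si_shader_io_get_unique_index(name, index, false);
 *
 * which is why the STATIC_ASSERT above pins the highest index to <= 63.
 */
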
/**
 * Get the value of a shader input parameter and extract a bitfield.
 */
static LLVMValueRef unpack_llvm_param(struct si_shader_context *ctx,
				      LLVMValueRef value, unsigned rshift,
				      unsigned bitwidth)
{
	if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
		value = ac_to_integer(&ctx->ac, value);

	if (rshift)
		value = LLVMBuildLShr(ctx->ac.builder, value,
				      LLVMConstInt(ctx->i32, rshift, 0), "");

	if (rshift + bitwidth < 32) {
		unsigned mask = (1 << bitwidth) - 1;
		value = LLVMBuildAnd(ctx->ac.builder, value,
				     LLVMConstInt(ctx->i32, mask, 0), "");
	}

	return value;
}

LLVMValueRef si_unpack_param(struct si_shader_context *ctx,
			     struct ac_arg param, unsigned rshift,
			     unsigned bitwidth)
{
	LLVMValueRef value = ac_get_arg(&ctx->ac, param);

	return unpack_llvm_param(ctx, value, rshift, bitwidth);
}

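/* Example use of si_unpack_param(): later in this file,
 * si_load_patch_vertices_in() does
 *
 *   si_unpack_param(ctx, ctx->tcs_out_lds_layout, 13, 6)
 *
 * which computes (tcs_out_lds_layout >> 13) & 0x3f, i.e. the 6-bit
 * patch-vertex count packed into that SGPR.
 */
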
static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
{
	switch (ctx->type) {
	case PIPE_SHADER_TESS_CTRL:
		return si_unpack_param(ctx, ctx->args.tcs_rel_ids, 0, 8);

	case PIPE_SHADER_TESS_EVAL:
		return ac_get_arg(&ctx->ac, ctx->tes_rel_patch_id);

	default:
		assert(0);
		return NULL;
	}
}

/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */

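/* A worked example with hypothetical sizes (not real driver state): with
 * an input patch stride of 64 dwords and 8 patches, TCS inputs occupy
 * dwords [0, 512), and the output region starting at
 * get_tcs_out_patch0_offset holds, per patch,
 * tcs_out_vertices * vertex_dw_stride dwords of per-vertex outputs
 * followed by the per-patch outputs (see get_tcs_out_patch_stride below).
 */
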
static LLVMValueRef
get_tcs_in_patch_stride(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->vs_state_bits, 11, 13);
}

static unsigned get_tcs_out_vertex_dw_stride_constant(struct si_shader_context *ctx)
{
	assert(ctx->type == PIPE_SHADER_TESS_CTRL);

	if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
		return util_last_bit64(ctx->shader->key.mono.u.ff_tcs_inputs_to_copy) * 4;

	return util_last_bit64(ctx->shader->selector->outputs_written) * 4;
}

static LLVMValueRef get_tcs_out_vertex_dw_stride(struct si_shader_context *ctx)
{
	unsigned stride = get_tcs_out_vertex_dw_stride_constant(ctx);

	return LLVMConstInt(ctx->i32, stride, 0);
}

static LLVMValueRef get_tcs_out_patch_stride(struct si_shader_context *ctx)
{
	if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
		return si_unpack_param(ctx, ctx->tcs_out_lds_layout, 0, 13);

	const struct si_shader_info *info = &ctx->shader->selector->info;
	unsigned tcs_out_vertices = info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
	unsigned vertex_dw_stride = get_tcs_out_vertex_dw_stride_constant(ctx);
	unsigned num_patch_outputs = util_last_bit64(ctx->shader->selector->patch_outputs_written);
	unsigned patch_dw_stride = tcs_out_vertices * vertex_dw_stride +
				   num_patch_outputs * 4;
	return LLVMConstInt(ctx->i32, patch_dw_stride, 0);
}

static LLVMValueRef
get_tcs_out_patch0_offset(struct si_shader_context *ctx)
{
	return LLVMBuildMul(ctx->ac.builder,
			    si_unpack_param(ctx, ctx->tcs_out_lds_offsets, 0, 16),
			    LLVMConstInt(ctx->i32, 4, 0), "");
}

static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
{
	return LLVMBuildMul(ctx->ac.builder,
			    si_unpack_param(ctx, ctx->tcs_out_lds_offsets, 16, 16),
			    LLVMConstInt(ctx->i32, 4, 0), "");
}

static LLVMValueRef
get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}

static LLVMValueRef
get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id, patch0_offset);
}

static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id, patch0_patch_data_offset);
}

static LLVMValueRef get_num_tcs_out_vertices(struct si_shader_context *ctx)
{
	unsigned tcs_out_vertices =
		ctx->shader->selector ?
		ctx->shader->selector->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] : 0;

	/* If !tcs_out_vertices, it's either the fixed-func TCS or the TCS epilog. */
	if (ctx->type == PIPE_SHADER_TESS_CTRL && tcs_out_vertices)
		return LLVMConstInt(ctx->i32, tcs_out_vertices, 0);

	return si_unpack_param(ctx, ctx->tcs_offchip_layout, 6, 6);
}

static LLVMValueRef get_tcs_in_vertex_dw_stride(struct si_shader_context *ctx)
{
	unsigned stride;

	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		stride = ctx->shader->selector->lshs_vertex_stride / 4;
		return LLVMConstInt(ctx->i32, stride, 0);

	case PIPE_SHADER_TESS_CTRL:
		if (ctx->screen->info.chip_class >= GFX9 &&
		    ctx->shader->is_monolithic) {
			stride = ctx->shader->key.part.tcs.ls->lshs_vertex_stride / 4;
			return LLVMConstInt(ctx->i32, stride, 0);
		}
		return si_unpack_param(ctx, ctx->vs_state_bits, 24, 8);

	default:
		assert(0);
		return NULL;
	}
}

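/* Sign-extend one 16-bit half of an i32: index 0 sign-extends the low
 * half, index 1 arithmetic-shifts the high half down.
 */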
static LLVMValueRef unpack_sint16(struct si_shader_context *ctx,
				  LLVMValueRef i32, unsigned index)
{
	assert(index <= 1);

	if (index == 1)
		return LLVMBuildAShr(ctx->ac.builder, i32,
				     LLVMConstInt(ctx->i32, 16, 0), "");

	return LLVMBuildSExt(ctx->ac.builder,
			     LLVMBuildTrunc(ctx->ac.builder, i32,
					    ctx->ac.i16, ""),
			     ctx->i32, "");
}

void si_llvm_load_input_vs(
	struct si_shader_context *ctx,
	unsigned input_index,
	LLVMValueRef out[4])
{
	const struct si_shader_info *info = &ctx->shader->selector->info;
	unsigned vs_blit_property = info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];

	if (vs_blit_property) {
		LLVMValueRef vertex_id = ctx->abi.vertex_id;
		LLVMValueRef sel_x1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntULE, vertex_id,
						    ctx->i32_1, "");
		/* Use LLVMIntNE, because we have 3 vertices and only
		 * the middle one should use y2.
		 */
		LLVMValueRef sel_y1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntNE, vertex_id,
						    ctx->i32_1, "");

		unsigned param_vs_blit_inputs = ctx->vs_blit_inputs.arg_index;
		if (input_index == 0) {
			/* Position: */
			LLVMValueRef x1y1 = LLVMGetParam(ctx->main_fn,
							 param_vs_blit_inputs);
			LLVMValueRef x2y2 = LLVMGetParam(ctx->main_fn,
							 param_vs_blit_inputs + 1);

			LLVMValueRef x1 = unpack_sint16(ctx, x1y1, 0);
			LLVMValueRef y1 = unpack_sint16(ctx, x1y1, 1);
			LLVMValueRef x2 = unpack_sint16(ctx, x2y2, 0);
			LLVMValueRef y2 = unpack_sint16(ctx, x2y2, 1);

			LLVMValueRef x = LLVMBuildSelect(ctx->ac.builder, sel_x1,
							 x1, x2, "");
			LLVMValueRef y = LLVMBuildSelect(ctx->ac.builder, sel_y1,
							 y1, y2, "");

			out[0] = LLVMBuildSIToFP(ctx->ac.builder, x, ctx->f32, "");
			out[1] = LLVMBuildSIToFP(ctx->ac.builder, y, ctx->f32, "");
			out[2] = LLVMGetParam(ctx->main_fn,
					      param_vs_blit_inputs + 2);
			out[3] = ctx->ac.f32_1;
			return;
		}

		/* Color or texture coordinates: */
		assert(input_index == 1);

		if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
			for (int i = 0; i < 4; i++) {
				out[i] = LLVMGetParam(ctx->main_fn,
						      param_vs_blit_inputs + 3 + i);
			}
		} else {
			assert(vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD);
			LLVMValueRef x1 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 3);
			LLVMValueRef y1 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 4);
			LLVMValueRef x2 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 5);
			LLVMValueRef y2 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 6);

			out[0] = LLVMBuildSelect(ctx->ac.builder, sel_x1,
						 x1, x2, "");
			out[1] = LLVMBuildSelect(ctx->ac.builder, sel_y1,
						 y1, y2, "");
			out[2] = LLVMGetParam(ctx->main_fn,
					      param_vs_blit_inputs + 7);
			out[3] = LLVMGetParam(ctx->main_fn,
					      param_vs_blit_inputs + 8);
		}
		return;
	}

	unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
	union si_vs_fix_fetch fix_fetch;
	LLVMValueRef vb_desc;
	LLVMValueRef vertex_index;
	LLVMValueRef tmp;

	if (input_index < num_vbos_in_user_sgprs) {
		vb_desc = ac_get_arg(&ctx->ac, ctx->vb_descriptors[input_index]);
	} else {
		unsigned index = input_index - num_vbos_in_user_sgprs;
		vb_desc = ac_build_load_to_sgpr(&ctx->ac,
						ac_get_arg(&ctx->ac, ctx->vertex_buffers),
						LLVMConstInt(ctx->i32, index, 0));
	}

	vertex_index = LLVMGetParam(ctx->main_fn,
				    ctx->vertex_index0.arg_index +
				    input_index);

	/* Use the open-coded implementation for all loads of doubles and
	 * of dword-sized data that needs fixups. We need to insert conversion
	 * code anyway, and the amd/common code does it for us.
	 *
	 * Note: On LLVM <= 8, we can only open-code formats with
	 * channel size >= 4 bytes.
	 */
	bool opencode = ctx->shader->key.mono.vs_fetch_opencode & (1 << input_index);
	fix_fetch.bits = ctx->shader->key.mono.vs_fix_fetch[input_index].bits;
	if (opencode ||
	    (fix_fetch.u.log_size == 3 && fix_fetch.u.format == AC_FETCH_FORMAT_FLOAT) ||
	    (fix_fetch.u.log_size == 2)) {
		tmp = ac_build_opencoded_load_format(
				&ctx->ac, fix_fetch.u.log_size, fix_fetch.u.num_channels_m1 + 1,
				fix_fetch.u.format, fix_fetch.u.reverse, !opencode,
				vb_desc, vertex_index, ctx->ac.i32_0, ctx->ac.i32_0, 0, true);
		for (unsigned i = 0; i < 4; ++i)
			out[i] = LLVMBuildExtractElement(ctx->ac.builder, tmp, LLVMConstInt(ctx->i32, i, false), "");
		return;
	}

	/* Do multiple loads for special formats. */
	unsigned required_channels = util_last_bit(info->input_usage_mask[input_index]);
	LLVMValueRef fetches[4];
	unsigned num_fetches;
	unsigned fetch_stride;
	unsigned channels_per_fetch;

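	/* 8- and 16-bit 3-channel formats have no matching hardware buffer
	 * format, so fetch each channel separately; everything else can be
	 * fetched with a single typed buffer load.
	 */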
	if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2) {
		num_fetches = MIN2(required_channels, 3);
		fetch_stride = 1 << fix_fetch.u.log_size;
		channels_per_fetch = 1;
	} else {
		num_fetches = 1;
		fetch_stride = 0;
		channels_per_fetch = required_channels;
	}

	for (unsigned i = 0; i < num_fetches; ++i) {
		LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);
		fetches[i] = ac_build_buffer_load_format(&ctx->ac, vb_desc, vertex_index, voffset,
							 channels_per_fetch, 0, true);
	}

	if (num_fetches == 1 && channels_per_fetch > 1) {
		LLVMValueRef fetch = fetches[0];
		for (unsigned i = 0; i < channels_per_fetch; ++i) {
			tmp = LLVMConstInt(ctx->i32, i, false);
			fetches[i] = LLVMBuildExtractElement(
				ctx->ac.builder, fetch, tmp, "");
		}
		num_fetches = channels_per_fetch;
		channels_per_fetch = 1;
	}

	for (unsigned i = num_fetches; i < 4; ++i)
		fetches[i] = LLVMGetUndef(ctx->f32);

	if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2 &&
	    required_channels == 4) {
		if (fix_fetch.u.format == AC_FETCH_FORMAT_UINT || fix_fetch.u.format == AC_FETCH_FORMAT_SINT)
			fetches[3] = ctx->ac.i32_1;
		else
			fetches[3] = ctx->ac.f32_1;
	} else if (fix_fetch.u.log_size == 3 &&
		   (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ||
		    fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED ||
		    fix_fetch.u.format == AC_FETCH_FORMAT_SINT) &&
		   required_channels == 4) {
		/* For 2_10_10_10, the hardware returns an unsigned value;
		 * convert it to a signed one.
		 */
		LLVMValueRef tmp = fetches[3];
		LLVMValueRef c30 = LLVMConstInt(ctx->i32, 30, 0);

		/* First, recover the sign-extended signed integer value. */
		if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED)
			tmp = LLVMBuildFPToUI(ctx->ac.builder, tmp, ctx->i32, "");
		else
			tmp = ac_to_integer(&ctx->ac, tmp);

		/* For the integer-like cases, do a natural sign extension.
		 *
		 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
		 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
		 * exponent.
		 */
		tmp = LLVMBuildShl(ctx->ac.builder, tmp,
				   fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ?
				   LLVMConstInt(ctx->i32, 7, 0) : c30, "");
		tmp = LLVMBuildAShr(ctx->ac.builder, tmp, c30, "");

		/* Convert back to the right type. */
		if (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM) {
			LLVMValueRef clamp;
			LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
			clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, tmp, neg_one, "");
			tmp = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, tmp, "");
		} else if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED) {
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
		}

		fetches[3] = tmp;
	}

	for (unsigned i = 0; i < 4; ++i)
		out[i] = ac_to_float(&ctx->ac, fetches[i]);
}

LLVMValueRef si_get_primitive_id(struct si_shader_context *ctx,
				 unsigned swizzle)
{
	if (swizzle > 0)
		return ctx->i32_0;

	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		return ac_get_arg(&ctx->ac, ctx->vs_prim_id);
	case PIPE_SHADER_TESS_CTRL:
		return ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id);
	case PIPE_SHADER_TESS_EVAL:
		return ac_get_arg(&ctx->ac, ctx->args.tes_patch_id);
	case PIPE_SHADER_GEOMETRY:
		return ac_get_arg(&ctx->ac, ctx->args.gs_prim_id);
	default:
		assert(0);
		return ctx->i32_0;
	}
}

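/* The returned dword address is, in effect (a sketch of the computation
 * below):
 *
 *   base_addr + vertex_index * vertex_dw_stride  (if per-vertex)
 *             + param_index * 4                  (if indirectly indexed)
 *             + unique_index(name, index) * 4
 */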
static LLVMValueRef get_dw_address_from_generic_indices(struct si_shader_context *ctx,
							LLVMValueRef vertex_dw_stride,
							LLVMValueRef base_addr,
							LLVMValueRef vertex_index,
							LLVMValueRef param_index,
							ubyte name, ubyte index)
{
	if (vertex_dw_stride) {
		base_addr = ac_build_imad(&ctx->ac, vertex_index,
					  vertex_dw_stride, base_addr);
	}

	if (param_index) {
		base_addr = ac_build_imad(&ctx->ac, param_index,
					  LLVMConstInt(ctx->i32, 4, 0), base_addr);
	}

	int param = name == TGSI_SEMANTIC_PATCH ||
		    name == TGSI_SEMANTIC_TESSINNER ||
		    name == TGSI_SEMANTIC_TESSOUTER ?
			    si_shader_io_get_unique_index_patch(name, index) :
			    si_shader_io_get_unique_index(name, index, false);

	/* Add the base address of the element. */
	return LLVMBuildAdd(ctx->ac.builder, base_addr,
			    LLVMConstInt(ctx->i32, param * 4, 0), "");
}

/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
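/* Equivalently, the byte address computed below is:
 *
 *   per-vertex attr: 16 * (param_index * total_vertices +
 *                          rel_patch_id * vertices_per_patch + vertex_index)
 *   per-patch attr:  16 * (param_index * num_patches + rel_patch_id)
 *                    + patch_data_offset
 */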
static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
					       LLVMValueRef rel_patch_id,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
	LLVMValueRef param_stride, constant16;

	vertices_per_patch = get_num_tcs_out_vertices(ctx);
	num_patches = si_unpack_param(ctx, ctx->tcs_offchip_layout, 0, 6);
	total_vertices = LLVMBuildMul(ctx->ac.builder, vertices_per_patch,
				      num_patches, "");

	constant16 = LLVMConstInt(ctx->i32, 16, 0);
	if (vertex_index) {
		base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
					  vertices_per_patch, vertex_index);
		param_stride = total_vertices;
	} else {
		base_addr = rel_patch_id;
		param_stride = num_patches;
	}

	base_addr = ac_build_imad(&ctx->ac, param_index, param_stride, base_addr);
	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		LLVMValueRef patch_data_offset =
			si_unpack_param(ctx, ctx->tcs_offchip_layout, 12, 20);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}

static LLVMValueRef get_tcs_tes_buffer_address_from_generic_indices(
					struct si_shader_context *ctx,
					LLVMValueRef vertex_index,
					LLVMValueRef param_index,
					ubyte name, ubyte index)
{
	unsigned param_index_base;

	param_index_base = name == TGSI_SEMANTIC_PATCH ||
			   name == TGSI_SEMANTIC_TESSINNER ||
			   name == TGSI_SEMANTIC_TESSOUTER ?
				   si_shader_io_get_unique_index_patch(name, index) :
				   si_shader_io_get_unique_index(name, index, false);

	if (param_index) {
		param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
					   LLVMConstInt(ctx->i32, param_index_base, 0),
					   "");
	} else {
		param_index = LLVMConstInt(ctx->i32, param_index_base, 0);
	}

	return get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx),
					  vertex_index, param_index);
}

static LLVMValueRef si_build_gather_64bit(struct si_shader_context *ctx,
					  LLVMTypeRef type,
					  LLVMValueRef val1,
					  LLVMValueRef val2)
{
	LLVMValueRef values[2] = {
		ac_to_integer(&ctx->ac, val1),
		ac_to_integer(&ctx->ac, val2),
	};
	LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, 2);
	return LLVMBuildBitCast(ctx->ac.builder, result, type, "");
}

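/**
 * Load from the tess offchip buffer through a buffer descriptor.
 *
 * \param type output value type
 * \param swizzle component (typically 0..3); ~0 loads a full vec4
 * \param buffer buffer resource descriptor
 * \param offset address within the buffer
 * \param base base offset (the tcs_offchip_offset SGPR in the callers here)
 *
 * 64-bit types are loaded as two consecutive dwords and recombined.
 */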
static LLVMValueRef buffer_load(struct si_shader_context *ctx,
				LLVMTypeRef type, unsigned swizzle,
				LLVMValueRef buffer, LLVMValueRef offset,
				LLVMValueRef base, bool can_speculate)
{
	LLVMValueRef value, value2;
	LLVMTypeRef vec_type = LLVMVectorType(type, 4);

	if (swizzle == ~0) {
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, ac_glc, can_speculate, false);

		return LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
	}

	if (!llvm_type_is_64bit(ctx, type)) {
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, ac_glc, can_speculate, false);

		value = LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
		return LLVMBuildExtractElement(ctx->ac.builder, value,
					       LLVMConstInt(ctx->i32, swizzle, 0), "");
	}

	value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				     swizzle * 4, ac_glc, can_speculate, false);

	value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				      swizzle * 4 + 4, ac_glc, can_speculate, false);

	return si_build_gather_64bit(ctx, type, value, value2);
}

/**
 * Load from LSHS LDS storage.
 *
 * \param type output value type
 * \param swizzle offset (typically 0..3); it can be ~0, which loads a vec4
 * \param dw_addr address in dwords
 */
static LLVMValueRef lshs_lds_load(struct si_shader_context *ctx,
				  LLVMTypeRef type, unsigned swizzle,
				  LLVMValueRef dw_addr)
{
	LLVMValueRef value;

	if (swizzle == ~0) {
		LLVMValueRef values[4];

		for (unsigned chan = 0; chan < 4; chan++)
			values[chan] = lshs_lds_load(ctx, type, chan, dw_addr);

		return ac_build_gather_values(&ctx->ac, values, 4);
	}

	/* Split 64-bit loads. */
	if (llvm_type_is_64bit(ctx, type)) {
		LLVMValueRef lo, hi;

		lo = lshs_lds_load(ctx, ctx->i32, swizzle, dw_addr);
		hi = lshs_lds_load(ctx, ctx->i32, swizzle + 1, dw_addr);
		return si_build_gather_64bit(ctx, type, lo, hi);
	}

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->i32, swizzle, 0), "");

	value = ac_lds_load(&ctx->ac, dw_addr);

	return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
}

/**
 * Store to LSHS LDS storage.
 *
 * \param dw_offset_imm dword offset relative to dw_addr (typically 0..3)
 * \param dw_addr address in dwords
 * \param value value to store
 */
static void lshs_lds_store(struct si_shader_context *ctx,
			   unsigned dw_offset_imm, LLVMValueRef dw_addr,
			   LLVMValueRef value)
{
	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->i32, dw_offset_imm, 0), "");

	ac_lds_store(&ctx->ac, dw_addr, value);
}

enum si_tess_ring {
	TCS_FACTOR_RING,
	TESS_OFFCHIP_RING_TCS,
	TESS_OFFCHIP_RING_TES,
};

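/* Build a 4-dword buffer resource descriptor for one of the tess rings.
 * Dword 0 holds the low 32 bits of the base address, dword 1 the high
 * address bits, dword 2 the size (0xffffffff, i.e. effectively unbounded),
 * and dword 3 the dst_sel/format fields assembled below.
 */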
static LLVMValueRef get_tess_ring_descriptor(struct si_shader_context *ctx,
					     enum si_tess_ring ring)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef addr = ac_get_arg(&ctx->ac,
				       ring == TESS_OFFCHIP_RING_TES ?
				       ctx->tes_offchip_addr :
				       ctx->tcs_out_lds_layout);

	/* TCS only receives high 13 bits of the address. */
	if (ring == TESS_OFFCHIP_RING_TCS || ring == TCS_FACTOR_RING) {
		addr = LLVMBuildAnd(builder, addr,
				    LLVMConstInt(ctx->i32, 0xfff80000, 0), "");
	}

	if (ring == TCS_FACTOR_RING) {
		unsigned tf_offset = ctx->screen->tess_offchip_ring_size;
		addr = LLVMBuildAdd(builder, addr,
				    LLVMConstInt(ctx->i32, tf_offset, 0), "");
	}

	uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

	if (ctx->screen->info.chip_class >= GFX10)
		rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
			 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
			 S_008F0C_RESOURCE_LEVEL(1);
	else
		rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

	LLVMValueRef desc[4];
	desc[0] = addr;
	desc[1] = LLVMConstInt(ctx->i32,
			       S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);
	desc[2] = LLVMConstInt(ctx->i32, 0xffffffff, 0);
	desc[3] = LLVMConstInt(ctx->i32, rsrc3, false);

	return ac_build_gather_values(&ctx->ac, desc, 4);
}

static LLVMValueRef si_nir_load_tcs_varyings(struct ac_shader_abi *abi,
					     LLVMTypeRef type,
					     LLVMValueRef vertex_index,
					     LLVMValueRef param_index,
					     unsigned const_index,
					     unsigned location,
					     unsigned driver_location,
					     unsigned component,
					     unsigned num_components,
					     bool is_patch,
					     bool is_compact,
					     bool load_input)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef dw_addr, stride;
	ubyte name, index;

	driver_location = driver_location / 4;

	if (load_input) {
		name = info->input_semantic_name[driver_location];
		index = info->input_semantic_index[driver_location];
	} else {
		name = info->output_semantic_name[driver_location];
		index = info->output_semantic_index[driver_location];
	}

	assert((name == TGSI_SEMANTIC_PATCH ||
		name == TGSI_SEMANTIC_TESSINNER ||
		name == TGSI_SEMANTIC_TESSOUTER) == is_patch);

	if (load_input) {
		stride = get_tcs_in_vertex_dw_stride(ctx);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (is_patch) {
			stride = NULL;
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		} else {
			stride = get_tcs_out_vertex_dw_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		}
	}

	if (!param_index) {
		param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
						      vertex_index, param_index,
						      name, index);

	LLVMValueRef value[4];
	for (unsigned i = 0; i < num_components; i++) {
		unsigned offset = i;
		if (llvm_type_is_64bit(ctx, type))
			offset *= 2;

		offset += component;
		value[i + component] = lshs_lds_load(ctx, type, offset, dw_addr);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}

LLVMValueRef si_nir_load_input_tes(struct ac_shader_abi *abi,
				   LLVMTypeRef type,
				   LLVMValueRef vertex_index,
				   LLVMValueRef param_index,
				   unsigned const_index,
				   unsigned location,
				   unsigned driver_location,
				   unsigned component,
				   unsigned num_components,
				   bool is_patch,
				   bool is_compact,
				   bool load_input)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef base, addr;

	driver_location = driver_location / 4;
	ubyte name = info->input_semantic_name[driver_location];
	ubyte index = info->input_semantic_index[driver_location];

	assert((name == TGSI_SEMANTIC_PATCH ||
		name == TGSI_SEMANTIC_TESSINNER ||
		name == TGSI_SEMANTIC_TESSOUTER) == is_patch);

	base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);

	if (!param_index) {
		param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index,
							       name, index);

	/* TODO: This will generate rather ordinary llvm code, although it
	 * should be easy for the optimiser to fix up. In future we might want
	 * to refactor buffer_load().
	 */
	LLVMValueRef value[4];
	for (unsigned i = 0; i < num_components; i++) {
		unsigned offset = i;
		if (llvm_type_is_64bit(ctx, type)) {
			offset *= 2;
			if (offset == 4) {
				ubyte name = info->input_semantic_name[driver_location + 1];
				ubyte index = info->input_semantic_index[driver_location + 1];
				addr = get_tcs_tes_buffer_address_from_generic_indices(ctx,
										       vertex_index,
										       param_index,
										       name, index);
			}

			offset = offset % 4;
		}

		offset += component;
		value[i + component] = buffer_load(ctx, type, offset,
						   ctx->tess_offchip_ring, base, addr, true);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}

static void si_nir_store_output_tcs(struct ac_shader_abi *abi,
				    const struct nir_variable *var,
				    LLVMValueRef vertex_index,
				    LLVMValueRef param_index,
				    unsigned const_index,
				    LLVMValueRef src,
				    unsigned writemask)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_info *info = &ctx->shader->selector->info;
	const unsigned component = var->data.location_frac;
	unsigned driver_location = var->data.driver_location;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef buffer, base, addr;
	LLVMValueRef values[8];
	bool skip_lds_store;
	bool is_tess_factor = false, is_tess_inner = false;

	driver_location = driver_location / 4;
	ubyte name = info->output_semantic_name[driver_location];
	ubyte index = info->output_semantic_index[driver_location];

	bool is_const = !param_index;
	if (!param_index)
		param_index = LLVMConstInt(ctx->i32, const_index, 0);

	const bool is_patch = var->data.patch ||
			      var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
			      var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;

	assert((name == TGSI_SEMANTIC_PATCH ||
		name == TGSI_SEMANTIC_TESSINNER ||
		name == TGSI_SEMANTIC_TESSOUTER) == is_patch);

	if (!is_patch) {
		stride = get_tcs_out_vertex_dw_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
							      vertex_index, param_index,
							      name, index);

		skip_lds_store = !info->reads_pervertex_outputs;
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address_from_generic_indices(ctx, NULL, dw_addr,
							      vertex_index, param_index,
							      name, index);

		skip_lds_store = !info->reads_perpatch_outputs;

		if (is_const && const_index == 0) {
			int name = info->output_semantic_name[driver_location];

			/* Always write tess factors into LDS for the TCS epilog. */
			if (name == TGSI_SEMANTIC_TESSINNER ||
			    name == TGSI_SEMANTIC_TESSOUTER) {
				/* The epilog doesn't read LDS if invocation 0 defines tess factors. */
				skip_lds_store = !info->reads_tessfactor_outputs &&
						 ctx->shader->selector->info.tessfactors_are_def_in_all_invocs;
				is_tess_factor = true;
				is_tess_inner = name == TGSI_SEMANTIC_TESSINNER;
			}
		}
	}

	buffer = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);

	base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);

	addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index, name, index);

	for (unsigned chan = component; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);

		unsigned buffer_store_offset = chan % 4;
		if (chan == 4) {
			ubyte name = info->output_semantic_name[driver_location + 1];
			ubyte index = info->output_semantic_index[driver_location + 1];
			addr = get_tcs_tes_buffer_address_from_generic_indices(ctx,
									       vertex_index,
									       param_index,
									       name, index);
		}

		/* Skip LDS stores if there is no LDS read of this output. */
		if (!skip_lds_store)
			lshs_lds_store(ctx, chan, dw_addr, value);

		value = ac_to_integer(&ctx->ac, value);
		values[chan] = value;

		if (writemask != 0xF && !is_tess_factor) {
			ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
						    addr, base,
						    4 * buffer_store_offset,
						    ac_glc);
		}

		/* Write tess factors into VGPRs for the epilog. */
		if (is_tess_factor &&
		    ctx->shader->selector->info.tessfactors_are_def_in_all_invocs) {
			if (!is_tess_inner) {
				LLVMBuildStore(ctx->ac.builder, value, /* outer */
					       ctx->invoc0_tess_factors[chan]);
			} else if (chan < 2) {
				LLVMBuildStore(ctx->ac.builder, value, /* inner */
					       ctx->invoc0_tess_factors[4 + chan]);
			}
		}
	}

	if (writemask == 0xF && !is_tess_factor) {
		LLVMValueRef value = ac_build_gather_values(&ctx->ac,
							    values, 4);
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, addr,
					    base, 0, ac_glc);
	}
}

static LLVMValueRef si_llvm_load_input_gs(struct ac_shader_abi *abi,
					  unsigned input_index,
					  unsigned vtx_offset_param,
					  LLVMTypeRef type,
					  unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *shader = ctx->shader;
	LLVMValueRef vtx_offset, soffset;
	struct si_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[input_index];
	unsigned semantic_index = info->input_semantic_index[input_index];
	unsigned param;
	LLVMValueRef value;

	param = si_shader_io_get_unique_index(semantic_name, semantic_index, false);

	/* GFX9 has the ESGS ring in LDS. */
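	/* Each of the gs_vtx01/23/45_offset SGPRs packs two 16-bit vertex
	 * offsets: vertex i lives in SGPR i/2, in the high half when i is odd.
	 */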
	if (ctx->screen->info.chip_class >= GFX9) {
		unsigned index = vtx_offset_param;

		switch (index / 2) {
		case 0:
			vtx_offset = si_unpack_param(ctx, ctx->gs_vtx01_offset,
						     index % 2 ? 16 : 0, 16);
			break;
		case 1:
			vtx_offset = si_unpack_param(ctx, ctx->gs_vtx23_offset,
						     index % 2 ? 16 : 0, 16);
			break;
		case 2:
			vtx_offset = si_unpack_param(ctx, ctx->gs_vtx45_offset,
						     index % 2 ? 16 : 0, 16);
			break;
		default:
			assert(0);
			return NULL;
		}

		unsigned offset = param * 4 + swizzle;
		vtx_offset = LLVMBuildAdd(ctx->ac.builder, vtx_offset,
					  LLVMConstInt(ctx->i32, offset, false), "");

		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->esgs_ring, vtx_offset);
		LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, ptr, "");
		if (llvm_type_is_64bit(ctx, type)) {
			ptr = LLVMBuildGEP(ctx->ac.builder, ptr,
					   &ctx->ac.i32_1, 1, "");
			LLVMValueRef values[2] = {
				value,
				LLVMBuildLoad(ctx->ac.builder, ptr, "")
			};
			value = ac_build_gather_values(&ctx->ac, values, 2);
		}
		return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
	}

	/* GFX6: input load from the ESGS ring in memory. */
	if (swizzle == ~0) {
		LLVMValueRef values[4];
		unsigned chan;
		for (chan = 0; chan < 4; chan++) {
			values[chan] = si_llvm_load_input_gs(abi, input_index, vtx_offset_param,
							     type, chan);
		}
		return ac_build_gather_values(&ctx->ac, values, 4);
	}

	/* Get the vertex offset parameter on GFX6. */
	LLVMValueRef gs_vtx_offset = ac_get_arg(&ctx->ac,
						ctx->gs_vtx_offset[vtx_offset_param]);

	vtx_offset = LLVMBuildMul(ctx->ac.builder, gs_vtx_offset,
				  LLVMConstInt(ctx->i32, 4, 0), "");

	soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle) * 256, 0);

	value = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1, ctx->i32_0,
				     vtx_offset, soffset, 0, ac_glc, true, false);
	if (llvm_type_is_64bit(ctx, type)) {
		LLVMValueRef value2;
		soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle + 1) * 256, 0);

		value2 = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1,
					      ctx->i32_0, vtx_offset, soffset,
					      0, ac_glc, true, false);
		return si_build_gather_64bit(ctx, type, value, value2);
	}
	return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
}

static LLVMValueRef si_nir_load_input_gs(struct ac_shader_abi *abi,
					 unsigned location,
					 unsigned driver_location,
					 unsigned component,
					 unsigned num_components,
					 unsigned vertex_index,
					 unsigned const_index,
					 LLVMTypeRef type)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);

	LLVMValueRef value[4];
	for (unsigned i = 0; i < num_components; i++) {
		unsigned offset = i;
		if (llvm_type_is_64bit(ctx, type))
			offset *= 2;

		offset += component;
		value[i + component] = si_llvm_load_input_gs(&ctx->abi, driver_location / 4 + const_index,
							     vertex_index, type, offset);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}

static LLVMValueRef get_base_vertex(struct ac_shader_abi *abi)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);

	/* For non-indexed draws, the base vertex set by the driver
	 * (for direct draws) or the CP (for indirect draws) is the
	 * first vertex ID, but GLSL expects 0 to be returned.
	 */
	LLVMValueRef vs_state = ac_get_arg(&ctx->ac,
					   ctx->vs_state_bits);
	LLVMValueRef indexed;

	indexed = LLVMBuildLShr(ctx->ac.builder, vs_state, ctx->i32_1, "");
	indexed = LLVMBuildTrunc(ctx->ac.builder, indexed, ctx->i1, "");

	return LLVMBuildSelect(ctx->ac.builder, indexed,
			       ac_get_arg(&ctx->ac, ctx->args.base_vertex),
			       ctx->i32_0, "");
}

static LLVMValueRef get_block_size(struct ac_shader_abi *abi)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);

	LLVMValueRef values[3];
	LLVMValueRef result;
	unsigned i;
	unsigned *properties = ctx->shader->selector->info.properties;

	if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
		unsigned sizes[3] = {
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
		};

		for (i = 0; i < 3; ++i)
			values[i] = LLVMConstInt(ctx->i32, sizes[i], 0);

		result = ac_build_gather_values(&ctx->ac, values, 3);
	} else {
		result = ac_get_arg(&ctx->ac, ctx->block_size);
	}

	return result;
}

static LLVMValueRef si_load_tess_coord(struct ac_shader_abi *abi)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	LLVMValueRef coord[4] = {
		ac_get_arg(&ctx->ac, ctx->tes_u),
		ac_get_arg(&ctx->ac, ctx->tes_v),
		ctx->ac.f32_0,
		ctx->ac.f32_0
	};

	/* For triangles, the vector should be (u, v, 1-u-v). */
	if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
	    PIPE_PRIM_TRIANGLES) {
		coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
					 LLVMBuildFAdd(ctx->ac.builder,
						       coord[0], coord[1], ""), "");
	}
	return ac_build_gather_values(&ctx->ac, coord, 4);
}

static LLVMValueRef load_tess_level(struct si_shader_context *ctx,
				    unsigned semantic_name)
{
	LLVMValueRef base, addr;

	int param = si_shader_io_get_unique_index_patch(semantic_name, 0);

	base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
	addr = get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), NULL,
					  LLVMConstInt(ctx->i32, param, 0));

	return buffer_load(ctx, ctx->f32,
			   ~0, ctx->tess_offchip_ring, base, addr, true);
}

static LLVMValueRef load_tess_level_default(struct si_shader_context *ctx,
					    unsigned semantic_name)
{
	LLVMValueRef buf, slot, val[4];
	int i, offset;

	slot = LLVMConstInt(ctx->i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
	buf = ac_get_arg(&ctx->ac, ctx->rw_buffers);
	buf = ac_build_load_to_sgpr(&ctx->ac, buf, slot);
	offset = semantic_name == TGSI_SEMANTIC_TESS_DEFAULT_INNER_LEVEL ? 4 : 0;

	for (i = 0; i < 4; i++)
		val[i] = si_buffer_load_const(ctx, buf,
					      LLVMConstInt(ctx->i32, (offset + i) * 4, 0));
	return ac_build_gather_values(&ctx->ac, val, 4);
}

static LLVMValueRef si_load_tess_level(struct ac_shader_abi *abi,
				       unsigned varying_id,
				       bool load_default_state)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	unsigned semantic_name;

	if (load_default_state) {
		switch (varying_id) {
		case VARYING_SLOT_TESS_LEVEL_INNER:
			semantic_name = TGSI_SEMANTIC_TESS_DEFAULT_INNER_LEVEL;
			break;
		case VARYING_SLOT_TESS_LEVEL_OUTER:
			semantic_name = TGSI_SEMANTIC_TESS_DEFAULT_OUTER_LEVEL;
			break;
		default:
			unreachable("unknown tess level");
		}
		return load_tess_level_default(ctx, semantic_name);
	}

	switch (varying_id) {
	case VARYING_SLOT_TESS_LEVEL_INNER:
		semantic_name = TGSI_SEMANTIC_TESSINNER;
		break;
	case VARYING_SLOT_TESS_LEVEL_OUTER:
		semantic_name = TGSI_SEMANTIC_TESSOUTER;
		break;
	default:
		unreachable("unknown tess level");
	}

	return load_tess_level(ctx, semantic_name);
}

static LLVMValueRef si_load_patch_vertices_in(struct ac_shader_abi *abi)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	if (ctx->type == PIPE_SHADER_TESS_CTRL)
		return si_unpack_param(ctx, ctx->tcs_out_lds_layout, 13, 6);
	else if (ctx->type == PIPE_SHADER_TESS_EVAL)
		return get_num_tcs_out_vertices(ctx);
	else
		unreachable("invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
}

void si_declare_compute_memory(struct si_shader_context *ctx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	unsigned lds_size = sel->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE];

	LLVMTypeRef i8p = LLVMPointerType(ctx->i8, AC_ADDR_SPACE_LDS);
	LLVMValueRef var;

	assert(!ctx->ac.lds);

	var = LLVMAddGlobalInAddressSpace(ctx->ac.module,
					  LLVMArrayType(ctx->i8, lds_size),
					  "compute_lds",
					  AC_ADDR_SPACE_LDS);
	LLVMSetAlignment(var, 64 * 1024);

	ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, var, i8p, "");
}

static LLVMValueRef load_const_buffer_desc_fast_path(struct si_shader_context *ctx)
{
	LLVMValueRef ptr =
		ac_get_arg(&ctx->ac, ctx->const_and_shader_buffers);
	struct si_shader_selector *sel = ctx->shader->selector;

	/* Do the bounds checking with a descriptor, because
	 * doing computation and manual bounds checking of 64-bit
	 * addresses generates horrible VALU code with very high
	 * VGPR usage and very low SIMD occupancy.
	 */
	ptr = LLVMBuildPtrToInt(ctx->ac.builder, ptr, ctx->ac.intptr, "");

	LLVMValueRef desc0, desc1;
	desc0 = ptr;
	desc1 = LLVMConstInt(ctx->i32,
			     S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);

	uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

	if (ctx->screen->info.chip_class >= GFX10)
		rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
			 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
			 S_008F0C_RESOURCE_LEVEL(1);
	else
		rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

	LLVMValueRef desc_elems[] = {
		desc0,
		desc1,
		LLVMConstInt(ctx->i32, sel->info.constbuf0_num_slots * 16, 0),
		LLVMConstInt(ctx->i32, rsrc3, false)
	};

	return ac_build_gather_values(&ctx->ac, desc_elems, 4);
}

static LLVMValueRef load_ubo(struct ac_shader_abi *abi, LLVMValueRef index)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_selector *sel = ctx->shader->selector;

	LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->const_and_shader_buffers);

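	/* Fast path: if only constant buffer 0 is declared and there are no
	 * shader buffers, build its descriptor inline from the pointer SGPR
	 * instead of loading it from memory.
	 */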
	if (sel->info.const_buffers_declared == 1 &&
	    sel->info.shader_buffers_declared == 0) {
		return load_const_buffer_desc_fast_path(ctx);
	}

	index = si_llvm_bound_index(ctx, index, ctx->num_const_buffers);
	index = LLVMBuildAdd(ctx->ac.builder, index,
			     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");

	return ac_build_load_to_sgpr(&ctx->ac, ptr, index);
}

static LLVMValueRef
load_ssbo(struct ac_shader_abi *abi, LLVMValueRef index, bool write)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	LLVMValueRef rsrc_ptr = ac_get_arg(&ctx->ac,
					   ctx->const_and_shader_buffers);

	index = si_llvm_bound_index(ctx, index, ctx->num_shader_buffers);
	index = LLVMBuildSub(ctx->ac.builder,
			     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS - 1, 0),
			     index, "");

	return ac_build_load_to_sgpr(&ctx->ac, rsrc_ptr, index);
}

/* Initialize arguments for the shader export intrinsic */
static void si_llvm_init_vs_export_args(struct si_shader_context *ctx,
					LLVMValueRef *values,
					unsigned target,
					struct ac_export_args *args)
{
	args->enabled_channels = 0xf; /* writemask - default is 0xf */
	args->valid_mask = 0; /* Specify whether the EXEC mask represents the valid mask */
	args->done = 0; /* Specify whether this is the last export */
	args->target = target; /* Specify the target we are exporting */
	args->compr = false;

	memcpy(&args->out[0], values, sizeof(values[0]) * 4);
}

static void si_llvm_emit_clipvertex(struct si_shader_context *ctx,
				    struct ac_export_args *pos, LLVMValueRef *out_elts)
{
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
	LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
						   SI_VS_CONST_CLIP_PLANES, 0);
	LLVMValueRef const_resource = ac_build_load_to_sgpr(&ctx->ac, ptr, constbuf_index);

	for (reg_index = 0; reg_index < 2; reg_index++) {
		struct ac_export_args *args = &pos[2 + reg_index];

		args->out[0] =
		args->out[1] =
		args->out[2] =
		args->out[3] = LLVMConstReal(ctx->f32, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < 4; chan++) {
			for (const_chan = 0; const_chan < 4; const_chan++) {
				LLVMValueRef addr =
					LLVMConstInt(ctx->i32, ((reg_index * 4 + chan) * 4 +
								const_chan) * 4, 0);
				base_elt = si_buffer_load_const(ctx, const_resource,
								addr);
				args->out[chan] = ac_build_fmad(&ctx->ac, base_elt,
								out_elts[const_chan], args->out[chan]);
			}
		}

		args->enabled_channels = 0xf;
		args->valid_mask = 0;
		args->done = 0;
		args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
		args->compr = 0;
	}
}

static void si_dump_streamout(struct pipe_stream_output_info *so)
{
	unsigned i;

	if (so->num_outputs)
		fprintf(stderr, "STREAMOUT\n");

	for (i = 0; i < so->num_outputs; i++) {
		unsigned mask = ((1 << so->output[i].num_components) - 1) <<
				so->output[i].start_component;
		fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
			i, so->output[i].output_buffer,
			so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
			so->output[i].register_index,
			mask & 1 ? "x" : "",
			mask & 2 ? "y" : "",
			mask & 4 ? "z" : "",
			mask & 8 ? "w" : "");
	}
}

void si_emit_streamout_output(struct si_shader_context *ctx,
			      LLVMValueRef const *so_buffers,
			      LLVMValueRef const *so_write_offsets,
			      struct pipe_stream_output *stream_out,
			      struct si_shader_output_values *shader_out)
{
	unsigned buf_idx = stream_out->output_buffer;
	unsigned start = stream_out->start_component;
	unsigned num_comps = stream_out->num_components;
	LLVMValueRef out[4];

	assert(num_comps && num_comps <= 4);
	if (!num_comps || num_comps > 4)
		return;

	/* Load the output as int. */
	for (int j = 0; j < num_comps; j++) {
		assert(stream_out->stream == shader_out->vertex_stream[start + j]);

		out[j] = ac_to_integer(&ctx->ac, shader_out->values[start + j]);
	}

	/* Pack the output. */
	LLVMValueRef vdata = NULL;

	switch (num_comps) {
	case 1: /* as i32 */
		vdata = out[0];
		break;
	case 2: /* as v2i32 */
	case 3: /* as v3i32 */
		if (ac_has_vec3_support(ctx->screen->info.chip_class, false)) {
			vdata = ac_build_gather_values(&ctx->ac, out, num_comps);
			break;
		}
		/* as v4i32 (aligned to 4) */
		out[3] = LLVMGetUndef(ctx->i32);
		/* fall through */
	case 4: /* as v4i32 */
		vdata = ac_build_gather_values(&ctx->ac, out, util_next_power_of_two(num_comps));
		break;
	}

	ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
				    vdata, num_comps,
				    so_write_offsets[buf_idx],
				    ctx->i32_0,
				    stream_out->dst_offset * 4, ac_glc | ac_slc);
}

/**
 * Write streamout data to buffers for vertex stream @p stream (different
 * vertex streams can occur for GS copy shaders).
 */
static void si_llvm_emit_streamout(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput, unsigned stream)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	struct pipe_stream_output_info *so = &sel->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	int i;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		si_unpack_param(ctx, ctx->streamout_config, 16, 7);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	ac_build_ifcc(&ctx->ac, can_emit, 6501);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */

		LLVMValueRef so_write_index =
			ac_get_arg(&ctx->ac,
				   ctx->streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Load the descriptor and compute the write offset for each
		 * enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		LLVMValueRef so_buffers[4];
		LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac,
						  ctx->rw_buffers);

		for (i = 0; i < 4; i++) {
			if (!so->stride[i])
				continue;

			LLVMValueRef offset = LLVMConstInt(ctx->i32,
							   SI_VS_STREAMOUT_BUF0 + i, 0);

			so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);

			LLVMValueRef so_offset = ac_get_arg(&ctx->ac,
							    ctx->streamout_offset[i]);
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");

			so_write_offset[i] = ac_build_imad(&ctx->ac, so_write_index,
							   LLVMConstInt(ctx->i32, so->stride[i]*4, 0),
							   so_offset);
		}

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned reg = so->output[i].register_index;

			if (reg >= noutput)
				continue;

			if (stream != so->output[i].stream)
				continue;

			si_emit_streamout_output(ctx, so_buffers, so_write_offset,
						 &so->output[i], &outputs[reg]);
		}
	}
	ac_build_endif(&ctx->ac, 6501);
}

static void si_export_param(struct si_shader_context *ctx, unsigned index,
			    LLVMValueRef *values)
{
	struct ac_export_args args;

	si_llvm_init_vs_export_args(ctx, values,
				    V_008DFC_SQ_EXP_PARAM + index, &args);
	ac_build_export(&ctx->ac, &args);
}

static void si_build_param_exports(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput)
{
	struct si_shader *shader = ctx->shader;
	unsigned param_count = 0;

	for (unsigned i = 0; i < noutput; i++) {
		unsigned semantic_name = outputs[i].semantic_name;
		unsigned semantic_index = outputs[i].semantic_index;
		if (outputs[i].vertex_stream[0] != 0 &&
		    outputs[i].vertex_stream[1] != 0 &&
		    outputs[i].vertex_stream[2] != 0 &&
		    outputs[i].vertex_stream[3] != 0)
			continue;

		switch (semantic_name) {
		case TGSI_SEMANTIC_LAYER:
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
		case TGSI_SEMANTIC_CLIPDIST:
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			break;
		default:
			continue;
		}

		if ((semantic_name != TGSI_SEMANTIC_GENERIC ||
		     semantic_index < SI_MAX_IO_GENERIC) &&
		    shader->key.opt.kill_outputs &
		    (1ull << si_shader_io_get_unique_index(semantic_name,
							   semantic_index, true)))
			continue;

		si_export_param(ctx, param_count, outputs[i].values);

		assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
		shader->info.vs_output_param_offset[i] = param_count++;
	}

	shader->info.nr_param_exports = param_count;
}

/**
 * Vertex color clamping.
 *
1739 * This loads a state constant from a user data SGPR and wraps the
1740 * clamping in an IF statement, so that all colors are clamped only
1741 * when the constant is true.
1742 */
1743 static void si_vertex_color_clamping(struct si_shader_context *ctx,
1744 struct si_shader_output_values *outputs,
1745 unsigned noutput)
1746 {
1747 LLVMValueRef addr[SI_MAX_VS_OUTPUTS][4];
1748 bool has_colors = false;
1749
1750 /* Store original colors to alloca variables. */
1751 for (unsigned i = 0; i < noutput; i++) {
1752 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
1753 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
1754 continue;
1755
1756 for (unsigned j = 0; j < 4; j++) {
1757 addr[i][j] = ac_build_alloca_undef(&ctx->ac, ctx->f32, "");
1758 LLVMBuildStore(ctx->ac.builder, outputs[i].values[j], addr[i][j]);
1759 }
1760 has_colors = true;
1761 }
1762
1763 if (!has_colors)
1764 return;
1765
1766 /* The state is in the first bit of the user SGPR. */
1767 LLVMValueRef cond = ac_get_arg(&ctx->ac, ctx->vs_state_bits);
1768 cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->i1, "");
1769
1770 ac_build_ifcc(&ctx->ac, cond, 6502);
1771
1772 /* Store clamped colors to alloca variables within the conditional block. */
1773 for (unsigned i = 0; i < noutput; i++) {
1774 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
1775 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
1776 continue;
1777
1778 for (unsigned j = 0; j < 4; j++) {
1779 LLVMBuildStore(ctx->ac.builder,
1780 ac_build_clamp(&ctx->ac, outputs[i].values[j]),
1781 addr[i][j]);
1782 }
1783 }
1784 ac_build_endif(&ctx->ac, 6502);
1785
1786 /* Load the clamped colors back from the alloca variables. */
1787 for (unsigned i = 0; i < noutput; i++) {
1788 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
1789 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
1790 continue;
1791
1792 for (unsigned j = 0; j < 4; j++) {
1793 outputs[i].values[j] =
1794 LLVMBuildLoad(ctx->ac.builder, addr[i][j], "");
1795 }
1796 }
1797 }
1798
1799 /* Generate export instructions for hardware VS shader stage or NGG GS stage
1800 * (position and parameter data only).
1801 */
1802 void si_llvm_export_vs(struct si_shader_context *ctx,
1803 struct si_shader_output_values *outputs,
1804 unsigned noutput)
1805 {
1806 struct si_shader *shader = ctx->shader;
1807 struct ac_export_args pos_args[4] = {};
1808 LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
1809 unsigned pos_idx;
1810 int i;
1811
1812 si_vertex_color_clamping(ctx, outputs, noutput);
1813
1814 /* Build position exports. */
1815 for (i = 0; i < noutput; i++) {
1816 switch (outputs[i].semantic_name) {
1817 case TGSI_SEMANTIC_POSITION:
1818 si_llvm_init_vs_export_args(ctx, outputs[i].values,
1819 V_008DFC_SQ_EXP_POS, &pos_args[0]);
1820 break;
1821 case TGSI_SEMANTIC_PSIZE:
1822 psize_value = outputs[i].values[0];
1823 break;
1824 case TGSI_SEMANTIC_LAYER:
1825 layer_value = outputs[i].values[0];
1826 break;
1827 case TGSI_SEMANTIC_VIEWPORT_INDEX:
1828 viewport_index_value = outputs[i].values[0];
1829 break;
1830 case TGSI_SEMANTIC_EDGEFLAG:
1831 edgeflag_value = outputs[i].values[0];
1832 break;
1833 case TGSI_SEMANTIC_CLIPDIST:
1834 if (!shader->key.opt.clip_disable) {
1835 unsigned index = 2 + outputs[i].semantic_index;
1836 si_llvm_init_vs_export_args(ctx, outputs[i].values,
1837 V_008DFC_SQ_EXP_POS + index,
1838 &pos_args[index]);
1839 }
1840 break;
1841 case TGSI_SEMANTIC_CLIPVERTEX:
1842 if (!shader->key.opt.clip_disable) {
1843 si_llvm_emit_clipvertex(ctx, pos_args,
1844 outputs[i].values);
1845 }
1846 break;
1847 }
1848 }
1849
1850 /* We need to add the position output manually if it's missing. */
1851 if (!pos_args[0].out[0]) {
1852 pos_args[0].enabled_channels = 0xf; /* writemask */
1853 pos_args[0].valid_mask = 0; /* EXEC mask */
1854 pos_args[0].done = 0; /* last export? */
1855 pos_args[0].target = V_008DFC_SQ_EXP_POS;
1856 pos_args[0].compr = 0; /* COMPR flag */
1857 pos_args[0].out[0] = ctx->ac.f32_0; /* X */
1858 pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
1859 pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
1860 pos_args[0].out[3] = ctx->ac.f32_1; /* W */
1861 }
1862
1863 bool pos_writes_edgeflag = shader->selector->info.writes_edgeflag &&
1864 !shader->key.as_ngg;
1865
1866 /* Write the misc vector (point size, edgeflag, layer, viewport). */
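/* Channel layout of this export, as set up below: X = point size,
 * Y = edge flag, Z = layer (on GFX9+ also the viewport index in
 * bits 19:16), W = viewport index on pre-GFX9 chips. */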
1867 if (shader->selector->info.writes_psize ||
1868 pos_writes_edgeflag ||
1869 shader->selector->info.writes_viewport_index ||
1870 shader->selector->info.writes_layer) {
1871 pos_args[1].enabled_channels = shader->selector->info.writes_psize |
1872 (pos_writes_edgeflag << 1) |
1873 (shader->selector->info.writes_layer << 2);
1874
1875 pos_args[1].valid_mask = 0; /* EXEC mask */
1876 pos_args[1].done = 0; /* last export? */
1877 pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
1878 pos_args[1].compr = 0; /* COMPR flag */
1879 pos_args[1].out[0] = ctx->ac.f32_0; /* X */
1880 pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
1881 pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
1882 pos_args[1].out[3] = ctx->ac.f32_0; /* W */
1883
1884 if (shader->selector->info.writes_psize)
1885 pos_args[1].out[0] = psize_value;
1886
1887 if (pos_writes_edgeflag) {
1888 /* The output is a float, but the hw expects an integer
1889 * with the first bit containing the edge flag. */
1890 edgeflag_value = LLVMBuildFPToUI(ctx->ac.builder,
1891 edgeflag_value,
1892 ctx->i32, "");
1893 edgeflag_value = ac_build_umin(&ctx->ac,
1894 edgeflag_value,
1895 ctx->i32_1);
1896
1897 /* The LLVM intrinsic expects a float. */
1898 pos_args[1].out[1] = ac_to_float(&ctx->ac, edgeflag_value);
1899 }
1900
1901 if (ctx->screen->info.chip_class >= GFX9) {
1902 /* GFX9 has the layer in out.z[10:0] and the viewport
1903 * index in out.z[19:16].
1904 */
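/* e.g. layer 5 in viewport 2 packs to (2 << 16) | 5 = 0x20005. */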
1905 if (shader->selector->info.writes_layer)
1906 pos_args[1].out[2] = layer_value;
1907
1908 if (shader->selector->info.writes_viewport_index) {
1909 LLVMValueRef v = viewport_index_value;
1910
1911 v = ac_to_integer(&ctx->ac, v);
1912 v = LLVMBuildShl(ctx->ac.builder, v,
1913 LLVMConstInt(ctx->i32, 16, 0), "");
1914 v = LLVMBuildOr(ctx->ac.builder, v,
1915 ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
1916 pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
1917 pos_args[1].enabled_channels |= 1 << 2;
1918 }
1919 } else {
1920 if (shader->selector->info.writes_layer)
1921 pos_args[1].out[2] = layer_value;
1922
1923 if (shader->selector->info.writes_viewport_index) {
1924 pos_args[1].out[3] = viewport_index_value;
1925 pos_args[1].enabled_channels |= 1 << 3;
1926 }
1927 }
1928 }
1929
1930 for (i = 0; i < 4; i++)
1931 if (pos_args[i].out[0])
1932 shader->info.nr_pos_exports++;
1933
1934 /* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
1935 * Setting valid_mask=1 prevents it and has no other effect.
1936 */
1937 if (ctx->screen->info.family == CHIP_NAVI10 ||
1938 ctx->screen->info.family == CHIP_NAVI12 ||
1939 ctx->screen->info.family == CHIP_NAVI14)
1940 pos_args[0].valid_mask = 1;
1941
1942 pos_idx = 0;
1943 for (i = 0; i < 4; i++) {
1944 if (!pos_args[i].out[0])
1945 continue;
1946
1947 /* Specify the target we are exporting */
1948 pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
1949
1950 if (pos_idx == shader->info.nr_pos_exports)
1951 /* Specify that this is the last export */
1952 pos_args[i].done = 1;
1953
1954 ac_build_export(&ctx->ac, &pos_args[i]);
1955 }
1956
1957 /* Build parameter exports. */
1958 si_build_param_exports(ctx, outputs, noutput);
1959 }
1960
1961 /**
1962 * Forward all outputs from the vertex shader to the TES. This is only used
1963 * for the fixed function TCS.
1964 */
1965 static void si_copy_tcs_inputs(struct si_shader_context *ctx)
1966 {
1967 LLVMValueRef invocation_id, buffer, buffer_offset;
1968 LLVMValueRef lds_vertex_stride, lds_base;
1969 uint64_t inputs;
1970
1971 invocation_id = si_unpack_param(ctx, ctx->args.tcs_rel_ids, 8, 5);
1972 buffer = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
1973 buffer_offset = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
1974
1975 lds_vertex_stride = get_tcs_in_vertex_dw_stride(ctx);
1976 lds_base = get_tcs_in_current_patch_offset(ctx);
1977 lds_base = ac_build_imad(&ctx->ac, invocation_id, lds_vertex_stride,
1978 lds_base);
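/* lds_base now points at the LS outputs of the vertex this TCS
 * invocation forwards: current-patch base + invocation_id * the
 * per-vertex dword stride. */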
1979
1980 inputs = ctx->shader->key.mono.u.ff_tcs_inputs_to_copy;
1981 while (inputs) {
1982 unsigned i = u_bit_scan64(&inputs);
1983
1984 LLVMValueRef lds_ptr = LLVMBuildAdd(ctx->ac.builder, lds_base,
1985 LLVMConstInt(ctx->i32, 4 * i, 0),
1986 "");
1987
1988 LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
1989 get_rel_patch_id(ctx),
1990 invocation_id,
1991 LLVMConstInt(ctx->i32, i, 0));
1992
1993 LLVMValueRef value = lshs_lds_load(ctx, ctx->ac.i32, ~0, lds_ptr);
1994
1995 ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buffer_addr,
1996 buffer_offset, 0, ac_glc);
1997 }
1998 }
1999
2000 static void si_write_tess_factors(struct si_shader_context *ctx,
2001 LLVMValueRef rel_patch_id,
2002 LLVMValueRef invocation_id,
2003 LLVMValueRef tcs_out_current_patch_data_offset,
2004 LLVMValueRef invoc0_tf_outer[4],
2005 LLVMValueRef invoc0_tf_inner[2])
2006 {
2007 struct si_shader *shader = ctx->shader;
2008 unsigned tess_inner_index, tess_outer_index;
2009 LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
2010 LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
2011 unsigned stride, outer_comps, inner_comps, i, offset;
2012
2013 /* Add a barrier before loading tess factors from LDS. */
2014 if (!shader->key.part.tcs.epilog.invoc0_tess_factors_are_def)
2015 si_llvm_emit_barrier(ctx);
2016
2017 /* Do this only for invocation 0, because the tess levels are per-patch,
2018 * not per-vertex.
2019 *
2020 * This can't be turned into a jump, because invocation 0 always executes
2021 * it; at minimum it masks out the loads and stores for other invocations.
2022 */
2023 ac_build_ifcc(&ctx->ac,
2024 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2025 invocation_id, ctx->i32_0, ""), 6503);
2026
2027 /* Determine the layout of one tess factor element in the buffer. */
2028 switch (shader->key.part.tcs.epilog.prim_mode) {
2029 case PIPE_PRIM_LINES:
2030 stride = 2; /* 2 dwords, 1 vec2 store */
2031 outer_comps = 2;
2032 inner_comps = 0;
2033 break;
2034 case PIPE_PRIM_TRIANGLES:
2035 stride = 4; /* 4 dwords, 1 vec4 store */
2036 outer_comps = 3;
2037 inner_comps = 1;
2038 break;
2039 case PIPE_PRIM_QUADS:
2040 stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
2041 outer_comps = 4;
2042 inner_comps = 2;
2043 break;
2044 default:
2045 assert(0);
2046 return;
2047 }
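/* The resulting per-patch element layout: isolines use outer[0..1],
 * triangles outer[0..2] + inner[0], quads outer[0..3] + inner[0..1]. */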
2048
2049 for (i = 0; i < 4; i++) {
2050 inner[i] = LLVMGetUndef(ctx->i32);
2051 outer[i] = LLVMGetUndef(ctx->i32);
2052 }
2053
2054 if (shader->key.part.tcs.epilog.invoc0_tess_factors_are_def) {
2055 /* Tess factors are in VGPRs. */
2056 for (i = 0; i < outer_comps; i++)
2057 outer[i] = out[i] = invoc0_tf_outer[i];
2058 for (i = 0; i < inner_comps; i++)
2059 inner[i] = out[outer_comps+i] = invoc0_tf_inner[i];
2060 } else {
2061 /* Load tess_inner and tess_outer from LDS.
2062 * Any invocation can write them, so we can't get them from a temporary.
2063 */
2064 tess_inner_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0);
2065 tess_outer_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0);
2066
2067 lds_base = tcs_out_current_patch_data_offset;
2068 lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
2069 LLVMConstInt(ctx->i32,
2070 tess_inner_index * 4, 0), "");
2071 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
2072 LLVMConstInt(ctx->i32,
2073 tess_outer_index * 4, 0), "");
2074
2075 for (i = 0; i < outer_comps; i++) {
2076 outer[i] = out[i] =
2077 lshs_lds_load(ctx, ctx->ac.i32, i, lds_outer);
2078 }
2079 for (i = 0; i < inner_comps; i++) {
2080 inner[i] = out[outer_comps+i] =
2081 lshs_lds_load(ctx, ctx->ac.i32, i, lds_inner);
2082 }
2083 }
2084
2085 if (shader->key.part.tcs.epilog.prim_mode == PIPE_PRIM_LINES) {
2086 /* For isolines, the hardware expects tess factors in the
2087 * reverse order from what NIR specifies.
2088 */
2089 LLVMValueRef tmp = out[0];
2090 out[0] = out[1];
2091 out[1] = tmp;
2092 }
2093
2094 /* Convert the outputs to vectors for stores. */
2095 vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
2096 vec1 = NULL;
2097
2098 if (stride > 4)
2099 vec1 = ac_build_gather_values(&ctx->ac, out+4, stride - 4);
2100
2101 /* Get the buffer. */
2102 buffer = get_tess_ring_descriptor(ctx, TCS_FACTOR_RING);
2103
2104 /* Get the offset. */
2105 tf_base = ac_get_arg(&ctx->ac,
2106 ctx->tcs_factor_offset);
2107 byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
2108 LLVMConstInt(ctx->i32, 4 * stride, 0), "");
2109
2110 ac_build_ifcc(&ctx->ac,
2111 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2112 rel_patch_id, ctx->i32_0, ""), 6504);
2113
2114 /* Store the dynamic HS control word. */
2115 offset = 0;
2116 if (ctx->screen->info.chip_class <= GFX8) {
2117 ac_build_buffer_store_dword(&ctx->ac, buffer,
2118 LLVMConstInt(ctx->i32, 0x80000000, 0),
2119 1, ctx->i32_0, tf_base,
2120 offset, ac_glc);
2121 offset += 4;
2122 }
2123
2124 ac_build_endif(&ctx->ac, 6504);
2125
2126 /* Store the tessellation factors. */
2127 ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
2128 MIN2(stride, 4), byteoffset, tf_base,
2129 offset, ac_glc);
2130 offset += 16;
2131 if (vec1)
2132 ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
2133 stride - 4, byteoffset, tf_base,
2134 offset, ac_glc);
2135
2136 /* Store the tess factors into the offchip buffer if TES reads them. */
2137 if (shader->key.part.tcs.epilog.tes_reads_tess_factors) {
2138 LLVMValueRef buf, base, inner_vec, outer_vec, tf_outer_offset;
2139 LLVMValueRef tf_inner_offset;
2140 unsigned param_outer, param_inner;
2141
2142 buf = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
2143 base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
2144
2145 param_outer = si_shader_io_get_unique_index_patch(
2146 TGSI_SEMANTIC_TESSOUTER, 0);
2147 tf_outer_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
2148 LLVMConstInt(ctx->i32, param_outer, 0));
2149
2150 unsigned outer_vec_size =
2151 ac_has_vec3_support(ctx->screen->info.chip_class, false) ?
2152 outer_comps : util_next_power_of_two(outer_comps);
2153 outer_vec = ac_build_gather_values(&ctx->ac, outer, outer_vec_size);
2154
2155 ac_build_buffer_store_dword(&ctx->ac, buf, outer_vec,
2156 outer_comps, tf_outer_offset,
2157 base, 0, ac_glc);
2158 if (inner_comps) {
2159 param_inner = si_shader_io_get_unique_index_patch(
2160 TGSI_SEMANTIC_TESSINNER, 0);
2161 tf_inner_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
2162 LLVMConstInt(ctx->i32, param_inner, 0));
2163
2164 inner_vec = inner_comps == 1 ? inner[0] :
2165 ac_build_gather_values(&ctx->ac, inner, inner_comps);
2166 ac_build_buffer_store_dword(&ctx->ac, buf, inner_vec,
2167 inner_comps, tf_inner_offset,
2168 base, 0, ac_glc);
2169 }
2170 }
2171
2172 ac_build_endif(&ctx->ac, 6503);
2173 }
2174
2175 /* This only writes the tessellation factor levels. */
2176 static void si_llvm_emit_tcs_epilogue(struct ac_shader_abi *abi,
2177 unsigned max_outputs,
2178 LLVMValueRef *addrs)
2179 {
2180 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2181 LLVMBuilderRef builder = ctx->ac.builder;
2182 LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;
2183
2184 si_copy_tcs_inputs(ctx);
2185
2186 rel_patch_id = get_rel_patch_id(ctx);
2187 invocation_id = si_unpack_param(ctx, ctx->args.tcs_rel_ids, 8, 5);
2188 tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);
2189
2190 if (ctx->screen->info.chip_class >= GFX9) {
2191 LLVMBasicBlockRef blocks[2] = {
2192 LLVMGetInsertBlock(builder),
2193 ctx->merged_wrap_if_entry_block
2194 };
2195 LLVMValueRef values[2];
2196
2197 ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
2198
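/* The endif above merges control flow with the entry block, where
 * these values are undefined, so rebuild them as phis. invocation_id
 * gets 1 on the other path so that the epilog skips those threads. */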
2199 values[0] = rel_patch_id;
2200 values[1] = LLVMGetUndef(ctx->i32);
2201 rel_patch_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
2202
2203 values[0] = tf_lds_offset;
2204 values[1] = LLVMGetUndef(ctx->i32);
2205 tf_lds_offset = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
2206
2207 values[0] = invocation_id;
2208 values[1] = ctx->i32_1; /* cause the epilog to skip threads */
2209 invocation_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
2210 }
2211
2212 /* Return epilog parameters from this function. */
2213 LLVMValueRef ret = ctx->return_value;
2214 unsigned vgpr;
2215
2216 if (ctx->screen->info.chip_class >= GFX9) {
2217 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout,
2218 8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
2219 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout,
2220 8 + GFX9_SGPR_TCS_OUT_LAYOUT);
2221 /* Tess offchip and tess factor offsets are at the beginning. */
2222 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 2);
2223 ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset, 4);
2224 vgpr = 8 + GFX9_SGPR_TCS_OUT_LAYOUT + 1;
2225 } else {
2226 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout,
2227 GFX6_SGPR_TCS_OFFCHIP_LAYOUT);
2228 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout,
2229 GFX6_SGPR_TCS_OUT_LAYOUT);
2230 /* Tess offchip and tess factor offsets are after user SGPRs. */
2231 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset,
2232 GFX6_TCS_NUM_USER_SGPR);
2233 ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset,
2234 GFX6_TCS_NUM_USER_SGPR + 1);
2235 vgpr = GFX6_TCS_NUM_USER_SGPR + 2;
2236 }
2237
2238 /* VGPRs */
2239 rel_patch_id = ac_to_float(&ctx->ac, rel_patch_id);
2240 invocation_id = ac_to_float(&ctx->ac, invocation_id);
2241 tf_lds_offset = ac_to_float(&ctx->ac, tf_lds_offset);
2242
2243 /* Leave a hole corresponding to the two input VGPRs. This ensures that
2244 * the invocation_id output does not alias the tcs_rel_ids input,
2245 * which saves a V_MOV on gfx9.
2246 */
2247 vgpr += 2;
2248
2249 ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
2250 ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");
2251
2252 if (ctx->shader->selector->info.tessfactors_are_def_in_all_invocs) {
2253 vgpr++; /* skip the tess factor LDS offset */
2254 for (unsigned i = 0; i < 6; i++) {
2255 LLVMValueRef value =
2256 LLVMBuildLoad(builder, ctx->invoc0_tess_factors[i], "");
2257 value = ac_to_float(&ctx->ac, value);
2258 ret = LLVMBuildInsertValue(builder, ret, value, vgpr++, "");
2259 }
2260 } else {
2261 ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
2262 }
2263 ctx->return_value = ret;
2264 }
2265
2266 /* Pass TCS inputs from LS to TCS on GFX9. */
2267 static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
2268 {
2269 LLVMValueRef ret = ctx->return_value;
2270
2271 ret = si_insert_input_ptr(ctx, ret, ctx->other_const_and_shader_buffers, 0);
2272 ret = si_insert_input_ptr(ctx, ret, ctx->other_samplers_and_images, 1);
2273 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 2);
2274 ret = si_insert_input_ret(ctx, ret, ctx->merged_wave_info, 3);
2275 ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset, 4);
2276 ret = si_insert_input_ret(ctx, ret, ctx->merged_scratch_offset, 5);
2277
2278 ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers,
2279 8 + SI_SGPR_RW_BUFFERS);
2280 ret = si_insert_input_ptr(ctx, ret,
2281 ctx->bindless_samplers_and_images,
2282 8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
2283
2284 ret = si_insert_input_ret(ctx, ret, ctx->vs_state_bits,
2285 8 + SI_SGPR_VS_STATE_BITS);
2286
2287 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout,
2288 8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
2289 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_offsets,
2290 8 + GFX9_SGPR_TCS_OUT_OFFSETS);
2291 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout,
2292 8 + GFX9_SGPR_TCS_OUT_LAYOUT);
2293
2294 unsigned vgpr = 8 + GFX9_TCS_NUM_USER_SGPR;
2295 ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
2296 ac_to_float(&ctx->ac,
2297 ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id)),
2298 vgpr++, "");
2299 ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
2300 ac_to_float(&ctx->ac,
2301 ac_get_arg(&ctx->ac, ctx->args.tcs_rel_ids)),
2302 vgpr++, "");
2303 ctx->return_value = ret;
2304 }
2305
2306 /* Pass GS inputs from ES to GS on GFX9. */
2307 static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
2308 {
2309 LLVMValueRef ret = ctx->return_value;
2310
2311 ret = si_insert_input_ptr(ctx, ret, ctx->other_const_and_shader_buffers, 0);
2312 ret = si_insert_input_ptr(ctx, ret, ctx->other_samplers_and_images, 1);
2313 if (ctx->shader->key.as_ngg)
2314 ret = si_insert_input_ptr(ctx, ret, ctx->gs_tg_info, 2);
2315 else
2316 ret = si_insert_input_ret(ctx, ret, ctx->gs2vs_offset, 2);
2317 ret = si_insert_input_ret(ctx, ret, ctx->merged_wave_info, 3);
2318 ret = si_insert_input_ret(ctx, ret, ctx->merged_scratch_offset, 5);
2319
2320 ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers,
2321 8 + SI_SGPR_RW_BUFFERS);
2322 ret = si_insert_input_ptr(ctx, ret,
2323 ctx->bindless_samplers_and_images,
2324 8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
2325 if (ctx->screen->use_ngg) {
2326 ret = si_insert_input_ptr(ctx, ret, ctx->vs_state_bits,
2327 8 + SI_SGPR_VS_STATE_BITS);
2328 }
2329
2330 unsigned vgpr;
2331 if (ctx->type == PIPE_SHADER_VERTEX)
2332 vgpr = 8 + GFX9_VSGS_NUM_USER_SGPR;
2333 else
2334 vgpr = 8 + GFX9_TESGS_NUM_USER_SGPR;
2335
2336 ret = si_insert_input_ret_float(ctx, ret, ctx->gs_vtx01_offset, vgpr++);
2337 ret = si_insert_input_ret_float(ctx, ret, ctx->gs_vtx23_offset, vgpr++);
2338 ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_prim_id, vgpr++);
2339 ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_invocation_id, vgpr++);
2340 ret = si_insert_input_ret_float(ctx, ret, ctx->gs_vtx45_offset, vgpr++);
2341 ctx->return_value = ret;
2342 }
2343
2344 static void si_llvm_emit_ls_epilogue(struct ac_shader_abi *abi,
2345 unsigned max_outputs,
2346 LLVMValueRef *addrs)
2347 {
2348 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2349 struct si_shader *shader = ctx->shader;
2350 struct si_shader_info *info = &shader->selector->info;
2351 unsigned i, chan;
2352 LLVMValueRef vertex_id = ac_get_arg(&ctx->ac, ctx->rel_auto_id);
2353 LLVMValueRef vertex_dw_stride = get_tcs_in_vertex_dw_stride(ctx);
2354 LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
2355 vertex_dw_stride, "");
2356
2357 /* Write outputs to LDS. The next shader (TCS aka HS) will read
2358 * its inputs from it. */
2359 for (i = 0; i < info->num_outputs; i++) {
2360 unsigned name = info->output_semantic_name[i];
2361 unsigned index = info->output_semantic_index[i];
2362
2363 /* The ARB_shader_viewport_layer_array spec contains the
2364 * following issue:
2365 *
2366 * 2) What happens if gl_ViewportIndex or gl_Layer is
2367 * written in the vertex shader and a geometry shader is
2368 * present?
2369 *
2370 * RESOLVED: The value written by the last vertex processing
2371 * stage is used. If the last vertex processing stage
2372 * (vertex, tessellation evaluation or geometry) does not
2373 * statically assign to gl_ViewportIndex or gl_Layer, index
2374 * or layer zero is assumed.
2375 *
2376 * So writes to those outputs in VS-as-LS are simply ignored.
2377 */
2378 if (name == TGSI_SEMANTIC_LAYER ||
2379 name == TGSI_SEMANTIC_VIEWPORT_INDEX)
2380 continue;
2381
2382 int param = si_shader_io_get_unique_index(name, index, false);
2383 LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
2384 LLVMConstInt(ctx->i32, param * 4, 0), "");
2385
2386 for (chan = 0; chan < 4; chan++) {
2387 if (!(info->output_usagemask[i] & (1 << chan)))
2388 continue;
2389
2390 lshs_lds_store(ctx, chan, dw_addr,
2391 LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], ""));
2392 }
2393 }
2394
2395 if (ctx->screen->info.chip_class >= GFX9)
2396 si_set_ls_return_value_for_tcs(ctx);
2397 }
2398
2399 static void si_llvm_emit_es_epilogue(struct ac_shader_abi *abi,
2400 unsigned max_outputs,
2401 LLVMValueRef *addrs)
2402 {
2403 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2404 struct si_shader *es = ctx->shader;
2405 struct si_shader_info *info = &es->selector->info;
2406 LLVMValueRef lds_base = NULL;
2407 unsigned chan;
2408 int i;
2409
2410 if (ctx->screen->info.chip_class >= GFX9 && info->num_outputs) {
2411 unsigned itemsize_dw = es->selector->esgs_itemsize / 4;
2412 LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
2413 LLVMValueRef wave_idx = si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
2414 vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
2415 LLVMBuildMul(ctx->ac.builder, wave_idx,
2416 LLVMConstInt(ctx->i32, ctx->ac.wave_size, false), ""), "");
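/* The OR behaves as an ADD here: the thread id is < wave_size, so it
 * never overlaps the bits of wave_idx * wave_size. */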
2417 lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
2418 LLVMConstInt(ctx->i32, itemsize_dw, 0), "");
2419 }
2420
2421 for (i = 0; i < info->num_outputs; i++) {
2422 int param;
2423
2424 if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
2425 info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
2426 continue;
2427
2428 param = si_shader_io_get_unique_index(info->output_semantic_name[i],
2429 info->output_semantic_index[i], false);
2430
2431 for (chan = 0; chan < 4; chan++) {
2432 if (!(info->output_usagemask[i] & (1 << chan)))
2433 continue;
2434
2435 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
2436 out_val = ac_to_integer(&ctx->ac, out_val);
2437
2438 /* GFX9 has the ESGS ring in LDS. */
2439 if (ctx->screen->info.chip_class >= GFX9) {
2440 LLVMValueRef idx = LLVMConstInt(ctx->i32, param * 4 + chan, false);
2441 idx = LLVMBuildAdd(ctx->ac.builder, lds_base, idx, "");
2442 ac_build_indexed_store(&ctx->ac, ctx->esgs_ring, idx, out_val);
2443 continue;
2444 }
2445
2446 ac_build_buffer_store_dword(&ctx->ac,
2447 ctx->esgs_ring,
2448 out_val, 1, NULL,
2449 ac_get_arg(&ctx->ac, ctx->es2gs_offset),
2450 (4 * param + chan) * 4,
2451 ac_glc | ac_slc | ac_swizzled);
2452 }
2453 }
2454
2455 if (ctx->screen->info.chip_class >= GFX9)
2456 si_set_es_return_value_for_gs(ctx);
2457 }
2458
2459 static LLVMValueRef si_get_gs_wave_id(struct si_shader_context *ctx)
2460 {
2461 if (ctx->screen->info.chip_class >= GFX9)
2462 return si_unpack_param(ctx, ctx->merged_wave_info, 16, 8);
2463 else
2464 return ac_get_arg(&ctx->ac, ctx->gs_wave_id);
2465 }
2466
2467 static void emit_gs_epilogue(struct si_shader_context *ctx)
2468 {
2469 if (ctx->shader->key.as_ngg) {
2470 gfx10_ngg_gs_emit_epilogue(ctx);
2471 return;
2472 }
2473
2474 if (ctx->screen->info.chip_class >= GFX10)
2475 LLVMBuildFence(ctx->ac.builder, LLVMAtomicOrderingRelease, false, "");
2476
2477 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
2478 si_get_gs_wave_id(ctx));
2479
2480 if (ctx->screen->info.chip_class >= GFX9)
2481 ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
2482 }
2483
2484 static void si_llvm_emit_gs_epilogue(struct ac_shader_abi *abi,
2485 unsigned max_outputs,
2486 LLVMValueRef *addrs)
2487 {
2488 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2489 struct si_shader_info UNUSED *info = &ctx->shader->selector->info;
2490
2491 assert(info->num_outputs <= max_outputs);
2492
2493 emit_gs_epilogue(ctx);
2494 }
2495
2496 static void si_llvm_emit_vs_epilogue(struct ac_shader_abi *abi,
2497 unsigned max_outputs,
2498 LLVMValueRef *addrs)
2499 {
2500 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2501 struct si_shader_info *info = &ctx->shader->selector->info;
2502 struct si_shader_output_values *outputs = NULL;
2503 int i, j;
2504
2505 assert(!ctx->shader->is_gs_copy_shader);
2506 assert(info->num_outputs <= max_outputs);
2507
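/* +1 slot in case the PrimitiveID export below is appended. */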
2508 outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));
2509
2510 for (i = 0; i < info->num_outputs; i++) {
2511 outputs[i].semantic_name = info->output_semantic_name[i];
2512 outputs[i].semantic_index = info->output_semantic_index[i];
2513
2514 for (j = 0; j < 4; j++) {
2515 outputs[i].values[j] =
2516 LLVMBuildLoad(ctx->ac.builder,
2517 addrs[4 * i + j],
2518 "");
2519 outputs[i].vertex_stream[j] =
2520 (info->output_streams[i] >> (2 * j)) & 3;
2521 }
2522 }
2523
2524 if (!ctx->screen->use_ngg_streamout &&
2525 ctx->shader->selector->so.num_outputs)
2526 si_llvm_emit_streamout(ctx, outputs, i, 0);
2527
2528 /* Export PrimitiveID. */
2529 if (ctx->shader->key.mono.u.vs_export_prim_id) {
2530 outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
2531 outputs[i].semantic_index = 0;
2532 outputs[i].values[0] = ac_to_float(&ctx->ac, si_get_primitive_id(ctx, 0));
2533 for (j = 1; j < 4; j++)
2534 outputs[i].values[j] = LLVMConstReal(ctx->f32, 0);
2535
2536 memset(outputs[i].vertex_stream, 0,
2537 sizeof(outputs[i].vertex_stream));
2538 i++;
2539 }
2540
2541 si_llvm_export_vs(ctx, outputs, i);
2542 FREE(outputs);
2543 }
2544
2545 static void si_llvm_emit_prim_discard_cs_epilogue(struct ac_shader_abi *abi,
2546 unsigned max_outputs,
2547 LLVMValueRef *addrs)
2548 {
2549 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2550 struct si_shader_info *info = &ctx->shader->selector->info;
2551 LLVMValueRef pos[4] = {};
2552
2553 assert(info->num_outputs <= max_outputs);
2554
2555 for (unsigned i = 0; i < info->num_outputs; i++) {
2556 if (info->output_semantic_name[i] != TGSI_SEMANTIC_POSITION)
2557 continue;
2558
2559 for (unsigned chan = 0; chan < 4; chan++)
2560 pos[chan] = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
2561 break;
2562 }
2563 assert(pos[0] != NULL);
2564
2565 /* Return the position output. */
2566 LLVMValueRef ret = ctx->return_value;
2567 for (unsigned chan = 0; chan < 4; chan++)
2568 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, pos[chan], chan, "");
2569 ctx->return_value = ret;
2570 }
2571
2572 /* Emit one vertex from the geometry shader */
2573 static void si_llvm_emit_vertex(struct ac_shader_abi *abi,
2574 unsigned stream,
2575 LLVMValueRef *addrs)
2576 {
2577 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2578
2579 if (ctx->shader->key.as_ngg) {
2580 gfx10_ngg_gs_emit_vertex(ctx, stream, addrs);
2581 return;
2582 }
2583
2584 struct si_shader_info *info = &ctx->shader->selector->info;
2585 struct si_shader *shader = ctx->shader;
2586 LLVMValueRef soffset = ac_get_arg(&ctx->ac, ctx->gs2vs_offset);
2587 LLVMValueRef gs_next_vertex;
2588 LLVMValueRef can_emit;
2589 unsigned chan, offset;
2590 int i;
2591
2592 /* Write vertex attribute values to GSVS ring */
2593 gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
2594 ctx->gs_next_vertex[stream],
2595 "");
2596
2597 /* If this thread has already emitted the declared maximum number of
2598 * vertices, skip the write: excessive vertex emissions are not
2599 * supposed to have any effect.
2600 *
2601 * If the shader has no writes to memory, kill it instead. This skips
2602 * further memory loads and may allow LLVM to skip to the end
2603 * altogether.
2604 */
2605 can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
2606 LLVMConstInt(ctx->i32,
2607 shader->selector->gs_max_out_vertices, 0), "");
2608
2609 bool use_kill = !info->writes_memory;
2610 if (use_kill) {
2611 ac_build_kill_if_false(&ctx->ac, can_emit);
2612 } else {
2613 ac_build_ifcc(&ctx->ac, can_emit, 6505);
2614 }
2615
2616 offset = 0;
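/* GSVS ring addressing, as computed below: each output component
 * written for this stream occupies a run of gs_max_out_vertices
 * dwords, so component "offset" of the emitted vertex lands at dword
 * offset * gs_max_out_vertices + gs_next_vertex; the swizzled ring
 * store presumably keeps each thread's data in its own slice. */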
2617 for (i = 0; i < info->num_outputs; i++) {
2618 for (chan = 0; chan < 4; chan++) {
2619 if (!(info->output_usagemask[i] & (1 << chan)) ||
2620 ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
2621 continue;
2622
2623 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
2624 LLVMValueRef voffset =
2625 LLVMConstInt(ctx->i32, offset *
2626 shader->selector->gs_max_out_vertices, 0);
2627 offset++;
2628
2629 voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
2630 voffset = LLVMBuildMul(ctx->ac.builder, voffset,
2631 LLVMConstInt(ctx->i32, 4, 0), "");
2632
2633 out_val = ac_to_integer(&ctx->ac, out_val);
2634
2635 ac_build_buffer_store_dword(&ctx->ac,
2636 ctx->gsvs_ring[stream],
2637 out_val, 1,
2638 voffset, soffset, 0,
2639 ac_glc | ac_slc | ac_swizzled);
2640 }
2641 }
2642
2643 gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex, ctx->i32_1, "");
2644 LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);
2645
2646 /* Signal vertex emission if vertex data was written. */
2647 if (offset) {
2648 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
2649 si_get_gs_wave_id(ctx));
2650 }
2651
2652 if (!use_kill)
2653 ac_build_endif(&ctx->ac, 6505);
2654 }
2655
2656 /* Cut one primitive from the geometry shader */
2657 static void si_llvm_emit_primitive(struct ac_shader_abi *abi,
2658 unsigned stream)
2659 {
2660 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2661
2662 if (ctx->shader->key.as_ngg) {
2663 LLVMBuildStore(ctx->ac.builder, ctx->ac.i32_0, ctx->gs_curprim_verts[stream]);
2664 return;
2665 }
2666
2667 /* Signal primitive cut */
2668 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
2669 si_get_gs_wave_id(ctx));
2670 }
2671
2672 static void si_llvm_emit_barrier(struct si_shader_context *ctx)
2673 {
2674 /* GFX6 only (thanks to a hw bug workaround):
2675 * The real barrier instruction isn't needed, because an entire patch
2676 * always fits into a single wave.
2677 */
2678 if (ctx->screen->info.chip_class == GFX6 &&
2679 ctx->type == PIPE_SHADER_TESS_CTRL) {
2680 ac_build_waitcnt(&ctx->ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
2681 return;
2682 }
2683
2684 ac_build_s_barrier(&ctx->ac);
2685 }
2686
2687 static void declare_streamout_params(struct si_shader_context *ctx,
2688 struct pipe_stream_output_info *so)
2689 {
2690 if (ctx->screen->use_ngg_streamout) {
2691 if (ctx->type == PIPE_SHADER_TESS_EVAL)
2692 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
2693 return;
2694 }
2695
2696 /* Streamout SGPRs. */
2697 if (so->num_outputs) {
2698 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_config);
2699 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_write_index);
2700 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
2701 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
2702 }
2703
2704 /* A streamout buffer offset is loaded if the stride is non-zero. */
2705 for (int i = 0; i < 4; i++) {
2706 if (!so->stride[i])
2707 continue;
2708
2709 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_offset[i]);
2710 }
2711 }
2712
2713 static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
2714 {
2715 switch (shader->selector->type) {
2716 case PIPE_SHADER_VERTEX:
2717 case PIPE_SHADER_TESS_EVAL:
2718 return shader->key.as_ngg ? 128 : 0;
2719
2720 case PIPE_SHADER_TESS_CTRL:
2721 /* Return 128 so that LLVM doesn't remove s_barrier
2722 * instructions on chips where we use s_barrier. */
2723 return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 0;
2724
2725 case PIPE_SHADER_GEOMETRY:
2726 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 0;
2727
2728 case PIPE_SHADER_COMPUTE:
2729 break; /* see below */
2730
2731 default:
2732 return 0;
2733 }
2734
2735 const unsigned *properties = shader->selector->info.properties;
2736 unsigned max_work_group_size =
2737 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
2738 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
2739 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
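/* e.g. a fixed 8x8x1 block yields 64; a product of 0 means the block
 * size is chosen at dispatch time. */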
2740
2741 if (!max_work_group_size) {
2742 /* This is a variable-group-size compute shader;
2743 * compile it for the maximum possible group size.
2744 */
2745 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
2746 }
2747 return max_work_group_size;
2748 }
2749
2750 static void declare_const_and_shader_buffers(struct si_shader_context *ctx,
2751 bool assign_params)
2752 {
2753 enum ac_arg_type const_shader_buf_type;
2754
2755 if (ctx->shader->selector->info.const_buffers_declared == 1 &&
2756 ctx->shader->selector->info.shader_buffers_declared == 0)
2757 const_shader_buf_type = AC_ARG_CONST_FLOAT_PTR;
2758 else
2759 const_shader_buf_type = AC_ARG_CONST_DESC_PTR;
2760
2761 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, const_shader_buf_type,
2762 assign_params ? &ctx->const_and_shader_buffers :
2763 &ctx->other_const_and_shader_buffers);
2764 }
2765
2766 static void declare_samplers_and_images(struct si_shader_context *ctx,
2767 bool assign_params)
2768 {
2769 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
2770 assign_params ? &ctx->samplers_and_images :
2771 &ctx->other_samplers_and_images);
2772 }
2773
2774 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
2775 bool assign_params)
2776 {
2777 declare_const_and_shader_buffers(ctx, assign_params);
2778 declare_samplers_and_images(ctx, assign_params);
2779 }
2780
2781 static void declare_global_desc_pointers(struct si_shader_context *ctx)
2782 {
2783 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
2784 &ctx->rw_buffers);
2785 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
2786 &ctx->bindless_samplers_and_images);
2787 }
2788
2789 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx)
2790 {
2791 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
2792 if (!ctx->shader->is_gs_copy_shader) {
2793 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.base_vertex);
2794 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.start_instance);
2795 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.draw_id);
2796 }
2797 }
2798
2799 static void declare_vb_descriptor_input_sgprs(struct si_shader_context *ctx)
2800 {
2801 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->vertex_buffers);
2802
2803 unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
2804 if (num_vbos_in_user_sgprs) {
2805 unsigned user_sgprs = ctx->args.num_sgprs_used;
2806
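/* Merged shaders start with 8 system SGPRs that don't count as
 * user SGPRs (see create_function). */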
2807 if (si_is_merged_shader(ctx))
2808 user_sgprs -= 8;
2809 assert(user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
2810
2811 /* Declare unused SGPRs to align VB descriptors to 4 SGPRs (hw requirement). */
2812 for (unsigned i = user_sgprs; i < SI_SGPR_VS_VB_DESCRIPTOR_FIRST; i++)
2813 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
2814
2815 assert(num_vbos_in_user_sgprs <= ARRAY_SIZE(ctx->vb_descriptors));
2816 for (unsigned i = 0; i < num_vbos_in_user_sgprs; i++)
2817 ac_add_arg(&ctx->args, AC_ARG_SGPR, 4, AC_ARG_INT, &ctx->vb_descriptors[i]);
2818 }
2819 }
2820
2821 static void declare_vs_input_vgprs(struct si_shader_context *ctx,
2822 unsigned *num_prolog_vgprs)
2823 {
2824 struct si_shader *shader = ctx->shader;
2825
2826 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.vertex_id);
2827 if (shader->key.as_ls) {
2828 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->rel_auto_id);
2829 if (ctx->screen->info.chip_class >= GFX10) {
2830 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
2831 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
2832 } else {
2833 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
2834 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
2835 }
2836 } else if (ctx->screen->info.chip_class >= GFX10) {
2837 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
2838 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
2839 &ctx->vs_prim_id); /* user vgpr or PrimID (legacy) */
2840 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
2841 } else {
2842 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
2843 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->vs_prim_id);
2844 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
2845 }
2846
2847 if (!shader->is_gs_copy_shader) {
2848 /* Vertex load indices. */
2849 if (shader->selector->info.num_inputs) {
2850 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
2851 &ctx->vertex_index0);
2852 for (unsigned i = 1; i < shader->selector->info.num_inputs; i++)
2853 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
2854 }
2855 *num_prolog_vgprs += shader->selector->info.num_inputs;
2856 }
2857 }
2858
2859 static void declare_vs_blit_inputs(struct si_shader_context *ctx,
2860 unsigned vs_blit_property)
2861 {
2862 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
2863 &ctx->vs_blit_inputs); /* i16 x1, y1 */
2864 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* i16 x2, y2 */
2865 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* depth */
2866
2867 if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
2868 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color0 */
2869 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color1 */
2870 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color2 */
2871 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* color3 */
2872 } else if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD) {
2873 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x1 */
2874 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y1 */
2875 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.x2 */
2876 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.y2 */
2877 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.z */
2878 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_FLOAT, NULL); /* texcoord.w */
2879 }
2880 }
2881
2882 static void declare_tes_input_vgprs(struct si_shader_context *ctx)
2883 {
2884 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_u);
2885 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, &ctx->tes_v);
2886 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->tes_rel_patch_id);
2887 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tes_patch_id);
2888 }
2889
2890 enum {
2891 /* Convenient merged shader definitions. */
2892 SI_SHADER_MERGED_VERTEX_TESSCTRL = PIPE_SHADER_TYPES,
2893 SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY,
2894 };
2895
2896 void si_add_arg_checked(struct ac_shader_args *args,
2897 enum ac_arg_regfile file,
2898 unsigned registers, enum ac_arg_type type,
2899 struct ac_arg *arg,
2900 unsigned idx)
2901 {
2902 assert(args->arg_count == idx);
2903 ac_add_arg(args, file, registers, type, arg);
2904 }
2905
2906 static void create_function(struct si_shader_context *ctx)
2907 {
2908 struct si_shader *shader = ctx->shader;
2909 LLVMTypeRef returns[AC_MAX_ARGS];
2910 unsigned i, num_return_sgprs;
2911 unsigned num_returns = 0;
2912 unsigned num_prolog_vgprs = 0;
2913 unsigned type = ctx->type;
2914 unsigned vs_blit_property =
2915 shader->selector->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
2916
2917 memset(&ctx->args, 0, sizeof(ctx->args));
2918
2919 /* Set MERGED shaders. */
2920 if (ctx->screen->info.chip_class >= GFX9) {
2921 if (shader->key.as_ls || type == PIPE_SHADER_TESS_CTRL)
2922 type = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
2923 else if (shader->key.as_es || shader->key.as_ngg || type == PIPE_SHADER_GEOMETRY)
2924 type = SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY;
2925 }
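/* e.g. on GFX9 a VS with as_ls becomes the first half of the merged
 * LSHS stage, and a VS with as_es the first half of ESGS. */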
2926
2927 switch (type) {
2928 case PIPE_SHADER_VERTEX:
2929 declare_global_desc_pointers(ctx);
2930
2931 if (vs_blit_property) {
2932 declare_vs_blit_inputs(ctx, vs_blit_property);
2933
2934 /* VGPRs */
2935 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
2936 break;
2937 }
2938
2939 declare_per_stage_desc_pointers(ctx, true);
2940 declare_vs_specific_input_sgprs(ctx);
2941 if (!shader->is_gs_copy_shader)
2942 declare_vb_descriptor_input_sgprs(ctx);
2943
2944 if (shader->key.as_es) {
2945 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
2946 &ctx->es2gs_offset);
2947 } else if (shader->key.as_ls) {
2948 /* no extra parameters */
2949 } else {
2950 /* The locations of the other parameters are assigned dynamically. */
2951 declare_streamout_params(ctx, &shader->selector->so);
2952 }
2953
2954 /* VGPRs */
2955 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
2956
2957 /* Return values */
2958 if (shader->key.opt.vs_as_prim_discard_cs) {
2959 for (i = 0; i < 4; i++)
2960 returns[num_returns++] = ctx->f32; /* VGPRs */
2961 }
2962 break;
2963
2964 case PIPE_SHADER_TESS_CTRL: /* GFX6-GFX8 */
2965 declare_global_desc_pointers(ctx);
2966 declare_per_stage_desc_pointers(ctx, true);
2967 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
2968 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
2969 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
2970 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
2971 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
2972 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
2973
2974 /* VGPRs */
2975 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
2976 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
2977
2978 /* param_tcs_offchip_offset and param_tcs_factor_offset are
2979 * placed after the user SGPRs.
2980 */
2981 for (i = 0; i < GFX6_TCS_NUM_USER_SGPR + 2; i++)
2982 returns[num_returns++] = ctx->i32; /* SGPRs */
2983 for (i = 0; i < 11; i++)
2984 returns[num_returns++] = ctx->f32; /* VGPRs */
2985 break;
2986
2987 case SI_SHADER_MERGED_VERTEX_TESSCTRL:
2988 /* Merged stages have 8 system SGPRs at the beginning. */
2989 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_HS */
2990 declare_per_stage_desc_pointers(ctx,
2991 ctx->type == PIPE_SHADER_TESS_CTRL);
2992 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
2993 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
2994 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_factor_offset);
2995 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
2996 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
2997 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
2998
2999 declare_global_desc_pointers(ctx);
3000 declare_per_stage_desc_pointers(ctx,
3001 ctx->type == PIPE_SHADER_VERTEX);
3002 declare_vs_specific_input_sgprs(ctx);
3003
3004 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
3005 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_offsets);
3006 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_out_lds_layout);
3007 declare_vb_descriptor_input_sgprs(ctx);
3008
3009 /* VGPRs (first TCS, then VS) */
3010 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_patch_id);
3011 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.tcs_rel_ids);
3012
3013 if (ctx->type == PIPE_SHADER_VERTEX) {
3014 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
3015
3016 /* LS return values are inputs to the TCS main shader part. */
3017 for (i = 0; i < 8 + GFX9_TCS_NUM_USER_SGPR; i++)
3018 returns[num_returns++] = ctx->i32; /* SGPRs */
3019 for (i = 0; i < 2; i++)
3020 returns[num_returns++] = ctx->f32; /* VGPRs */
3021 } else {
3022 /* TCS return values are inputs to the TCS epilog.
3023 *
3024 * param_tcs_offchip_offset, param_tcs_factor_offset,
3025 * param_tcs_offchip_layout, and param_rw_buffers
3026 * should be passed to the epilog.
3027 */
3028 for (i = 0; i <= 8 + GFX9_SGPR_TCS_OUT_LAYOUT; i++)
3029 returns[num_returns++] = ctx->i32; /* SGPRs */
3030 for (i = 0; i < 11; i++)
3031 returns[num_returns++] = ctx->f32; /* VGPRs */
3032 }
3033 break;
3034
3035 case SI_SHADER_MERGED_VERTEX_OR_TESSEVAL_GEOMETRY:
3036 /* Merged stages have 8 system SGPRs at the beginning. */
3037 /* SPI_SHADER_USER_DATA_ADDR_LO/HI_GS */
3038 declare_per_stage_desc_pointers(ctx,
3039 ctx->type == PIPE_SHADER_GEOMETRY);
3040
3041 if (ctx->shader->key.as_ngg)
3042 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_tg_info);
3043 else
3044 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
3045
3046 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_wave_info);
3047 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3048 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->merged_scratch_offset);
3049 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS << 8) */
3050 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused (SPI_SHADER_PGM_LO/HI_GS >> 24) */
3051
3052 declare_global_desc_pointers(ctx);
3053 if (ctx->type != PIPE_SHADER_VERTEX || !vs_blit_property) {
3054 declare_per_stage_desc_pointers(ctx,
3055 (ctx->type == PIPE_SHADER_VERTEX ||
3056 ctx->type == PIPE_SHADER_TESS_EVAL));
3057 }
3058
3059 if (ctx->type == PIPE_SHADER_VERTEX) {
3060 if (vs_blit_property)
3061 declare_vs_blit_inputs(ctx, vs_blit_property);
3062 else
3063 declare_vs_specific_input_sgprs(ctx);
3064 } else {
3065 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
3066 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
3067 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
3068 /* Declare as many input SGPRs as the VS has. */
3069 }
3070
3071 if (ctx->type == PIPE_SHADER_VERTEX)
3072 declare_vb_descriptor_input_sgprs(ctx);
3073
3074 /* VGPRs (first GS, then VS/TES) */
3075 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx01_offset);
3076 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx23_offset);
3077 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
3078 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
3079 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx45_offset);
3080
3081 if (ctx->type == PIPE_SHADER_VERTEX) {
3082 declare_vs_input_vgprs(ctx, &num_prolog_vgprs);
3083 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
3084 declare_tes_input_vgprs(ctx);
3085 }
3086
3087 if (ctx->shader->key.as_es &&
3088 (ctx->type == PIPE_SHADER_VERTEX ||
3089 ctx->type == PIPE_SHADER_TESS_EVAL)) {
3090 unsigned num_user_sgprs;
3091
3092 if (ctx->type == PIPE_SHADER_VERTEX)
3093 num_user_sgprs = GFX9_VSGS_NUM_USER_SGPR;
3094 else
3095 num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
3096
3097 /* ES return values are inputs to GS. */
3098 for (i = 0; i < 8 + num_user_sgprs; i++)
3099 returns[num_returns++] = ctx->i32; /* SGPRs */
3100 for (i = 0; i < 5; i++)
3101 returns[num_returns++] = ctx->f32; /* VGPRs */
3102 }
3103 break;
3104
3105 case PIPE_SHADER_TESS_EVAL:
3106 declare_global_desc_pointers(ctx);
3107 declare_per_stage_desc_pointers(ctx, true);
3108 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
3109 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_layout);
3110 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tes_offchip_addr);
3111
3112 if (shader->key.as_es) {
3113 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3114 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
3115 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->es2gs_offset);
3116 } else {
3117 declare_streamout_params(ctx, &shader->selector->so);
3118 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->tcs_offchip_offset);
3119 }
3120
3121 /* VGPRs */
3122 declare_tes_input_vgprs(ctx);
3123 break;
3124
3125 case PIPE_SHADER_GEOMETRY:
3126 declare_global_desc_pointers(ctx);
3127 declare_per_stage_desc_pointers(ctx, true);
3128 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs2vs_offset);
3129 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_wave_id);
3130
3131 /* VGPRs */
3132 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[0]);
3133 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[1]);
3134 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_prim_id);
3135 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[2]);
3136 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[3]);
3137 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[4]);
3138 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->gs_vtx_offset[5]);
3139 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.gs_invocation_id);
3140 break;
3141
3142 case PIPE_SHADER_FRAGMENT:
3143 declare_global_desc_pointers(ctx);
3144 declare_per_stage_desc_pointers(ctx, true);
3145 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL,
3146 SI_PARAM_ALPHA_REF);
3147 si_add_arg_checked(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
3148 &ctx->args.prim_mask, SI_PARAM_PRIM_MASK);
3149
3150 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT, &ctx->args.persp_sample,
3151 SI_PARAM_PERSP_SAMPLE);
3152 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3153 &ctx->args.persp_center, SI_PARAM_PERSP_CENTER);
3154 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3155 &ctx->args.persp_centroid, SI_PARAM_PERSP_CENTROID);
3156 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
3157 NULL, SI_PARAM_PERSP_PULL_MODEL);
3158 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3159 &ctx->args.linear_sample, SI_PARAM_LINEAR_SAMPLE);
3160 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3161 &ctx->args.linear_center, SI_PARAM_LINEAR_CENTER);
3162 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 2, AC_ARG_INT,
3163 &ctx->args.linear_centroid, SI_PARAM_LINEAR_CENTROID);
3164 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_FLOAT,
3165 NULL, SI_PARAM_LINE_STIPPLE_TEX);
3166 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3167 &ctx->args.frag_pos[0], SI_PARAM_POS_X_FLOAT);
3168 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3169 &ctx->args.frag_pos[1], SI_PARAM_POS_Y_FLOAT);
3170 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3171 &ctx->args.frag_pos[2], SI_PARAM_POS_Z_FLOAT);
3172 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3173 &ctx->args.frag_pos[3], SI_PARAM_POS_W_FLOAT);
3174 shader->info.face_vgpr_index = ctx->args.num_vgprs_used;
3175 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
3176 &ctx->args.front_face, SI_PARAM_FRONT_FACE);
3177 shader->info.ancillary_vgpr_index = ctx->args.num_vgprs_used;
3178 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
3179 &ctx->args.ancillary, SI_PARAM_ANCILLARY);
3180 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT,
3181 &ctx->args.sample_coverage, SI_PARAM_SAMPLE_COVERAGE);
3182 si_add_arg_checked(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
3183 &ctx->pos_fixed_pt, SI_PARAM_POS_FIXED_PT);
3184
3185 /* Color inputs from the prolog. */
3186 if (shader->selector->info.colors_read) {
3187 unsigned num_color_elements =
3188 util_bitcount(shader->selector->info.colors_read);
3189
3190 for (i = 0; i < num_color_elements; i++)
3191 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_FLOAT, NULL);
3192
3193 num_prolog_vgprs += num_color_elements;
3194 }
3195
3196 /* Outputs for the epilog. */
3197 num_return_sgprs = SI_SGPR_ALPHA_REF + 1;
3198 num_returns =
3199 num_return_sgprs +
3200 util_bitcount(shader->selector->info.colors_written) * 4 +
3201 shader->selector->info.writes_z +
3202 shader->selector->info.writes_stencil +
3203 shader->selector->info.writes_samplemask +
3204 1 /* SampleMaskIn */;
3205
3206 num_returns = MAX2(num_returns,
3207 num_return_sgprs +
3208 PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
3209
3210 for (i = 0; i < num_return_sgprs; i++)
3211 returns[i] = ctx->i32;
3212 for (; i < num_returns; i++)
3213 returns[i] = ctx->f32;
3214 break;
3215
3216 case PIPE_SHADER_COMPUTE:
3217 declare_global_desc_pointers(ctx);
3218 declare_per_stage_desc_pointers(ctx, true);
3219 if (shader->selector->info.uses_grid_size)
3220 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT,
3221 &ctx->args.num_work_groups);
3222 if (shader->selector->info.uses_block_size &&
3223 shader->selector->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0)
3224 ac_add_arg(&ctx->args, AC_ARG_SGPR, 3, AC_ARG_INT, &ctx->block_size);
3225
3226 unsigned cs_user_data_dwords =
3227 shader->selector->info.properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD];
3228 if (cs_user_data_dwords) {
3229 ac_add_arg(&ctx->args, AC_ARG_SGPR, cs_user_data_dwords, AC_ARG_INT,
3230 &ctx->cs_user_data);
3231 }
3232
3233 /* Hardware SGPRs. */
3234 for (i = 0; i < 3; i++) {
3235 if (shader->selector->info.uses_block_id[i]) {
3236 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
3237 &ctx->args.workgroup_ids[i]);
3238 }
3239 }
3240 if (shader->selector->info.uses_subgroup_info)
3241 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tg_size);
3242
3243 /* Hardware VGPRs. */
3244 ac_add_arg(&ctx->args, AC_ARG_VGPR, 3, AC_ARG_INT,
3245 &ctx->args.local_invocation_ids);
3246 break;
3247 default:
3248 assert(0 && "unimplemented shader");
3249 return;
3250 }
3251
3252 si_llvm_create_func(ctx, "main", returns, num_returns,
3253 si_get_max_workgroup_size(shader));
3254
3255 /* Reserve register locations for VGPR inputs the PS prolog may need. */
3256 if (ctx->type == PIPE_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
3257 ac_llvm_add_target_dep_function_attr(ctx->main_fn,
3258 "InitialPSInputAddr",
3259 S_0286D0_PERSP_SAMPLE_ENA(1) |
3260 S_0286D0_PERSP_CENTER_ENA(1) |
3261 S_0286D0_PERSP_CENTROID_ENA(1) |
3262 S_0286D0_LINEAR_SAMPLE_ENA(1) |
3263 S_0286D0_LINEAR_CENTER_ENA(1) |
3264 S_0286D0_LINEAR_CENTROID_ENA(1) |
3265 S_0286D0_FRONT_FACE_ENA(1) |
3266 S_0286D0_ANCILLARY_ENA(1) |
3267 S_0286D0_POS_FIXED_PT_ENA(1));
3268 }
3269
3270 shader->info.num_input_sgprs = ctx->args.num_sgprs_used;
3271 shader->info.num_input_vgprs = ctx->args.num_vgprs_used;
3272
3273 assert(shader->info.num_input_vgprs >= num_prolog_vgprs);
3274 shader->info.num_input_vgprs -= num_prolog_vgprs;
3275
3276 if (shader->key.as_ls || ctx->type == PIPE_SHADER_TESS_CTRL) {
3277 if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
3278 /* The LSHS size is not known until draw time, so we append it
3279 * at the end of whatever LDS use there may be in the rest of
3280 * the shader (currently none, unless LLVM decides to do its
3281 * own LDS-based lowering).
3282 */
3283 ctx->ac.lds = LLVMAddGlobalInAddressSpace(
3284 ctx->ac.module, LLVMArrayType(ctx->i32, 0),
3285 "__lds_end", AC_ADDR_SPACE_LDS);
3286 LLVMSetAlignment(ctx->ac.lds, 256);
3287 } else {
3288 ac_declare_lds_as_pointer(&ctx->ac);
3289 }
3290 }
3291
3292 /* Unlike radv, we override these arguments in the prolog, so they
3293 * appear as normal arguments to the API shader.
3294 */
3295 if (ctx->type == PIPE_SHADER_VERTEX) {
3296 ctx->abi.vertex_id = ac_get_arg(&ctx->ac, ctx->args.vertex_id);
3297 ctx->abi.instance_id = ac_get_arg(&ctx->ac, ctx->args.instance_id);
3298 } else if (ctx->type == PIPE_SHADER_FRAGMENT) {
3299 ctx->abi.persp_centroid = ac_get_arg(&ctx->ac, ctx->args.persp_centroid);
3300 ctx->abi.linear_centroid = ac_get_arg(&ctx->ac, ctx->args.linear_centroid);
3301 }
3302 }
3303
3304 /* Ensure that the ESGS ring is declared.
3305 *
3306 * We declare it with 64KB alignment as a hint that the
3307 * pointer value will always be 0.
3308 */
3309 static void declare_esgs_ring(struct si_shader_context *ctx)
3310 {
3311 if (ctx->esgs_ring)
3312 return;
3313
3314 assert(!LLVMGetNamedGlobal(ctx->ac.module, "esgs_ring"));
3315
3316 ctx->esgs_ring = LLVMAddGlobalInAddressSpace(
3317 ctx->ac.module, LLVMArrayType(ctx->i32, 0),
3318 "esgs_ring",
3319 AC_ADDR_SPACE_LDS);
3320 LLVMSetLinkage(ctx->esgs_ring, LLVMExternalLinkage);
3321 LLVMSetAlignment(ctx->esgs_ring, 64 * 1024);
3322 }
3323
3324 /**
3325 * Load ESGS and GSVS ring buffer resource descriptors and save the variables
3326 * for later use.
3327 */
3328 static void preload_ring_buffers(struct si_shader_context *ctx)
3329 {
3330 LLVMBuilderRef builder = ctx->ac.builder;
3331
3332 LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
3333
3334 if (ctx->shader->key.as_es || ctx->type == PIPE_SHADER_GEOMETRY) {
3335 if (ctx->screen->info.chip_class <= GFX8) {
3336 unsigned ring =
3337 ctx->type == PIPE_SHADER_GEOMETRY ? SI_GS_RING_ESGS
3338 : SI_ES_RING_ESGS;
3339 LLVMValueRef offset = LLVMConstInt(ctx->i32, ring, 0);
3340
3341 ctx->esgs_ring =
3342 ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
3343 } else {
3344 if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
3345 /* Declare the ESGS ring as an explicit LDS symbol. */
3346 declare_esgs_ring(ctx);
3347 } else {
3348 ac_declare_lds_as_pointer(&ctx->ac);
3349 ctx->esgs_ring = ctx->ac.lds;
3350 }
3351 }
3352 }
3353
3354 if (ctx->shader->is_gs_copy_shader) {
3355 LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
3356
3357 ctx->gsvs_ring[0] =
3358 ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
3359 } else if (ctx->type == PIPE_SHADER_GEOMETRY) {
3360 const struct si_shader_selector *sel = ctx->shader->selector;
3361 LLVMValueRef offset = LLVMConstInt(ctx->i32, SI_RING_GSVS, 0);
3362 LLVMValueRef base_ring;
3363
3364 base_ring = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
3365
3366 /* The conceptual layout of the GSVS ring is
3367 * v0c0 .. vLc0 v0c1 .. vLc1 ..
3368 * but the real memory layout is swizzled across
3369 * threads:
3370 * t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
3371 * t16v0c0 ..
3372 * Override the buffer descriptor accordingly.
3373 */
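/* To make the swizzle math below concrete (hypothetical stream): with
 * num_components = 4 and gs_max_out_vertices = 16, the stride is
 * 4 * 4 * 16 = 256 bytes, and with a wave size of 64 the next stream
 * starts stride * wave_size = 256 * 64 = 16384 bytes further into the
 * ring.
 */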
3374 LLVMTypeRef v2i64 = LLVMVectorType(ctx->i64, 2);
3375 uint64_t stream_offset = 0;
3376
3377 for (unsigned stream = 0; stream < 4; ++stream) {
3378 unsigned num_components;
3379 unsigned stride;
3380 unsigned num_records;
3381 LLVMValueRef ring, tmp;
3382
3383 num_components = sel->info.num_stream_output_components[stream];
3384 if (!num_components)
3385 continue;
3386
3387 stride = 4 * num_components * sel->gs_max_out_vertices;
3388
3389 /* Limit on the stride field for <= GFX7. */
3390 assert(stride < (1 << 14));
3391
3392 num_records = ctx->ac.wave_size;
3393
3394 ring = LLVMBuildBitCast(builder, base_ring, v2i64, "");
3395 tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_0, "");
3396 tmp = LLVMBuildAdd(builder, tmp,
3397 LLVMConstInt(ctx->i64,
3398 stream_offset, 0), "");
3399 stream_offset += stride * ctx->ac.wave_size;
3400
3401 ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_0, "");
3402 ring = LLVMBuildBitCast(builder, ring, ctx->v4i32, "");
3403 tmp = LLVMBuildExtractElement(builder, ring, ctx->i32_1, "");
3404 tmp = LLVMBuildOr(builder, tmp,
3405 LLVMConstInt(ctx->i32,
3406 S_008F04_STRIDE(stride) |
3407 S_008F04_SWIZZLE_ENABLE(1), 0), "");
3408 ring = LLVMBuildInsertElement(builder, ring, tmp, ctx->i32_1, "");
3409 ring = LLVMBuildInsertElement(builder, ring,
3410 LLVMConstInt(ctx->i32, num_records, 0),
3411 LLVMConstInt(ctx->i32, 2, 0), "");
3412
3413 uint32_t rsrc3 =
3414 S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
3415 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
3416 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
3417 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
3418 S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
3419 S_008F0C_ADD_TID_ENABLE(1);
3420
3421 if (ctx->ac.chip_class >= GFX10) {
3422 rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
3423 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) |
3424 S_008F0C_RESOURCE_LEVEL(1);
3425 } else {
3426 rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
3427 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
3428 S_008F0C_ELEMENT_SIZE(1); /* element_size = 4 (bytes) */
3429 }
3430
3431 ring = LLVMBuildInsertElement(builder, ring,
3432 LLVMConstInt(ctx->i32, rsrc3, false),
3433 LLVMConstInt(ctx->i32, 3, 0), "");
3434
3435 ctx->gsvs_ring[stream] = ring;
3436 }
3437 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
3438 ctx->tess_offchip_ring = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TES);
3439 }
3440 }
3441
3442 /* For the UMR disassembler. */
3443 #define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
3444 #define DEBUGGER_NUM_MARKERS 5
3445
3446 static bool si_shader_binary_open(struct si_screen *screen,
3447 struct si_shader *shader,
3448 struct ac_rtld_binary *rtld)
3449 {
3450 const struct si_shader_selector *sel = shader->selector;
3451 const char *part_elfs[5];
3452 size_t part_sizes[5];
3453 unsigned num_parts = 0;
3454
3455 #define add_part(shader_or_part) \
3456 if (shader_or_part) { \
3457 part_elfs[num_parts] = (shader_or_part)->binary.elf_buffer; \
3458 part_sizes[num_parts] = (shader_or_part)->binary.elf_size; \
3459 num_parts++; \
3460 }
3461
3462 add_part(shader->prolog);
3463 add_part(shader->previous_stage);
3464 add_part(shader->prolog2);
3465 add_part(shader);
3466 add_part(shader->epilog);
3467
3468 #undef add_part
3469
3470 struct ac_rtld_symbol lds_symbols[2];
3471 unsigned num_lds_symbols = 0;
3472
3473 if (sel && screen->info.chip_class >= GFX9 && !shader->is_gs_copy_shader &&
3474 (sel->type == PIPE_SHADER_GEOMETRY || shader->key.as_ngg)) {
3475 /* We add this symbol even on LLVM <= 8 to ensure that
3476 * shader->config.lds_size is set correctly below.
3477 */
3478 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
3479 sym->name = "esgs_ring";
3480 sym->size = shader->gs_info.esgs_ring_size;
3481 sym->align = 64 * 1024;
3482 }
3483
3484 if (shader->key.as_ngg && sel->type == PIPE_SHADER_GEOMETRY) {
3485 struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
3486 sym->name = "ngg_emit";
3487 sym->size = shader->ngg.ngg_emit_size * 4;
3488 sym->align = 4;
3489 }
3490
3491 bool ok = ac_rtld_open(rtld, (struct ac_rtld_open_info){
3492 .info = &screen->info,
3493 .options = {
3494 .halt_at_entry = screen->options.halt_shaders,
3495 },
3496 .shader_type = tgsi_processor_to_shader_stage(sel->type),
3497 .wave_size = si_get_shader_wave_size(shader),
3498 .num_parts = num_parts,
3499 .elf_ptrs = part_elfs,
3500 .elf_sizes = part_sizes,
3501 .num_shared_lds_symbols = num_lds_symbols,
3502 .shared_lds_symbols = lds_symbols });
3503
3504 if (rtld->lds_size > 0) {
3505 unsigned alloc_granularity = screen->info.chip_class >= GFX7 ? 512 : 256;
3506 shader->config.lds_size =
3507 align(rtld->lds_size, alloc_granularity) / alloc_granularity;
3508 }
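/* Illustrative conversion (made-up size): on GFX8, alloc_granularity is
 * 512, so rtld->lds_size = 5000 bytes becomes
 * align(5000, 512) / 512 = 5120 / 512 = 10 allocation blocks, which is
 * the granularity the hardware LDS size field uses.
 */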
3509
3510 return ok;
3511 }
3512
3513 static unsigned si_get_shader_binary_size(struct si_screen *screen, struct si_shader *shader)
3514 {
3515 struct ac_rtld_binary rtld;
3516 si_shader_binary_open(screen, shader, &rtld);
3517 unsigned size = rtld.exec_size;
ac_rtld_close(&rtld); /* don't leak the opened rtld binary */
return size;
3518 }
3519
3520 static bool si_get_external_symbol(void *data, const char *name, uint64_t *value)
3521 {
3522 uint64_t *scratch_va = data;
3523
3524 if (!strcmp(scratch_rsrc_dword0_symbol, name)) {
3525 *value = (uint32_t)*scratch_va;
3526 return true;
3527 }
3528 if (!strcmp(scratch_rsrc_dword1_symbol, name)) {
3529 /* Enable scratch coalescing. */
3530 *value = S_008F04_BASE_ADDRESS_HI(*scratch_va >> 32) |
3531 S_008F04_SWIZZLE_ENABLE(1);
3532 return true;
3533 }
3534
3535 return false;
3536 }
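/* A minimal sketch (guarded out, not part of the driver) of how the two
 * scratch symbols above resolve against a 64-bit VA; the address below is
 * hypothetical.
 */
#if 0
static void example_scratch_rsrc_words(void)
{
        uint64_t scratch_va = 0x123456789000ull; /* hypothetical GPU address */
        /* SCRATCH_RSRC_DWORD0 gets the low 32 bits of the base address. */
        uint32_t dword0 = (uint32_t)scratch_va;
        /* SCRATCH_RSRC_DWORD1 gets the high bits plus the swizzle enable. */
        uint32_t dword1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
                          S_008F04_SWIZZLE_ENABLE(1);
        (void)dword0;
        (void)dword1;
}
#endif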
3537
3538 bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader,
3539 uint64_t scratch_va)
3540 {
3541 struct ac_rtld_binary binary;
3542 if (!si_shader_binary_open(sscreen, shader, &binary))
3543 return false;
3544
3545 si_resource_reference(&shader->bo, NULL);
3546 shader->bo = si_aligned_buffer_create(&sscreen->b,
3547 sscreen->info.cpdma_prefetch_writes_memory ?
3548 0 : SI_RESOURCE_FLAG_READ_ONLY,
3549 PIPE_USAGE_IMMUTABLE,
3550 align(binary.rx_size, SI_CPDMA_ALIGNMENT),
3551 256);
3552 if (!shader->bo)
3553 return false;
3554
3555 /* Upload. */
3556 struct ac_rtld_upload_info u = {};
3557 u.binary = &binary;
3558 u.get_external_symbol = si_get_external_symbol;
3559 u.cb_data = &scratch_va;
3560 u.rx_va = shader->bo->gpu_address;
3561 u.rx_ptr = sscreen->ws->buffer_map(shader->bo->buf, NULL,
3562 PIPE_TRANSFER_READ_WRITE |
3563 PIPE_TRANSFER_UNSYNCHRONIZED |
3564 RADEON_TRANSFER_TEMPORARY);
3565 if (!u.rx_ptr)
3566 return false;
3567
3568 bool ok = ac_rtld_upload(&u);
3569
3570 sscreen->ws->buffer_unmap(shader->bo->buf);
3571 ac_rtld_close(&binary);
3572
3573 return ok;
3574 }
3575
3576 static void si_shader_dump_disassembly(struct si_screen *screen,
3577 const struct si_shader_binary *binary,
3578 enum pipe_shader_type shader_type,
3579 unsigned wave_size,
3580 struct pipe_debug_callback *debug,
3581 const char *name, FILE *file)
3582 {
3583 struct ac_rtld_binary rtld_binary;
3584
3585 if (!ac_rtld_open(&rtld_binary, (struct ac_rtld_open_info){
3586 .info = &screen->info,
3587 .shader_type = tgsi_processor_to_shader_stage(shader_type),
3588 .wave_size = wave_size,
3589 .num_parts = 1,
3590 .elf_ptrs = &binary->elf_buffer,
3591 .elf_sizes = &binary->elf_size }))
3592 return;
3593
3594 const char *disasm;
3595 size_t nbytes;
3596
3597 if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm, &nbytes))
3598 goto out;
3599
3600 if (nbytes > INT_MAX)
3601 goto out;
3602
3603 if (debug && debug->debug_message) {
3604 /* Very long debug messages are cut off, so send the
3605 * disassembly one line at a time. This causes more
3606 * overhead, but on the plus side it simplifies
3607 * parsing of resulting logs.
3608 */
3609 pipe_debug_message(debug, SHADER_INFO,
3610 "Shader Disassembly Begin");
3611
3612 uint64_t line = 0;
3613 while (line < nbytes) {
3614 int count = nbytes - line;
3615 const char *nl = memchr(disasm + line, '\n', nbytes - line);
3616 if (nl)
3617 count = nl - (disasm + line);
3618
3619 if (count) {
3620 pipe_debug_message(debug, SHADER_INFO,
3621 "%.*s", count, disasm + line);
3622 }
3623
3624 line += count + 1;
3625 }
3626
3627 pipe_debug_message(debug, SHADER_INFO,
3628 "Shader Disassembly End");
3629 }
3630
3631 if (file) {
3632 fprintf(file, "Shader %s disassembly:\n", name);
3633 fprintf(file, "%.*s", (int)nbytes, disasm); /* disasm may not be NUL-terminated */
3634 }
3635
3636 out:
3637 ac_rtld_close(&rtld_binary);
3638 }
3639
3640 static void si_calculate_max_simd_waves(struct si_shader *shader)
3641 {
3642 struct si_screen *sscreen = shader->selector->screen;
3643 struct ac_shader_config *conf = &shader->config;
3644 unsigned num_inputs = shader->selector->info.num_inputs;
3645 unsigned lds_increment = sscreen->info.chip_class >= GFX7 ? 512 : 256;
3646 unsigned lds_per_wave = 0;
3647 unsigned max_simd_waves;
3648
3649 max_simd_waves = sscreen->info.max_wave64_per_simd;
3650
3651 /* Compute LDS usage for PS. */
3652 switch (shader->selector->type) {
3653 case PIPE_SHADER_FRAGMENT:
3654 /* The minimum usage per wave is (num_inputs * 48). The maximum
3655 * usage is (num_inputs * 48 * 16).
3656 * We can get anything in between and it varies between waves.
3657 *
3658 * The 48 bytes per input for a single primitive is equal to
3659 * 4 bytes/component * 4 components/input * 3 points.
3660 *
3661 * Other stages don't know the size at compile time or don't
3662 * allocate LDS per wave, but instead they do it per thread group.
3663 */
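/* Hypothetical example of the bound below: a PS with num_inputs = 8
 * needs at least 8 * 48 = 384 bytes per primitive, which align() rounds
 * up to one 512-byte block on GFX7+, on top of the shader's explicit
 * conf->lds_size * lds_increment bytes.
 */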
3664 lds_per_wave = conf->lds_size * lds_increment +
3665 align(num_inputs * 48, lds_increment);
3666 break;
3667 case PIPE_SHADER_COMPUTE:
3668 if (shader->selector) {
3669 unsigned max_workgroup_size =
3670 si_get_max_workgroup_size(shader);
3671 lds_per_wave = (conf->lds_size * lds_increment) /
3672 DIV_ROUND_UP(max_workgroup_size,
3673 sscreen->compute_wave_size);
3674 }
3675 break;
3676 default:;
3677 }
3678
3679 /* Compute the per-SIMD wave counts. */
3680 if (conf->num_sgprs) {
3681 max_simd_waves =
3682 MIN2(max_simd_waves,
3683 sscreen->info.num_physical_sgprs_per_simd / conf->num_sgprs);
3684 }
3685
3686 if (conf->num_vgprs) {
3687 /* Always print wave limits as Wave64, so that we can compare
3688 * Wave32 and Wave64 with shader-db fairly. */
3689 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd;
3690 max_simd_waves = MIN2(max_simd_waves, max_vgprs / conf->num_vgprs);
3691 }
3692
3693 /* LDS is 64KB per CU (4 SIMDs) on GFX6-9, which is 16KB per SIMD (usage above
3694 * 16KB makes some SIMDs unoccupied).
3695 *
3696 * On GFX10, LDS is 128KB in WGP mode and 64KB in CU mode. Assume the WGP mode is used.
3697 */
3698 unsigned max_lds_size = sscreen->info.chip_class >= GFX10 ? 128*1024 : 64*1024;
3699 unsigned max_lds_per_simd = max_lds_size / 4;
3700 if (lds_per_wave)
3701 max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);
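/* Combining the caps above with hypothetical limits: 40 SGPRs and 64
 * VGPRs on a SIMD with 800 physical SGPRs and 1024 Wave64 VGPRs give
 * MIN2(800 / 40, 1024 / 64) = MIN2(20, 16) = 16 waves per SIMD, on top
 * of the max_wave64_per_simd and LDS limits already applied.
 */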
3702
3703 shader->info.max_simd_waves = max_simd_waves;
3704 }
3705
3706 void si_shader_dump_stats_for_shader_db(struct si_screen *screen,
3707 struct si_shader *shader,
3708 struct pipe_debug_callback *debug)
3709 {
3710 const struct ac_shader_config *conf = &shader->config;
3711
3712 if (screen->options.debug_disassembly)
3713 si_shader_dump_disassembly(screen, &shader->binary,
3714 shader->selector->type,
3715 si_get_shader_wave_size(shader),
3716 debug, "main", NULL);
3717
3718 pipe_debug_message(debug, SHADER_INFO,
3719 "Shader Stats: SGPRS: %d VGPRS: %d Code Size: %d "
3720 "LDS: %d Scratch: %d Max Waves: %d Spilled SGPRs: %d "
3721 "Spilled VGPRs: %d PrivMem VGPRs: %d",
3722 conf->num_sgprs, conf->num_vgprs,
3723 si_get_shader_binary_size(screen, shader),
3724 conf->lds_size, conf->scratch_bytes_per_wave,
3725 shader->info.max_simd_waves, conf->spilled_sgprs,
3726 conf->spilled_vgprs, shader->info.private_mem_vgprs);
3727 }
3728
3729 static void si_shader_dump_stats(struct si_screen *sscreen,
3730 struct si_shader *shader,
3731 FILE *file,
3732 bool check_debug_option)
3733 {
3734 const struct ac_shader_config *conf = &shader->config;
3735
3736 if (!check_debug_option ||
3737 si_can_dump_shader(sscreen, shader->selector->type)) {
3738 if (shader->selector->type == PIPE_SHADER_FRAGMENT) {
3739 fprintf(file, "*** SHADER CONFIG ***\n"
3740 "SPI_PS_INPUT_ADDR = 0x%04x\n"
3741 "SPI_PS_INPUT_ENA = 0x%04x\n",
3742 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
3743 }
3744
3745 fprintf(file, "*** SHADER STATS ***\n"
3746 "SGPRS: %d\n"
3747 "VGPRS: %d\n"
3748 "Spilled SGPRs: %d\n"
3749 "Spilled VGPRs: %d\n"
3750 "Private memory VGPRs: %d\n"
3751 "Code Size: %d bytes\n"
3752 "LDS: %d blocks\n"
3753 "Scratch: %d bytes per wave\n"
3754 "Max Waves: %d\n"
3755 "********************\n\n\n",
3756 conf->num_sgprs, conf->num_vgprs,
3757 conf->spilled_sgprs, conf->spilled_vgprs,
3758 shader->info.private_mem_vgprs,
3759 si_get_shader_binary_size(sscreen, shader),
3760 conf->lds_size, conf->scratch_bytes_per_wave,
3761 shader->info.max_simd_waves);
3762 }
3763 }
3764
3765 const char *si_get_shader_name(const struct si_shader *shader)
3766 {
3767 switch (shader->selector->type) {
3768 case PIPE_SHADER_VERTEX:
3769 if (shader->key.as_es)
3770 return "Vertex Shader as ES";
3771 else if (shader->key.as_ls)
3772 return "Vertex Shader as LS";
3773 else if (shader->key.opt.vs_as_prim_discard_cs)
3774 return "Vertex Shader as Primitive Discard CS";
3775 else if (shader->key.as_ngg)
3776 return "Vertex Shader as ESGS";
3777 else
3778 return "Vertex Shader as VS";
3779 case PIPE_SHADER_TESS_CTRL:
3780 return "Tessellation Control Shader";
3781 case PIPE_SHADER_TESS_EVAL:
3782 if (shader->key.as_es)
3783 return "Tessellation Evaluation Shader as ES";
3784 else if (shader->key.as_ngg)
3785 return "Tessellation Evaluation Shader as ESGS";
3786 else
3787 return "Tessellation Evaluation Shader as VS";
3788 case PIPE_SHADER_GEOMETRY:
3789 if (shader->is_gs_copy_shader)
3790 return "GS Copy Shader as VS";
3791 else
3792 return "Geometry Shader";
3793 case PIPE_SHADER_FRAGMENT:
3794 return "Pixel Shader";
3795 case PIPE_SHADER_COMPUTE:
3796 return "Compute Shader";
3797 default:
3798 return "Unknown Shader";
3799 }
3800 }
3801
3802 void si_shader_dump(struct si_screen *sscreen, struct si_shader *shader,
3803 struct pipe_debug_callback *debug,
3804 FILE *file, bool check_debug_option)
3805 {
3806 enum pipe_shader_type shader_type = shader->selector->type;
3807
3808 if (!check_debug_option ||
3809 si_can_dump_shader(sscreen, shader_type))
3810 si_dump_shader_key(shader, file);
3811
3812 if (!check_debug_option && shader->binary.llvm_ir_string) {
3813 if (shader->previous_stage &&
3814 shader->previous_stage->binary.llvm_ir_string) {
3815 fprintf(file, "\n%s - previous stage - LLVM IR:\n\n",
3816 si_get_shader_name(shader));
3817 fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
3818 }
3819
3820 fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
3821 si_get_shader_name(shader));
3822 fprintf(file, "%s\n", shader->binary.llvm_ir_string);
3823 }
3824
3825 if (!check_debug_option ||
3826 (si_can_dump_shader(sscreen, shader_type) &&
3827 !(sscreen->debug_flags & DBG(NO_ASM)))) {
3828 unsigned wave_size = si_get_shader_wave_size(shader);
3829
3830 fprintf(file, "\n%s:\n", si_get_shader_name(shader));
3831
3832 if (shader->prolog)
3833 si_shader_dump_disassembly(sscreen, &shader->prolog->binary,
3834 shader_type, wave_size, debug, "prolog", file);
3835 if (shader->previous_stage)
3836 si_shader_dump_disassembly(sscreen, &shader->previous_stage->binary,
3837 shader_type, wave_size, debug, "previous stage", file);
3838 if (shader->prolog2)
3839 si_shader_dump_disassembly(sscreen, &shader->prolog2->binary,
3840 shader_type, wave_size, debug, "prolog2", file);
3841
3842 si_shader_dump_disassembly(sscreen, &shader->binary, shader_type,
3843 wave_size, debug, "main", file);
3844
3845 if (shader->epilog)
3846 si_shader_dump_disassembly(sscreen, &shader->epilog->binary,
3847 shader_type, wave_size, debug, "epilog", file);
3848 fprintf(file, "\n");
3849 }
3850
3851 si_shader_dump_stats(sscreen, shader, file, check_debug_option);
3852 }
3853
3854 static int si_compile_llvm(struct si_screen *sscreen,
3855 struct si_shader_binary *binary,
3856 struct ac_shader_config *conf,
3857 struct ac_llvm_compiler *compiler,
3858 LLVMModuleRef mod,
3859 struct pipe_debug_callback *debug,
3860 enum pipe_shader_type shader_type,
3861 unsigned wave_size,
3862 const char *name,
3863 bool less_optimized)
3864 {
3865 unsigned count = p_atomic_inc_return(&sscreen->num_compilations);
3866
3867 if (si_can_dump_shader(sscreen, shader_type)) {
3868 fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
3869
3870 if (!(sscreen->debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
3871 fprintf(stderr, "%s LLVM IR:\n\n", name);
3872 ac_dump_module(mod);
3873 fprintf(stderr, "\n");
3874 }
3875 }
3876
3877 if (sscreen->record_llvm_ir) {
3878 char *ir = LLVMPrintModuleToString(mod);
3879 binary->llvm_ir_string = strdup(ir);
3880 LLVMDisposeMessage(ir);
3881 }
3882
3883 if (!si_replace_shader(count, binary)) {
3884 unsigned r = si_llvm_compile(mod, binary, compiler, debug,
3885 less_optimized, wave_size);
3886 if (r)
3887 return r;
3888 }
3889
3890 struct ac_rtld_binary rtld;
3891 if (!ac_rtld_open(&rtld, (struct ac_rtld_open_info){
3892 .info = &sscreen->info,
3893 .shader_type = tgsi_processor_to_shader_stage(shader_type),
3894 .wave_size = wave_size,
3895 .num_parts = 1,
3896 .elf_ptrs = &binary->elf_buffer,
3897 .elf_sizes = &binary->elf_size }))
3898 return -1;
3899
3900 bool ok = ac_rtld_read_config(&rtld, conf);
3901 ac_rtld_close(&rtld);
3902 if (!ok)
3903 return -1;
3904
3905 /* Enable 64-bit and 16-bit denormals, because there is no performance
3906 * cost.
3907 *
3908 * If denormals are enabled, all floating-point output modifiers are
3909 * ignored.
3910 *
3911 * Don't enable denormals for 32-bit floats, because:
3912 * - Floating-point output modifiers would be ignored by the hw.
3913 * - Some opcodes don't support denormals, such as v_mad_f32. We would
3914 * have to stop using those.
3915 * - GFX6 & GFX7 would be very slow.
3916 */
3917 conf->float_mode |= V_00B028_FP_64_DENORMS;
3918
3919 return 0;
3920 }
3921
3922 /* Generate code for the hardware VS shader stage to go with a geometry shader */
3923 struct si_shader *
3924 si_generate_gs_copy_shader(struct si_screen *sscreen,
3925 struct ac_llvm_compiler *compiler,
3926 struct si_shader_selector *gs_selector,
3927 struct pipe_debug_callback *debug)
3928 {
3929 struct si_shader_context ctx;
3930 struct si_shader *shader;
3931 LLVMBuilderRef builder;
3932 struct si_shader_output_values outputs[SI_MAX_VS_OUTPUTS];
3933 struct si_shader_info *gsinfo = &gs_selector->info;
3934 int i;
3935
3937 shader = CALLOC_STRUCT(si_shader);
3938 if (!shader)
3939 return NULL;
3940
3941 /* We can leave the fence as permanently signaled because the GS copy
3942 * shader only becomes visible globally after it has been compiled. */
3943 util_queue_fence_init(&shader->ready);
3944
3945 shader->selector = gs_selector;
3946 shader->is_gs_copy_shader = true;
3947
3948 si_llvm_context_init(&ctx, sscreen, compiler,
3949 si_get_wave_size(sscreen, PIPE_SHADER_VERTEX, false, false));
3950 ctx.shader = shader;
3951 ctx.type = PIPE_SHADER_VERTEX;
3952
3953 builder = ctx.ac.builder;
3954
3955 create_function(&ctx);
3956 preload_ring_buffers(&ctx);
3957
3958 LLVMValueRef voffset =
3959 LLVMBuildMul(ctx.ac.builder, ctx.abi.vertex_id,
3960 LLVMConstInt(ctx.i32, 4, 0), "");
3961
3962 /* Fetch the vertex stream ID. */
3963 LLVMValueRef stream_id;
3964
3965 if (!sscreen->use_ngg_streamout && gs_selector->so.num_outputs)
3966 stream_id = si_unpack_param(&ctx, ctx.streamout_config, 24, 2);
3967 else
3968 stream_id = ctx.i32_0;
3969
3970 /* Fill in output information. */
3971 for (i = 0; i < gsinfo->num_outputs; ++i) {
3972 outputs[i].semantic_name = gsinfo->output_semantic_name[i];
3973 outputs[i].semantic_index = gsinfo->output_semantic_index[i];
3974
3975 for (int chan = 0; chan < 4; chan++) {
3976 outputs[i].vertex_stream[chan] =
3977 (gsinfo->output_streams[i] >> (2 * chan)) & 3;
3978 }
3979 }
3980
3981 LLVMBasicBlockRef end_bb;
3982 LLVMValueRef switch_inst;
3983
3984 end_bb = LLVMAppendBasicBlockInContext(ctx.ac.context, ctx.main_fn, "end");
3985 switch_inst = LLVMBuildSwitch(builder, stream_id, end_bb, 4);
3986
3987 for (int stream = 0; stream < 4; stream++) {
3988 LLVMBasicBlockRef bb;
3989 unsigned offset;
3990
3991 if (!gsinfo->num_stream_output_components[stream])
3992 continue;
3993
3994 if (stream > 0 && !gs_selector->so.num_outputs)
3995 continue;
3996
3997 bb = LLVMInsertBasicBlockInContext(ctx.ac.context, end_bb, "out");
3998 LLVMAddCase(switch_inst, LLVMConstInt(ctx.i32, stream, 0), bb);
3999 LLVMPositionBuilderAtEnd(builder, bb);
4000
4001 /* Fetch vertex data from GSVS ring */
4002 offset = 0;
4003 for (i = 0; i < gsinfo->num_outputs; ++i) {
4004 for (unsigned chan = 0; chan < 4; chan++) {
4005 if (!(gsinfo->output_usagemask[i] & (1 << chan)) ||
4006 outputs[i].vertex_stream[chan] != stream) {
4007 outputs[i].values[chan] = LLVMGetUndef(ctx.f32);
4008 continue;
4009 }
4010
4011 LLVMValueRef soffset = LLVMConstInt(ctx.i32,
4012 offset * gs_selector->gs_max_out_vertices * 16 * 4, 0);
4013 offset++;
4014
4015 outputs[i].values[chan] =
4016 ac_build_buffer_load(&ctx.ac,
4017 ctx.gsvs_ring[0], 1,
4018 ctx.i32_0, voffset,
4019 soffset, 0, ac_glc | ac_slc,
4020 true, false);
4021 }
4022 }
4023
4024 /* Streamout and exports. */
4025 if (!sscreen->use_ngg_streamout && gs_selector->so.num_outputs) {
4026 si_llvm_emit_streamout(&ctx, outputs,
4027 gsinfo->num_outputs,
4028 stream);
4029 }
4030
4031 if (stream == 0)
4032 si_llvm_export_vs(&ctx, outputs, gsinfo->num_outputs);
4033
4034 LLVMBuildBr(builder, end_bb);
4035 }
4036
4037 LLVMPositionBuilderAtEnd(builder, end_bb);
4038
4039 LLVMBuildRetVoid(ctx.ac.builder);
4040
4041 ctx.type = PIPE_SHADER_GEOMETRY; /* override for shader dumping */
4042 si_llvm_optimize_module(&ctx);
4043
4044 bool ok = false;
4045 if (si_compile_llvm(sscreen, &ctx.shader->binary,
4046 &ctx.shader->config, ctx.compiler,
4047 ctx.ac.module,
4048 debug, PIPE_SHADER_GEOMETRY, ctx.ac.wave_size,
4049 "GS Copy Shader", false) == 0) {
4050 if (si_can_dump_shader(sscreen, PIPE_SHADER_GEOMETRY))
4051 fprintf(stderr, "GS Copy Shader:\n");
4052 si_shader_dump(sscreen, ctx.shader, debug, stderr, true);
4053
4054 if (!ctx.shader->config.scratch_bytes_per_wave)
4055 ok = si_shader_binary_upload(sscreen, ctx.shader, 0);
4056 else
4057 ok = true;
4058 }
4059
4060 si_llvm_dispose(&ctx);
4061
4062 if (!ok) {
4063 FREE(shader);
4064 shader = NULL;
4065 } else {
4066 si_fix_resource_usage(sscreen, shader);
4067 }
4068 return shader;
4069 }
4070
4071 static void si_dump_shader_key_vs(const struct si_shader_key *key,
4072 const struct si_vs_prolog_bits *prolog,
4073 const char *prefix, FILE *f)
4074 {
4075 fprintf(f, " %s.instance_divisor_is_one = %u\n",
4076 prefix, prolog->instance_divisor_is_one);
4077 fprintf(f, " %s.instance_divisor_is_fetched = %u\n",
4078 prefix, prolog->instance_divisor_is_fetched);
4079 fprintf(f, " %s.unpack_instance_id_from_vertex_id = %u\n",
4080 prefix, prolog->unpack_instance_id_from_vertex_id);
4081 fprintf(f, " %s.ls_vgpr_fix = %u\n",
4082 prefix, prolog->ls_vgpr_fix);
4083
4084 fprintf(f, " mono.vs.fetch_opencode = %x\n", key->mono.vs_fetch_opencode);
4085 fprintf(f, " mono.vs.fix_fetch = {");
4086 for (int i = 0; i < SI_MAX_ATTRIBS; i++) {
4087 union si_vs_fix_fetch fix = key->mono.vs_fix_fetch[i];
4088 if (i)
4089 fprintf(f, ", ");
4090 if (!fix.bits)
4091 fprintf(f, "0");
4092 else
4093 fprintf(f, "%u.%u.%u.%u", fix.u.reverse, fix.u.log_size,
4094 fix.u.num_channels_m1, fix.u.format);
4095 }
4096 fprintf(f, "}\n");
4097 }
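/* Example of the fix_fetch dump format above (hypothetical key): an
 * attribute with reverse = 0, log_size = 2, num_channels_m1 = 3 and
 * format = 0 prints as "0.2.3.0", i.e. a non-reversed fetch of four
 * 4-byte channels.
 */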
4098
4099 static void si_dump_shader_key(const struct si_shader *shader, FILE *f)
4100 {
4101 const struct si_shader_key *key = &shader->key;
4102 enum pipe_shader_type shader_type = shader->selector->type;
4103
4104 fprintf(f, "SHADER KEY\n");
4105
4106 switch (shader_type) {
4107 case PIPE_SHADER_VERTEX:
4108 si_dump_shader_key_vs(key, &key->part.vs.prolog,
4109 "part.vs.prolog", f);
4110 fprintf(f, " as_es = %u\n", key->as_es);
4111 fprintf(f, " as_ls = %u\n", key->as_ls);
4112 fprintf(f, " as_ngg = %u\n", key->as_ngg);
4113 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
4114 key->mono.u.vs_export_prim_id);
4115 fprintf(f, " opt.vs_as_prim_discard_cs = %u\n",
4116 key->opt.vs_as_prim_discard_cs);
4117 fprintf(f, " opt.cs_prim_type = %s\n",
4118 tgsi_primitive_names[key->opt.cs_prim_type]);
4119 fprintf(f, " opt.cs_indexed = %u\n",
4120 key->opt.cs_indexed);
4121 fprintf(f, " opt.cs_instancing = %u\n",
4122 key->opt.cs_instancing);
4123 fprintf(f, " opt.cs_primitive_restart = %u\n",
4124 key->opt.cs_primitive_restart);
4125 fprintf(f, " opt.cs_provoking_vertex_first = %u\n",
4126 key->opt.cs_provoking_vertex_first);
4127 fprintf(f, " opt.cs_need_correct_orientation = %u\n",
4128 key->opt.cs_need_correct_orientation);
4129 fprintf(f, " opt.cs_cull_front = %u\n",
4130 key->opt.cs_cull_front);
4131 fprintf(f, " opt.cs_cull_back = %u\n",
4132 key->opt.cs_cull_back);
4133 fprintf(f, " opt.cs_cull_z = %u\n",
4134 key->opt.cs_cull_z);
4135 fprintf(f, " opt.cs_halfz_clip_space = %u\n",
4136 key->opt.cs_halfz_clip_space);
4137 break;
4138
4139 case PIPE_SHADER_TESS_CTRL:
4140 if (shader->selector->screen->info.chip_class >= GFX9) {
4141 si_dump_shader_key_vs(key, &key->part.tcs.ls_prolog,
4142 "part.tcs.ls_prolog", f);
4143 }
4144 fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->part.tcs.epilog.prim_mode);
4145 fprintf(f, " mono.u.ff_tcs_inputs_to_copy = 0x%"PRIx64"\n", key->mono.u.ff_tcs_inputs_to_copy);
4146 break;
4147
4148 case PIPE_SHADER_TESS_EVAL:
4149 fprintf(f, " as_es = %u\n", key->as_es);
4150 fprintf(f, " as_ngg = %u\n", key->as_ngg);
4151 fprintf(f, " mono.u.vs_export_prim_id = %u\n",
4152 key->mono.u.vs_export_prim_id);
4153 break;
4154
4155 case PIPE_SHADER_GEOMETRY:
4156 if (shader->is_gs_copy_shader)
4157 break;
4158
4159 if (shader->selector->screen->info.chip_class >= GFX9 &&
4160 key->part.gs.es->type == PIPE_SHADER_VERTEX) {
4161 si_dump_shader_key_vs(key, &key->part.gs.vs_prolog,
4162 "part.gs.vs_prolog", f);
4163 }
4164 fprintf(f, " part.gs.prolog.tri_strip_adj_fix = %u\n", key->part.gs.prolog.tri_strip_adj_fix);
4165 fprintf(f, " part.gs.prolog.gfx9_prev_is_vs = %u\n", key->part.gs.prolog.gfx9_prev_is_vs);
4166 fprintf(f, " as_ngg = %u\n", key->as_ngg);
4167 break;
4168
4169 case PIPE_SHADER_COMPUTE:
4170 break;
4171
4172 case PIPE_SHADER_FRAGMENT:
4173 fprintf(f, " part.ps.prolog.color_two_side = %u\n", key->part.ps.prolog.color_two_side);
4174 fprintf(f, " part.ps.prolog.flatshade_colors = %u\n", key->part.ps.prolog.flatshade_colors);
4175 fprintf(f, " part.ps.prolog.poly_stipple = %u\n", key->part.ps.prolog.poly_stipple);
4176 fprintf(f, " part.ps.prolog.force_persp_sample_interp = %u\n", key->part.ps.prolog.force_persp_sample_interp);
4177 fprintf(f, " part.ps.prolog.force_linear_sample_interp = %u\n", key->part.ps.prolog.force_linear_sample_interp);
4178 fprintf(f, " part.ps.prolog.force_persp_center_interp = %u\n", key->part.ps.prolog.force_persp_center_interp);
4179 fprintf(f, " part.ps.prolog.force_linear_center_interp = %u\n", key->part.ps.prolog.force_linear_center_interp);
4180 fprintf(f, " part.ps.prolog.bc_optimize_for_persp = %u\n", key->part.ps.prolog.bc_optimize_for_persp);
4181 fprintf(f, " part.ps.prolog.bc_optimize_for_linear = %u\n", key->part.ps.prolog.bc_optimize_for_linear);
4182 fprintf(f, " part.ps.prolog.samplemask_log_ps_iter = %u\n", key->part.ps.prolog.samplemask_log_ps_iter);
4183 fprintf(f, " part.ps.epilog.spi_shader_col_format = 0x%x\n", key->part.ps.epilog.spi_shader_col_format);
4184 fprintf(f, " part.ps.epilog.color_is_int8 = 0x%X\n", key->part.ps.epilog.color_is_int8);
4185 fprintf(f, " part.ps.epilog.color_is_int10 = 0x%X\n", key->part.ps.epilog.color_is_int10);
4186 fprintf(f, " part.ps.epilog.last_cbuf = %u\n", key->part.ps.epilog.last_cbuf);
4187 fprintf(f, " part.ps.epilog.alpha_func = %u\n", key->part.ps.epilog.alpha_func);
4188 fprintf(f, " part.ps.epilog.alpha_to_one = %u\n", key->part.ps.epilog.alpha_to_one);
4189 fprintf(f, " part.ps.epilog.poly_line_smoothing = %u\n", key->part.ps.epilog.poly_line_smoothing);
4190 fprintf(f, " part.ps.epilog.clamp_color = %u\n", key->part.ps.epilog.clamp_color);
4191 fprintf(f, " mono.u.ps.interpolate_at_sample_force_center = %u\n", key->mono.u.ps.interpolate_at_sample_force_center);
4192 fprintf(f, " mono.u.ps.fbfetch_msaa = %u\n", key->mono.u.ps.fbfetch_msaa);
4193 fprintf(f, " mono.u.ps.fbfetch_is_1D = %u\n", key->mono.u.ps.fbfetch_is_1D);
4194 fprintf(f, " mono.u.ps.fbfetch_layered = %u\n", key->mono.u.ps.fbfetch_layered);
4195 break;
4196
4197 default:
4198 assert(0);
4199 }
4200
4201 if ((shader_type == PIPE_SHADER_GEOMETRY ||
4202 shader_type == PIPE_SHADER_TESS_EVAL ||
4203 shader_type == PIPE_SHADER_VERTEX) &&
4204 !key->as_es && !key->as_ls) {
4205 fprintf(f, " opt.kill_outputs = 0x%"PRIx64"\n", key->opt.kill_outputs);
4206 fprintf(f, " opt.clip_disable = %u\n", key->opt.clip_disable);
4207 }
4208 }
4209
4210 static void si_optimize_vs_outputs(struct si_shader_context *ctx)
4211 {
4212 struct si_shader *shader = ctx->shader;
4213 struct si_shader_info *info = &shader->selector->info;
4214
4215 if ((ctx->type != PIPE_SHADER_VERTEX &&
4216 ctx->type != PIPE_SHADER_TESS_EVAL) ||
4217 shader->key.as_ls ||
4218 shader->key.as_es)
4219 return;
4220
4221 ac_optimize_vs_outputs(&ctx->ac,
4222 ctx->main_fn,
4223 shader->info.vs_output_param_offset,
4224 info->num_outputs,
4225 &shader->info.nr_param_exports);
4226 }
4227
4228 static void si_init_exec_from_input(struct si_shader_context *ctx,
4229 struct ac_arg param, unsigned bitoffset)
4230 {
4231 LLVMValueRef args[] = {
4232 ac_get_arg(&ctx->ac, param),
4233 LLVMConstInt(ctx->i32, bitoffset, 0),
4234 };
4235 ac_build_intrinsic(&ctx->ac,
4236 "llvm.amdgcn.init.exec.from.input",
4237 ctx->voidt, args, 2, AC_FUNC_ATTR_CONVERGENT);
4238 }
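/* Note on the intrinsic above (semantics assumed from its name and use):
 * llvm.amdgcn.init.exec.from.input sets EXEC to a mask of the first N
 * lanes, where N is the 8-bit thread count found at 'bitoffset' within
 * the given SGPR; callers below pass merged_wave_info with bitoffset 0
 * to enable exactly the first-stage threads of a merged wave.
 */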
4239
4240 static bool si_vs_needs_prolog(const struct si_shader_selector *sel,
4241 const struct si_vs_prolog_bits *key)
4242 {
4243 /* VGPR initialization fixup for Vega10 and Raven is always done in the
4244 * VS prolog. */
4245 return sel->vs_needs_prolog ||
4246 key->ls_vgpr_fix ||
4247 key->unpack_instance_id_from_vertex_id;
4248 }
4249
4250 LLVMValueRef si_is_es_thread(struct si_shader_context *ctx)
4251 {
4252 /* Return true if the current thread should execute as an ES thread. */
4253 return LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
4254 ac_get_thread_id(&ctx->ac),
4255 si_unpack_param(ctx, ctx->merged_wave_info, 0, 8), "");
4256 }
4257
4258 LLVMValueRef si_is_gs_thread(struct si_shader_context *ctx)
4259 {
4260 /* Return true if the current thread should execute as a GS thread. */
4261 return LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
4262 ac_get_thread_id(&ctx->ac),
4263 si_unpack_param(ctx, ctx->merged_wave_info, 8, 8), "");
4264 }
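/* Both helpers above rely on the merged_wave_info packing assumed
 * throughout this file: bits [7:0] carry the first-stage (ES/LS) thread
 * count and bits [15:8] the second-stage (GS/HS) thread count of the
 * merged wave.
 */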
4265
4266 static bool si_build_main_function(struct si_shader_context *ctx,
4267 struct nir_shader *nir, bool free_nir)
4268 {
4269 struct si_shader *shader = ctx->shader;
4270 struct si_shader_selector *sel = shader->selector;
4271
4272 switch (ctx->type) {
4273 case PIPE_SHADER_VERTEX:
4274 if (shader->key.as_ls)
4275 ctx->abi.emit_outputs = si_llvm_emit_ls_epilogue;
4276 else if (shader->key.as_es)
4277 ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
4278 else if (shader->key.opt.vs_as_prim_discard_cs)
4279 ctx->abi.emit_outputs = si_llvm_emit_prim_discard_cs_epilogue;
4280 else if (shader->key.as_ngg)
4281 ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
4282 else
4283 ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
4284 ctx->abi.load_base_vertex = get_base_vertex;
4285 break;
4286 case PIPE_SHADER_TESS_CTRL:
4287 ctx->abi.load_tess_varyings = si_nir_load_tcs_varyings;
4288 ctx->abi.load_tess_level = si_load_tess_level;
4289 ctx->abi.store_tcs_outputs = si_nir_store_output_tcs;
4290 ctx->abi.emit_outputs = si_llvm_emit_tcs_epilogue;
4291 ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
4292 break;
4293 case PIPE_SHADER_TESS_EVAL:
4294 ctx->abi.load_tess_varyings = si_nir_load_input_tes;
4295 ctx->abi.load_tess_coord = si_load_tess_coord;
4296 ctx->abi.load_tess_level = si_load_tess_level;
4297 ctx->abi.load_patch_vertices_in = si_load_patch_vertices_in;
4298 if (shader->key.as_es)
4299 ctx->abi.emit_outputs = si_llvm_emit_es_epilogue;
4300 else if (shader->key.as_ngg)
4301 ctx->abi.emit_outputs = gfx10_emit_ngg_epilogue;
4302 else
4303 ctx->abi.emit_outputs = si_llvm_emit_vs_epilogue;
4304 break;
4305 case PIPE_SHADER_GEOMETRY:
4306 ctx->abi.load_inputs = si_nir_load_input_gs;
4307 ctx->abi.emit_vertex = si_llvm_emit_vertex;
4308 ctx->abi.emit_primitive = si_llvm_emit_primitive;
4309 ctx->abi.emit_outputs = si_llvm_emit_gs_epilogue;
4310 break;
4311 case PIPE_SHADER_FRAGMENT:
4312 si_llvm_init_ps_callbacks(ctx);
4313 break;
4314 case PIPE_SHADER_COMPUTE:
4315 ctx->abi.load_local_group_size = get_block_size;
4316 break;
4317 default:
4318 assert(!"Unsupported shader type");
4319 return false;
4320 }
4321
4322 ctx->abi.load_ubo = load_ubo;
4323 ctx->abi.load_ssbo = load_ssbo;
4324
4325 create_function(ctx);
4326 preload_ring_buffers(ctx);
4327
4328 if (ctx->type == PIPE_SHADER_TESS_CTRL &&
4329 sel->info.tessfactors_are_def_in_all_invocs) {
4330 for (unsigned i = 0; i < 6; i++) {
4331 ctx->invoc0_tess_factors[i] =
4332 ac_build_alloca_undef(&ctx->ac, ctx->i32, "");
4333 }
4334 }
4335
4336 if (ctx->type == PIPE_SHADER_GEOMETRY) {
4337 for (unsigned i = 0; i < 4; i++) {
4338 ctx->gs_next_vertex[i] =
4339 ac_build_alloca(&ctx->ac, ctx->i32, "");
4340 }
4341 if (shader->key.as_ngg) {
4342 for (unsigned i = 0; i < 4; ++i) {
4343 ctx->gs_curprim_verts[i] =
4344 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
4345 ctx->gs_generated_prims[i] =
4346 ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
4347 }
4348
4349 unsigned scratch_size = 8;
4350 if (sel->so.num_outputs)
4351 scratch_size = 44;
4352
4353 LLVMTypeRef ai32 = LLVMArrayType(ctx->i32, scratch_size);
4354 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
4355 ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
4356 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(ai32));
4357 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
4358
4359 ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(ctx->ac.module,
4360 LLVMArrayType(ctx->i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
4361 LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
4362 LLVMSetAlignment(ctx->gs_ngg_emit, 4);
4363 }
4364 }
4365
4366 if (ctx->type != PIPE_SHADER_GEOMETRY &&
4367 (shader->key.as_ngg && !shader->key.as_es)) {
4368 /* Unconditionally declare scratch space base for streamout and
4369 * vertex compaction. Whether space is actually allocated is
4370 * determined during linking / PM4 creation.
4371 *
4372 * Add an extra dword per vertex to ensure an odd stride, which
4373 * avoids bank conflicts for SoA accesses.
4374 */
4375 if (!gfx10_is_ngg_passthrough(shader))
4376 declare_esgs_ring(ctx);
4377
4378 /* This is really only needed when streamout and / or vertex
4379 * compaction is enabled.
4380 */
4381 if (sel->so.num_outputs && !ctx->gs_ngg_scratch) {
4382 LLVMTypeRef asi32 = LLVMArrayType(ctx->i32, 8);
4383 ctx->gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx->ac.module,
4384 asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
4385 LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(asi32));
4386 LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
4387 }
4388 }
4389
4390 /* For GFX9 merged shaders:
4391 * - Set EXEC for the first shader. If the prolog is present, set
4392 * EXEC there instead.
4393 * - Add a barrier before the second shader.
4394 * - In the second shader, reset EXEC to ~0 and wrap the main part in
4395 * an if-statement. This is required for correctness in geometry
4396 * shaders, to ensure that empty GS waves do not send GS_EMIT and
4397 * GS_CUT messages.
4398 *
4399 * For monolithic merged shaders, the first shader is wrapped in an
4400 * if-block together with its prolog in si_build_wrapper_function.
4401 *
4402 * NGG vertex and tess eval shaders running as the last
4403 * vertex/geometry stage handle execution explicitly using
4404 * if-statements.
4405 */
4406 if (ctx->screen->info.chip_class >= GFX9) {
4407 if (!shader->is_monolithic &&
4408 (shader->key.as_es || shader->key.as_ls) &&
4409 (ctx->type == PIPE_SHADER_TESS_EVAL ||
4410 (ctx->type == PIPE_SHADER_VERTEX &&
4411 !si_vs_needs_prolog(sel, &shader->key.part.vs.prolog)))) {
4412 si_init_exec_from_input(ctx,
4413 ctx->merged_wave_info, 0);
4414 } else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
4415 ctx->type == PIPE_SHADER_GEOMETRY ||
4416 (shader->key.as_ngg && !shader->key.as_es)) {
4417 LLVMValueRef thread_enabled;
4418 bool nested_barrier;
4419
4420 if (!shader->is_monolithic ||
4421 (ctx->type == PIPE_SHADER_TESS_EVAL &&
4422 (shader->key.as_ngg && !shader->key.as_es)))
4423 ac_init_exec_full_mask(&ctx->ac);
4424
4425 if (ctx->type == PIPE_SHADER_TESS_CTRL ||
4426 ctx->type == PIPE_SHADER_GEOMETRY) {
4427 if (ctx->type == PIPE_SHADER_GEOMETRY && shader->key.as_ngg) {
4428 gfx10_ngg_gs_emit_prologue(ctx);
4429 nested_barrier = false;
4430 } else {
4431 nested_barrier = true;
4432 }
4433
4434 thread_enabled = si_is_gs_thread(ctx);
4435 } else {
4436 thread_enabled = si_is_es_thread(ctx);
4437 nested_barrier = false;
4438 }
4439
4440 ctx->merged_wrap_if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
4441 ctx->merged_wrap_if_label = 11500;
4442 ac_build_ifcc(&ctx->ac, thread_enabled, ctx->merged_wrap_if_label);
4443
4444 if (nested_barrier) {
4445 /* Execute a barrier before the second shader in
4446 * a merged shader.
4447 *
4448 * Execute the barrier inside the conditional block,
4449 * so that empty waves can jump directly to s_endpgm,
4450 * which will also signal the barrier.
4451 *
4452 * This is possible on GFX9, because an empty wave
4453 * for the second shader does not participate in
4454 * the epilogue. With NGG, empty waves may still
4455 * be required to export data (e.g. GS output vertices),
4456 * so we cannot let them exit early.
4457 *
4458 * If the shader is TCS and the TCS epilog is present
4459 * and contains a barrier, it will wait there and then
4460 * reach s_endpgm.
4461 */
4462 si_llvm_emit_barrier(ctx);
4463 }
4464 }
4465 }
4466
4467 if (sel->force_correct_derivs_after_kill) {
4468 ctx->postponed_kill = ac_build_alloca_undef(&ctx->ac, ctx->i1, "");
4469 /* true = don't kill. */
4470 LLVMBuildStore(ctx->ac.builder, ctx->i1true,
4471 ctx->postponed_kill);
4472 }
4473
4474 bool success = si_nir_build_llvm(ctx, nir);
4475 if (free_nir)
4476 ralloc_free(nir);
4477 if (!success) {
4478 fprintf(stderr, "Failed to translate shader from NIR to LLVM\n");
4479 return false;
4480 }
4481
4482 si_llvm_build_ret(ctx, ctx->return_value);
4483 return true;
4484 }
4485
4486 /**
4487 * Compute the VS prolog key, which contains all the information needed to
4488 * build the VS prolog function, and set shader->info bits where needed.
4489 *
4490 * \param info Shader info of the vertex shader.
4491 * \param num_input_sgprs Number of input SGPRs for the vertex shader.
4492 * \param prolog_key Key of the VS prolog
4493 * \param shader_out The vertex shader, or the next shader if merging LS+HS or ES+GS.
4494 * \param key Output shader part key.
4495 */
4496 static void si_get_vs_prolog_key(const struct si_shader_info *info,
4497 unsigned num_input_sgprs,
4498 const struct si_vs_prolog_bits *prolog_key,
4499 struct si_shader *shader_out,
4500 union si_shader_part_key *key)
4501 {
4502 memset(key, 0, sizeof(*key));
4503 key->vs_prolog.states = *prolog_key;
4504 key->vs_prolog.num_input_sgprs = num_input_sgprs;
4505 key->vs_prolog.num_inputs = info->num_inputs;
4506 key->vs_prolog.as_ls = shader_out->key.as_ls;
4507 key->vs_prolog.as_es = shader_out->key.as_es;
4508 key->vs_prolog.as_ngg = shader_out->key.as_ngg;
4509
4510 if (shader_out->selector->type == PIPE_SHADER_TESS_CTRL) {
4511 key->vs_prolog.as_ls = 1;
4512 key->vs_prolog.num_merged_next_stage_vgprs = 2;
4513 } else if (shader_out->selector->type == PIPE_SHADER_GEOMETRY) {
4514 key->vs_prolog.as_es = 1;
4515 key->vs_prolog.num_merged_next_stage_vgprs = 5;
4516 } else if (shader_out->key.as_ngg) {
4517 key->vs_prolog.num_merged_next_stage_vgprs = 5;
4518 }
4519
4520 /* Enable loading the InstanceID VGPR. */
4521 uint16_t input_mask = u_bit_consecutive(0, info->num_inputs);
4522
4523 if ((key->vs_prolog.states.instance_divisor_is_one |
4524 key->vs_prolog.states.instance_divisor_is_fetched) & input_mask)
4525 shader_out->info.uses_instanceid = true;
4526 }
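/* Worked example of the divisor test above (made-up values): with
 * num_inputs = 3, input_mask = u_bit_consecutive(0, 3) = 0b111; if
 * attribute 2 has an instance divisor of one (instance_divisor_is_one =
 * 0b100), the AND is non-zero, so the shader is marked as using
 * InstanceID.
 */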
4527
4528 /**
4529 * Build the GS prolog function. Rotate the input vertices for triangle strips
4530 * with adjacency.
4531 */
4532 static void si_build_gs_prolog_function(struct si_shader_context *ctx,
4533 union si_shader_part_key *key)
4534 {
4535 unsigned num_sgprs, num_vgprs;
4536 LLVMBuilderRef builder = ctx->ac.builder;
4537 LLVMTypeRef returns[AC_MAX_ARGS];
4538 LLVMValueRef func, ret;
4539
4540 memset(&ctx->args, 0, sizeof(ctx->args));
4541
4542 if (ctx->screen->info.chip_class >= GFX9) {
4543 if (key->gs_prolog.states.gfx9_prev_is_vs)
4544 num_sgprs = 8 + GFX9_VSGS_NUM_USER_SGPR;
4545 else
4546 num_sgprs = 8 + GFX9_TESGS_NUM_USER_SGPR;
4547 num_vgprs = 5; /* ES inputs are not needed by GS */
4548 } else {
4549 num_sgprs = GFX6_GS_NUM_USER_SGPR + 2;
4550 num_vgprs = 8;
4551 }
4552
4553 for (unsigned i = 0; i < num_sgprs; ++i) {
4554 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
4555 returns[i] = ctx->i32;
4556 }
4557
4558 for (unsigned i = 0; i < num_vgprs; ++i) {
4559 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL);
4560 returns[num_sgprs + i] = ctx->f32;
4561 }
4562
4563 /* Create the function. */
4564 si_llvm_create_func(ctx, "gs_prolog", returns, num_sgprs + num_vgprs, 0);
4565 func = ctx->main_fn;
4566
4567 /* Set the full EXEC mask for the prolog, because we are only fiddling
4568 * with registers here. The main shader part will set the correct EXEC
4569 * mask.
4570 */
4571 if (ctx->screen->info.chip_class >= GFX9 && !key->gs_prolog.is_monolithic)
4572 ac_init_exec_full_mask(&ctx->ac);
4573
4574 /* Copy inputs to outputs. This should be a no-op, as the registers match,
4575 * but it will prevent the compiler from overwriting them unintentionally.
4576 */
4577 ret = ctx->return_value;
4578 for (unsigned i = 0; i < num_sgprs; i++) {
4579 LLVMValueRef p = LLVMGetParam(func, i);
4580 ret = LLVMBuildInsertValue(builder, ret, p, i, "");
4581 }
4582 for (unsigned i = 0; i < num_vgprs; i++) {
4583 LLVMValueRef p = LLVMGetParam(func, num_sgprs + i);
4584 p = ac_to_float(&ctx->ac, p);
4585 ret = LLVMBuildInsertValue(builder, ret, p, num_sgprs + i, "");
4586 }
4587
4588 if (key->gs_prolog.states.tri_strip_adj_fix) {
4589 /* Remap the input vertices for every other primitive. */
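/* Concretely, given the (i + 4) % 6 rotation below: when the primitive
 * ID is odd, the six vertices are reordered as
 * out[0..5] = in[4], in[5], in[0], in[1], in[2], in[3], while even
 * primitives keep their original order.
 */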
4590 const struct ac_arg gfx6_vtx_params[6] = {
4591 { .used = true, .arg_index = num_sgprs },
4592 { .used = true, .arg_index = num_sgprs + 1 },
4593 { .used = true, .arg_index = num_sgprs + 3 },
4594 { .used = true, .arg_index = num_sgprs + 4 },
4595 { .used = true, .arg_index = num_sgprs + 5 },
4596 { .used = true, .arg_index = num_sgprs + 6 },
4597 };
4598 const struct ac_arg gfx9_vtx_params[3] = {
4599 { .used = true, .arg_index = num_sgprs },
4600 { .used = true, .arg_index = num_sgprs + 1 },
4601 { .used = true, .arg_index = num_sgprs + 4 },
4602 };
4603 LLVMValueRef vtx_in[6], vtx_out[6];
4604 LLVMValueRef prim_id, rotate;
4605
4606 if (ctx->screen->info.chip_class >= GFX9) {
4607 for (unsigned i = 0; i < 3; i++) {
4608 vtx_in[i*2] = si_unpack_param(ctx, gfx9_vtx_params[i], 0, 16);
4609 vtx_in[i*2+1] = si_unpack_param(ctx, gfx9_vtx_params[i], 16, 16);
4610 }
4611 } else {
4612 for (unsigned i = 0; i < 6; i++)
4613 vtx_in[i] = ac_get_arg(&ctx->ac, gfx6_vtx_params[i]);
4614 }
4615
4616 prim_id = LLVMGetParam(func, num_sgprs + 2);
4617 rotate = LLVMBuildTrunc(builder, prim_id, ctx->i1, "");
4618
4619 for (unsigned i = 0; i < 6; ++i) {
4620 LLVMValueRef base, rotated;
4621 base = vtx_in[i];
4622 rotated = vtx_in[(i + 4) % 6];
4623 vtx_out[i] = LLVMBuildSelect(builder, rotate, rotated, base, "");
4624 }
4625
4626 if (ctx->screen->info.chip_class >= GFX9) {
4627 for (unsigned i = 0; i < 3; i++) {
4628 LLVMValueRef hi, out;
4629
4630 hi = LLVMBuildShl(builder, vtx_out[i*2+1],
4631 LLVMConstInt(ctx->i32, 16, 0), "");
4632 out = LLVMBuildOr(builder, vtx_out[i*2], hi, "");
4633 out = ac_to_float(&ctx->ac, out);
4634 ret = LLVMBuildInsertValue(builder, ret, out,
4635 gfx9_vtx_params[i].arg_index, "");
4636 }
4637 } else {
4638 for (unsigned i = 0; i < 6; i++) {
4639 LLVMValueRef out;
4640
4641 out = ac_to_float(&ctx->ac, vtx_out[i]);
4642 ret = LLVMBuildInsertValue(builder, ret, out,
4643 gfx6_vtx_params[i].arg_index, "");
4644 }
4645 }
4646 }
4647
4648 LLVMBuildRet(builder, ret);
4649 }
4650
4651 /**
4652 * Given a list of shader part functions, build a wrapper function that
4653 * runs them in sequence to form a monolithic shader.
4654 */
4655 void si_build_wrapper_function(struct si_shader_context *ctx, LLVMValueRef *parts,
4656 unsigned num_parts, unsigned main_part,
4657 unsigned next_shader_first_part)
4658 {
4659 LLVMBuilderRef builder = ctx->ac.builder;
4660 /* PS epilog has one arg per color component; GFX9 merged shader
4661 * prologs need to forward 40 SGPRs.
4662 */
4663 LLVMValueRef initial[AC_MAX_ARGS], out[AC_MAX_ARGS];
4664 LLVMTypeRef function_type;
4665 unsigned num_first_params;
4666 unsigned num_out, initial_num_out;
4667 ASSERTED unsigned num_out_sgpr; /* used in debug checks */
4668 ASSERTED unsigned initial_num_out_sgpr; /* used in debug checks */
4669 unsigned num_sgprs, num_vgprs;
4670 unsigned gprs;
4671
4672 memset(&ctx->args, 0, sizeof(ctx->args));
4673
4674 for (unsigned i = 0; i < num_parts; ++i) {
4675 ac_add_function_attr(ctx->ac.context, parts[i], -1,
4676 AC_FUNC_ATTR_ALWAYSINLINE);
4677 LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
4678 }
4679
4680 /* The parameters of the wrapper function correspond to those of the
4681 * first part in terms of SGPRs and VGPRs, but we use the types of the
4682 * main part to get the right types. This is relevant for the
4683 * dereferenceable attribute on descriptor table pointers.
4684 */
4685 num_sgprs = 0;
4686 num_vgprs = 0;
4687
4688 function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
4689 num_first_params = LLVMCountParamTypes(function_type);
4690
4691 for (unsigned i = 0; i < num_first_params; ++i) {
4692 LLVMValueRef param = LLVMGetParam(parts[0], i);
4693
4694 if (ac_is_sgpr_param(param)) {
4695 assert(num_vgprs == 0);
4696 num_sgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
4697 } else {
4698 num_vgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
4699 }
4700 }
4701
4702 gprs = 0;
4703 while (gprs < num_sgprs + num_vgprs) {
4704 LLVMValueRef param = LLVMGetParam(parts[main_part], ctx->args.arg_count);
4705 LLVMTypeRef type = LLVMTypeOf(param);
4706 unsigned size = ac_get_type_size(type) / 4;
4707
4708 /* This is going to get cast anyway, so we don't have to
4709 * have the exact same type. But we do have to preserve the
4710 * pointer-ness so that LLVM knows about it.
4711 */
4712 enum ac_arg_type arg_type = AC_ARG_INT;
4713 if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) {
4714 type = LLVMGetElementType(type);
4715
4716 if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
4717 if (LLVMGetVectorSize(type) == 4)
4718 arg_type = AC_ARG_CONST_DESC_PTR;
4719 else if (LLVMGetVectorSize(type) == 8)
4720 arg_type = AC_ARG_CONST_IMAGE_PTR;
4721 else
4722 assert(0);
4723 } else if (type == ctx->f32) {
4724 arg_type = AC_ARG_CONST_FLOAT_PTR;
4725 } else {
4726 assert(0);
4727 }
4728 }
4729
4730 ac_add_arg(&ctx->args, gprs < num_sgprs ? AC_ARG_SGPR : AC_ARG_VGPR,
4731 size, arg_type, NULL);
4732
4733 assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
4734 assert(gprs + size <= num_sgprs + num_vgprs &&
4735 (gprs >= num_sgprs || gprs + size <= num_sgprs));
4736
4737 gprs += size;
4738 }
4739
4740 /* Prepare the return type. */
4741 unsigned num_returns = 0;
4742 LLVMTypeRef returns[AC_MAX_ARGS], last_func_type, return_type;
4743
4744 last_func_type = LLVMGetElementType(LLVMTypeOf(parts[num_parts - 1]));
4745 return_type = LLVMGetReturnType(last_func_type);
4746
4747 switch (LLVMGetTypeKind(return_type)) {
4748 case LLVMStructTypeKind:
4749 num_returns = LLVMCountStructElementTypes(return_type);
4750 assert(num_returns <= ARRAY_SIZE(returns));
4751 LLVMGetStructElementTypes(return_type, returns);
4752 break;
4753 case LLVMVoidTypeKind:
4754 break;
4755 default:
4756 unreachable("unexpected type");
4757 }
4758
4759 si_llvm_create_func(ctx, "wrapper", returns, num_returns,
4760 si_get_max_workgroup_size(ctx->shader));
4761
4762 if (si_is_merged_shader(ctx))
4763 ac_init_exec_full_mask(&ctx->ac);
4764
4765 /* Record the arguments of the function as if they were an output of
4766 * a previous part.
4767 */
4768 num_out = 0;
4769 num_out_sgpr = 0;
4770
4771 for (unsigned i = 0; i < ctx->args.arg_count; ++i) {
4772 LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
4773 LLVMTypeRef param_type = LLVMTypeOf(param);
4774 LLVMTypeRef out_type = ctx->args.args[i].file == AC_ARG_SGPR ? ctx->i32 : ctx->f32;
4775 unsigned size = ac_get_type_size(param_type) / 4;
4776
4777 if (size == 1) {
4778 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
4779 param = LLVMBuildPtrToInt(builder, param, ctx->i32, "");
4780 param_type = ctx->i32;
4781 }
4782
4783 if (param_type != out_type)
4784 param = LLVMBuildBitCast(builder, param, out_type, "");
4785 out[num_out++] = param;
4786 } else {
4787 LLVMTypeRef vector_type = LLVMVectorType(out_type, size);
4788
4789 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
4790 param = LLVMBuildPtrToInt(builder, param, ctx->i64, "");
4791 param_type = ctx->i64;
4792 }
4793
4794 if (param_type != vector_type)
4795 param = LLVMBuildBitCast(builder, param, vector_type, "");
4796
4797 for (unsigned j = 0; j < size; ++j)
4798 out[num_out++] = LLVMBuildExtractElement(
4799 builder, param, LLVMConstInt(ctx->i32, j, 0), "");
4800 }
4801
4802 if (ctx->args.args[i].file == AC_ARG_SGPR)
4803 num_out_sgpr = num_out;
4804 }
4805
4806 memcpy(initial, out, sizeof(out));
4807 initial_num_out = num_out;
4808 initial_num_out_sgpr = num_out_sgpr;
4809
4810 /* Now chain the parts. */
4811 LLVMValueRef ret = NULL;
4812 for (unsigned part = 0; part < num_parts; ++part) {
4813 LLVMValueRef in[AC_MAX_ARGS];
4814 LLVMTypeRef ret_type;
4815 unsigned out_idx = 0;
4816 unsigned num_params = LLVMCountParams(parts[part]);
4817
4818 /* Merged shaders are executed conditionally depending
4819 * on the number of enabled threads passed in the input SGPRs. */
4820 if (is_multi_part_shader(ctx) && part == 0) {
4821 LLVMValueRef ena, count = initial[3];
4822
4823 count = LLVMBuildAnd(builder, count,
4824 LLVMConstInt(ctx->i32, 0x7f, 0), "");
4825 ena = LLVMBuildICmp(builder, LLVMIntULT,
4826 ac_get_thread_id(&ctx->ac), count, "");
4827 ac_build_ifcc(&ctx->ac, ena, 6506);
4828 }
4829
4830 /* Derive arguments for the next part from outputs of the
4831 * previous one.
4832 */
4833 for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
4834 LLVMValueRef param;
4835 LLVMTypeRef param_type;
4836 bool is_sgpr;
4837 unsigned param_size;
4838 LLVMValueRef arg = NULL;
4839
4840 param = LLVMGetParam(parts[part], param_idx);
4841 param_type = LLVMTypeOf(param);
4842 param_size = ac_get_type_size(param_type) / 4;
4843 is_sgpr = ac_is_sgpr_param(param);
4844
4845 if (is_sgpr) {
4846 ac_add_function_attr(ctx->ac.context, parts[part],
4847 param_idx + 1, AC_FUNC_ATTR_INREG);
4848 } else if (out_idx < num_out_sgpr) {
4849 /* Skip returned SGPRs that the current part
4850 * doesn't declare as inputs. */
4851 out_idx = num_out_sgpr;
4852 }
4853
4854 assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));
4855
4856 if (param_size == 1)
4857 arg = out[out_idx];
4858 else
4859 arg = ac_build_gather_values(&ctx->ac, &out[out_idx], param_size);
4860
4861 if (LLVMTypeOf(arg) != param_type) {
4862 if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
4863 if (LLVMGetPointerAddressSpace(param_type) ==
4864 AC_ADDR_SPACE_CONST_32BIT) {
4865 arg = LLVMBuildBitCast(builder, arg, ctx->i32, "");
4866 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
4867 } else {
4868 arg = LLVMBuildBitCast(builder, arg, ctx->i64, "");
4869 arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
4870 }
4871 } else {
4872 arg = LLVMBuildBitCast(builder, arg, param_type, "");
4873 }
4874 }
4875
4876 in[param_idx] = arg;
4877 out_idx += param_size;
4878 }
4879
4880 ret = ac_build_call(&ctx->ac, parts[part], in, num_params);
4881
4882 if (is_multi_part_shader(ctx) &&
4883 part + 1 == next_shader_first_part) {
4884 ac_build_endif(&ctx->ac, 6506);
4885
4886 /* The second half of the merged shader should use
4887 * the inputs from the top-level (wrapper) function,
4888 * not the return value from the last call.
4889 *
4890 * That's because the last call was executed
4891 * conditionally, so we can't consume its return
4892 * value in the main block.
4893 */
4894 memcpy(out, initial, sizeof(initial));
4895 num_out = initial_num_out;
4896 num_out_sgpr = initial_num_out_sgpr;
4897 continue;
4898 }
4899
4900 /* Extract the returned GPRs. */
4901 ret_type = LLVMTypeOf(ret);
4902 num_out = 0;
4903 num_out_sgpr = 0;
4904
4905 if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
4906 assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);
4907
4908 unsigned ret_size = LLVMCountStructElementTypes(ret_type);
4909
4910 for (unsigned i = 0; i < ret_size; ++i) {
4911 LLVMValueRef val =
4912 LLVMBuildExtractValue(builder, ret, i, "");
4913
4914 assert(num_out < ARRAY_SIZE(out));
4915 out[num_out++] = val;
4916
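/* Returned SGPRs are i32 and always precede the f32 VGPR returns in
 * the struct, so an i32 element implies that everything extracted so
 * far is SGPRs.
 */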
4917 if (LLVMTypeOf(val) == ctx->i32) {
4918 assert(num_out_sgpr + 1 == num_out);
4919 num_out_sgpr = num_out;
4920 }
4921 }
4922 }
4923 }
4924
4925 /* Return the value from the last part. */
4926 if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
4927 LLVMBuildRetVoid(builder);
4928 else
4929 LLVMBuildRet(builder, ret);
4930 }
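/* Usage sketch, mirroring the monolithic GFX9 TCS path in
 * si_compile_shader() below:
 *
 *   LLVMValueRef parts[4] = {vs_prolog, vs_main, tcs_main, tcs_epilog};
 *   // main_part = 1 (the API VS), next_shader_first_part = 2 (the TCS)
 *   si_build_wrapper_function(ctx, parts, 4, 1, 2);
 */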
4931
4932 static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
4933 struct si_shader_selector *sel)
4934 {
4935 if (!compiler->low_opt_passes)
4936 return false;
4937
4938 /* Assume a slow CPU. */
4939 assert(!sel->screen->info.has_dedicated_vram &&
4940 sel->screen->info.chip_class <= GFX8);
4941
4942 /* Lower the optimization level for a pathological dEQP test that
4943 * contains 2597 memory opcodes, mostly buffer stores. */
4944 return sel->type == PIPE_SHADER_COMPUTE &&
4945 sel->info.num_memory_instructions > 1000;
4946 }
4947
4948 static struct nir_shader *get_nir_shader(struct si_shader_selector *sel,
4949 bool *free_nir)
4950 {
4951 *free_nir = false;
4952
4953 if (sel->nir) {
4954 return sel->nir;
4955 } else if (sel->nir_binary) {
4956 struct pipe_screen *screen = &sel->screen->b;
4957 const void *options =
4958 screen->get_compiler_options(screen, PIPE_SHADER_IR_NIR,
4959 sel->type);
4960
4961 struct blob_reader blob_reader;
4962 blob_reader_init(&blob_reader, sel->nir_binary, sel->nir_size);
4963 *free_nir = true;
4964 return nir_deserialize(NULL, options, &blob_reader);
4965 }
4966 return NULL;
4967 }
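/* The producer side stores the serialized blob in the selector; as a
 * rough sketch of the counterpart (the exact code lives elsewhere in
 * radeonsi):
 *
 *   struct blob blob;
 *   blob_init(&blob);
 *   nir_serialize(&blob, nir, true);
 *   sel->nir_binary = blob.data;
 *   sel->nir_size = blob.size;
 */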
4968
4969 int si_compile_shader(struct si_screen *sscreen,
4970 struct ac_llvm_compiler *compiler,
4971 struct si_shader *shader,
4972 struct pipe_debug_callback *debug)
4973 {
4974 struct si_shader_selector *sel = shader->selector;
4975 struct si_shader_context ctx;
4976 bool free_nir;
4977 struct nir_shader *nir = get_nir_shader(sel, &free_nir);
4978 int r = -1;
4979
4980 /* Dump NIR before doing NIR->LLVM conversion in case the
4981 * conversion fails. */
4982 if (si_can_dump_shader(sscreen, sel->type) &&
4983 !(sscreen->debug_flags & DBG(NO_NIR))) {
4984 nir_print_shader(nir, stderr);
4985 si_dump_streamout(&sel->so);
4986 }
4987
4988 si_llvm_context_init(&ctx, sscreen, compiler, si_get_shader_wave_size(shader));
4989 si_llvm_context_set_ir(&ctx, shader);
4990
4991 memset(shader->info.vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
4992 sizeof(shader->info.vs_output_param_offset));
4993
4994 shader->info.uses_instanceid = sel->info.uses_instanceid;
4995
4996 if (!si_build_main_function(&ctx, nir, free_nir)) {
4997 si_llvm_dispose(&ctx);
4998 return -1;
4999 }
5000
5001 if (shader->is_monolithic && ctx.type == PIPE_SHADER_VERTEX) {
5002 LLVMValueRef parts[2];
5003 bool need_prolog = si_vs_needs_prolog(sel, &shader->key.part.vs.prolog);
5004
5005 parts[1] = ctx.main_fn;
5006
5007 if (need_prolog) {
5008 union si_shader_part_key prolog_key;
5009 si_get_vs_prolog_key(&sel->info,
5010 shader->info.num_input_sgprs,
5011 &shader->key.part.vs.prolog,
5012 shader, &prolog_key);
5013 prolog_key.vs_prolog.is_monolithic = true;
5014 si_build_vs_prolog_function(&ctx, &prolog_key);
5015 parts[0] = ctx.main_fn;
5016 }
5017
5018 si_build_wrapper_function(&ctx, parts + !need_prolog,
5019 1 + need_prolog, need_prolog, 0);
5020
5021 if (ctx.shader->key.opt.vs_as_prim_discard_cs)
5022 si_build_prim_discard_compute_shader(&ctx);
5023 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_TESS_CTRL) {
5024 if (sscreen->info.chip_class >= GFX9) {
5025 struct si_shader_selector *ls = shader->key.part.tcs.ls;
5026 LLVMValueRef parts[4];
5027 bool vs_needs_prolog =
5028 si_vs_needs_prolog(ls, &shader->key.part.tcs.ls_prolog);
5029
5030 /* TCS main part */
5031 parts[2] = ctx.main_fn;
5032
5033 /* TCS epilog */
5034 union si_shader_part_key tcs_epilog_key;
5035 memset(&tcs_epilog_key, 0, sizeof(tcs_epilog_key));
5036 tcs_epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
5037 si_build_tcs_epilog_function(&ctx, &tcs_epilog_key);
5038 parts[3] = ctx.main_fn;
5039
5040 /* VS as LS main part */
5041 nir = get_nir_shader(ls, &free_nir);
5042 struct si_shader shader_ls = {};
5043 shader_ls.selector = ls;
5044 shader_ls.key.as_ls = 1;
5045 shader_ls.key.mono = shader->key.mono;
5046 shader_ls.key.opt = shader->key.opt;
5047 shader_ls.is_monolithic = true;
5048 si_llvm_context_set_ir(&ctx, &shader_ls);
5049
5050 if (!si_build_main_function(&ctx, nir, free_nir)) {
5051 si_llvm_dispose(&ctx);
5052 return -1;
5053 }
5054 shader->info.uses_instanceid |= ls->info.uses_instanceid;
5055 parts[1] = ctx.main_fn;
5056
5057 /* LS prolog */
5058 if (vs_needs_prolog) {
5059 union si_shader_part_key vs_prolog_key;
5060 si_get_vs_prolog_key(&ls->info,
5061 shader_ls.info.num_input_sgprs,
5062 &shader->key.part.tcs.ls_prolog,
5063 shader, &vs_prolog_key);
5064 vs_prolog_key.vs_prolog.is_monolithic = true;
5065 si_build_vs_prolog_function(&ctx, &vs_prolog_key);
5066 parts[0] = ctx.main_fn;
5067 }
5068
5069 /* Reset the shader context. */
5070 ctx.shader = shader;
5071 ctx.type = PIPE_SHADER_TESS_CTRL;
5072
5073 si_build_wrapper_function(&ctx,
5074 parts + !vs_needs_prolog,
5075 4 - !vs_needs_prolog, vs_needs_prolog,
5076 vs_needs_prolog ? 2 : 1);
5077 } else {
5078 LLVMValueRef parts[2];
5079 union si_shader_part_key epilog_key;
5080
5081 parts[0] = ctx.main_fn;
5082
5083 memset(&epilog_key, 0, sizeof(epilog_key));
5084 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
5085 si_build_tcs_epilog_function(&ctx, &epilog_key);
5086 parts[1] = ctx.main_fn;
5087
5088 si_build_wrapper_function(&ctx, parts, 2, 0, 0);
5089 }
5090 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_GEOMETRY) {
5091 if (ctx.screen->info.chip_class >= GFX9) {
5092 struct si_shader_selector *es = shader->key.part.gs.es;
5093 LLVMValueRef es_prolog = NULL;
5094 LLVMValueRef es_main = NULL;
5095 LLVMValueRef gs_prolog = NULL;
5096 LLVMValueRef gs_main = ctx.main_fn;
5097
5098 /* GS prolog */
5099 union si_shader_part_key gs_prolog_key;
5100 memset(&gs_prolog_key, 0, sizeof(gs_prolog_key));
5101 gs_prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
5102 gs_prolog_key.gs_prolog.is_monolithic = true;
5103 gs_prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
5104 si_build_gs_prolog_function(&ctx, &gs_prolog_key);
5105 gs_prolog = ctx.main_fn;
5106
5107 /* ES main part */
5108 nir = get_nir_shader(es, &free_nir);
5109 struct si_shader shader_es = {};
5110 shader_es.selector = es;
5111 shader_es.key.as_es = 1;
5112 shader_es.key.as_ngg = shader->key.as_ngg;
5113 shader_es.key.mono = shader->key.mono;
5114 shader_es.key.opt = shader->key.opt;
5115 shader_es.is_monolithic = true;
5116 si_llvm_context_set_ir(&ctx, &shader_es);
5117
5118 if (!si_build_main_function(&ctx, nir, free_nir)) {
5119 si_llvm_dispose(&ctx);
5120 return -1;
5121 }
5122 shader->info.uses_instanceid |= es->info.uses_instanceid;
5123 es_main = ctx.main_fn;
5124
5125 /* ES prolog */
5126 if (es->type == PIPE_SHADER_VERTEX &&
5127 si_vs_needs_prolog(es, &shader->key.part.gs.vs_prolog)) {
5128 union si_shader_part_key vs_prolog_key;
5129 si_get_vs_prolog_key(&es->info,
5130 shader_es.info.num_input_sgprs,
5131 &shader->key.part.gs.vs_prolog,
5132 shader, &vs_prolog_key);
5133 vs_prolog_key.vs_prolog.is_monolithic = true;
5134 si_build_vs_prolog_function(&ctx, &vs_prolog_key);
5135 es_prolog = ctx.main_fn;
5136 }
5137
5138 /* Reset the shader context. */
5139 ctx.shader = shader;
5140 ctx.type = PIPE_SHADER_GEOMETRY;
5141
5142 /* Prepare the array of shader parts. */
5143 LLVMValueRef parts[4];
5144 unsigned num_parts = 0, main_part, next_first_part;
5145
5146 if (es_prolog)
5147 parts[num_parts++] = es_prolog;
5148
5149 parts[main_part = num_parts++] = es_main;
5150 parts[next_first_part = num_parts++] = gs_prolog;
5151 parts[num_parts++] = gs_main;
5152
5153 si_build_wrapper_function(&ctx, parts, num_parts,
5154 main_part, next_first_part);
5155 } else {
5156 LLVMValueRef parts[2];
5157 union si_shader_part_key prolog_key;
5158
5159 parts[1] = ctx.main_fn;
5160
5161 memset(&prolog_key, 0, sizeof(prolog_key));
5162 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
5163 si_build_gs_prolog_function(&ctx, &prolog_key);
5164 parts[0] = ctx.main_fn;
5165
5166 si_build_wrapper_function(&ctx, parts, 2, 1, 0);
5167 }
5168 } else if (shader->is_monolithic && ctx.type == PIPE_SHADER_FRAGMENT) {
5169 si_llvm_build_monolithic_ps(&ctx, shader);
5170 }
5171
5172 si_llvm_optimize_module(&ctx);
5173
5174 /* Post-optimization transformations and analysis. */
5175 si_optimize_vs_outputs(&ctx);
5176
5177 if ((debug && debug->debug_message) ||
5178 si_can_dump_shader(sscreen, ctx.type)) {
5179 ctx.shader->info.private_mem_vgprs =
5180 ac_count_scratch_private_memory(ctx.main_fn);
5181 }
5182
5183 /* Make sure the input is a pointer and not an integer followed by inttoptr. */
5184 assert(LLVMGetTypeKind(LLVMTypeOf(LLVMGetParam(ctx.main_fn, 0))) ==
5185 LLVMPointerTypeKind);
5186
5187 /* Compile to bytecode. */
5188 r = si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler,
5189 ctx.ac.module, debug, ctx.type, ctx.ac.wave_size,
5190 si_get_shader_name(shader),
5191 si_should_optimize_less(compiler, shader->selector));
5192 si_llvm_dispose(&ctx);
5193 if (r) {
5194 fprintf(stderr, "LLVM failed to compile shader\n");
5195 return r;
5196 }
5197
5198 /* Validate SGPR and VGPR usage for compute shaders to detect
5199 * compiler bugs. LLVM 3.9svn was known to exhibit such a bug.
5200 */
5201 if (sel->type == PIPE_SHADER_COMPUTE) {
5202 unsigned wave_size = sscreen->compute_wave_size;
5203 unsigned max_vgprs = sscreen->info.num_physical_wave64_vgprs_per_simd *
5204 (wave_size == 32 ? 2 : 1);
5205 unsigned max_sgprs = sscreen->info.num_physical_sgprs_per_simd;
5206 unsigned max_sgprs_per_wave = 128;
5207 unsigned simds_per_tg = 4; /* assuming WGP mode on gfx10 */
5208 unsigned threads_per_tg = si_get_max_workgroup_size(shader);
5209 unsigned waves_per_tg = DIV_ROUND_UP(threads_per_tg, wave_size);
5210 unsigned waves_per_simd = DIV_ROUND_UP(waves_per_tg, simds_per_tg);
5211
5212 max_vgprs = max_vgprs / waves_per_simd;
5213 max_sgprs = MIN2(max_sgprs / waves_per_simd, max_sgprs_per_wave);
5214
5215 if (shader->config.num_sgprs > max_sgprs ||
5216 shader->config.num_vgprs > max_vgprs) {
5217 fprintf(stderr, "LLVM failed to compile a shader correctly: "
5218 "SGPR:VGPR usage is %u:%u, but the hw limit is %u:%u\n",
5219 shader->config.num_sgprs, shader->config.num_vgprs,
5220 max_sgprs, max_vgprs);
5221
5222 /* Just terminate the process, because dependent
5223 * shaders can hang due to bad input data, but use
5224 * the env var to allow shader-db to work.
5225 */
5226 if (!debug_get_bool_option("SI_PASS_BAD_SHADERS", false))
5227 abort();
5228 }
5229 }
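/* Worked example for the limits above, with assumed hardware numbers:
 * on a wave64 chip with 256 physical VGPRs and 800 SGPRs per SIMD, a
 * 1024-thread workgroup gives waves_per_tg = 16 and waves_per_simd = 4,
 * so a wave may use at most 256 / 4 = 64 VGPRs and
 * MIN2(800 / 4, 128) = 128 SGPRs.
 */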
5230
5231 /* Count the scratch byte offset as an extra input SGPR. */
5232 if (shader->config.scratch_bytes_per_wave && !si_is_merged_shader(&ctx))
5233 shader->info.num_input_sgprs += 1; /* scratch byte offset */
5234
5235 /* Calculate the number of fragment input VGPRs. */
5236 if (ctx.type == PIPE_SHADER_FRAGMENT) {
5237 shader->info.num_input_vgprs = ac_get_fs_input_vgpr_cnt(&shader->config,
5238 &shader->info.face_vgpr_index,
5239 &shader->info.ancillary_vgpr_index);
5240 }
5241
5242 si_calculate_max_simd_waves(shader);
5243 si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
5244 return 0;
5245 }
5246
5247 /**
5248 * Create, compile and return a shader part (prolog or epilog).
5249 * \param sscreen screen
5250 * \param list list of shader parts of the same category
5251 * \param type shader type
5252 * \param prolog whether the part being requested is a prolog
5253 * \param key shader part key
5254 * \param compiler LLVM compiler
5255 * \param debug debug callback
5256 * \param build the callback responsible for building the main function
5257 * \param name human-readable name of the part, used in dumps
5258 * \return non-NULL on success
5259 */
5260 static struct si_shader_part *
5261 si_get_shader_part(struct si_screen *sscreen,
5262 struct si_shader_part **list,
5263 enum pipe_shader_type type,
5264 bool prolog,
5265 union si_shader_part_key *key,
5266 struct ac_llvm_compiler *compiler,
5267 struct pipe_debug_callback *debug,
5268 void (*build)(struct si_shader_context *,
5269 union si_shader_part_key *),
5270 const char *name)
5271 {
5272 struct si_shader_part *result;
5273
5274 simple_mtx_lock(&sscreen->shader_parts_mutex);
5275
5276 /* Find existing. */
5277 for (result = *list; result; result = result->next) {
5278 if (memcmp(&result->key, key, sizeof(*key)) == 0) {
5279 simple_mtx_unlock(&sscreen->shader_parts_mutex);
5280 return result;
5281 }
5282 }
5283
5284 /* Compile a new one. */
5285 result = CALLOC_STRUCT(si_shader_part);
5286 result->key = *key;
5287
5288 struct si_shader shader = {};
5289
5290 switch (type) {
5291 case PIPE_SHADER_VERTEX:
5292 shader.key.as_ls = key->vs_prolog.as_ls;
5293 shader.key.as_es = key->vs_prolog.as_es;
5294 shader.key.as_ngg = key->vs_prolog.as_ngg;
5295 break;
5296 case PIPE_SHADER_TESS_CTRL:
5297 assert(!prolog);
5298 shader.key.part.tcs.epilog = key->tcs_epilog.states;
5299 break;
5300 case PIPE_SHADER_GEOMETRY:
5301 assert(prolog);
5302 shader.key.as_ngg = key->gs_prolog.as_ngg;
5303 break;
5304 case PIPE_SHADER_FRAGMENT:
5305 if (prolog)
5306 shader.key.part.ps.prolog = key->ps_prolog.states;
5307 else
5308 shader.key.part.ps.epilog = key->ps_epilog.states;
5309 break;
5310 default:
5311 unreachable("bad shader part");
5312 }
5313
5314 struct si_shader_context ctx;
5315 si_llvm_context_init(&ctx, sscreen, compiler,
5316 si_get_wave_size(sscreen, type, shader.key.as_ngg,
5317 shader.key.as_es));
5318 ctx.shader = &shader;
5319 ctx.type = type;
5320
5321 build(&ctx, key);
5322
5323 /* Compile. */
5324 si_llvm_optimize_module(&ctx);
5325
5326 if (si_compile_llvm(sscreen, &result->binary, &result->config, compiler,
5327 ctx.ac.module, debug, ctx.type, ctx.ac.wave_size,
5328 name, false)) {
5329 FREE(result);
5330 result = NULL;
5331 goto out;
5332 }
5333
5334 result->next = *list;
5335 *list = result;
5336
5337 out:
5338 si_llvm_dispose(&ctx);
5339 simple_mtx_unlock(&sscreen->shader_parts_mutex);
5340 return result;
5341 }
5342
5343 /**
5344 * Build the vertex shader prolog function.
5345 *
5346 * The inputs are the same as VS (a lot of SGPRs and 4 VGPR system values).
5347 * All inputs are returned unmodified. The vertex load indices are
5348 * stored after them; the API VS will use them to fetch its inputs.
5349 *
5350 * For example, the expected outputs for instance_divisors[] = {0, 1, 2} are:
5351 * input_v0,
5352 * input_v1,
5353 * input_v2,
5354 * input_v3,
5355 * (VertexID + BaseVertex),
5356 * (InstanceID + StartInstance),
5357 * (InstanceID / 2 + StartInstance)
5358 */
5359 static void si_build_vs_prolog_function(struct si_shader_context *ctx,
5360 union si_shader_part_key *key)
5361 {
5362 LLVMTypeRef *returns;
5363 LLVMValueRef ret, func;
5364 int num_returns, i;
5365 unsigned first_vs_vgpr = key->vs_prolog.num_merged_next_stage_vgprs;
5366 unsigned num_input_vgprs = key->vs_prolog.num_merged_next_stage_vgprs + 4;
5367 struct ac_arg input_sgpr_param[key->vs_prolog.num_input_sgprs];
5368 struct ac_arg input_vgpr_param[9];
5369 LLVMValueRef input_vgprs[9];
5370 unsigned num_all_input_regs = key->vs_prolog.num_input_sgprs +
5371 num_input_vgprs;
5372 unsigned user_sgpr_base = key->vs_prolog.num_merged_next_stage_vgprs ? 8 : 0;
5373
5374 memset(&ctx->args, 0, sizeof(ctx->args));
5375
5376 /* The prolog returns all input registers, followed by the vertex load indices. */
5377 returns = alloca((num_all_input_regs + key->vs_prolog.num_inputs) *
5378 sizeof(LLVMTypeRef));
5379 num_returns = 0;
5380
5381 /* Declare input and output SGPRs. */
5382 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
5383 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5384 &input_sgpr_param[i]);
5385 returns[num_returns++] = ctx->i32;
5386 }
5387
5388 struct ac_arg merged_wave_info = input_sgpr_param[3];
5389
5390 /* Preloaded VGPRs (outputs must be floats) */
5391 for (i = 0; i < num_input_vgprs; i++) {
5392 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &input_vgpr_param[i]);
5393 returns[num_returns++] = ctx->f32;
5394 }
5395
5396 /* Vertex load indices. */
5397 for (i = 0; i < key->vs_prolog.num_inputs; i++)
5398 returns[num_returns++] = ctx->f32;
5399
5400 /* Create the function. */
5401 si_llvm_create_func(ctx, "vs_prolog", returns, num_returns, 0);
5402 func = ctx->main_fn;
5403
5404 for (i = 0; i < num_input_vgprs; i++) {
5405 input_vgprs[i] = ac_get_arg(&ctx->ac, input_vgpr_param[i]);
5406 }
5407
5408 if (key->vs_prolog.num_merged_next_stage_vgprs) {
5409 if (!key->vs_prolog.is_monolithic)
5410 si_init_exec_from_input(ctx, merged_wave_info, 0);
5411
5412 if (key->vs_prolog.as_ls &&
5413 ctx->screen->info.has_ls_vgpr_init_bug) {
5414 /* If there are no HS threads, SPI loads the LS VGPRs
5415 * starting at VGPR 0. Shift them back to where they
5416 * belong.
5417 */
5418 LLVMValueRef has_hs_threads =
5419 LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
5420 si_unpack_param(ctx, input_sgpr_param[3], 8, 8),
5421 ctx->i32_0, "");
5422
5423 for (i = 4; i > 0; --i) {
5424 input_vgprs[i + 1] =
5425 LLVMBuildSelect(ctx->ac.builder, has_hs_threads,
5426 input_vgprs[i + 1],
5427 input_vgprs[i - 1], "");
5428 }
5429 }
5430 }
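/* Example of the fixup above: without HS threads, SPI loads the LS
 * VGPRs starting at VGPR 0 instead of VGPR 2, so the selects shift
 * input_vgprs[3..0] up into slots [5..2]; e.g. VertexID, which belongs
 * in input_vgprs[2], is taken from input_vgprs[0] in that case.
 */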
5431
5432 unsigned vertex_id_vgpr = first_vs_vgpr;
5433 unsigned instance_id_vgpr =
5434 ctx->screen->info.chip_class >= GFX10 ?
5435 first_vs_vgpr + 3 :
5436 first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1);
5437
5438 ctx->abi.vertex_id = input_vgprs[vertex_id_vgpr];
5439 ctx->abi.instance_id = input_vgprs[instance_id_vgpr];
5440
5441 /* InstanceID = VertexID >> 16;
5442 * VertexID = VertexID & 0xffff;
5443 */
5444 if (key->vs_prolog.states.unpack_instance_id_from_vertex_id) {
5445 ctx->abi.instance_id = LLVMBuildLShr(ctx->ac.builder, ctx->abi.vertex_id,
5446 LLVMConstInt(ctx->i32, 16, 0), "");
5447 ctx->abi.vertex_id = LLVMBuildAnd(ctx->ac.builder, ctx->abi.vertex_id,
5448 LLVMConstInt(ctx->i32, 0xffff, 0), "");
5449 }
5450
5451 /* Copy inputs to outputs. This should be a no-op, as the registers
5452 * match, but it prevents the compiler from overwriting them unintentionally.
5453 */
5454 ret = ctx->return_value;
5455 for (i = 0; i < key->vs_prolog.num_input_sgprs; i++) {
5456 LLVMValueRef p = LLVMGetParam(func, i);
5457 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p, i, "");
5458 }
5459 for (i = 0; i < num_input_vgprs; i++) {
5460 LLVMValueRef p = input_vgprs[i];
5461
5462 if (i == vertex_id_vgpr)
5463 p = ctx->abi.vertex_id;
5464 else if (i == instance_id_vgpr)
5465 p = ctx->abi.instance_id;
5466
5467 p = ac_to_float(&ctx->ac, p);
5468 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, p,
5469 key->vs_prolog.num_input_sgprs + i, "");
5470 }
5471
5472 /* Compute vertex load indices from instance divisors. */
5473 LLVMValueRef instance_divisor_constbuf = NULL;
5474
5475 if (key->vs_prolog.states.instance_divisor_is_fetched) {
5476 LLVMValueRef list = si_prolog_get_rw_buffers(ctx);
5477 LLVMValueRef buf_index =
5478 LLVMConstInt(ctx->i32, SI_VS_CONST_INSTANCE_DIVISORS, 0);
5479 instance_divisor_constbuf =
5480 ac_build_load_to_sgpr(&ctx->ac, list, buf_index);
5481 }
5482
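/* The four dwords fetched per input below are precomputed magic
 * constants (multiplier, shift amounts, increment) for a fast unsigned
 * division. Illustrative arithmetic, assuming a divisor of 3:
 *
 *   index = (uint32_t)(((uint64_t)instance_id * 0xAAAAAAABu) >> 33);
 *
 * which equals instance_id / 3 for all 32-bit values, avoiding an
 * integer division per vertex.
 */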
5483 for (i = 0; i < key->vs_prolog.num_inputs; i++) {
5484 bool divisor_is_one =
5485 key->vs_prolog.states.instance_divisor_is_one & (1u << i);
5486 bool divisor_is_fetched =
5487 key->vs_prolog.states.instance_divisor_is_fetched & (1u << i);
5488 LLVMValueRef index = NULL;
5489
5490 if (divisor_is_one) {
5491 index = ctx->abi.instance_id;
5492 } else if (divisor_is_fetched) {
5493 LLVMValueRef udiv_factors[4];
5494
5495 for (unsigned j = 0; j < 4; j++) {
5496 udiv_factors[j] =
5497 si_buffer_load_const(ctx, instance_divisor_constbuf,
5498 LLVMConstInt(ctx->i32, i*16 + j*4, 0));
5499 udiv_factors[j] = ac_to_integer(&ctx->ac, udiv_factors[j]);
5500 }
5501 /* The faster NUW version doesn't work when InstanceID == UINT_MAX.
5502 * Such an InstanceID is unlikely to be reachable in practice, though.
5503 */
5504 index = ac_build_fast_udiv_nuw(&ctx->ac, ctx->abi.instance_id,
5505 udiv_factors[0], udiv_factors[1],
5506 udiv_factors[2], udiv_factors[3]);
5507 }
5508
5509 if (divisor_is_one || divisor_is_fetched) {
5510 /* Add StartInstance. */
5511 index = LLVMBuildAdd(ctx->ac.builder, index,
5512 LLVMGetParam(ctx->main_fn, user_sgpr_base +
5513 SI_SGPR_START_INSTANCE), "");
5514 } else {
5515 /* VertexID + BaseVertex */
5516 index = LLVMBuildAdd(ctx->ac.builder,
5517 ctx->abi.vertex_id,
5518 LLVMGetParam(func, user_sgpr_base +
5519 SI_SGPR_BASE_VERTEX), "");
5520 }
5521
5522 index = ac_to_float(&ctx->ac, index);
5523 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, index,
5524 ctx->args.arg_count + i, "");
5525 }
5526
5527 si_llvm_build_ret(ctx, ret);
5528 }
5529
5530 static bool si_get_vs_prolog(struct si_screen *sscreen,
5531 struct ac_llvm_compiler *compiler,
5532 struct si_shader *shader,
5533 struct pipe_debug_callback *debug,
5534 struct si_shader *main_part,
5535 const struct si_vs_prolog_bits *key)
5536 {
5537 struct si_shader_selector *vs = main_part->selector;
5538
5539 if (!si_vs_needs_prolog(vs, key))
5540 return true;
5541
5542 /* Get the prolog. */
5543 union si_shader_part_key prolog_key;
5544 si_get_vs_prolog_key(&vs->info, main_part->info.num_input_sgprs,
5545 key, shader, &prolog_key);
5546
5547 shader->prolog =
5548 si_get_shader_part(sscreen, &sscreen->vs_prologs,
5549 PIPE_SHADER_VERTEX, true, &prolog_key, compiler,
5550 debug, si_build_vs_prolog_function,
5551 "Vertex Shader Prolog");
5552 return shader->prolog != NULL;
5553 }
5554
5555 /**
5556 * Select and compile (or reuse) vertex shader parts (prolog & epilog).
5557 */
5558 static bool si_shader_select_vs_parts(struct si_screen *sscreen,
5559 struct ac_llvm_compiler *compiler,
5560 struct si_shader *shader,
5561 struct pipe_debug_callback *debug)
5562 {
5563 return si_get_vs_prolog(sscreen, compiler, shader, debug, shader,
5564 &shader->key.part.vs.prolog);
5565 }
5566
5567 /**
5568 * Compile the TCS epilog function. This writes tessellation factors to memory
5569 * based on the output primitive type of the tessellator (determined by TES).
5570 */
5571 static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
5572 union si_shader_part_key *key)
5573 {
5574 memset(&ctx->args, 0, sizeof(ctx->args));
5575
5576 if (ctx->screen->info.chip_class >= GFX9) {
5577 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5578 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5579 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5580 &ctx->tcs_offchip_offset);
5581 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* wave info */
5582 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5583 &ctx->tcs_factor_offset);
5584 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5585 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5586 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5587 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5588 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5589 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5590 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5591 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5592 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5593 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5594 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5595 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5596 &ctx->tcs_offchip_layout);
5597 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5598 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5599 &ctx->tcs_out_lds_layout);
5600 } else {
5601 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5602 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5603 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5604 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5605 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5606 &ctx->tcs_offchip_layout);
5607 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5608 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5609 &ctx->tcs_out_lds_layout);
5610 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
5611 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5612 &ctx->tcs_offchip_offset);
5613 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT,
5614 &ctx->tcs_factor_offset);
5615 }
5616
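/* The unnamed SGPR arguments above and the VGPR gaps below are ABI
 * placeholders: the epilog must declare the same register layout as
 * the main TCS part, and only the slots it actually reads get names.
 */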
5617 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* VGPR gap */
5618 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* VGPR gap */
5619 struct ac_arg rel_patch_id; /* patch index within the wave (REL_PATCH_ID) */
5620 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &rel_patch_id);
5621 struct ac_arg invocation_id; /* invocation ID within the patch */
5622 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &invocation_id);
5623 struct ac_arg tcs_out_current_patch_data_offset; /* LDS offset where tess factors should be loaded from */
5624 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
5625 &tcs_out_current_patch_data_offset);
5626
5627 struct ac_arg tess_factors[6];
5628 for (unsigned i = 0; i < 6; i++)
5629 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &tess_factors[i]);
5630
5631 /* Create the function. */
5632 si_llvm_create_func(ctx, "tcs_epilog", NULL, 0,
5633 ctx->screen->info.chip_class >= GFX7 ? 128 : 0);
5634 ac_declare_lds_as_pointer(&ctx->ac);
5635
5636 LLVMValueRef invoc0_tess_factors[6];
5637 for (unsigned i = 0; i < 6; i++)
5638 invoc0_tess_factors[i] = ac_get_arg(&ctx->ac, tess_factors[i]);
5639
5640 si_write_tess_factors(ctx,
5641 ac_get_arg(&ctx->ac, rel_patch_id),
5642 ac_get_arg(&ctx->ac, invocation_id),
5643 ac_get_arg(&ctx->ac, tcs_out_current_patch_data_offset),
5644 invoc0_tess_factors, invoc0_tess_factors + 4);
5645
5646 LLVMBuildRetVoid(ctx->ac.builder);
5647 }
5648
5649 /**
5650 * Select and compile (or reuse) TCS parts (epilog).
5651 */
5652 static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
5653 struct ac_llvm_compiler *compiler,
5654 struct si_shader *shader,
5655 struct pipe_debug_callback *debug)
5656 {
5657 if (sscreen->info.chip_class >= GFX9) {
5658 struct si_shader *ls_main_part =
5659 shader->key.part.tcs.ls->main_shader_part_ls;
5660
5661 if (!si_get_vs_prolog(sscreen, compiler, shader, debug, ls_main_part,
5662 &shader->key.part.tcs.ls_prolog))
5663 return false;
5664
5665 shader->previous_stage = ls_main_part;
5666 }
5667
5668 /* Get the epilog. */
5669 union si_shader_part_key epilog_key;
5670 memset(&epilog_key, 0, sizeof(epilog_key));
5671 epilog_key.tcs_epilog.states = shader->key.part.tcs.epilog;
5672
5673 shader->epilog = si_get_shader_part(sscreen, &sscreen->tcs_epilogs,
5674 PIPE_SHADER_TESS_CTRL, false,
5675 &epilog_key, compiler, debug,
5676 si_build_tcs_epilog_function,
5677 "Tessellation Control Shader Epilog");
5678 return shader->epilog != NULL;
5679 }
5680
5681 /**
5682 * Select and compile (or reuse) GS parts (prolog).
5683 */
5684 static bool si_shader_select_gs_parts(struct si_screen *sscreen,
5685 struct ac_llvm_compiler *compiler,
5686 struct si_shader *shader,
5687 struct pipe_debug_callback *debug)
5688 {
5689 if (sscreen->info.chip_class >= GFX9) {
5690 struct si_shader *es_main_part;
5691 enum pipe_shader_type es_type = shader->key.part.gs.es->type;
5692
5693 if (shader->key.as_ngg)
5694 es_main_part = shader->key.part.gs.es->main_shader_part_ngg_es;
5695 else
5696 es_main_part = shader->key.part.gs.es->main_shader_part_es;
5697
5698 if (es_type == PIPE_SHADER_VERTEX &&
5699 !si_get_vs_prolog(sscreen, compiler, shader, debug, es_main_part,
5700 &shader->key.part.gs.vs_prolog))
5701 return false;
5702
5703 shader->previous_stage = es_main_part;
5704 }
5705
5706 if (!shader->key.part.gs.prolog.tri_strip_adj_fix)
5707 return true;
5708
5709 union si_shader_part_key prolog_key;
5710 memset(&prolog_key, 0, sizeof(prolog_key));
5711 prolog_key.gs_prolog.states = shader->key.part.gs.prolog;
5712 prolog_key.gs_prolog.as_ngg = shader->key.as_ngg;
5713
5714 shader->prolog2 = si_get_shader_part(sscreen, &sscreen->gs_prologs,
5715 PIPE_SHADER_GEOMETRY, true,
5716 &prolog_key, compiler, debug,
5717 si_build_gs_prolog_function,
5718 "Geometry Shader Prolog");
5719 return shader->prolog2 != NULL;
5720 }
5721
5722 /**
5723 * Compute the PS prolog key, which contains all the information needed to
5724 * build the PS prolog function, and set related bits in shader->config.
5725 */
5726 void si_get_ps_prolog_key(struct si_shader *shader,
5727 union si_shader_part_key *key,
5728 bool separate_prolog)
5729 {
5730 struct si_shader_info *info = &shader->selector->info;
5731
5732 memset(key, 0, sizeof(*key));
5733 key->ps_prolog.states = shader->key.part.ps.prolog;
5734 key->ps_prolog.colors_read = info->colors_read;
5735 key->ps_prolog.num_input_sgprs = shader->info.num_input_sgprs;
5736 key->ps_prolog.num_input_vgprs = shader->info.num_input_vgprs;
5737 key->ps_prolog.wqm = info->uses_derivatives &&
5738 (key->ps_prolog.colors_read ||
5739 key->ps_prolog.states.force_persp_sample_interp ||
5740 key->ps_prolog.states.force_linear_sample_interp ||
5741 key->ps_prolog.states.force_persp_center_interp ||
5742 key->ps_prolog.states.force_linear_center_interp ||
5743 key->ps_prolog.states.bc_optimize_for_persp ||
5744 key->ps_prolog.states.bc_optimize_for_linear);
5745 key->ps_prolog.ancillary_vgpr_index = shader->info.ancillary_vgpr_index;
5746
5747 if (info->colors_read) {
5748 unsigned *color = shader->selector->color_attr_index;
5749
5750 if (shader->key.part.ps.prolog.color_two_side) {
5751 /* BCOLORs are stored after the last input. */
5752 key->ps_prolog.num_interp_inputs = info->num_inputs;
5753 key->ps_prolog.face_vgpr_index = shader->info.face_vgpr_index;
5754 if (separate_prolog)
5755 shader->config.spi_ps_input_ena |= S_0286CC_FRONT_FACE_ENA(1);
5756 }
5757
5758 for (unsigned i = 0; i < 2; i++) {
5759 unsigned interp = info->input_interpolate[color[i]];
5760 unsigned location = info->input_interpolate_loc[color[i]];
5761
5762 if (!(info->colors_read & (0xf << i*4)))
5763 continue;
5764
5765 key->ps_prolog.color_attr_index[i] = color[i];
5766
5767 if (shader->key.part.ps.prolog.flatshade_colors &&
5768 interp == TGSI_INTERPOLATE_COLOR)
5769 interp = TGSI_INTERPOLATE_CONSTANT;
5770
5771 switch (interp) {
5772 case TGSI_INTERPOLATE_CONSTANT:
5773 key->ps_prolog.color_interp_vgpr_index[i] = -1;
5774 break;
5775 case TGSI_INTERPOLATE_PERSPECTIVE:
5776 case TGSI_INTERPOLATE_COLOR:
5777 /* Force the interpolation location for colors here. */
5778 if (shader->key.part.ps.prolog.force_persp_sample_interp)
5779 location = TGSI_INTERPOLATE_LOC_SAMPLE;
5780 if (shader->key.part.ps.prolog.force_persp_center_interp)
5781 location = TGSI_INTERPOLATE_LOC_CENTER;
5782
5783 switch (location) {
5784 case TGSI_INTERPOLATE_LOC_SAMPLE:
5785 key->ps_prolog.color_interp_vgpr_index[i] = 0;
5786 if (separate_prolog) {
5787 shader->config.spi_ps_input_ena |=
5788 S_0286CC_PERSP_SAMPLE_ENA(1);
5789 }
5790 break;
5791 case TGSI_INTERPOLATE_LOC_CENTER:
5792 key->ps_prolog.color_interp_vgpr_index[i] = 2;
5793 if (separate_prolog) {
5794 shader->config.spi_ps_input_ena |=
5795 S_0286CC_PERSP_CENTER_ENA(1);
5796 }
5797 break;
5798 case TGSI_INTERPOLATE_LOC_CENTROID:
5799 key->ps_prolog.color_interp_vgpr_index[i] = 4;
5800 if (separate_prolog) {
5801 shader->config.spi_ps_input_ena |=
5802 S_0286CC_PERSP_CENTROID_ENA(1);
5803 }
5804 break;
5805 default:
5806 assert(0);
5807 }
5808 break;
5809 case TGSI_INTERPOLATE_LINEAR:
5810 /* Force the interpolation location for colors here. */
5811 if (shader->key.part.ps.prolog.force_linear_sample_interp)
5812 location = TGSI_INTERPOLATE_LOC_SAMPLE;
5813 if (shader->key.part.ps.prolog.force_linear_center_interp)
5814 location = TGSI_INTERPOLATE_LOC_CENTER;
5815
5816 /* The VGPR assignment for non-monolithic shaders
5817 * works because InitialPSInputAddr is set on the
5818 * main shader and PERSP_PULL_MODEL is never used.
5819 */
5820 switch (location) {
5821 case TGSI_INTERPOLATE_LOC_SAMPLE:
5822 key->ps_prolog.color_interp_vgpr_index[i] =
5823 separate_prolog ? 6 : 9;
5824 if (separate_prolog) {
5825 shader->config.spi_ps_input_ena |=
5826 S_0286CC_LINEAR_SAMPLE_ENA(1);
5827 }
5828 break;
5829 case TGSI_INTERPOLATE_LOC_CENTER:
5830 key->ps_prolog.color_interp_vgpr_index[i] =
5831 separate_prolog ? 8 : 11;
5832 if (separate_prolog) {
5833 shader->config.spi_ps_input_ena |=
5834 S_0286CC_LINEAR_CENTER_ENA(1);
5835 }
5836 break;
5837 case TGSI_INTERPOLATE_LOC_CENTROID:
5838 key->ps_prolog.color_interp_vgpr_index[i] =
5839 separate_prolog ? 10 : 13;
5840 if (separate_prolog) {
5841 shader->config.spi_ps_input_ena |=
5842 S_0286CC_LINEAR_CENTROID_ENA(1);
5843 }
5844 break;
5845 default:
5846 assert(0);
5847 }
5848 break;
5849 default:
5850 assert(0);
5851 }
5852 }
5853 }
5854 }
5855
5856 /**
5857 * Check whether a PS prolog is required based on the key.
5858 */
5859 bool si_need_ps_prolog(const union si_shader_part_key *key)
5860 {
5861 return key->ps_prolog.colors_read ||
5862 key->ps_prolog.states.force_persp_sample_interp ||
5863 key->ps_prolog.states.force_linear_sample_interp ||
5864 key->ps_prolog.states.force_persp_center_interp ||
5865 key->ps_prolog.states.force_linear_center_interp ||
5866 key->ps_prolog.states.bc_optimize_for_persp ||
5867 key->ps_prolog.states.bc_optimize_for_linear ||
5868 key->ps_prolog.states.poly_stipple ||
5869 key->ps_prolog.states.samplemask_log_ps_iter;
5870 }
5871
5872 /**
5873 * Compute the PS epilog key, which contains all the information needed to
5874 * build the PS epilog function.
5875 */
5876 void si_get_ps_epilog_key(struct si_shader *shader,
5877 union si_shader_part_key *key)
5878 {
5879 struct si_shader_info *info = &shader->selector->info;
5880 memset(key, 0, sizeof(*key));
5881 key->ps_epilog.colors_written = info->colors_written;
5882 key->ps_epilog.writes_z = info->writes_z;
5883 key->ps_epilog.writes_stencil = info->writes_stencil;
5884 key->ps_epilog.writes_samplemask = info->writes_samplemask;
5885 key->ps_epilog.states = shader->key.part.ps.epilog;
5886 }
5887
5888 /**
5889 * Select and compile (or reuse) pixel shader parts (prolog & epilog).
5890 */
5891 static bool si_shader_select_ps_parts(struct si_screen *sscreen,
5892 struct ac_llvm_compiler *compiler,
5893 struct si_shader *shader,
5894 struct pipe_debug_callback *debug)
5895 {
5896 union si_shader_part_key prolog_key;
5897 union si_shader_part_key epilog_key;
5898
5899 /* Get the prolog. */
5900 si_get_ps_prolog_key(shader, &prolog_key, true);
5901
5902 /* The prolog is a no-op if none of the states it handles are set. */
5903 if (si_need_ps_prolog(&prolog_key)) {
5904 shader->prolog =
5905 si_get_shader_part(sscreen, &sscreen->ps_prologs,
5906 PIPE_SHADER_FRAGMENT, true,
5907 &prolog_key, compiler, debug,
5908 si_llvm_build_ps_prolog,
5909 "Fragment Shader Prolog");
5910 if (!shader->prolog)
5911 return false;
5912 }
5913
5914 /* Get the epilog. */
5915 si_get_ps_epilog_key(shader, &epilog_key);
5916
5917 shader->epilog =
5918 si_get_shader_part(sscreen, &sscreen->ps_epilogs,
5919 PIPE_SHADER_FRAGMENT, false,
5920 &epilog_key, compiler, debug,
5921 si_llvm_build_ps_epilog,
5922 "Fragment Shader Epilog");
5923 if (!shader->epilog)
5924 return false;
5925
5926 /* Enable POS_FIXED_PT if polygon stippling is enabled. */
5927 if (shader->key.part.ps.prolog.poly_stipple) {
5928 shader->config.spi_ps_input_ena |= S_0286CC_POS_FIXED_PT_ENA(1);
5929 assert(G_0286CC_POS_FIXED_PT_ENA(shader->config.spi_ps_input_addr));
5930 }
5931
5932 /* Set up the enable bits for per-sample shading if needed. */
5933 if (shader->key.part.ps.prolog.force_persp_sample_interp &&
5934 (G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_ena) ||
5935 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
5936 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTER_ENA;
5937 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
5938 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_SAMPLE_ENA(1);
5939 }
5940 if (shader->key.part.ps.prolog.force_linear_sample_interp &&
5941 (G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_ena) ||
5942 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
5943 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTER_ENA;
5944 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
5945 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_SAMPLE_ENA(1);
5946 }
5947 if (shader->key.part.ps.prolog.force_persp_center_interp &&
5948 (G_0286CC_PERSP_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
5949 G_0286CC_PERSP_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
5950 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_SAMPLE_ENA;
5951 shader->config.spi_ps_input_ena &= C_0286CC_PERSP_CENTROID_ENA;
5952 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
5953 }
5954 if (shader->key.part.ps.prolog.force_linear_center_interp &&
5955 (G_0286CC_LINEAR_SAMPLE_ENA(shader->config.spi_ps_input_ena) ||
5956 G_0286CC_LINEAR_CENTROID_ENA(shader->config.spi_ps_input_ena))) {
5957 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_SAMPLE_ENA;
5958 shader->config.spi_ps_input_ena &= C_0286CC_LINEAR_CENTROID_ENA;
5959 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
5960 }
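/* Example of the fixups above: with force_persp_sample_interp set and
 * a main part compiled to read PERSP center or centroid, those enable
 * bits are cleared and PERSP_SAMPLE_ENA is set instead, so the SPI
 * supplies per-sample barycentrics in place of the per-pixel ones.
 */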
5961
5962 /* POS_W_FLOAT requires that one of the perspective weights is enabled. */
5963 if (G_0286CC_POS_W_FLOAT_ENA(shader->config.spi_ps_input_ena) &&
5964 !(shader->config.spi_ps_input_ena & 0xf)) {
5965 shader->config.spi_ps_input_ena |= S_0286CC_PERSP_CENTER_ENA(1);
5966 assert(G_0286CC_PERSP_CENTER_ENA(shader->config.spi_ps_input_addr));
5967 }
5968
5969 /* At least one pair of interpolation weights must be enabled. */
5970 if (!(shader->config.spi_ps_input_ena & 0x7f)) {
5971 shader->config.spi_ps_input_ena |= S_0286CC_LINEAR_CENTER_ENA(1);
5972 assert(G_0286CC_LINEAR_CENTER_ENA(shader->config.spi_ps_input_addr));
5973 }
5974
5975 /* Samplemask fixup requires the sample ID. */
5976 if (shader->key.part.ps.prolog.samplemask_log_ps_iter) {
5977 shader->config.spi_ps_input_ena |= S_0286CC_ANCILLARY_ENA(1);
5978 assert(G_0286CC_ANCILLARY_ENA(shader->config.spi_ps_input_addr));
5979 }
5980
5981 /* The sample mask input is always enabled, because the API shader always
5982 * passes it through to the epilog. Disable it here if it's unused.
5983 */
5984 if (!shader->key.part.ps.epilog.poly_line_smoothing &&
5985 !shader->selector->info.reads_samplemask)
5986 shader->config.spi_ps_input_ena &= C_0286CC_SAMPLE_COVERAGE_ENA;
5987
5988 return true;
5989 }
5990
5991 void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
5992 unsigned *lds_size)
5993 {
5994 /* If tessellation is all offchip and on-chip GS isn't used, this
5995 * workaround is not needed, so return early. The code below is
5996 * intentionally unreachable and kept for reference. */
5997 return;
5998
5999 /* SPI barrier management bug:
6000 * Make sure we have at least 4k of LDS in use to avoid the bug.
6001 * It applies to workgroup sizes of more than one wavefront.
6002 */
6003 if (sscreen->info.family == CHIP_BONAIRE ||
6004 sscreen->info.family == CHIP_KABINI)
6005 *lds_size = MAX2(*lds_size, 8);
6006 }
6007
6008 static void si_fix_resource_usage(struct si_screen *sscreen,
6009 struct si_shader *shader)
6010 {
6011 unsigned min_sgprs = shader->info.num_input_sgprs + 2; /* VCC */
6012
6013 shader->config.num_sgprs = MAX2(shader->config.num_sgprs, min_sgprs);
6014
6015 if (shader->selector->type == PIPE_SHADER_COMPUTE &&
6016 si_get_max_workgroup_size(shader) > sscreen->compute_wave_size) {
6017 si_multiwave_lds_size_workaround(sscreen,
6018 &shader->config.lds_size);
6019 }
6020 }
6021
6022 bool si_create_shader_variant(struct si_screen *sscreen,
6023 struct ac_llvm_compiler *compiler,
6024 struct si_shader *shader,
6025 struct pipe_debug_callback *debug)
6026 {
6027 struct si_shader_selector *sel = shader->selector;
6028 struct si_shader *mainp = *si_get_main_shader_part(sel, &shader->key);
6029 int r;
6030
6031 /* LS, ES, VS are compiled on demand if the main part hasn't been
6032 * compiled for that stage.
6033 *
6034 * GS is compiled on demand if the main part hasn't been compiled
6035 * for the chosen NGG-ness.
6036 *
6037 * Vertex shaders are compiled on demand when a vertex fetch
6038 * workaround must be applied.
6039 */
6040 if (shader->is_monolithic) {
6041 /* Monolithic shader (compiled as a whole, has many variants,
6042 * may take a long time to compile).
6043 */
6044 r = si_compile_shader(sscreen, compiler, shader, debug);
6045 if (r)
6046 return false;
6047 } else {
6048 /* The shader consists of several parts:
6049 *
6050 * - the middle part is the user shader, it has 1 variant only
6051 * and it was compiled during the creation of the shader
6052 * selector
6053 * - the prolog part is inserted at the beginning
6054 * - the epilog part is inserted at the end
6055 *
6056 * The prolog and epilog have many (but simple) variants.
6057 *
6058 * Starting with gfx9, geometry and tessellation control
6059 * shaders also contain the prolog and user shader parts of
6060 * the previous shader stage.
6061 */
6062
6063 if (!mainp)
6064 return false;
6065
6066 /* Copy the compiled shader data over. */
6067 shader->is_binary_shared = true;
6068 shader->binary = mainp->binary;
6069 shader->config = mainp->config;
6070 shader->info.num_input_sgprs = mainp->info.num_input_sgprs;
6071 shader->info.num_input_vgprs = mainp->info.num_input_vgprs;
6072 shader->info.face_vgpr_index = mainp->info.face_vgpr_index;
6073 shader->info.ancillary_vgpr_index = mainp->info.ancillary_vgpr_index;
6074 memcpy(shader->info.vs_output_param_offset,
6075 mainp->info.vs_output_param_offset,
6076 sizeof(mainp->info.vs_output_param_offset));
6077 shader->info.uses_instanceid = mainp->info.uses_instanceid;
6078 shader->info.nr_pos_exports = mainp->info.nr_pos_exports;
6079 shader->info.nr_param_exports = mainp->info.nr_param_exports;
6080
6081 /* Select prologs and/or epilogs. */
6082 switch (sel->type) {
6083 case PIPE_SHADER_VERTEX:
6084 if (!si_shader_select_vs_parts(sscreen, compiler, shader, debug))
6085 return false;
6086 break;
6087 case PIPE_SHADER_TESS_CTRL:
6088 if (!si_shader_select_tcs_parts(sscreen, compiler, shader, debug))
6089 return false;
6090 break;
6091 case PIPE_SHADER_TESS_EVAL:
6092 break;
6093 case PIPE_SHADER_GEOMETRY:
6094 if (!si_shader_select_gs_parts(sscreen, compiler, shader, debug))
6095 return false;
6096 break;
6097 case PIPE_SHADER_FRAGMENT:
6098 if (!si_shader_select_ps_parts(sscreen, compiler, shader, debug))
6099 return false;
6100
6101 /* Make sure we have at least as many VGPRs as there
6102 * are allocated inputs.
6103 */
6104 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
6105 shader->info.num_input_vgprs);
6106 break;
6107 default:;
6108 }
6109
6110 /* Update SGPR and VGPR counts. */
6111 if (shader->prolog) {
6112 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
6113 shader->prolog->config.num_sgprs);
6114 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
6115 shader->prolog->config.num_vgprs);
6116 }
6117 if (shader->previous_stage) {
6118 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
6119 shader->previous_stage->config.num_sgprs);
6120 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
6121 shader->previous_stage->config.num_vgprs);
6122 shader->config.spilled_sgprs =
6123 MAX2(shader->config.spilled_sgprs,
6124 shader->previous_stage->config.spilled_sgprs);
6125 shader->config.spilled_vgprs =
6126 MAX2(shader->config.spilled_vgprs,
6127 shader->previous_stage->config.spilled_vgprs);
6128 shader->info.private_mem_vgprs =
6129 MAX2(shader->info.private_mem_vgprs,
6130 shader->previous_stage->info.private_mem_vgprs);
6131 shader->config.scratch_bytes_per_wave =
6132 MAX2(shader->config.scratch_bytes_per_wave,
6133 shader->previous_stage->config.scratch_bytes_per_wave);
6134 shader->info.uses_instanceid |=
6135 shader->previous_stage->info.uses_instanceid;
6136 }
6137 if (shader->prolog2) {
6138 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
6139 shader->prolog2->config.num_sgprs);
6140 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
6141 shader->prolog2->config.num_vgprs);
6142 }
6143 if (shader->epilog) {
6144 shader->config.num_sgprs = MAX2(shader->config.num_sgprs,
6145 shader->epilog->config.num_sgprs);
6146 shader->config.num_vgprs = MAX2(shader->config.num_vgprs,
6147 shader->epilog->config.num_vgprs);
6148 }
6149 si_calculate_max_simd_waves(shader);
6150 }
6151
6152 if (shader->key.as_ngg) {
6153 assert(!shader->key.as_es && !shader->key.as_ls);
6154 gfx10_ngg_calculate_subgroup_info(shader);
6155 } else if (sscreen->info.chip_class >= GFX9 && sel->type == PIPE_SHADER_GEOMETRY) {
6156 gfx9_get_gs_info(shader->previous_stage_sel, sel, &shader->gs_info);
6157 }
6158
6159 si_fix_resource_usage(sscreen, shader);
6160 si_shader_dump(sscreen, shader, debug, stderr, true);
6161
6162 /* Upload. */
6163 if (!si_shader_binary_upload(sscreen, shader, 0)) {
6164 fprintf(stderr, "LLVM failed to upload shader\n");
6165 return false;
6166 }
6167
6168 return true;
6169 }
6170
6171 void si_shader_destroy(struct si_shader *shader)
6172 {
6173 if (shader->scratch_bo)
6174 si_resource_reference(&shader->scratch_bo, NULL);
6175
6176 si_resource_reference(&shader->bo, NULL);
6177
6178 if (!shader->is_binary_shared)
6179 si_shader_binary_clean(&shader->binary);
6180
6181 free(shader->shader_log);
6182 }