radeonsi: move VS_STATE.LS_OUT_PATCH_SIZE a few bits higher to make space there
src/gallium/drivers/radeonsi/si_shader.c
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <llvm/Config/llvm-config.h>

#include "util/u_memory.h"
#include "tgsi/tgsi_strings.h"
#include "tgsi/tgsi_from_mesa.h"

#include "ac_exp_param.h"
#include "ac_shader_util.h"
#include "ac_rtld.h"
#include "ac_llvm_util.h"
#include "si_shader_internal.h"
#include "si_pipe.h"
#include "sid.h"

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_serialize.h"

static const char scratch_rsrc_dword0_symbol[] =
	"SCRATCH_RSRC_DWORD0";

static const char scratch_rsrc_dword1_symbol[] =
	"SCRATCH_RSRC_DWORD1";

static void si_llvm_emit_barrier(struct si_shader_context *ctx);

static void si_dump_shader_key(const struct si_shader *shader, FILE *f);

static void si_build_vs_prolog_function(struct si_shader_context *ctx,
					union si_shader_part_key *key);
static void si_build_tcs_epilog_function(struct si_shader_context *ctx,
					 union si_shader_part_key *key);
static void si_fix_resource_usage(struct si_screen *sscreen,
				  struct si_shader *shader);

static bool llvm_type_is_64bit(struct si_shader_context *ctx,
			       LLVMTypeRef type)
{
	if (type == ctx->ac.i64 || type == ctx->ac.f64)
		return true;

	return false;
}

/** Whether the shader runs as a combination of multiple API shaders */
static bool is_multi_part_shader(struct si_shader_context *ctx)
{
	if (ctx->screen->info.chip_class <= GFX8)
		return false;

	return ctx->shader->key.as_ls ||
	       ctx->shader->key.as_es ||
	       ctx->type == PIPE_SHADER_TESS_CTRL ||
	       ctx->type == PIPE_SHADER_GEOMETRY;
}

/** Whether the shader runs on a merged HW stage (LSHS or ESGS) */
bool si_is_merged_shader(struct si_shader_context *ctx)
{
	return ctx->shader->key.as_ngg || is_multi_part_shader(ctx);
}

/**
 * Returns a unique index for a per-patch semantic name and index. The index
 * must be less than 32, so that a 32-bit bitmask of used inputs or outputs
 * can be calculated.
 */
unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index)
{
	switch (semantic_name) {
	case TGSI_SEMANTIC_TESSOUTER:
		return 0;
	case TGSI_SEMANTIC_TESSINNER:
		return 1;
	case TGSI_SEMANTIC_PATCH:
		assert(index < 30);
		return 2 + index;

	default:
		assert(!"invalid semantic name");
		return 0;
	}
}
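
/* Illustrative sketch (not driver code) of how these indices are meant
 * to be consumed: a 32-bit mask of written per-patch outputs is built
 * by setting one bit per returned index. The semantics chosen below
 * are hypothetical.
 *
 *	uint32_t patch_outputs_written = 0;
 *	patch_outputs_written |= 1u <<
 *		si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0); // bit 0
 *	patch_outputs_written |= 1u <<
 *		si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_PATCH, 5);     // bit 7
 */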

/**
 * Returns a unique index for a semantic name and index. The index must be
 * less than 64, so that a 64-bit bitmask of used inputs or outputs can be
 * calculated.
 */
unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index,
				       unsigned is_varying)
{
	switch (semantic_name) {
	case TGSI_SEMANTIC_POSITION:
		return 0;
	case TGSI_SEMANTIC_GENERIC:
		/* Some shader stages use the highest used IO index to
		 * determine the size to allocate for inputs/outputs
		 * (in LDS, tess and GS rings), so GENERIC should be placed
		 * right after POSITION to make that size as small as possible.
		 */
		if (index < SI_MAX_IO_GENERIC)
			return 1 + index;

		assert(!"invalid generic index");
		return 0;
	case TGSI_SEMANTIC_FOG:
		return SI_MAX_IO_GENERIC + 1;
	case TGSI_SEMANTIC_COLOR:
		assert(index < 2);
		return SI_MAX_IO_GENERIC + 2 + index;
	case TGSI_SEMANTIC_BCOLOR:
		assert(index < 2);
		/* If it's a varying, COLOR and BCOLOR alias. */
		if (is_varying)
			return SI_MAX_IO_GENERIC + 2 + index;
		else
			return SI_MAX_IO_GENERIC + 4 + index;
	case TGSI_SEMANTIC_TEXCOORD:
		assert(index < 8);
		return SI_MAX_IO_GENERIC + 6 + index;

	/* These are rarely used between LS and HS or ES and GS. */
	case TGSI_SEMANTIC_CLIPDIST:
		assert(index < 2);
		return SI_MAX_IO_GENERIC + 6 + 8 + index;
	case TGSI_SEMANTIC_CLIPVERTEX:
		return SI_MAX_IO_GENERIC + 6 + 8 + 2;
	case TGSI_SEMANTIC_PSIZE:
		return SI_MAX_IO_GENERIC + 6 + 8 + 3;

	/* These can't be written by LS, HS, and ES. */
	case TGSI_SEMANTIC_LAYER:
		return SI_MAX_IO_GENERIC + 6 + 8 + 4;
	case TGSI_SEMANTIC_VIEWPORT_INDEX:
		return SI_MAX_IO_GENERIC + 6 + 8 + 5;
	case TGSI_SEMANTIC_PRIMID:
		STATIC_ASSERT(SI_MAX_IO_GENERIC + 6 + 8 + 6 <= 63);
		return SI_MAX_IO_GENERIC + 6 + 8 + 6;
	default:
		fprintf(stderr, "invalid semantic name = %u\n", semantic_name);
		assert(!"invalid semantic name");
		return 0;
	}
}
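
/* Worked example of the mapping above (illustrative, assuming
 * SI_MAX_IO_GENERIC is 32): POSITION -> 0, GENERIC[i] -> 1+i (1..32),
 * FOG -> 33, COLOR[0..1] -> 34..35, BCOLOR[0..1] -> 36..37 (or 34..35
 * when aliased as varyings), TEXCOORD[0..7] -> 38..45,
 * CLIPDIST[0..1] -> 46..47, CLIPVERTEX -> 48, PSIZE -> 49, LAYER -> 50,
 * VIEWPORT_INDEX -> 51 and PRIMID -> 52, which still fits into a
 * 64-bit mask as the STATIC_ASSERT above verifies.
 */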

/**
 * Get the value of a shader input parameter and extract a bitfield.
 */
static LLVMValueRef unpack_llvm_param(struct si_shader_context *ctx,
				      LLVMValueRef value, unsigned rshift,
				      unsigned bitwidth)
{
	if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
		value = ac_to_integer(&ctx->ac, value);

	if (rshift)
		value = LLVMBuildLShr(ctx->ac.builder, value,
				      LLVMConstInt(ctx->i32, rshift, 0), "");

	if (rshift + bitwidth < 32) {
		unsigned mask = (1 << bitwidth) - 1;
		value = LLVMBuildAnd(ctx->ac.builder, value,
				     LLVMConstInt(ctx->i32, mask, 0), "");
	}

	return value;
}
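
/* unpack_llvm_param is equivalent to this scalar expression
 * (illustrative sketch, not driver code):
 *
 *	uint32_t unpack(uint32_t value, unsigned rshift, unsigned bitwidth)
 *	{
 *		value >>= rshift;
 *		if (rshift + bitwidth < 32)
 *			value &= (1u << bitwidth) - 1;
 *		return value;
 *	}
 *
 * The mask is skipped when the field reaches bit 31, because the shift
 * has already discarded all higher bits.
 */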

LLVMValueRef si_unpack_param(struct si_shader_context *ctx,
			     struct ac_arg param, unsigned rshift,
			     unsigned bitwidth)
{
	LLVMValueRef value = ac_get_arg(&ctx->ac, param);

	return unpack_llvm_param(ctx, value, rshift, bitwidth);
}

static LLVMValueRef get_rel_patch_id(struct si_shader_context *ctx)
{
	switch (ctx->type) {
	case PIPE_SHADER_TESS_CTRL:
		return si_unpack_param(ctx, ctx->args.tcs_rel_ids, 0, 8);

	case PIPE_SHADER_TESS_EVAL:
		return ac_get_arg(&ctx->ac, ctx->tes_rel_patch_id);

	default:
		assert(0);
		return NULL;
	}
}

/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2            = get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0           = get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2           = get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
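
/* Worked example of the layout above (illustrative; all numbers
 * hypothetical): with an input patch stride of S_in dwords, an output
 * patch stride of S_out dwords and N patches per threadgroup, the TCS
 * inputs of patch i start at dword i * S_in, the TCS outputs of
 * patch i start at dword N * S_in + i * S_out, and the per-patch
 * outputs of patch i sit at the end of that patch's output block. The
 * helpers below recover these strides and base offsets from SGPR
 * bitfields.
 */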

static LLVMValueRef
get_tcs_in_patch_stride(struct si_shader_context *ctx)
{
	return si_unpack_param(ctx, ctx->vs_state_bits, 11, 13);
}

static unsigned get_tcs_out_vertex_dw_stride_constant(struct si_shader_context *ctx)
{
	assert(ctx->type == PIPE_SHADER_TESS_CTRL);

	if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
		return util_last_bit64(ctx->shader->key.mono.u.ff_tcs_inputs_to_copy) * 4;

	return util_last_bit64(ctx->shader->selector->outputs_written) * 4;
}

static LLVMValueRef get_tcs_out_vertex_dw_stride(struct si_shader_context *ctx)
{
	unsigned stride = get_tcs_out_vertex_dw_stride_constant(ctx);

	return LLVMConstInt(ctx->i32, stride, 0);
}

static LLVMValueRef get_tcs_out_patch_stride(struct si_shader_context *ctx)
{
	if (ctx->shader->key.mono.u.ff_tcs_inputs_to_copy)
		return si_unpack_param(ctx, ctx->tcs_out_lds_layout, 0, 13);

	const struct si_shader_info *info = &ctx->shader->selector->info;
	unsigned tcs_out_vertices = info->properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
	unsigned vertex_dw_stride = get_tcs_out_vertex_dw_stride_constant(ctx);
	unsigned num_patch_outputs = util_last_bit64(ctx->shader->selector->patch_outputs_written);
	unsigned patch_dw_stride = tcs_out_vertices * vertex_dw_stride +
				   num_patch_outputs * 4;
	return LLVMConstInt(ctx->i32, patch_dw_stride, 0);
}

static LLVMValueRef
get_tcs_out_patch0_offset(struct si_shader_context *ctx)
{
	return LLVMBuildMul(ctx->ac.builder,
			    si_unpack_param(ctx, ctx->tcs_out_lds_offsets, 0, 16),
			    LLVMConstInt(ctx->i32, 4, 0), "");
}

static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct si_shader_context *ctx)
{
	return LLVMBuildMul(ctx->ac.builder,
			    si_unpack_param(ctx, ctx->tcs_out_lds_offsets, 16, 16),
			    LLVMConstInt(ctx->i32, 4, 0), "");
}

static LLVMValueRef
get_tcs_in_current_patch_offset(struct si_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}

static LLVMValueRef
get_tcs_out_current_patch_offset(struct si_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id, patch0_offset);
}

static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct si_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id, patch0_patch_data_offset);
}

static LLVMValueRef get_num_tcs_out_vertices(struct si_shader_context *ctx)
{
	unsigned tcs_out_vertices =
		ctx->shader->selector ?
		ctx->shader->selector->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] : 0;

	/* If !tcs_out_vertices, it's either the fixed-func TCS or the TCS epilog. */
	if (ctx->type == PIPE_SHADER_TESS_CTRL && tcs_out_vertices)
		return LLVMConstInt(ctx->i32, tcs_out_vertices, 0);

	return si_unpack_param(ctx, ctx->tcs_offchip_layout, 6, 6);
}

static LLVMValueRef get_tcs_in_vertex_dw_stride(struct si_shader_context *ctx)
{
	unsigned stride;

	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		stride = ctx->shader->selector->lshs_vertex_stride / 4;
		return LLVMConstInt(ctx->i32, stride, 0);

	case PIPE_SHADER_TESS_CTRL:
		if (ctx->screen->info.chip_class >= GFX9 &&
		    ctx->shader->is_monolithic) {
			stride = ctx->shader->key.part.tcs.ls->lshs_vertex_stride / 4;
			return LLVMConstInt(ctx->i32, stride, 0);
		}
		return si_unpack_param(ctx, ctx->vs_state_bits, 24, 8);

	default:
		assert(0);
		return NULL;
	}
}

static LLVMValueRef unpack_sint16(struct si_shader_context *ctx,
				  LLVMValueRef i32, unsigned index)
{
	assert(index <= 1);

	if (index == 1)
		return LLVMBuildAShr(ctx->ac.builder, i32,
				     LLVMConstInt(ctx->i32, 16, 0), "");

	return LLVMBuildSExt(ctx->ac.builder,
			     LLVMBuildTrunc(ctx->ac.builder, i32,
					    ctx->ac.i16, ""),
			     ctx->i32, "");
}
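
/* Example (illustrative): for i32 = 0x8000ffff, index 0 truncates to
 * 0xffff and sign-extends it to -1, while index 1 arithmetic-shifts
 * the value right by 16 bits, yielding 0xffff8000 = -32768.
 */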

void si_llvm_load_input_vs(
	struct si_shader_context *ctx,
	unsigned input_index,
	LLVMValueRef out[4])
{
	const struct si_shader_info *info = &ctx->shader->selector->info;
	unsigned vs_blit_property = info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];

	if (vs_blit_property) {
		LLVMValueRef vertex_id = ctx->abi.vertex_id;
		LLVMValueRef sel_x1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntULE, vertex_id,
						    ctx->i32_1, "");
		/* Use LLVMIntNE, because we have 3 vertices and only
		 * the middle one should use y2.
		 */
		LLVMValueRef sel_y1 = LLVMBuildICmp(ctx->ac.builder,
						    LLVMIntNE, vertex_id,
						    ctx->i32_1, "");

		unsigned param_vs_blit_inputs = ctx->vs_blit_inputs.arg_index;
		if (input_index == 0) {
			/* Position: */
			LLVMValueRef x1y1 = LLVMGetParam(ctx->main_fn,
							 param_vs_blit_inputs);
			LLVMValueRef x2y2 = LLVMGetParam(ctx->main_fn,
							 param_vs_blit_inputs + 1);

			LLVMValueRef x1 = unpack_sint16(ctx, x1y1, 0);
			LLVMValueRef y1 = unpack_sint16(ctx, x1y1, 1);
			LLVMValueRef x2 = unpack_sint16(ctx, x2y2, 0);
			LLVMValueRef y2 = unpack_sint16(ctx, x2y2, 1);

			LLVMValueRef x = LLVMBuildSelect(ctx->ac.builder, sel_x1,
							 x1, x2, "");
			LLVMValueRef y = LLVMBuildSelect(ctx->ac.builder, sel_y1,
							 y1, y2, "");

			out[0] = LLVMBuildSIToFP(ctx->ac.builder, x, ctx->f32, "");
			out[1] = LLVMBuildSIToFP(ctx->ac.builder, y, ctx->f32, "");
			out[2] = LLVMGetParam(ctx->main_fn,
					      param_vs_blit_inputs + 2);
			out[3] = ctx->ac.f32_1;
			return;
		}

		/* Color or texture coordinates: */
		assert(input_index == 1);

		if (vs_blit_property == SI_VS_BLIT_SGPRS_POS_COLOR) {
			for (int i = 0; i < 4; i++) {
				out[i] = LLVMGetParam(ctx->main_fn,
						      param_vs_blit_inputs + 3 + i);
			}
		} else {
			assert(vs_blit_property == SI_VS_BLIT_SGPRS_POS_TEXCOORD);
			LLVMValueRef x1 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 3);
			LLVMValueRef y1 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 4);
			LLVMValueRef x2 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 5);
			LLVMValueRef y2 = LLVMGetParam(ctx->main_fn,
						       param_vs_blit_inputs + 6);

			out[0] = LLVMBuildSelect(ctx->ac.builder, sel_x1,
						 x1, x2, "");
			out[1] = LLVMBuildSelect(ctx->ac.builder, sel_y1,
						 y1, y2, "");
			out[2] = LLVMGetParam(ctx->main_fn,
					      param_vs_blit_inputs + 7);
			out[3] = LLVMGetParam(ctx->main_fn,
					      param_vs_blit_inputs + 8);
		}
		return;
	}

	unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
	union si_vs_fix_fetch fix_fetch;
	LLVMValueRef vb_desc;
	LLVMValueRef vertex_index;
	LLVMValueRef tmp;

	if (input_index < num_vbos_in_user_sgprs) {
		vb_desc = ac_get_arg(&ctx->ac, ctx->vb_descriptors[input_index]);
	} else {
		unsigned index = input_index - num_vbos_in_user_sgprs;
		vb_desc = ac_build_load_to_sgpr(&ctx->ac,
						ac_get_arg(&ctx->ac, ctx->vertex_buffers),
						LLVMConstInt(ctx->i32, index, 0));
	}

	vertex_index = LLVMGetParam(ctx->main_fn,
				    ctx->vertex_index0.arg_index +
				    input_index);

	/* Use the open-coded implementation for all loads of doubles and
	 * of dword-sized data that needs fixups. We need to insert conversion
	 * code anyway, and the amd/common code does it for us.
	 *
	 * Note: On LLVM <= 8, we can only open-code formats with
	 * channel size >= 4 bytes.
	 */
	bool opencode = ctx->shader->key.mono.vs_fetch_opencode & (1 << input_index);
	fix_fetch.bits = ctx->shader->key.mono.vs_fix_fetch[input_index].bits;
	if (opencode ||
	    (fix_fetch.u.log_size == 3 && fix_fetch.u.format == AC_FETCH_FORMAT_FLOAT) ||
	    (fix_fetch.u.log_size == 2)) {
		tmp = ac_build_opencoded_load_format(
				&ctx->ac, fix_fetch.u.log_size, fix_fetch.u.num_channels_m1 + 1,
				fix_fetch.u.format, fix_fetch.u.reverse, !opencode,
				vb_desc, vertex_index, ctx->ac.i32_0, ctx->ac.i32_0, 0, true);
		for (unsigned i = 0; i < 4; ++i)
			out[i] = LLVMBuildExtractElement(ctx->ac.builder, tmp, LLVMConstInt(ctx->i32, i, false), "");
		return;
	}

	/* Do multiple loads for special formats. */
	unsigned required_channels = util_last_bit(info->input_usage_mask[input_index]);
	LLVMValueRef fetches[4];
	unsigned num_fetches;
	unsigned fetch_stride;
	unsigned channels_per_fetch;

	if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2) {
		num_fetches = MIN2(required_channels, 3);
		fetch_stride = 1 << fix_fetch.u.log_size;
		channels_per_fetch = 1;
	} else {
		num_fetches = 1;
		fetch_stride = 0;
		channels_per_fetch = required_channels;
	}

	for (unsigned i = 0; i < num_fetches; ++i) {
		LLVMValueRef voffset = LLVMConstInt(ctx->i32, fetch_stride * i, 0);
		fetches[i] = ac_build_buffer_load_format(&ctx->ac, vb_desc, vertex_index, voffset,
							 channels_per_fetch, 0, true);
	}

	if (num_fetches == 1 && channels_per_fetch > 1) {
		LLVMValueRef fetch = fetches[0];
		for (unsigned i = 0; i < channels_per_fetch; ++i) {
			tmp = LLVMConstInt(ctx->i32, i, false);
			fetches[i] = LLVMBuildExtractElement(
				ctx->ac.builder, fetch, tmp, "");
		}
		num_fetches = channels_per_fetch;
		channels_per_fetch = 1;
	}

	for (unsigned i = num_fetches; i < 4; ++i)
		fetches[i] = LLVMGetUndef(ctx->f32);

	if (fix_fetch.u.log_size <= 1 && fix_fetch.u.num_channels_m1 == 2 &&
	    required_channels == 4) {
		if (fix_fetch.u.format == AC_FETCH_FORMAT_UINT || fix_fetch.u.format == AC_FETCH_FORMAT_SINT)
			fetches[3] = ctx->ac.i32_1;
		else
			fetches[3] = ctx->ac.f32_1;
	} else if (fix_fetch.u.log_size == 3 &&
		   (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ||
		    fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED ||
		    fix_fetch.u.format == AC_FETCH_FORMAT_SINT) &&
		   required_channels == 4) {
		/* For 2_10_10_10, the hardware returns an unsigned value;
		 * convert it to a signed one.
		 */
		LLVMValueRef tmp = fetches[3];
		LLVMValueRef c30 = LLVMConstInt(ctx->i32, 30, 0);

		/* First, recover the sign-extended signed integer value. */
		if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED)
			tmp = LLVMBuildFPToUI(ctx->ac.builder, tmp, ctx->i32, "");
		else
			tmp = ac_to_integer(&ctx->ac, tmp);

		/* For the integer-like cases, do a natural sign extension.
		 *
		 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
		 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
		 * exponent.
		 */
		tmp = LLVMBuildShl(ctx->ac.builder, tmp,
				   fix_fetch.u.format == AC_FETCH_FORMAT_SNORM ?
				   LLVMConstInt(ctx->i32, 7, 0) : c30, "");
		tmp = LLVMBuildAShr(ctx->ac.builder, tmp, c30, "");

		/* Convert back to the right type. */
		if (fix_fetch.u.format == AC_FETCH_FORMAT_SNORM) {
			LLVMValueRef clamp;
			LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
			clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, tmp, neg_one, "");
			tmp = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, tmp, "");
		} else if (fix_fetch.u.format == AC_FETCH_FORMAT_SSCALED) {
			tmp = LLVMBuildSIToFP(ctx->ac.builder, tmp, ctx->f32, "");
		}

		fetches[3] = tmp;
	}

	for (unsigned i = 0; i < 4; ++i)
		out[i] = ac_to_float(&ctx->ac, fetches[i]);
}

LLVMValueRef si_get_primitive_id(struct si_shader_context *ctx,
				 unsigned swizzle)
{
	if (swizzle > 0)
		return ctx->i32_0;

	switch (ctx->type) {
	case PIPE_SHADER_VERTEX:
		return ac_get_arg(&ctx->ac, ctx->vs_prim_id);
	case PIPE_SHADER_TESS_CTRL:
		return ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id);
	case PIPE_SHADER_TESS_EVAL:
		return ac_get_arg(&ctx->ac, ctx->args.tes_patch_id);
	case PIPE_SHADER_GEOMETRY:
		return ac_get_arg(&ctx->ac, ctx->args.gs_prim_id);
	default:
		assert(0);
		return ctx->i32_0;
	}
}

static LLVMValueRef get_dw_address_from_generic_indices(struct si_shader_context *ctx,
							LLVMValueRef vertex_dw_stride,
							LLVMValueRef base_addr,
							LLVMValueRef vertex_index,
							LLVMValueRef param_index,
							ubyte name, ubyte index)
{
	if (vertex_dw_stride) {
		base_addr = ac_build_imad(&ctx->ac, vertex_index,
					  vertex_dw_stride, base_addr);
	}

	if (param_index) {
		base_addr = ac_build_imad(&ctx->ac, param_index,
					  LLVMConstInt(ctx->i32, 4, 0), base_addr);
	}

	int param = name == TGSI_SEMANTIC_PATCH ||
		    name == TGSI_SEMANTIC_TESSINNER ||
		    name == TGSI_SEMANTIC_TESSOUTER ?
			    si_shader_io_get_unique_index_patch(name, index) :
			    si_shader_io_get_unique_index(name, index, false);

	/* Add the base address of the element. */
	return LLVMBuildAdd(ctx->ac.builder, base_addr,
			    LLVMConstInt(ctx->i32, param * 4, 0), "");
}

/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
static LLVMValueRef get_tcs_tes_buffer_address(struct si_shader_context *ctx,
					       LLVMValueRef rel_patch_id,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
	LLVMValueRef param_stride, constant16;

	vertices_per_patch = get_num_tcs_out_vertices(ctx);
	num_patches = si_unpack_param(ctx, ctx->tcs_offchip_layout, 0, 6);
	total_vertices = LLVMBuildMul(ctx->ac.builder, vertices_per_patch,
				      num_patches, "");

	constant16 = LLVMConstInt(ctx->i32, 16, 0);
	if (vertex_index) {
		base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
					  vertices_per_patch, vertex_index);
		param_stride = total_vertices;
	} else {
		base_addr = rel_patch_id;
		param_stride = num_patches;
	}

	base_addr = ac_build_imad(&ctx->ac, param_index, param_stride, base_addr);
	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		LLVMValueRef patch_data_offset =
			si_unpack_param(ctx, ctx->tcs_offchip_layout, 12, 20);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
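
/* Worked example (illustrative; numbers hypothetical): with 4 patches
 * of 3 vertices each, attribute 2 of patch 1 / vertex 0 lands at
 * ((1 * 3 + 0) + 2 * (3 * 4)) * 16 = 432 bytes; the base address walks
 * vertices first, then steps by total_vertices per attribute, and
 * every vec4 attribute occupies 16 bytes.
 */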

static LLVMValueRef get_tcs_tes_buffer_address_from_generic_indices(
					struct si_shader_context *ctx,
					LLVMValueRef vertex_index,
					LLVMValueRef param_index,
					ubyte name, ubyte index)
{
	unsigned param_index_base;

	param_index_base = name == TGSI_SEMANTIC_PATCH ||
			   name == TGSI_SEMANTIC_TESSINNER ||
			   name == TGSI_SEMANTIC_TESSOUTER ?
				   si_shader_io_get_unique_index_patch(name, index) :
				   si_shader_io_get_unique_index(name, index, false);

	if (param_index) {
		param_index = LLVMBuildAdd(ctx->ac.builder, param_index,
					   LLVMConstInt(ctx->i32, param_index_base, 0),
					   "");
	} else {
		param_index = LLVMConstInt(ctx->i32, param_index_base, 0);
	}

	return get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx),
					  vertex_index, param_index);
}

static LLVMValueRef si_build_gather_64bit(struct si_shader_context *ctx,
					  LLVMTypeRef type,
					  LLVMValueRef val1,
					  LLVMValueRef val2)
{
	LLVMValueRef values[2] = {
		ac_to_integer(&ctx->ac, val1),
		ac_to_integer(&ctx->ac, val2),
	};
	LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, 2);
	return LLVMBuildBitCast(ctx->ac.builder, result, type, "");
}

static LLVMValueRef buffer_load(struct si_shader_context *ctx,
				LLVMTypeRef type, unsigned swizzle,
				LLVMValueRef buffer, LLVMValueRef offset,
				LLVMValueRef base, bool can_speculate)
{
	LLVMValueRef value, value2;
	LLVMTypeRef vec_type = LLVMVectorType(type, 4);

	if (swizzle == ~0) {
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, ac_glc, can_speculate, false);

		return LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
	}

	if (!llvm_type_is_64bit(ctx, type)) {
		value = ac_build_buffer_load(&ctx->ac, buffer, 4, NULL, base, offset,
					     0, ac_glc, can_speculate, false);

		value = LLVMBuildBitCast(ctx->ac.builder, value, vec_type, "");
		return LLVMBuildExtractElement(ctx->ac.builder, value,
					       LLVMConstInt(ctx->i32, swizzle, 0), "");
	}

	value = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				     swizzle * 4, ac_glc, can_speculate, false);

	value2 = ac_build_buffer_load(&ctx->ac, buffer, 1, NULL, base, offset,
				      swizzle * 4 + 4, ac_glc, can_speculate, false);

	return si_build_gather_64bit(ctx, type, value, value2);
}

/**
 * Load from LSHS LDS storage.
 *
 * \param type		output value type
 * \param swizzle	offset (typically 0..3); it can be ~0, which loads a vec4
 * \param dw_addr	address in dwords
 */
static LLVMValueRef lshs_lds_load(struct si_shader_context *ctx,
				  LLVMTypeRef type, unsigned swizzle,
				  LLVMValueRef dw_addr)
{
	LLVMValueRef value;

	if (swizzle == ~0) {
		LLVMValueRef values[4];

		for (unsigned chan = 0; chan < 4; chan++)
			values[chan] = lshs_lds_load(ctx, type, chan, dw_addr);

		return ac_build_gather_values(&ctx->ac, values, 4);
	}

	/* Split 64-bit loads. */
	if (llvm_type_is_64bit(ctx, type)) {
		LLVMValueRef lo, hi;

		lo = lshs_lds_load(ctx, ctx->i32, swizzle, dw_addr);
		hi = lshs_lds_load(ctx, ctx->i32, swizzle + 1, dw_addr);
		return si_build_gather_64bit(ctx, type, lo, hi);
	}

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->i32, swizzle, 0), "");

	value = ac_lds_load(&ctx->ac, dw_addr);

	return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
}

/**
 * Store to LSHS LDS storage.
 *
 * \param dw_offset_imm	offset in dwords (typically 0..3)
 * \param dw_addr	address in dwords
 * \param value		value to store
 */
static void lshs_lds_store(struct si_shader_context *ctx,
			   unsigned dw_offset_imm, LLVMValueRef dw_addr,
			   LLVMValueRef value)
{
	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->i32, dw_offset_imm, 0), "");

	ac_lds_store(&ctx->ac, dw_addr, value);
}

enum si_tess_ring {
	TCS_FACTOR_RING,
	TESS_OFFCHIP_RING_TCS,
	TESS_OFFCHIP_RING_TES,
};

static LLVMValueRef get_tess_ring_descriptor(struct si_shader_context *ctx,
					     enum si_tess_ring ring)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef addr = ac_get_arg(&ctx->ac,
				       ring == TESS_OFFCHIP_RING_TES ?
				       ctx->tes_offchip_addr :
				       ctx->tcs_out_lds_layout);

	/* TCS only receives high 13 bits of the address. */
	if (ring == TESS_OFFCHIP_RING_TCS || ring == TCS_FACTOR_RING) {
		addr = LLVMBuildAnd(builder, addr,
				    LLVMConstInt(ctx->i32, 0xfff80000, 0), "");
	}

	if (ring == TCS_FACTOR_RING) {
		unsigned tf_offset = ctx->screen->tess_offchip_ring_size;
		addr = LLVMBuildAdd(builder, addr,
				    LLVMConstInt(ctx->i32, tf_offset, 0), "");
	}

	uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

	if (ctx->screen->info.chip_class >= GFX10)
		rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
			 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
			 S_008F0C_RESOURCE_LEVEL(1);
	else
		rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

	LLVMValueRef desc[4];
	desc[0] = addr;
	desc[1] = LLVMConstInt(ctx->i32,
			       S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);
	desc[2] = LLVMConstInt(ctx->i32, 0xffffffff, 0);
	desc[3] = LLVMConstInt(ctx->i32, rsrc3, false);

	return ac_build_gather_values(&ctx->ac, desc, 4);
}
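
/* Sketch of the resulting V# built above (field names follow the
 * register definitions in sid.h):
 *
 *	desc[0] = base address bits [31:0]
 *	desc[1] = S_008F04_BASE_ADDRESS_HI(address32_hi), stride = 0
 *	desc[2] = 0xffffffff  (num_records: effectively unbounded)
 *	desc[3] = dst_sel XYZW plus a 32-bit float data format
 *	          (with RESOURCE_LEVEL/OOB_SELECT on GFX10+)
 */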

static LLVMValueRef si_nir_load_tcs_varyings(struct ac_shader_abi *abi,
					     LLVMTypeRef type,
					     LLVMValueRef vertex_index,
					     LLVMValueRef param_index,
					     unsigned const_index,
					     unsigned location,
					     unsigned driver_location,
					     unsigned component,
					     unsigned num_components,
					     bool is_patch,
					     bool is_compact,
					     bool load_input)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef dw_addr, stride;
	ubyte name, index;

	driver_location = driver_location / 4;

	if (load_input) {
		name = info->input_semantic_name[driver_location];
		index = info->input_semantic_index[driver_location];
	} else {
		name = info->output_semantic_name[driver_location];
		index = info->output_semantic_index[driver_location];
	}

	assert((name == TGSI_SEMANTIC_PATCH ||
		name == TGSI_SEMANTIC_TESSINNER ||
		name == TGSI_SEMANTIC_TESSOUTER) == is_patch);

	if (load_input) {
		stride = get_tcs_in_vertex_dw_stride(ctx);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (is_patch) {
			stride = NULL;
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		} else {
			stride = get_tcs_out_vertex_dw_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		}
	}

	if (!param_index) {
		param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
						      vertex_index, param_index,
						      name, index);

	LLVMValueRef value[4];
	for (unsigned i = 0; i < num_components; i++) {
		unsigned offset = i;
		if (llvm_type_is_64bit(ctx, type))
			offset *= 2;

		offset += component;
		value[i + component] = lshs_lds_load(ctx, type, offset, dw_addr);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}

LLVMValueRef si_nir_load_input_tes(struct ac_shader_abi *abi,
				   LLVMTypeRef type,
				   LLVMValueRef vertex_index,
				   LLVMValueRef param_index,
				   unsigned const_index,
				   unsigned location,
				   unsigned driver_location,
				   unsigned component,
				   unsigned num_components,
				   bool is_patch,
				   bool is_compact,
				   bool load_input)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_info *info = &ctx->shader->selector->info;
	LLVMValueRef base, addr;

	driver_location = driver_location / 4;
	ubyte name = info->input_semantic_name[driver_location];
	ubyte index = info->input_semantic_index[driver_location];

	assert((name == TGSI_SEMANTIC_PATCH ||
		name == TGSI_SEMANTIC_TESSINNER ||
		name == TGSI_SEMANTIC_TESSOUTER) == is_patch);

	base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);

	if (!param_index) {
		param_index = LLVMConstInt(ctx->i32, const_index, 0);
	}

	addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index,
							       name, index);

	/* TODO: This will generate rather ordinary llvm code, although it
	 * should be easy for the optimiser to fix up. In future we might want
	 * to refactor buffer_load().
	 */
	LLVMValueRef value[4];
	for (unsigned i = 0; i < num_components; i++) {
		unsigned offset = i;
		if (llvm_type_is_64bit(ctx, type)) {
			offset *= 2;
			if (offset == 4) {
				ubyte name = info->input_semantic_name[driver_location + 1];
				ubyte index = info->input_semantic_index[driver_location + 1];
				addr = get_tcs_tes_buffer_address_from_generic_indices(ctx,
										       vertex_index,
										       param_index,
										       name, index);
			}

			offset = offset % 4;
		}

		offset += component;
		value[i + component] = buffer_load(ctx, type, offset,
						   ctx->tess_offchip_ring, base, addr, true);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}

static void si_nir_store_output_tcs(struct ac_shader_abi *abi,
				    const struct nir_variable *var,
				    LLVMValueRef vertex_index,
				    LLVMValueRef param_index,
				    unsigned const_index,
				    LLVMValueRef src,
				    unsigned writemask)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_info *info = &ctx->shader->selector->info;
	const unsigned component = var->data.location_frac;
	unsigned driver_location = var->data.driver_location;
	LLVMValueRef dw_addr, stride;
	LLVMValueRef buffer, base, addr;
	LLVMValueRef values[8];
	bool skip_lds_store;
	bool is_tess_factor = false, is_tess_inner = false;

	driver_location = driver_location / 4;
	ubyte name = info->output_semantic_name[driver_location];
	ubyte index = info->output_semantic_index[driver_location];

	bool is_const = !param_index;
	if (!param_index)
		param_index = LLVMConstInt(ctx->i32, const_index, 0);

	const bool is_patch = var->data.patch ||
			      var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
			      var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;

	assert((name == TGSI_SEMANTIC_PATCH ||
		name == TGSI_SEMANTIC_TESSINNER ||
		name == TGSI_SEMANTIC_TESSOUTER) == is_patch);

	if (!is_patch) {
		stride = get_tcs_out_vertex_dw_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
		dw_addr = get_dw_address_from_generic_indices(ctx, stride, dw_addr,
							      vertex_index, param_index,
							      name, index);

		skip_lds_store = !info->reads_pervertex_outputs;
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
		dw_addr = get_dw_address_from_generic_indices(ctx, NULL, dw_addr,
							      vertex_index, param_index,
							      name, index);

		skip_lds_store = !info->reads_perpatch_outputs;

		if (is_const && const_index == 0) {
			int name = info->output_semantic_name[driver_location];

			/* Always write tess factors into LDS for the TCS epilog. */
			if (name == TGSI_SEMANTIC_TESSINNER ||
			    name == TGSI_SEMANTIC_TESSOUTER) {
				/* The epilog doesn't read LDS if invocation 0 defines tess factors. */
				skip_lds_store = !info->reads_tessfactor_outputs &&
						 ctx->shader->selector->info.tessfactors_are_def_in_all_invocs;
				is_tess_factor = true;
				is_tess_inner = name == TGSI_SEMANTIC_TESSINNER;
			}
		}
	}

	buffer = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);

	base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);

	addr = get_tcs_tes_buffer_address_from_generic_indices(ctx, vertex_index,
							       param_index, name, index);

	for (unsigned chan = component; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);

		unsigned buffer_store_offset = chan % 4;
		if (chan == 4) {
			ubyte name = info->output_semantic_name[driver_location + 1];
			ubyte index = info->output_semantic_index[driver_location + 1];
			addr = get_tcs_tes_buffer_address_from_generic_indices(ctx,
									       vertex_index,
									       param_index,
									       name, index);
		}

		/* Skip LDS stores if there is no LDS read of this output. */
		if (!skip_lds_store)
			lshs_lds_store(ctx, chan, dw_addr, value);

		value = ac_to_integer(&ctx->ac, value);
		values[chan] = value;

		if (writemask != 0xF && !is_tess_factor) {
			ac_build_buffer_store_dword(&ctx->ac, buffer, value, 1,
						    addr, base,
						    4 * buffer_store_offset,
						    ac_glc);
		}

		/* Write tess factors into VGPRs for the epilog. */
		if (is_tess_factor &&
		    ctx->shader->selector->info.tessfactors_are_def_in_all_invocs) {
			if (!is_tess_inner) {
				LLVMBuildStore(ctx->ac.builder, value, /* outer */
					       ctx->invoc0_tess_factors[chan]);
			} else if (chan < 2) {
				LLVMBuildStore(ctx->ac.builder, value, /* inner */
					       ctx->invoc0_tess_factors[4 + chan]);
			}
		}
	}

	if (writemask == 0xF && !is_tess_factor) {
		LLVMValueRef value = ac_build_gather_values(&ctx->ac,
							    values, 4);
		ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, addr,
					    base, 0, ac_glc);
	}
}

static LLVMValueRef si_llvm_load_input_gs(struct ac_shader_abi *abi,
					  unsigned input_index,
					  unsigned vtx_offset_param,
					  LLVMTypeRef type,
					  unsigned swizzle)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader *shader = ctx->shader;
	LLVMValueRef vtx_offset, soffset;
	struct si_shader_info *info = &shader->selector->info;
	unsigned semantic_name = info->input_semantic_name[input_index];
	unsigned semantic_index = info->input_semantic_index[input_index];
	unsigned param;
	LLVMValueRef value;

	param = si_shader_io_get_unique_index(semantic_name, semantic_index, false);

	/* GFX9 has the ESGS ring in LDS. */
	if (ctx->screen->info.chip_class >= GFX9) {
		unsigned index = vtx_offset_param;

		switch (index / 2) {
		case 0:
			vtx_offset = si_unpack_param(ctx, ctx->gs_vtx01_offset,
						     index % 2 ? 16 : 0, 16);
			break;
		case 1:
			vtx_offset = si_unpack_param(ctx, ctx->gs_vtx23_offset,
						     index % 2 ? 16 : 0, 16);
			break;
		case 2:
			vtx_offset = si_unpack_param(ctx, ctx->gs_vtx45_offset,
						     index % 2 ? 16 : 0, 16);
			break;
		default:
			assert(0);
			return NULL;
		}

		unsigned offset = param * 4 + swizzle;
		vtx_offset = LLVMBuildAdd(ctx->ac.builder, vtx_offset,
					  LLVMConstInt(ctx->i32, offset, false), "");

		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->esgs_ring, vtx_offset);
		LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, ptr, "");
		if (llvm_type_is_64bit(ctx, type)) {
			ptr = LLVMBuildGEP(ctx->ac.builder, ptr,
					   &ctx->ac.i32_1, 1, "");
			LLVMValueRef values[2] = {
				value,
				LLVMBuildLoad(ctx->ac.builder, ptr, "")
			};
			value = ac_build_gather_values(&ctx->ac, values, 2);
		}
		return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
	}

	/* GFX6: input load from the ESGS ring in memory. */
	if (swizzle == ~0) {
		LLVMValueRef values[4];
		unsigned chan;
		for (chan = 0; chan < 4; chan++) {
			values[chan] = si_llvm_load_input_gs(abi, input_index, vtx_offset_param,
							     type, chan);
		}
		return ac_build_gather_values(&ctx->ac, values, 4);
	}

	/* Get the vertex offset parameter on GFX6. */
	LLVMValueRef gs_vtx_offset = ac_get_arg(&ctx->ac,
						ctx->gs_vtx_offset[vtx_offset_param]);

	vtx_offset = LLVMBuildMul(ctx->ac.builder, gs_vtx_offset,
				  LLVMConstInt(ctx->i32, 4, 0), "");

	soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle) * 256, 0);

	value = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1, ctx->i32_0,
				     vtx_offset, soffset, 0, ac_glc, true, false);
	if (llvm_type_is_64bit(ctx, type)) {
		LLVMValueRef value2;
		soffset = LLVMConstInt(ctx->i32, (param * 4 + swizzle + 1) * 256, 0);

		value2 = ac_build_buffer_load(&ctx->ac, ctx->esgs_ring, 1,
					      ctx->i32_0, vtx_offset, soffset,
					      0, ac_glc, true, false);
		return si_build_gather_64bit(ctx, type, value, value2);
	}
	return LLVMBuildBitCast(ctx->ac.builder, value, type, "");
}

static LLVMValueRef si_nir_load_input_gs(struct ac_shader_abi *abi,
					 unsigned location,
					 unsigned driver_location,
					 unsigned component,
					 unsigned num_components,
					 unsigned vertex_index,
					 unsigned const_index,
					 LLVMTypeRef type)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);

	LLVMValueRef value[4];
	for (unsigned i = 0; i < num_components; i++) {
		unsigned offset = i;
		if (llvm_type_is_64bit(ctx, type))
			offset *= 2;

		offset += component;
		value[i + component] = si_llvm_load_input_gs(&ctx->abi, driver_location / 4 + const_index,
							     vertex_index, type, offset);
	}

	return ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
}

static LLVMValueRef get_base_vertex(struct ac_shader_abi *abi)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);

	/* For non-indexed draws, the base vertex set by the driver
	 * (for direct draws) or the CP (for indirect draws) is the
	 * first vertex ID, but GLSL expects 0 to be returned.
	 */
	LLVMValueRef vs_state = ac_get_arg(&ctx->ac,
					   ctx->vs_state_bits);
	LLVMValueRef indexed;

	indexed = LLVMBuildLShr(ctx->ac.builder, vs_state, ctx->i32_1, "");
	indexed = LLVMBuildTrunc(ctx->ac.builder, indexed, ctx->i1, "");

	return LLVMBuildSelect(ctx->ac.builder, indexed,
			       ac_get_arg(&ctx->ac, ctx->args.base_vertex),
			       ctx->i32_0, "");
}
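
/* Equivalent scalar logic (illustrative sketch, not driver code):
 *
 *	bool indexed = (vs_state_bits >> 1) & 1;
 *	int base_vertex = indexed ? args.base_vertex : 0;
 *
 * i.e. bit 1 of the VS state SGPR says whether the draw is indexed,
 * and only then does the driver-provided base vertex reach GLSL's
 * gl_BaseVertex.
 */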

static LLVMValueRef get_block_size(struct ac_shader_abi *abi)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);

	LLVMValueRef values[3];
	LLVMValueRef result;
	unsigned i;
	unsigned *properties = ctx->shader->selector->info.properties;

	if (properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] != 0) {
		unsigned sizes[3] = {
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH],
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT],
			properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH]
		};

		for (i = 0; i < 3; ++i)
			values[i] = LLVMConstInt(ctx->i32, sizes[i], 0);

		result = ac_build_gather_values(&ctx->ac, values, 3);
	} else {
		result = ac_get_arg(&ctx->ac, ctx->block_size);
	}

	return result;
}

static LLVMValueRef si_load_tess_coord(struct ac_shader_abi *abi)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	LLVMValueRef coord[4] = {
		ac_get_arg(&ctx->ac, ctx->tes_u),
		ac_get_arg(&ctx->ac, ctx->tes_v),
		ctx->ac.f32_0,
		ctx->ac.f32_0
	};

	/* For triangles, the vector should be (u, v, 1-u-v). */
	if (ctx->shader->selector->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] ==
	    PIPE_PRIM_TRIANGLES) {
		coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
					 LLVMBuildFAdd(ctx->ac.builder,
						       coord[0], coord[1], ""), "");
	}
	return ac_build_gather_values(&ctx->ac, coord, 4);
}

static LLVMValueRef load_tess_level(struct si_shader_context *ctx,
				    unsigned semantic_name)
{
	LLVMValueRef base, addr;

	int param = si_shader_io_get_unique_index_patch(semantic_name, 0);

	base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
	addr = get_tcs_tes_buffer_address(ctx, get_rel_patch_id(ctx), NULL,
					  LLVMConstInt(ctx->i32, param, 0));

	return buffer_load(ctx, ctx->f32,
			   ~0, ctx->tess_offchip_ring, base, addr, true);

}

static LLVMValueRef load_tess_level_default(struct si_shader_context *ctx,
					    unsigned semantic_name)
{
	LLVMValueRef buf, slot, val[4];
	int i, offset;

	slot = LLVMConstInt(ctx->i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
	buf = ac_get_arg(&ctx->ac, ctx->rw_buffers);
	buf = ac_build_load_to_sgpr(&ctx->ac, buf, slot);
	offset = semantic_name == TGSI_SEMANTIC_TESS_DEFAULT_INNER_LEVEL ? 4 : 0;

	for (i = 0; i < 4; i++)
		val[i] = si_buffer_load_const(ctx, buf,
					      LLVMConstInt(ctx->i32, (offset + i) * 4, 0));
	return ac_build_gather_values(&ctx->ac, val, 4);
}

static LLVMValueRef si_load_tess_level(struct ac_shader_abi *abi,
				       unsigned varying_id,
				       bool load_default_state)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	unsigned semantic_name;

	if (load_default_state) {
		switch (varying_id) {
		case VARYING_SLOT_TESS_LEVEL_INNER:
			semantic_name = TGSI_SEMANTIC_TESS_DEFAULT_INNER_LEVEL;
			break;
		case VARYING_SLOT_TESS_LEVEL_OUTER:
			semantic_name = TGSI_SEMANTIC_TESS_DEFAULT_OUTER_LEVEL;
			break;
		default:
			unreachable("unknown tess level");
		}
		return load_tess_level_default(ctx, semantic_name);
	}

	switch (varying_id) {
	case VARYING_SLOT_TESS_LEVEL_INNER:
		semantic_name = TGSI_SEMANTIC_TESSINNER;
		break;
	case VARYING_SLOT_TESS_LEVEL_OUTER:
		semantic_name = TGSI_SEMANTIC_TESSOUTER;
		break;
	default:
		unreachable("unknown tess level");
	}

	return load_tess_level(ctx, semantic_name);

}

static LLVMValueRef si_load_patch_vertices_in(struct ac_shader_abi *abi)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	if (ctx->type == PIPE_SHADER_TESS_CTRL)
		return si_unpack_param(ctx, ctx->tcs_out_lds_layout, 13, 6);
	else if (ctx->type == PIPE_SHADER_TESS_EVAL)
		return get_num_tcs_out_vertices(ctx);
	else
		unreachable("invalid shader stage for TGSI_SEMANTIC_VERTICESIN");
}

void si_declare_compute_memory(struct si_shader_context *ctx)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	unsigned lds_size = sel->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE];

	LLVMTypeRef i8p = LLVMPointerType(ctx->i8, AC_ADDR_SPACE_LDS);
	LLVMValueRef var;

	assert(!ctx->ac.lds);

	var = LLVMAddGlobalInAddressSpace(ctx->ac.module,
					  LLVMArrayType(ctx->i8, lds_size),
					  "compute_lds",
					  AC_ADDR_SPACE_LDS);
	LLVMSetAlignment(var, 64 * 1024);

	ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, var, i8p, "");
}

static LLVMValueRef load_const_buffer_desc_fast_path(struct si_shader_context *ctx)
{
	LLVMValueRef ptr =
		ac_get_arg(&ctx->ac, ctx->const_and_shader_buffers);
	struct si_shader_selector *sel = ctx->shader->selector;

	/* Do the bounds checking with a descriptor, because
	 * doing computation and manual bounds checking of 64-bit
	 * addresses generates horrible VALU code with very high
	 * VGPR usage and very low SIMD occupancy.
	 */
	ptr = LLVMBuildPtrToInt(ctx->ac.builder, ptr, ctx->ac.intptr, "");

	LLVMValueRef desc0, desc1;
	desc0 = ptr;
	desc1 = LLVMConstInt(ctx->i32,
			     S_008F04_BASE_ADDRESS_HI(ctx->screen->info.address32_hi), 0);

	uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

	if (ctx->screen->info.chip_class >= GFX10)
		rsrc3 |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
			 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
			 S_008F0C_RESOURCE_LEVEL(1);
	else
		rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

	LLVMValueRef desc_elems[] = {
		desc0,
		desc1,
		LLVMConstInt(ctx->i32, sel->info.constbuf0_num_slots * 16, 0),
		LLVMConstInt(ctx->i32, rsrc3, false)
	};

	return ac_build_gather_values(&ctx->ac, desc_elems, 4);
}

static LLVMValueRef load_ubo(struct ac_shader_abi *abi, LLVMValueRef index)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	struct si_shader_selector *sel = ctx->shader->selector;

	LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->const_and_shader_buffers);

	if (sel->info.const_buffers_declared == 1 &&
	    sel->info.shader_buffers_declared == 0) {
		return load_const_buffer_desc_fast_path(ctx);
	}

	index = si_llvm_bound_index(ctx, index, ctx->num_const_buffers);
	index = LLVMBuildAdd(ctx->ac.builder, index,
			     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS, 0), "");

	return ac_build_load_to_sgpr(&ctx->ac, ptr, index);
}

static LLVMValueRef
load_ssbo(struct ac_shader_abi *abi, LLVMValueRef index, bool write)
{
	struct si_shader_context *ctx = si_shader_context_from_abi(abi);
	LLVMValueRef rsrc_ptr = ac_get_arg(&ctx->ac,
					   ctx->const_and_shader_buffers);

	index = si_llvm_bound_index(ctx, index, ctx->num_shader_buffers);
	index = LLVMBuildSub(ctx->ac.builder,
			     LLVMConstInt(ctx->i32, SI_NUM_SHADER_BUFFERS - 1, 0),
			     index, "");

	return ac_build_load_to_sgpr(&ctx->ac, rsrc_ptr, index);
}

/* Initialize arguments for the shader export intrinsic */
static void si_llvm_init_vs_export_args(struct si_shader_context *ctx,
					LLVMValueRef *values,
					unsigned target,
					struct ac_export_args *args)
{
	args->enabled_channels = 0xf; /* writemask - default is 0xf */
	args->valid_mask = 0; /* Specify whether the EXEC mask represents the valid mask */
	args->done = 0; /* Specify whether this is the last export */
	args->target = target; /* Specify the target we are exporting */
	args->compr = false;

	memcpy(&args->out[0], values, sizeof(values[0]) * 4);
}

static void si_llvm_emit_clipvertex(struct si_shader_context *ctx,
				    struct ac_export_args *pos, LLVMValueRef *out_elts)
{
	unsigned reg_index;
	unsigned chan;
	unsigned const_chan;
	LLVMValueRef base_elt;
	LLVMValueRef ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
	LLVMValueRef constbuf_index = LLVMConstInt(ctx->i32,
						   SI_VS_CONST_CLIP_PLANES, 0);
	LLVMValueRef const_resource = ac_build_load_to_sgpr(&ctx->ac, ptr, constbuf_index);

	for (reg_index = 0; reg_index < 2; reg_index ++) {
		struct ac_export_args *args = &pos[2 + reg_index];

		args->out[0] =
		args->out[1] =
		args->out[2] =
		args->out[3] = LLVMConstReal(ctx->f32, 0.0f);

		/* Compute dot products of position and user clip plane vectors */
		for (chan = 0; chan < 4; chan++) {
			for (const_chan = 0; const_chan < 4; const_chan++) {
				LLVMValueRef addr =
					LLVMConstInt(ctx->i32, ((reg_index * 4 + chan) * 4 +
								const_chan) * 4, 0);
				base_elt = si_buffer_load_const(ctx, const_resource,
								addr);
				args->out[chan] = ac_build_fmad(&ctx->ac, base_elt,
								out_elts[const_chan], args->out[chan]);
			}
		}

		args->enabled_channels = 0xf;
		args->valid_mask = 0;
		args->done = 0;
		args->target = V_008DFC_SQ_EXP_POS + 2 + reg_index;
		args->compr = 0;
	}
}
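
/* In scalar form, the loop above computes (illustrative):
 *
 *	pos[2 + r].out[c] = sum_k plane[r * 4 + c][k] * out_elts[k]
 *
 * i.e. the dot product of user clip plane (r * 4 + c) with the clip
 * vertex, accumulated with one fmad per constant-buffer channel.
 */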

static void si_dump_streamout(struct pipe_stream_output_info *so)
{
	unsigned i;

	if (so->num_outputs)
		fprintf(stderr, "STREAMOUT\n");

	for (i = 0; i < so->num_outputs; i++) {
		unsigned mask = ((1 << so->output[i].num_components) - 1) <<
				so->output[i].start_component;
		fprintf(stderr, " %i: BUF%i[%i..%i] <- OUT[%i].%s%s%s%s\n",
			i, so->output[i].output_buffer,
			so->output[i].dst_offset, so->output[i].dst_offset + so->output[i].num_components - 1,
			so->output[i].register_index,
			mask & 1 ? "x" : "",
			mask & 2 ? "y" : "",
			mask & 4 ? "z" : "",
			mask & 8 ? "w" : "");
	}
}
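
/* Sample output, built from the format string above (values
 * hypothetical): a single vec4 captured from output 2 into buffer 0
 * would print
 *
 *	STREAMOUT
 *	 0: BUF0[0..3] <- OUT[2].xyzw
 */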

void si_emit_streamout_output(struct si_shader_context *ctx,
			      LLVMValueRef const *so_buffers,
			      LLVMValueRef const *so_write_offsets,
			      struct pipe_stream_output *stream_out,
			      struct si_shader_output_values *shader_out)
{
	unsigned buf_idx = stream_out->output_buffer;
	unsigned start = stream_out->start_component;
	unsigned num_comps = stream_out->num_components;
	LLVMValueRef out[4];

	assert(num_comps && num_comps <= 4);
	if (!num_comps || num_comps > 4)
		return;

	/* Load the output as int. */
	for (int j = 0; j < num_comps; j++) {
		assert(stream_out->stream == shader_out->vertex_stream[start + j]);

		out[j] = ac_to_integer(&ctx->ac, shader_out->values[start + j]);
	}

	/* Pack the output. */
	LLVMValueRef vdata = NULL;

	switch (num_comps) {
	case 1: /* as i32 */
		vdata = out[0];
		break;
	case 2: /* as v2i32 */
	case 3: /* as v3i32 */
		if (ac_has_vec3_support(ctx->screen->info.chip_class, false)) {
			vdata = ac_build_gather_values(&ctx->ac, out, num_comps);
			break;
		}
		/* as v4i32 (aligned to 4) */
		out[3] = LLVMGetUndef(ctx->i32);
		/* fall through */
	case 4: /* as v4i32 */
		vdata = ac_build_gather_values(&ctx->ac, out, util_next_power_of_two(num_comps));
		break;
	}

	ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf_idx],
				    vdata, num_comps,
				    so_write_offsets[buf_idx],
				    ctx->i32_0,
				    stream_out->dst_offset * 4, ac_glc | ac_slc);
}

/**
 * Write streamout data to buffers for vertex stream @p stream (different
 * vertex streams can occur for GS copy shaders).
 */
static void si_llvm_emit_streamout(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput, unsigned stream)
{
	struct si_shader_selector *sel = ctx->shader->selector;
	struct pipe_stream_output_info *so = &sel->so;
	LLVMBuilderRef builder = ctx->ac.builder;
	int i;

	/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
	LLVMValueRef so_vtx_count =
		si_unpack_param(ctx, ctx->streamout_config, 16, 7);

	LLVMValueRef tid = ac_get_thread_id(&ctx->ac);

	/* can_emit = tid < so_vtx_count; */
	LLVMValueRef can_emit =
		LLVMBuildICmp(builder, LLVMIntULT, tid, so_vtx_count, "");

	/* Emit the streamout code conditionally. This actually avoids
	 * out-of-bounds buffer access. The hw tells us via the SGPR
	 * (so_vtx_count) which threads are allowed to emit streamout data. */
	ac_build_ifcc(&ctx->ac, can_emit, 6501);
	{
		/* The buffer offset is computed as follows:
		 *   ByteOffset = streamout_offset[buffer_id]*4 +
		 *                (streamout_write_index + thread_id)*stride[buffer_id] +
		 *                attrib_offset
		 */

		LLVMValueRef so_write_index =
			ac_get_arg(&ctx->ac,
				   ctx->streamout_write_index);

		/* Compute (streamout_write_index + thread_id). */
		so_write_index = LLVMBuildAdd(builder, so_write_index, tid, "");

		/* Load the descriptor and compute the write offset for each
		 * enabled buffer. */
		LLVMValueRef so_write_offset[4] = {};
		LLVMValueRef so_buffers[4];
		LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac,
						  ctx->rw_buffers);

		for (i = 0; i < 4; i++) {
			if (!so->stride[i])
				continue;

			LLVMValueRef offset = LLVMConstInt(ctx->i32,
							   SI_VS_STREAMOUT_BUF0 + i, 0);

			so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);

			LLVMValueRef so_offset = ac_get_arg(&ctx->ac,
							    ctx->streamout_offset[i]);
			so_offset = LLVMBuildMul(builder, so_offset, LLVMConstInt(ctx->i32, 4, 0), "");

			so_write_offset[i] = ac_build_imad(&ctx->ac, so_write_index,
							   LLVMConstInt(ctx->i32, so->stride[i]*4, 0),
							   so_offset);
		}

		/* Write streamout data. */
		for (i = 0; i < so->num_outputs; i++) {
			unsigned reg = so->output[i].register_index;

			if (reg >= noutput)
				continue;

			if (stream != so->output[i].stream)
				continue;

			si_emit_streamout_output(ctx, so_buffers, so_write_offset,
						 &so->output[i], &outputs[reg]);
		}
	}
	ac_build_endif(&ctx->ac, 6501);
}
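
/* Worked example of the ByteOffset formula above (illustrative;
 * numbers hypothetical): with streamout_offset[0] = 16,
 * streamout_write_index = 100, thread_id = 5, stride[0] = 4 dwords
 * and dst_offset = 2 dwords, the store lands at
 * 16 * 4 + (100 + 5) * (4 * 4) + 2 * 4 = 1752 bytes into buffer 0.
 */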

static void si_export_param(struct si_shader_context *ctx, unsigned index,
			    LLVMValueRef *values)
{
	struct ac_export_args args;

	si_llvm_init_vs_export_args(ctx, values,
				    V_008DFC_SQ_EXP_PARAM + index, &args);
	ac_build_export(&ctx->ac, &args);
}

static void si_build_param_exports(struct si_shader_context *ctx,
				   struct si_shader_output_values *outputs,
				   unsigned noutput)
{
	struct si_shader *shader = ctx->shader;
	unsigned param_count = 0;

	for (unsigned i = 0; i < noutput; i++) {
		unsigned semantic_name = outputs[i].semantic_name;
		unsigned semantic_index = outputs[i].semantic_index;

		if (outputs[i].vertex_stream[0] != 0 &&
		    outputs[i].vertex_stream[1] != 0 &&
		    outputs[i].vertex_stream[2] != 0 &&
		    outputs[i].vertex_stream[3] != 0)
			continue;

		switch (semantic_name) {
		case TGSI_SEMANTIC_LAYER:
		case TGSI_SEMANTIC_VIEWPORT_INDEX:
		case TGSI_SEMANTIC_CLIPDIST:
		case TGSI_SEMANTIC_COLOR:
		case TGSI_SEMANTIC_BCOLOR:
		case TGSI_SEMANTIC_PRIMID:
		case TGSI_SEMANTIC_FOG:
		case TGSI_SEMANTIC_TEXCOORD:
		case TGSI_SEMANTIC_GENERIC:
			break;
		default:
			continue;
		}

		if ((semantic_name != TGSI_SEMANTIC_GENERIC ||
		     semantic_index < SI_MAX_IO_GENERIC) &&
		    shader->key.opt.kill_outputs &
		    (1ull << si_shader_io_get_unique_index(semantic_name,
							   semantic_index, true)))
			continue;

		si_export_param(ctx, param_count, outputs[i].values);

		assert(i < ARRAY_SIZE(shader->info.vs_output_param_offset));
		shader->info.vs_output_param_offset[i] = param_count++;
	}

	shader->info.nr_param_exports = param_count;
}

1736 /**
1737 * Vertex color clamping.
1738 *
1739 * This uses a state constant loaded from a user data SGPR, and an
1740 * IF statement is added that clamps all colors when the constant
1741 * is true.
1742 */
1743 static void si_vertex_color_clamping(struct si_shader_context *ctx,
1744 struct si_shader_output_values *outputs,
1745 unsigned noutput)
1746 {
1747 LLVMValueRef addr[SI_MAX_VS_OUTPUTS][4];
1748 bool has_colors = false;
1749
1750 /* Store original colors to alloca variables. */
1751 for (unsigned i = 0; i < noutput; i++) {
1752 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
1753 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
1754 continue;
1755
1756 for (unsigned j = 0; j < 4; j++) {
1757 addr[i][j] = ac_build_alloca_undef(&ctx->ac, ctx->f32, "");
1758 LLVMBuildStore(ctx->ac.builder, outputs[i].values[j], addr[i][j]);
1759 }
1760 has_colors = true;
1761 }
1762
1763 if (!has_colors)
1764 return;
1765
1766 /* The state is in the first bit of the user SGPR. */
1767 LLVMValueRef cond = ac_get_arg(&ctx->ac, ctx->vs_state_bits);
1768 cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->i1, "");
1769
1770 ac_build_ifcc(&ctx->ac, cond, 6502);
1771
1772 /* Store clamped colors to alloca variables within the conditional block. */
1773 for (unsigned i = 0; i < noutput; i++) {
1774 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
1775 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
1776 continue;
1777
1778 for (unsigned j = 0; j < 4; j++) {
1779 LLVMBuildStore(ctx->ac.builder,
1780 ac_build_clamp(&ctx->ac, outputs[i].values[j]),
1781 addr[i][j]);
1782 }
1783 }
1784 ac_build_endif(&ctx->ac, 6502);
1785
1786 /* Load clamped colors */
1787 for (unsigned i = 0; i < noutput; i++) {
1788 if (outputs[i].semantic_name != TGSI_SEMANTIC_COLOR &&
1789 outputs[i].semantic_name != TGSI_SEMANTIC_BCOLOR)
1790 continue;
1791
1792 for (unsigned j = 0; j < 4; j++) {
1793 outputs[i].values[j] =
1794 LLVMBuildLoad(ctx->ac.builder, addr[i][j], "");
1795 }
1796 }
1797 }
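/* A scalar sketch of the transform above (illustrative only, assuming the
 * clamp-vertex-color state is bit 0 of vs_state_bits):
 *
 *   bool do_clamp = vs_state_bits & 1;
 *   for each COLOR/BCOLOR output o and channel j:
 *       o[j] = do_clamp ? CLAMP(o[j], 0.0f, 1.0f) : o[j];
 *
 * The alloca round-trip is needed because LLVM IR values are SSA; storing
 * through stack slots lets later passes (mem2reg) build the phi nodes for
 * the conditional merge.
 */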
1798
1799 /* Generate export instructions for hardware VS shader stage or NGG GS stage
1800 * (position and parameter data only).
1801 */
1802 void si_llvm_export_vs(struct si_shader_context *ctx,
1803 struct si_shader_output_values *outputs,
1804 unsigned noutput)
1805 {
1806 struct si_shader *shader = ctx->shader;
1807 struct ac_export_args pos_args[4] = {};
1808 LLVMValueRef psize_value = NULL, edgeflag_value = NULL, layer_value = NULL, viewport_index_value = NULL;
1809 unsigned pos_idx;
1810 int i;
1811
1812 si_vertex_color_clamping(ctx, outputs, noutput);
1813
1814 /* Build position exports. */
1815 for (i = 0; i < noutput; i++) {
1816 switch (outputs[i].semantic_name) {
1817 case TGSI_SEMANTIC_POSITION:
1818 si_llvm_init_vs_export_args(ctx, outputs[i].values,
1819 V_008DFC_SQ_EXP_POS, &pos_args[0]);
1820 break;
1821 case TGSI_SEMANTIC_PSIZE:
1822 psize_value = outputs[i].values[0];
1823 break;
1824 case TGSI_SEMANTIC_LAYER:
1825 layer_value = outputs[i].values[0];
1826 break;
1827 case TGSI_SEMANTIC_VIEWPORT_INDEX:
1828 viewport_index_value = outputs[i].values[0];
1829 break;
1830 case TGSI_SEMANTIC_EDGEFLAG:
1831 edgeflag_value = outputs[i].values[0];
1832 break;
1833 case TGSI_SEMANTIC_CLIPDIST:
1834 if (!shader->key.opt.clip_disable) {
1835 unsigned index = 2 + outputs[i].semantic_index;
1836 si_llvm_init_vs_export_args(ctx, outputs[i].values,
1837 V_008DFC_SQ_EXP_POS + index,
1838 &pos_args[index]);
1839 }
1840 break;
1841 case TGSI_SEMANTIC_CLIPVERTEX:
1842 if (!shader->key.opt.clip_disable) {
1843 si_llvm_emit_clipvertex(ctx, pos_args,
1844 outputs[i].values);
1845 }
1846 break;
1847 }
1848 }
1849
1850 /* We need to add the position output manually if it's missing. */
1851 if (!pos_args[0].out[0]) {
1852 pos_args[0].enabled_channels = 0xf; /* writemask */
1853 pos_args[0].valid_mask = 0; /* EXEC mask */
1854 pos_args[0].done = 0; /* last export? */
1855 pos_args[0].target = V_008DFC_SQ_EXP_POS;
1856 pos_args[0].compr = 0; /* COMPR flag */
1857 pos_args[0].out[0] = ctx->ac.f32_0; /* X */
1858 pos_args[0].out[1] = ctx->ac.f32_0; /* Y */
1859 pos_args[0].out[2] = ctx->ac.f32_0; /* Z */
1860 pos_args[0].out[3] = ctx->ac.f32_1; /* W */
1861 }
1862
1863 bool pos_writes_edgeflag = shader->selector->info.writes_edgeflag &&
1864 !shader->key.as_ngg;
1865
1866 /* Write the misc vector (point size, edgeflag, layer, viewport). */
1867 if (shader->selector->info.writes_psize ||
1868 pos_writes_edgeflag ||
1869 shader->selector->info.writes_viewport_index ||
1870 shader->selector->info.writes_layer) {
1871 pos_args[1].enabled_channels = shader->selector->info.writes_psize |
1872 (pos_writes_edgeflag << 1) |
1873 (shader->selector->info.writes_layer << 2);
1874
1875 pos_args[1].valid_mask = 0; /* EXEC mask */
1876 pos_args[1].done = 0; /* last export? */
1877 pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
1878 pos_args[1].compr = 0; /* COMPR flag */
1879 pos_args[1].out[0] = ctx->ac.f32_0; /* X */
1880 pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
1881 pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
1882 pos_args[1].out[3] = ctx->ac.f32_0; /* W */
1883
1884 if (shader->selector->info.writes_psize)
1885 pos_args[1].out[0] = psize_value;
1886
1887 if (pos_writes_edgeflag) {
1888 /* The output is a float, but the hw expects an integer
1889 * with the first bit containing the edge flag. */
1890 edgeflag_value = LLVMBuildFPToUI(ctx->ac.builder,
1891 edgeflag_value,
1892 ctx->i32, "");
1893 edgeflag_value = ac_build_umin(&ctx->ac,
1894 edgeflag_value,
1895 ctx->i32_1);
1896
1897 /* The LLVM intrinsic expects a float. */
1898 pos_args[1].out[1] = ac_to_float(&ctx->ac, edgeflag_value);
1899 }
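/* e.g. an edge flag of 1.0f becomes the integer 1; the umin against 1
 * guarantees that only bit 0 can ever be set. */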
1900
1901 if (ctx->screen->info.chip_class >= GFX9) {
1902 /* GFX9 has the layer in out.z[10:0] and the viewport
1903 * index in out.z[19:16].
1904 */
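/* Packing example (illustrative values): layer = 5, viewport index = 3
 * -> out.z integer bits = (3 << 16) | 5 = 0x00030005, reinterpreted as
 * a float for the export. */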
1905 if (shader->selector->info.writes_layer)
1906 pos_args[1].out[2] = layer_value;
1907
1908 if (shader->selector->info.writes_viewport_index) {
1909 LLVMValueRef v = viewport_index_value;
1910
1911 v = ac_to_integer(&ctx->ac, v);
1912 v = LLVMBuildShl(ctx->ac.builder, v,
1913 LLVMConstInt(ctx->i32, 16, 0), "");
1914 v = LLVMBuildOr(ctx->ac.builder, v,
1915 ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
1916 pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
1917 pos_args[1].enabled_channels |= 1 << 2;
1918 }
1919 } else {
1920 if (shader->selector->info.writes_layer)
1921 pos_args[1].out[2] = layer_value;
1922
1923 if (shader->selector->info.writes_viewport_index) {
1924 pos_args[1].out[3] = viewport_index_value;
1925 pos_args[1].enabled_channels |= 1 << 3;
1926 }
1927 }
1928 }
1929
1930 for (i = 0; i < 4; i++)
1931 if (pos_args[i].out[0])
1932 shader->info.nr_pos_exports++;
1933
1934 /* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
1935 * Setting valid_mask=1 prevents it and has no other effect.
1936 */
1937 if (ctx->screen->info.family == CHIP_NAVI10 ||
1938 ctx->screen->info.family == CHIP_NAVI12 ||
1939 ctx->screen->info.family == CHIP_NAVI14)
1940 pos_args[0].valid_mask = 1;
1941
1942 pos_idx = 0;
1943 for (i = 0; i < 4; i++) {
1944 if (!pos_args[i].out[0])
1945 continue;
1946
1947 /* Specify the target we are exporting */
1948 pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
1949
1950 if (pos_idx == shader->info.nr_pos_exports)
1951 /* Specify that this is the last export */
1952 pos_args[i].done = 1;
1953
1954 ac_build_export(&ctx->ac, &pos_args[i]);
1955 }
1956
1957 /* Build parameter exports. */
1958 si_build_param_exports(ctx, outputs, noutput);
1959 }
1960
1961 /**
1962 * Forward all outputs from the vertex shader to the TES. This is only used
1963 * for the fixed function TCS.
1964 */
1965 static void si_copy_tcs_inputs(struct si_shader_context *ctx)
1966 {
1967 LLVMValueRef invocation_id, buffer, buffer_offset;
1968 LLVMValueRef lds_vertex_stride, lds_base;
1969 uint64_t inputs;
1970
1971 invocation_id = si_unpack_param(ctx, ctx->args.tcs_rel_ids, 8, 5);
1972 buffer = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
1973 buffer_offset = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
1974
1975 lds_vertex_stride = get_tcs_in_vertex_dw_stride(ctx);
1976 lds_base = get_tcs_in_current_patch_offset(ctx);
1977 lds_base = ac_build_imad(&ctx->ac, invocation_id, lds_vertex_stride,
1978 lds_base);
1979
1980 inputs = ctx->shader->key.mono.u.ff_tcs_inputs_to_copy;
1981 while (inputs) {
1982 unsigned i = u_bit_scan64(&inputs);
1983
1984 LLVMValueRef lds_ptr = LLVMBuildAdd(ctx->ac.builder, lds_base,
1985 LLVMConstInt(ctx->i32, 4 * i, 0),
1986 "");
1987
1988 LLVMValueRef buffer_addr = get_tcs_tes_buffer_address(ctx,
1989 get_rel_patch_id(ctx),
1990 invocation_id,
1991 LLVMConstInt(ctx->i32, i, 0));
1992
1993 LLVMValueRef value = lshs_lds_load(ctx, ctx->ac.i32, ~0, lds_ptr);
1994
1995 ac_build_buffer_store_dword(&ctx->ac, buffer, value, 4, buffer_addr,
1996 buffer_offset, 0, ac_glc);
1997 }
1998 }
1999
2000 static void si_write_tess_factors(struct si_shader_context *ctx,
2001 LLVMValueRef rel_patch_id,
2002 LLVMValueRef invocation_id,
2003 LLVMValueRef tcs_out_current_patch_data_offset,
2004 LLVMValueRef invoc0_tf_outer[4],
2005 LLVMValueRef invoc0_tf_inner[2])
2006 {
2007 struct si_shader *shader = ctx->shader;
2008 unsigned tess_inner_index, tess_outer_index;
2009 LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
2010 LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
2011 unsigned stride, outer_comps, inner_comps, i, offset;
2012
2013 /* Add a barrier before loading tess factors from LDS. */
2014 if (!shader->key.part.tcs.epilog.invoc0_tess_factors_are_def)
2015 si_llvm_emit_barrier(ctx);
2016
2017 /* Do this only for invocation 0, because the tess levels are per-patch,
2018 * not per-vertex.
2019 *
2020 * The branch can't be skipped entirely, because invocation 0 always
2021 * executes it; it only masks out the loads and stores for other invocations.
2022 */
2023 ac_build_ifcc(&ctx->ac,
2024 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2025 invocation_id, ctx->i32_0, ""), 6503);
2026
2027 /* Determine the layout of one tess factor element in the buffer. */
2028 switch (shader->key.part.tcs.epilog.prim_mode) {
2029 case PIPE_PRIM_LINES:
2030 stride = 2; /* 2 dwords, 1 vec2 store */
2031 outer_comps = 2;
2032 inner_comps = 0;
2033 break;
2034 case PIPE_PRIM_TRIANGLES:
2035 stride = 4; /* 4 dwords, 1 vec4 store */
2036 outer_comps = 3;
2037 inner_comps = 1;
2038 break;
2039 case PIPE_PRIM_QUADS:
2040 stride = 6; /* 6 dwords, 2 stores (vec4 + vec2) */
2041 outer_comps = 4;
2042 inner_comps = 2;
2043 break;
2044 default:
2045 assert(0);
2046 return;
2047 }
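/* Resulting out[] layout before the isoline reorder below:
 *   lines:     [outer0, outer1]               -> one vec2 store
 *   triangles: [outer0..2, inner0]            -> one vec4 store
 *   quads:     [outer0..3, inner0, inner1]    -> vec4 + vec2 stores
 */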
2048
2049 for (i = 0; i < 4; i++) {
2050 inner[i] = LLVMGetUndef(ctx->i32);
2051 outer[i] = LLVMGetUndef(ctx->i32);
2052 }
2053
2054 if (shader->key.part.tcs.epilog.invoc0_tess_factors_are_def) {
2055 /* Tess factors are in VGPRs. */
2056 for (i = 0; i < outer_comps; i++)
2057 outer[i] = out[i] = invoc0_tf_outer[i];
2058 for (i = 0; i < inner_comps; i++)
2059 inner[i] = out[outer_comps+i] = invoc0_tf_inner[i];
2060 } else {
2061 /* Load tess_inner and tess_outer from LDS.
2062 * Any invocation can write them, so we can't get them from a temporary.
2063 */
2064 tess_inner_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0);
2065 tess_outer_index = si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0);
2066
2067 lds_base = tcs_out_current_patch_data_offset;
2068 lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
2069 LLVMConstInt(ctx->i32,
2070 tess_inner_index * 4, 0), "");
2071 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
2072 LLVMConstInt(ctx->i32,
2073 tess_outer_index * 4, 0), "");
2074
2075 for (i = 0; i < outer_comps; i++) {
2076 outer[i] = out[i] =
2077 lshs_lds_load(ctx, ctx->ac.i32, i, lds_outer);
2078 }
2079 for (i = 0; i < inner_comps; i++) {
2080 inner[i] = out[outer_comps+i] =
2081 lshs_lds_load(ctx, ctx->ac.i32, i, lds_inner);
2082 }
2083 }
2084
2085 if (shader->key.part.tcs.epilog.prim_mode == PIPE_PRIM_LINES) {
2086 /* For isolines, the hardware expects tess factors in the
2087 * reverse order from what NIR specifies.
2088 */
2089 LLVMValueRef tmp = out[0];
2090 out[0] = out[1];
2091 out[1] = tmp;
2092 }
2093
2094 /* Convert the outputs to vectors for stores. */
2095 vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
2096 vec1 = NULL;
2097
2098 if (stride > 4)
2099 vec1 = ac_build_gather_values(&ctx->ac, out+4, stride - 4);
2100
2101 /* Get the buffer. */
2102 buffer = get_tess_ring_descriptor(ctx, TCS_FACTOR_RING);
2103
2104 /* Get the offset. */
2105 tf_base = ac_get_arg(&ctx->ac,
2106 ctx->tcs_factor_offset);
2107 byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
2108 LLVMConstInt(ctx->i32, 4 * stride, 0), "");
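/* Illustration: for quads (stride = 6 dwords), patch 2 starts at
 * byteoffset = 2 * 4 * 6 = 48 bytes into the TF ring, plus the 4-byte
 * control word offset on <= GFX8 (added via 'offset' below). */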
2109
2110 ac_build_ifcc(&ctx->ac,
2111 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2112 rel_patch_id, ctx->i32_0, ""), 6504);
2113
2114 /* Store the dynamic HS control word. */
2115 offset = 0;
2116 if (ctx->screen->info.chip_class <= GFX8) {
2117 ac_build_buffer_store_dword(&ctx->ac, buffer,
2118 LLVMConstInt(ctx->i32, 0x80000000, 0),
2119 1, ctx->i32_0, tf_base,
2120 offset, ac_glc);
2121 offset += 4;
2122 }
2123
2124 ac_build_endif(&ctx->ac, 6504);
2125
2126 /* Store the tessellation factors. */
2127 ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
2128 MIN2(stride, 4), byteoffset, tf_base,
2129 offset, ac_glc);
2130 offset += 16;
2131 if (vec1)
2132 ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
2133 stride - 4, byteoffset, tf_base,
2134 offset, ac_glc);
2135
2136 /* Store the tess factors into the offchip buffer if TES reads them. */
2137 if (shader->key.part.tcs.epilog.tes_reads_tess_factors) {
2138 LLVMValueRef buf, base, inner_vec, outer_vec, tf_outer_offset;
2139 LLVMValueRef tf_inner_offset;
2140 unsigned param_outer, param_inner;
2141
2142 buf = get_tess_ring_descriptor(ctx, TESS_OFFCHIP_RING_TCS);
2143 base = ac_get_arg(&ctx->ac, ctx->tcs_offchip_offset);
2144
2145 param_outer = si_shader_io_get_unique_index_patch(
2146 TGSI_SEMANTIC_TESSOUTER, 0);
2147 tf_outer_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
2148 LLVMConstInt(ctx->i32, param_outer, 0));
2149
2150 unsigned outer_vec_size =
2151 ac_has_vec3_support(ctx->screen->info.chip_class, false) ?
2152 outer_comps : util_next_power_of_two(outer_comps);
2153 outer_vec = ac_build_gather_values(&ctx->ac, outer, outer_vec_size);
2154
2155 ac_build_buffer_store_dword(&ctx->ac, buf, outer_vec,
2156 outer_comps, tf_outer_offset,
2157 base, 0, ac_glc);
2158 if (inner_comps) {
2159 param_inner = si_shader_io_get_unique_index_patch(
2160 TGSI_SEMANTIC_TESSINNER, 0);
2161 tf_inner_offset = get_tcs_tes_buffer_address(ctx, rel_patch_id, NULL,
2162 LLVMConstInt(ctx->i32, param_inner, 0));
2163
2164 inner_vec = inner_comps == 1 ? inner[0] :
2165 ac_build_gather_values(&ctx->ac, inner, inner_comps);
2166 ac_build_buffer_store_dword(&ctx->ac, buf, inner_vec,
2167 inner_comps, tf_inner_offset,
2168 base, 0, ac_glc);
2169 }
2170 }
2171
2172 ac_build_endif(&ctx->ac, 6503);
2173 }
2174
2175 LLVMValueRef si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
2176 struct ac_arg param, unsigned return_index)
2177 {
2178 return LLVMBuildInsertValue(ctx->ac.builder, ret,
2179 ac_get_arg(&ctx->ac, param),
2180 return_index, "");
2181 }
2182
2183 LLVMValueRef si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
2184 struct ac_arg param, unsigned return_index)
2185 {
2186 LLVMBuilderRef builder = ctx->ac.builder;
2187 LLVMValueRef p = ac_get_arg(&ctx->ac, param);
2188
2189 return LLVMBuildInsertValue(builder, ret,
2190 ac_to_float(&ctx->ac, p),
2191 return_index, "");
2192 }
2193
2194 LLVMValueRef si_insert_input_ptr(struct si_shader_context *ctx, LLVMValueRef ret,
2195 struct ac_arg param, unsigned return_index)
2196 {
2197 LLVMBuilderRef builder = ctx->ac.builder;
2198 LLVMValueRef ptr = ac_get_arg(&ctx->ac, param);
2199 ptr = LLVMBuildPtrToInt(builder, ptr, ctx->i32, "");
2200 return LLVMBuildInsertValue(builder, ret, ptr, return_index, "");
2201 }
2202
2203 /* This only writes the tessellation factor levels. */
2204 static void si_llvm_emit_tcs_epilogue(struct ac_shader_abi *abi,
2205 unsigned max_outputs,
2206 LLVMValueRef *addrs)
2207 {
2208 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2209 LLVMBuilderRef builder = ctx->ac.builder;
2210 LLVMValueRef rel_patch_id, invocation_id, tf_lds_offset;
2211
2212 si_copy_tcs_inputs(ctx);
2213
2214 rel_patch_id = get_rel_patch_id(ctx);
2215 invocation_id = si_unpack_param(ctx, ctx->args.tcs_rel_ids, 8, 5);
2216 tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);
2217
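/* On GFX9 this epilogue runs after closing the wrap-if that guards the
 * merged HS part, so phis carry rel_patch_id, tf_lds_offset and
 * invocation_id out of it; threads that skipped the TCS part get
 * invocation_id = 1, which makes the epilog mask them out. */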
2218 if (ctx->screen->info.chip_class >= GFX9) {
2219 LLVMBasicBlockRef blocks[2] = {
2220 LLVMGetInsertBlock(builder),
2221 ctx->merged_wrap_if_entry_block
2222 };
2223 LLVMValueRef values[2];
2224
2225 ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
2226
2227 values[0] = rel_patch_id;
2228 values[1] = LLVMGetUndef(ctx->i32);
2229 rel_patch_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
2230
2231 values[0] = tf_lds_offset;
2232 values[1] = LLVMGetUndef(ctx->i32);
2233 tf_lds_offset = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
2234
2235 values[0] = invocation_id;
2236 values[1] = ctx->i32_1; /* cause the epilog to skip threads */
2237 invocation_id = ac_build_phi(&ctx->ac, ctx->i32, 2, values, blocks);
2238 }
2239
2240 /* Return epilog parameters from this function. */
2241 LLVMValueRef ret = ctx->return_value;
2242 unsigned vgpr;
2243
2244 if (ctx->screen->info.chip_class >= GFX9) {
2245 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout,
2246 8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
2247 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout,
2248 8 + GFX9_SGPR_TCS_OUT_LAYOUT);
2249 /* Tess offchip and tess factor offsets are at the beginning. */
2250 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 2);
2251 ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset, 4);
2252 vgpr = 8 + GFX9_SGPR_TCS_OUT_LAYOUT + 1;
2253 } else {
2254 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout,
2255 GFX6_SGPR_TCS_OFFCHIP_LAYOUT);
2256 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout,
2257 GFX6_SGPR_TCS_OUT_LAYOUT);
2258 /* Tess offchip and tess factor offsets are after user SGPRs. */
2259 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset,
2260 GFX6_TCS_NUM_USER_SGPR);
2261 ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset,
2262 GFX6_TCS_NUM_USER_SGPR + 1);
2263 vgpr = GFX6_TCS_NUM_USER_SGPR + 2;
2264 }
2265
2266 /* VGPRs */
2267 rel_patch_id = ac_to_float(&ctx->ac, rel_patch_id);
2268 invocation_id = ac_to_float(&ctx->ac, invocation_id);
2269 tf_lds_offset = ac_to_float(&ctx->ac, tf_lds_offset);
2270
2271 /* Leave a hole corresponding to the two input VGPRs. This ensures that
2272 * the invocation_id output does not alias the tcs_rel_ids input,
2273 * which saves a V_MOV on gfx9.
2274 */
2275 vgpr += 2;
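/* The two skipped slots are where the tcs_patch_id and tcs_rel_ids input
 * VGPRs live (compare si_set_ls_return_value_for_tcs below), so the
 * outputs written next land in fresh VGPRs. */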
2276
2277 ret = LLVMBuildInsertValue(builder, ret, rel_patch_id, vgpr++, "");
2278 ret = LLVMBuildInsertValue(builder, ret, invocation_id, vgpr++, "");
2279
2280 if (ctx->shader->selector->info.tessfactors_are_def_in_all_invocs) {
2281 vgpr++; /* skip the tess factor LDS offset */
2282 for (unsigned i = 0; i < 6; i++) {
2283 LLVMValueRef value =
2284 LLVMBuildLoad(builder, ctx->invoc0_tess_factors[i], "");
2285 value = ac_to_float(&ctx->ac, value);
2286 ret = LLVMBuildInsertValue(builder, ret, value, vgpr++, "");
2287 }
2288 } else {
2289 ret = LLVMBuildInsertValue(builder, ret, tf_lds_offset, vgpr++, "");
2290 }
2291 ctx->return_value = ret;
2292 }
2293
2294 /* Pass TCS inputs from LS to TCS on GFX9. */
2295 static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
2296 {
2297 LLVMValueRef ret = ctx->return_value;
2298
2299 ret = si_insert_input_ptr(ctx, ret, ctx->other_const_and_shader_buffers, 0);
2300 ret = si_insert_input_ptr(ctx, ret, ctx->other_samplers_and_images, 1);
2301 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 2);
2302 ret = si_insert_input_ret(ctx, ret, ctx->merged_wave_info, 3);
2303 ret = si_insert_input_ret(ctx, ret, ctx->tcs_factor_offset, 4);
2304 ret = si_insert_input_ret(ctx, ret, ctx->merged_scratch_offset, 5);
2305
2306 ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers,
2307 8 + SI_SGPR_RW_BUFFERS);
2308 ret = si_insert_input_ptr(ctx, ret,
2309 ctx->bindless_samplers_and_images,
2310 8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
2311
2312 ret = si_insert_input_ret(ctx, ret, ctx->vs_state_bits,
2313 8 + SI_SGPR_VS_STATE_BITS);
2314
2315 ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout,
2316 8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
2317 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_offsets,
2318 8 + GFX9_SGPR_TCS_OUT_OFFSETS);
2319 ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout,
2320 8 + GFX9_SGPR_TCS_OUT_LAYOUT);
2321
2322 unsigned vgpr = 8 + GFX9_TCS_NUM_USER_SGPR;
2323 ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
2324 ac_to_float(&ctx->ac,
2325 ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id)),
2326 vgpr++, "");
2327 ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
2328 ac_to_float(&ctx->ac,
2329 ac_get_arg(&ctx->ac, ctx->args.tcs_rel_ids)),
2330 vgpr++, "");
2331 ctx->return_value = ret;
2332 }
2333
2334 /* Pass GS inputs from ES to GS on GFX9. */
2335 static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
2336 {
2337 LLVMValueRef ret = ctx->return_value;
2338
2339 ret = si_insert_input_ptr(ctx, ret, ctx->other_const_and_shader_buffers, 0);
2340 ret = si_insert_input_ptr(ctx, ret, ctx->other_samplers_and_images, 1);
2341 if (ctx->shader->key.as_ngg)
2342 ret = si_insert_input_ptr(ctx, ret, ctx->gs_tg_info, 2);
2343 else
2344 ret = si_insert_input_ret(ctx, ret, ctx->gs2vs_offset, 2);
2345 ret = si_insert_input_ret(ctx, ret, ctx->merged_wave_info, 3);
2346 ret = si_insert_input_ret(ctx, ret, ctx->merged_scratch_offset, 5);
2347
2348 ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers,
2349 8 + SI_SGPR_RW_BUFFERS);
2350 ret = si_insert_input_ptr(ctx, ret,
2351 ctx->bindless_samplers_and_images,
2352 8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
2353 if (ctx->screen->use_ngg) {
2354 ret = si_insert_input_ptr(ctx, ret, ctx->vs_state_bits,
2355 8 + SI_SGPR_VS_STATE_BITS);
2356 }
2357
2358 unsigned vgpr;
2359 if (ctx->type == PIPE_SHADER_VERTEX)
2360 vgpr = 8 + GFX9_VSGS_NUM_USER_SGPR;
2361 else
2362 vgpr = 8 + GFX9_TESGS_NUM_USER_SGPR;
2363
2364 ret = si_insert_input_ret_float(ctx, ret, ctx->gs_vtx01_offset, vgpr++);
2365 ret = si_insert_input_ret_float(ctx, ret, ctx->gs_vtx23_offset, vgpr++);
2366 ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_prim_id, vgpr++);
2367 ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_invocation_id, vgpr++);
2368 ret = si_insert_input_ret_float(ctx, ret, ctx->gs_vtx45_offset, vgpr++);
2369 ctx->return_value = ret;
2370 }
2371
2372 static void si_llvm_emit_ls_epilogue(struct ac_shader_abi *abi,
2373 unsigned max_outputs,
2374 LLVMValueRef *addrs)
2375 {
2376 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2377 struct si_shader *shader = ctx->shader;
2378 struct si_shader_info *info = &shader->selector->info;
2379 unsigned i, chan;
2380 LLVMValueRef vertex_id = ac_get_arg(&ctx->ac, ctx->rel_auto_id);
2381 LLVMValueRef vertex_dw_stride = get_tcs_in_vertex_dw_stride(ctx);
2382 LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
2383 vertex_dw_stride, "");
2384
2385 /* Write outputs to LDS. The next shader (TCS aka HS) will read
2386 * its inputs from it. */
2387 for (i = 0; i < info->num_outputs; i++) {
2388 unsigned name = info->output_semantic_name[i];
2389 unsigned index = info->output_semantic_index[i];
2390
2391 /* The ARB_shader_viewport_layer_array spec contains the
2392 * following issue:
2393 *
2394 * 2) What happens if gl_ViewportIndex or gl_Layer is
2395 * written in the vertex shader and a geometry shader is
2396 * present?
2397 *
2398 * RESOLVED: The value written by the last vertex processing
2399 * stage is used. If the last vertex processing stage
2400 * (vertex, tessellation evaluation or geometry) does not
2401 * statically assign to gl_ViewportIndex or gl_Layer, index
2402 * or layer zero is assumed.
2403 *
2404 * So writes to those outputs in VS-as-LS are simply ignored.
2405 */
2406 if (name == TGSI_SEMANTIC_LAYER ||
2407 name == TGSI_SEMANTIC_VIEWPORT_INDEX)
2408 continue;
2409
2410 int param = si_shader_io_get_unique_index(name, index, false);
2411 LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
2412 LLVMConstInt(ctx->i32, param * 4, 0), "");
2413
2414 for (chan = 0; chan < 4; chan++) {
2415 if (!(info->output_usagemask[i] & (1 << chan)))
2416 continue;
2417
2418 lshs_lds_store(ctx, chan, dw_addr,
2419 LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], ""));
2420 }
2421 }
2422
2423 if (ctx->screen->info.chip_class >= GFX9)
2424 si_set_ls_return_value_for_tcs(ctx);
2425 }
2426
2427 static void si_llvm_emit_es_epilogue(struct ac_shader_abi *abi,
2428 unsigned max_outputs,
2429 LLVMValueRef *addrs)
2430 {
2431 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2432 struct si_shader *es = ctx->shader;
2433 struct si_shader_info *info = &es->selector->info;
2434 LLVMValueRef lds_base = NULL;
2435 unsigned chan;
2436 int i;
2437
2438 if (ctx->screen->info.chip_class >= GFX9 && info->num_outputs) {
2439 unsigned itemsize_dw = es->selector->esgs_itemsize / 4;
2440 LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
2441 LLVMValueRef wave_idx = si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
2442 vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
2443 LLVMBuildMul(ctx->ac.builder, wave_idx,
2444 LLVMConstInt(ctx->i32, ctx->ac.wave_size, false), ""), "");
2445 lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
2446 LLVMConstInt(ctx->i32, itemsize_dw, 0), "");
2447 }
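/* Example (illustrative numbers): wave_idx = 2, wave_size = 64, thread 5
 * -> vertex_idx = (2 * 64) | 5 = 133; with esgs_itemsize = 16 bytes
 * (itemsize_dw = 4), lds_base = 133 * 4 = 532 dwords. */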
2448
2449 for (i = 0; i < info->num_outputs; i++) {
2450 int param;
2451
2452 if (info->output_semantic_name[i] == TGSI_SEMANTIC_VIEWPORT_INDEX ||
2453 info->output_semantic_name[i] == TGSI_SEMANTIC_LAYER)
2454 continue;
2455
2456 param = si_shader_io_get_unique_index(info->output_semantic_name[i],
2457 info->output_semantic_index[i], false);
2458
2459 for (chan = 0; chan < 4; chan++) {
2460 if (!(info->output_usagemask[i] & (1 << chan)))
2461 continue;
2462
2463 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
2464 out_val = ac_to_integer(&ctx->ac, out_val);
2465
2466 /* GFX9 has the ESGS ring in LDS. */
2467 if (ctx->screen->info.chip_class >= GFX9) {
2468 LLVMValueRef idx = LLVMConstInt(ctx->i32, param * 4 + chan, false);
2469 idx = LLVMBuildAdd(ctx->ac.builder, lds_base, idx, "");
2470 ac_build_indexed_store(&ctx->ac, ctx->esgs_ring, idx, out_val);
2471 continue;
2472 }
2473
2474 ac_build_buffer_store_dword(&ctx->ac,
2475 ctx->esgs_ring,
2476 out_val, 1, NULL,
2477 ac_get_arg(&ctx->ac, ctx->es2gs_offset),
2478 (4 * param + chan) * 4,
2479 ac_glc | ac_slc | ac_swizzled);
2480 }
2481 }
2482
2483 if (ctx->screen->info.chip_class >= GFX9)
2484 si_set_es_return_value_for_gs(ctx);
2485 }
2486
2487 static LLVMValueRef si_get_gs_wave_id(struct si_shader_context *ctx)
2488 {
2489 if (ctx->screen->info.chip_class >= GFX9)
2490 return si_unpack_param(ctx, ctx->merged_wave_info, 16, 8);
2491 else
2492 return ac_get_arg(&ctx->ac, ctx->gs_wave_id);
2493 }
2494
2495 static void emit_gs_epilogue(struct si_shader_context *ctx)
2496 {
2497 if (ctx->shader->key.as_ngg) {
2498 gfx10_ngg_gs_emit_epilogue(ctx);
2499 return;
2500 }
2501
2502 if (ctx->screen->info.chip_class >= GFX10)
2503 LLVMBuildFence(ctx->ac.builder, LLVMAtomicOrderingRelease, false, "");
2504
2505 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE,
2506 si_get_gs_wave_id(ctx));
2507
2508 if (ctx->screen->info.chip_class >= GFX9)
2509 ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
2510 }
2511
2512 static void si_llvm_emit_gs_epilogue(struct ac_shader_abi *abi,
2513 unsigned max_outputs,
2514 LLVMValueRef *addrs)
2515 {
2516 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2517 struct si_shader_info UNUSED *info = &ctx->shader->selector->info;
2518
2519 assert(info->num_outputs <= max_outputs);
2520
2521 emit_gs_epilogue(ctx);
2522 }
2523
2524 static void si_llvm_emit_vs_epilogue(struct ac_shader_abi *abi,
2525 unsigned max_outputs,
2526 LLVMValueRef *addrs)
2527 {
2528 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2529 struct si_shader_info *info = &ctx->shader->selector->info;
2530 struct si_shader_output_values *outputs = NULL;
2531 int i, j;
2532
2533 assert(!ctx->shader->is_gs_copy_shader);
2534 assert(info->num_outputs <= max_outputs);
2535
2536 outputs = MALLOC((info->num_outputs + 1) * sizeof(outputs[0]));
2537
2538 for (i = 0; i < info->num_outputs; i++) {
2539 outputs[i].semantic_name = info->output_semantic_name[i];
2540 outputs[i].semantic_index = info->output_semantic_index[i];
2541
2542 for (j = 0; j < 4; j++) {
2543 outputs[i].values[j] =
2544 LLVMBuildLoad(ctx->ac.builder,
2545 addrs[4 * i + j],
2546 "");
2547 outputs[i].vertex_stream[j] =
2548 (info->output_streams[i] >> (2 * j)) & 3;
2549 }
2550 }
2551
2552 if (!ctx->screen->use_ngg_streamout &&
2553 ctx->shader->selector->so.num_outputs)
2554 si_llvm_emit_streamout(ctx, outputs, i, 0);
2555
2556 /* Export PrimitiveID. */
2557 if (ctx->shader->key.mono.u.vs_export_prim_id) {
2558 outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
2559 outputs[i].semantic_index = 0;
2560 outputs[i].values[0] = ac_to_float(&ctx->ac, si_get_primitive_id(ctx, 0));
2561 for (j = 1; j < 4; j++)
2562 outputs[i].values[j] = LLVMConstReal(ctx->f32, 0);
2563
2564 memset(outputs[i].vertex_stream, 0,
2565 sizeof(outputs[i].vertex_stream));
2566 i++;
2567 }
2568
2569 si_llvm_export_vs(ctx, outputs, i);
2570 FREE(outputs);
2571 }
2572
2573 static void si_llvm_emit_prim_discard_cs_epilogue(struct ac_shader_abi *abi,
2574 unsigned max_outputs,
2575 LLVMValueRef *addrs)
2576 {
2577 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2578 struct si_shader_info *info = &ctx->shader->selector->info;
2579 LLVMValueRef pos[4] = {};
2580
2581 assert(info->num_outputs <= max_outputs);
2582
2583 for (unsigned i = 0; i < info->num_outputs; i++) {
2584 if (info->output_semantic_name[i] != TGSI_SEMANTIC_POSITION)
2585 continue;
2586
2587 for (unsigned chan = 0; chan < 4; chan++)
2588 pos[chan] = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
2589 break;
2590 }
2591 assert(pos[0] != NULL);
2592
2593 /* Return the position output. */
2594 LLVMValueRef ret = ctx->return_value;
2595 for (unsigned chan = 0; chan < 4; chan++)
2596 ret = LLVMBuildInsertValue(ctx->ac.builder, ret, pos[chan], chan, "");
2597 ctx->return_value = ret;
2598 }
2599
2600 /* Emit one vertex from the geometry shader */
2601 static void si_llvm_emit_vertex(struct ac_shader_abi *abi,
2602 unsigned stream,
2603 LLVMValueRef *addrs)
2604 {
2605 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2606
2607 if (ctx->shader->key.as_ngg) {
2608 gfx10_ngg_gs_emit_vertex(ctx, stream, addrs);
2609 return;
2610 }
2611
2612 struct si_shader_info *info = &ctx->shader->selector->info;
2613 struct si_shader *shader = ctx->shader;
2614 LLVMValueRef soffset = ac_get_arg(&ctx->ac, ctx->gs2vs_offset);
2615 LLVMValueRef gs_next_vertex;
2616 LLVMValueRef can_emit;
2617 unsigned chan, offset;
2618 int i;
2619
2620 /* Write vertex attribute values to GSVS ring */
2621 gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
2622 ctx->gs_next_vertex[stream],
2623 "");
2624
2625 /* If this thread has already emitted the declared maximum number of
2626 * vertices, skip the write: excessive vertex emissions are not
2627 * supposed to have any effect.
2628 *
2629 * If the shader has no writes to memory, kill the thread instead. This
2630 * skips further memory loads and may allow LLVM to skip to the end
2631 * altogether.
2632 */
2633 can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
2634 LLVMConstInt(ctx->i32,
2635 shader->selector->gs_max_out_vertices, 0), "");
2636
2637 bool use_kill = !info->writes_memory;
2638 if (use_kill) {
2639 ac_build_kill_if_false(&ctx->ac, can_emit);
2640 } else {
2641 ac_build_ifcc(&ctx->ac, can_emit, 6505);
2642 }
2643
2644 offset = 0;
2645 for (i = 0; i < info->num_outputs; i++) {
2646 for (chan = 0; chan < 4; chan++) {
2647 if (!(info->output_usagemask[i] & (1 << chan)) ||
2648 ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
2649 continue;
2650
2651 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
2652 LLVMValueRef voffset =
2653 LLVMConstInt(ctx->i32, offset *
2654 shader->selector->gs_max_out_vertices, 0);
2655 offset++;
2656
2657 voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
2658 voffset = LLVMBuildMul(ctx->ac.builder, voffset,
2659 LLVMConstInt(ctx->i32, 4, 0), "");
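/* i.e. the GSVS ring stores all emitted vertices of one output component
 * contiguously. Illustrative numbers: with gs_max_out_vertices = 4,
 * component slot 2 of vertex 1 lands at dword 2*4 + 1 = 9, byte 36. */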
2660
2661 out_val = ac_to_integer(&ctx->ac, out_val);
2662
2663 ac_build_buffer_store_dword(&ctx->ac,
2664 ctx->gsvs_ring[stream],
2665 out_val, 1,
2666 voffset, soffset, 0,
2667 ac_glc | ac_slc | ac_swizzled);
2668 }
2669 }
2670
2671 gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex, ctx->i32_1, "");
2672 LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);
2673
2674 /* Signal vertex emission if vertex data was written. */
2675 if (offset) {
2676 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
2677 si_get_gs_wave_id(ctx));
2678 }
2679
2680 if (!use_kill)
2681 ac_build_endif(&ctx->ac, 6505);
2682 }
2683
2684 /* Cut one primitive from the geometry shader */
2685 static void si_llvm_emit_primitive(struct ac_shader_abi *abi,
2686 unsigned stream)
2687 {
2688 struct si_shader_context *ctx = si_shader_context_from_abi(abi);
2689
2690 if (ctx->shader->key.as_ngg) {
2691 LLVMBuildStore(ctx->ac.builder, ctx->ac.i32_0, ctx->gs_curprim_verts[stream]);
2692 return;
2693 }
2694
2695 /* Signal primitive cut */
2696 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8),
2697 si_get_gs_wave_id(ctx));
2698 }
2699
2700 static void si_llvm_emit_barrier(struct si_shader_context *ctx)
2701 {
2702 /* GFX6 only (thanks to a hw bug workaround):
2703 * The real barrier instruction isn't needed, because an entire patch
2704 * always fits into a single wave.
2705 */
2706 if (ctx->screen->info.chip_class == GFX6 &&
2707 ctx->type == PIPE_SHADER_TESS_CTRL) {
2708 ac_build_waitcnt(&ctx->ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
2709 return;
2710 }
2711
2712 ac_build_s_barrier(&ctx->ac);
2713 }
2714
2715 static void declare_streamout_params(struct si_shader_context *ctx,
2716 struct pipe_stream_output_info *so)
2717 {
2718 if (ctx->screen->use_ngg_streamout) {
2719 if (ctx->type == PIPE_SHADER_TESS_EVAL)
2720 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
2721 return;
2722 }
2723
2724 /* Streamout SGPRs. */
2725 if (so->num_outputs) {
2726 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_config);
2727 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_write_index);
2728 } else if (ctx->type == PIPE_SHADER_TESS_EVAL) {
2729 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
2730 }
2731
2732 /* A streamout buffer offset is loaded if the stride is non-zero. */
2733 for (int i = 0; i < 4; i++) {
2734 if (!so->stride[i])
2735 continue;
2736
2737 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->streamout_offset[i]);
2738 }
2739 }
2740
2741 static unsigned si_get_max_workgroup_size(const struct si_shader *shader)
2742 {
2743 switch (shader->selector->type) {
2744 case PIPE_SHADER_VERTEX:
2745 case PIPE_SHADER_TESS_EVAL:
2746 return shader->key.as_ngg ? 128 : 0;
2747
2748 case PIPE_SHADER_TESS_CTRL:
2749 /* Return this so that LLVM doesn't remove s_barrier
2750 * instructions on chips where we use s_barrier. */
2751 return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 0;
2752
2753 case PIPE_SHADER_GEOMETRY:
2754 return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 0;
2755
2756 case PIPE_SHADER_COMPUTE:
2757 break; /* see below */
2758
2759 default:
2760 return 0;
2761 }
2762
2763 const unsigned *properties = shader->selector->info.properties;
2764 unsigned max_work_group_size =
2765 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
2766 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
2767 properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
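/* e.g. a compute shader with a fixed 8x8x1 block size yields
 * 8 * 8 * 1 = 64 threads per workgroup. */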
2768
2769 if (!max_work_group_size) {
2770 /* This is a variable group size compute shader,
2771 * compile it for the maximum possible group size.
2772 */
2773 max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
2774 }
2775 return max_work_group_size;
2776 }
2777
2778 static void declare_const_and_shader_buffers(struct si_shader_context *ctx,
2779 bool assign_params)
2780 {
2781 enum ac_arg_type const_shader_buf_type;
2782
2783 if (ctx->shader->selector->info.const_buffers_declared == 1 &&
2784 ctx->shader->selector->info.shader_buffers_declared == 0)
2785 const_shader_buf_type = AC_ARG_CONST_FLOAT_PTR;
2786 else
2787 const_shader_buf_type = AC_ARG_CONST_DESC_PTR;
2788
2789 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, const_shader_buf_type,
2790 assign_params ? &ctx->const_and_shader_buffers :
2791 &ctx->other_const_and_shader_buffers);
2792 }
2793
2794 static void declare_samplers_and_images(struct si_shader_context *ctx,
2795 bool assign_params)
2796 {
2797 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
2798 assign_params ? &ctx->samplers_and_images :
2799 &ctx->other_samplers_and_images);
2800 }
2801
2802 static void declare_per_stage_desc_pointers(struct si_shader_context *ctx,
2803 bool assign_params)
2804 {
2805 declare_const_and_shader_buffers(ctx, assign_params);
2806 declare_samplers_and_images(ctx, assign_params);
2807 }
2808
2809 static void declare_global_desc_pointers(struct si_shader_context *ctx)
2810 {
2811 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR,
2812 &ctx->rw_buffers);
2813 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_IMAGE_PTR,
2814 &ctx->bindless_samplers_and_images);
2815 }
2816
2817 static void declare_vs_specific_input_sgprs(struct si_shader_context *ctx)
2818 {
2819 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->vs_state_bits);
2820 if (!ctx->shader->is_gs_copy_shader) {
2821 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.base_vertex);
2822 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.start_instance);
2823 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.draw_id);
2824 }
2825 }
2826
2827 static void declare_vb_descriptor_input_sgprs(struct si_shader_context *ctx)
2828 {
2829 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->vertex_buffers);
2830
2831 unsigned num_vbos_in_user_sgprs = ctx->shader->selector->num_vbos_in_user_sgprs;
2832 if (num_vbos_in_user_sgprs) {
2833 unsigned user_sgprs = ctx->args.num_sgprs_used;
2834
2835 if (si_is_merged_shader(ctx))
2836 user_sgprs -= 8;
2837 assert(user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
2838
2839 /* Declare unused SGPRs to align VB descriptors to 4 SGPRs (hw requirement). */
2840 for (unsigned i = user_sgprs; i < SI_SGPR_VS_VB_DESCRIPTOR_FIRST; i++)
2841 ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
2842
2843 assert(num_vbos_in_user_sgprs <= ARRAY_SIZE(ctx->vb_descriptors));
2844 for (unsigned i = 0; i < num_vbos_in_user_sgprs; i++)
2845 ac_add_arg(&ctx->args, AC_ARG_SGPR, 4, AC_ARG_INT, &ctx->vb_descriptors[i]);
2846 }
2847 }
2848
2849 static void declare_vs_input_vgprs(struct si_shader_context *ctx,
2850 unsigned *num_prolog_vgprs)
2851 {
2852 struct si_shader *shader = ctx->shader;
2853
2854 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.vertex_id);
2855 if (shader->key.as_ls) {
2856 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->rel_auto_id);
2857 if (ctx->screen->info.chip_class >= GFX10) {
2858 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
2859 ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);