radv: allow to emit a vertex to a specified stream
[mesa.git] / src / amd / vulkan / radv_nir_to_llvm.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include "radv_private.h"
29 #include "radv_shader.h"
30 #include "radv_shader_helper.h"
31 #include "nir/nir.h"
32
33 #include <llvm-c/Core.h>
34 #include <llvm-c/TargetMachine.h>
35 #include <llvm-c/Transforms/Scalar.h>
36 #if HAVE_LLVM >= 0x0700
37 #include <llvm-c/Transforms/Utils.h>
38 #endif
39
40 #include "sid.h"
41 #include "gfx9d.h"
42 #include "ac_binary.h"
43 #include "ac_llvm_util.h"
44 #include "ac_llvm_build.h"
45 #include "ac_shader_abi.h"
46 #include "ac_shader_util.h"
47 #include "ac_exp_param.h"
48
49 #define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)
50
51 struct radv_shader_context {
52 struct ac_llvm_context ac;
53 const struct radv_nir_compiler_options *options;
54 struct radv_shader_variant_info *shader_info;
55 struct ac_shader_abi abi;
56
57 unsigned max_workgroup_size;
58 LLVMContextRef context;
59 LLVMValueRef main_function;
60
61 LLVMValueRef descriptor_sets[RADV_UD_MAX_SETS];
62 LLVMValueRef ring_offsets;
63
64 LLVMValueRef vertex_buffers;
65 LLVMValueRef rel_auto_id;
66 LLVMValueRef vs_prim_id;
67 LLVMValueRef es2gs_offset;
68
69 LLVMValueRef oc_lds;
70 LLVMValueRef merged_wave_info;
71 LLVMValueRef tess_factor_offset;
72 LLVMValueRef tes_rel_patch_id;
73 LLVMValueRef tes_u;
74 LLVMValueRef tes_v;
75
76 LLVMValueRef gs2vs_offset;
77 LLVMValueRef gs_wave_id;
78 LLVMValueRef gs_vtx_offset[6];
79
80 LLVMValueRef esgs_ring;
81 LLVMValueRef gsvs_ring[4];
82 LLVMValueRef hs_ring_tess_offchip;
83 LLVMValueRef hs_ring_tess_factor;
84
85 LLVMValueRef persp_sample, persp_center, persp_centroid;
86 LLVMValueRef linear_sample, linear_center, linear_centroid;
87
88 gl_shader_stage stage;
89
90 LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];
91
92 uint64_t input_mask;
93 uint64_t output_mask;
94
95 bool is_gs_copy_shader;
96 LLVMValueRef gs_next_vertex[4];
97 unsigned gs_max_out_vertices;
98
99 unsigned tes_primitive_mode;
100
101 uint32_t tcs_patch_outputs_read;
102 uint64_t tcs_outputs_read;
103 uint32_t tcs_vertices_per_patch;
104 uint32_t tcs_num_inputs;
105 uint32_t tcs_num_patches;
106 uint32_t max_gsvs_emit_size;
107 uint32_t gsvs_vertex_size;
108 };
109
110 enum radeon_llvm_calling_convention {
111 RADEON_LLVM_AMDGPU_VS = 87,
112 RADEON_LLVM_AMDGPU_GS = 88,
113 RADEON_LLVM_AMDGPU_PS = 89,
114 RADEON_LLVM_AMDGPU_CS = 90,
115 RADEON_LLVM_AMDGPU_HS = 93,
116 };
117
118 static inline struct radv_shader_context *
119 radv_shader_context_from_abi(struct ac_shader_abi *abi)
120 {
121 struct radv_shader_context *ctx = NULL;
122 return container_of(abi, ctx, abi);
123 }
124
125 struct ac_build_if_state
126 {
127 struct radv_shader_context *ctx;
128 LLVMValueRef condition;
129 LLVMBasicBlockRef entry_block;
130 LLVMBasicBlockRef true_block;
131 LLVMBasicBlockRef false_block;
132 LLVMBasicBlockRef merge_block;
133 };
134
135 static LLVMBasicBlockRef
136 ac_build_insert_new_block(struct radv_shader_context *ctx, const char *name)
137 {
138 LLVMBasicBlockRef current_block;
139 LLVMBasicBlockRef next_block;
140 LLVMBasicBlockRef new_block;
141
142 /* get current basic block */
143 current_block = LLVMGetInsertBlock(ctx->ac.builder);
144
145 /* check if there's another block after this one */
146 next_block = LLVMGetNextBasicBlock(current_block);
147 if (next_block) {
148 /* insert the new block before the next block */
149 new_block = LLVMInsertBasicBlockInContext(ctx->context, next_block, name);
150 }
151 else {
152 /* append new block after current block */
153 LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
154 new_block = LLVMAppendBasicBlockInContext(ctx->context, function, name);
155 }
156 return new_block;
157 }
158
159 static void
160 ac_nir_build_if(struct ac_build_if_state *ifthen,
161 struct radv_shader_context *ctx,
162 LLVMValueRef condition)
163 {
164 LLVMBasicBlockRef block = LLVMGetInsertBlock(ctx->ac.builder);
165
166 memset(ifthen, 0, sizeof *ifthen);
167 ifthen->ctx = ctx;
168 ifthen->condition = condition;
169 ifthen->entry_block = block;
170
171 /* create endif/merge basic block for the phi functions */
172 ifthen->merge_block = ac_build_insert_new_block(ctx, "endif-block");
173
174 /* create/insert true_block before merge_block */
175 ifthen->true_block =
176 LLVMInsertBasicBlockInContext(ctx->context,
177 ifthen->merge_block,
178 "if-true-block");
179
180 /* subsequent code goes into the true block */
181 LLVMPositionBuilderAtEnd(ctx->ac.builder, ifthen->true_block);
182 }
183
184 /**
185 * End a conditional.
186 */
187 static void
188 ac_nir_build_endif(struct ac_build_if_state *ifthen)
189 {
190 LLVMBuilderRef builder = ifthen->ctx->ac.builder;
191
192 /* Insert branch to the merge block from current block */
193 LLVMBuildBr(builder, ifthen->merge_block);
194
195 /*
196 * Now patch in the various branch instructions.
197 */
198
199 /* Insert the conditional branch instruction at the end of entry_block */
200 LLVMPositionBuilderAtEnd(builder, ifthen->entry_block);
201 if (ifthen->false_block) {
202 /* we have an else clause */
203 LLVMBuildCondBr(builder, ifthen->condition,
204 ifthen->true_block, ifthen->false_block);
205 }
206 else {
207 /* no else clause */
208 LLVMBuildCondBr(builder, ifthen->condition,
209 ifthen->true_block, ifthen->merge_block);
210 }
211
212 /* Resume building code at end of the ifthen->merge_block */
213 LLVMPositionBuilderAtEnd(builder, ifthen->merge_block);
214 }
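/* Usage sketch of the helpers above (illustrative, not part of the driver;
 * `counter` and `limit` are hypothetical values):
 *
 *	struct ac_build_if_state ifthen;
 *	LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
 *					  counter, limit, "");
 *	ac_nir_build_if(&ifthen, ctx, cond);
 *	... emit the then-side IR; the builder is positioned in
 *	    ifthen->true_block ...
 *	ac_nir_build_endif(&ifthen);
 *	... building resumes in ifthen->merge_block ...
 *
 * Note that ac_nir_build_if never sets false_block, so as written these
 * helpers only support if/endif without an else clause.
 */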
215
216
217 static LLVMValueRef get_rel_patch_id(struct radv_shader_context *ctx)
218 {
219 switch (ctx->stage) {
220 case MESA_SHADER_TESS_CTRL:
221 return ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
222 case MESA_SHADER_TESS_EVAL:
223 return ctx->tes_rel_patch_id;
225 default:
226 unreachable("Illegal stage");
227 }
228 }
229
230 static unsigned
231 get_tcs_num_patches(struct radv_shader_context *ctx)
232 {
233 unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
234 unsigned num_tcs_output_cp = ctx->tcs_vertices_per_patch;
235 uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
236 uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
237 uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
238 uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
239 uint32_t output_vertex_size = num_tcs_outputs * 16;
240 uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
241 uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
242 unsigned num_patches;
243 unsigned hardware_lds_size;
244
245 /* Ensure that we only need one wave per SIMD so we don't need to check
246 * resource usage. This also ensures that the number of TCS input and
247 * output vertices per threadgroup is at most 256.
248 */
249 num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
250 /* Make sure that the data fits in LDS. This assumes the shaders only
251 * use LDS for the inputs and outputs.
252 */
253 hardware_lds_size = ctx->options->chip_class >= CIK ? 65536 : 32768;
254 num_patches = MIN2(num_patches, hardware_lds_size / (input_patch_size + output_patch_size));
255 /* Make sure the output data fits in the offchip buffer */
256 num_patches = MIN2(num_patches, (ctx->options->tess_offchip_block_dw_size * 4) / output_patch_size);
257 /* Not necessary for correctness, but improves performance. The
258 * specific value is taken from the proprietary driver.
259 */
260 num_patches = MIN2(num_patches, 40);
261
262 /* SI bug workaround - limit LS-HS threadgroups to only one wave. */
263 if (ctx->options->chip_class == SI) {
264 unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
265 num_patches = MIN2(num_patches, one_wave);
266 }
267 return num_patches;
268 }
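/* Worked example with hypothetical values: 3 input and 4 output control
 * points, 2 TCS inputs, 2 per-vertex outputs and 1 per-patch output give
 *   input_patch_size  = 3 * (2 * 16)          =  96 bytes
 *   output_patch_size = 4 * (2 * 16) + 1 * 16 = 144 bytes
 * The wave limit yields 64 / 4 * 4 = 64 patches and the CIK+ LDS limit
 * 65536 / (96 + 144) = 273, so the final clamp to 40 (or the offchip
 * limit, whichever is smaller) decides the result.
 */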
269
270 static unsigned
271 calculate_tess_lds_size(struct radv_shader_context *ctx)
272 {
273 unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
274 unsigned num_tcs_output_cp;
275 unsigned num_tcs_outputs, num_tcs_patch_outputs;
276 unsigned input_vertex_size, output_vertex_size;
277 unsigned input_patch_size, output_patch_size;
278 unsigned pervertex_output_patch_size;
279 unsigned output_patch0_offset;
280 unsigned num_patches;
281 unsigned lds_size;
282
283 num_tcs_output_cp = ctx->tcs_vertices_per_patch;
284 num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
285 num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
286
287 input_vertex_size = ctx->tcs_num_inputs * 16;
288 output_vertex_size = num_tcs_outputs * 16;
289
290 input_patch_size = num_tcs_input_cp * input_vertex_size;
291
292 pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
293 output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
294
295 num_patches = ctx->tcs_num_patches;
296 output_patch0_offset = input_patch_size * num_patches;
297
298 lds_size = output_patch0_offset + output_patch_size * num_patches;
299 return lds_size;
300 }
301
302 /* Tessellation shaders pass outputs to the next shader using LDS.
303 *
304 * LS outputs = TCS inputs
305 * TCS outputs = TES inputs
306 *
307 * The LDS layout is:
308 * - TCS inputs for patch 0
309 * - TCS inputs for patch 1
310 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
311 * - ...
312 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
313 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
314 * - TCS outputs for patch 1
315 * - Per-patch TCS outputs for patch 1
316 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
317 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
318 * - ...
319 *
320 * All three shaders VS(LS), TCS, TES share the same LDS space.
321 */
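/* For illustration, with 96-byte input patches, 128 bytes of per-vertex
 * outputs plus 16 bytes of per-patch outputs, and 40 patches:
 *   output patch 0 starts at       96 * 40    = 3840 bytes
 *   its per-patch data starts at 3840 + 128   = 3968 bytes
 * The helpers below return these offsets in dwords, i.e. divided by 4.
 */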
322 static LLVMValueRef
323 get_tcs_in_patch_stride(struct radv_shader_context *ctx)
324 {
325 assert (ctx->stage == MESA_SHADER_TESS_CTRL);
326 uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
327 uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
328
329 input_patch_size /= 4;
330 return LLVMConstInt(ctx->ac.i32, input_patch_size, false);
331 }
332
333 static LLVMValueRef
334 get_tcs_out_patch_stride(struct radv_shader_context *ctx)
335 {
336 uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
337 uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
338 uint32_t output_vertex_size = num_tcs_outputs * 16;
339 uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
340 uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
341 output_patch_size /= 4;
342 return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
343 }
344
345 static LLVMValueRef
346 get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
347 {
348 uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
349 uint32_t output_vertex_size = num_tcs_outputs * 16;
350 output_vertex_size /= 4;
351 return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
352 }
353
354 static LLVMValueRef
355 get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
356 {
357 assert (ctx->stage == MESA_SHADER_TESS_CTRL);
358 uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
359 uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
360 uint32_t output_patch0_offset = input_patch_size;
361 unsigned num_patches = ctx->tcs_num_patches;
362
363 output_patch0_offset *= num_patches;
364 output_patch0_offset /= 4;
365 return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
366 }
367
368 static LLVMValueRef
369 get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
370 {
371 assert (ctx->stage == MESA_SHADER_TESS_CTRL);
372 uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
373 uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
374 uint32_t output_patch0_offset = input_patch_size;
375
376 uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
377 uint32_t output_vertex_size = num_tcs_outputs * 16;
378 uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
379 unsigned num_patches = ctx->tcs_num_patches;
380
381 output_patch0_offset *= num_patches;
382 output_patch0_offset += pervertex_output_patch_size;
383 output_patch0_offset /= 4;
384 return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
385 }
386
387 static LLVMValueRef
388 get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
389 {
390 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
391 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
392
393 return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
394 }
395
396 static LLVMValueRef
397 get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
398 {
399 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
400 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
401 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
402
403 return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
404 patch0_offset);
405 }
406
407 static LLVMValueRef
408 get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
409 {
410 LLVMValueRef patch0_patch_data_offset =
411 get_tcs_out_patch0_patch_data_offset(ctx);
412 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
413 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
414
415 return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
416 patch0_patch_data_offset);
417 }
418
419 #define MAX_ARGS 64
420 struct arg_info {
421 LLVMTypeRef types[MAX_ARGS];
422 LLVMValueRef *assign[MAX_ARGS];
423 unsigned array_params_mask;
424 uint8_t count;
425 uint8_t sgpr_count;
426 uint8_t num_sgprs_used;
427 uint8_t num_vgprs_used;
428 };
429
430 enum ac_arg_regfile {
431 ARG_SGPR,
432 ARG_VGPR,
433 };
434
435 static void
436 add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
437 LLVMValueRef *param_ptr)
438 {
439 assert(info->count < MAX_ARGS);
440
441 info->assign[info->count] = param_ptr;
442 info->types[info->count] = type;
443 info->count++;
444
445 if (regfile == ARG_SGPR) {
446 info->num_sgprs_used += ac_get_type_size(type) / 4;
447 info->sgpr_count++;
448 } else {
449 assert(regfile == ARG_VGPR);
450 info->num_vgprs_used += ac_get_type_size(type) / 4;
451 }
452 }
453
454 static inline void
455 add_array_arg(struct arg_info *info, LLVMTypeRef type, LLVMValueRef *param_ptr)
456 {
457 info->array_params_mask |= (1 << info->count);
458 add_arg(info, ARG_SGPR, type, param_ptr);
459 }
460
461 static void assign_arguments(LLVMValueRef main_function,
462 struct arg_info *info)
463 {
464 unsigned i;
465 for (i = 0; i < info->count; i++) {
466 if (info->assign[i])
467 *info->assign[i] = LLVMGetParam(main_function, i);
468 }
469 }
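/* Sketch of the intended flow (illustrative): declare the ABI arguments
 * first, build the LLVM function from the collected types, then bind the
 * parameters back to the context fields:
 *
 *	struct arg_info args = {};
 *	add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
 *	add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
 *	LLVMValueRef fn = create_llvm_function(ctx->context, ctx->ac.module,
 *					       ctx->ac.builder, NULL, 0,
 *					       &args, ctx->max_workgroup_size,
 *					       ctx->options);
 *	assign_arguments(fn, &args);
 */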
470
471 static LLVMValueRef
472 create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
473 LLVMBuilderRef builder, LLVMTypeRef *return_types,
474 unsigned num_return_elems,
475 struct arg_info *args,
476 unsigned max_workgroup_size,
477 const struct radv_nir_compiler_options *options)
478 {
479 LLVMTypeRef main_function_type, ret_type;
480 LLVMBasicBlockRef main_function_body;
481
482 if (num_return_elems)
483 ret_type = LLVMStructTypeInContext(ctx, return_types,
484 num_return_elems, true);
485 else
486 ret_type = LLVMVoidTypeInContext(ctx);
487
488 /* Setup the function */
489 main_function_type =
490 LLVMFunctionType(ret_type, args->types, args->count, 0);
491 LLVMValueRef main_function =
492 LLVMAddFunction(module, "main", main_function_type);
493 main_function_body =
494 LLVMAppendBasicBlockInContext(ctx, main_function, "main_body");
495 LLVMPositionBuilderAtEnd(builder, main_function_body);
496
497 LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
498 for (unsigned i = 0; i < args->sgpr_count; ++i) {
499 ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);
500
501 if (args->array_params_mask & (1 << i)) {
502 LLVMValueRef P = LLVMGetParam(main_function, i);
503 ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
504 ac_add_attr_dereferenceable(P, UINT64_MAX);
505 }
506 }
507
508 if (options->address32_hi) {
509 ac_llvm_add_target_dep_function_attr(main_function,
510 "amdgpu-32bit-address-high-bits",
511 options->address32_hi);
512 }
513
514 if (max_workgroup_size) {
515 ac_llvm_add_target_dep_function_attr(main_function,
516 "amdgpu-max-work-group-size",
517 max_workgroup_size);
518 }
519 if (options->unsafe_math) {
520 /* These were copied from some LLVM test. */
521 LLVMAddTargetDependentFunctionAttr(main_function,
522 "less-precise-fpmad",
523 "true");
524 LLVMAddTargetDependentFunctionAttr(main_function,
525 "no-infs-fp-math",
526 "true");
527 LLVMAddTargetDependentFunctionAttr(main_function,
528 "no-nans-fp-math",
529 "true");
530 LLVMAddTargetDependentFunctionAttr(main_function,
531 "unsafe-fp-math",
532 "true");
533 LLVMAddTargetDependentFunctionAttr(main_function,
534 "no-signed-zeros-fp-math",
535 "true");
536 }
537 return main_function;
538 }
539
540
541 static void
542 set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx,
543 uint8_t num_sgprs, bool indirect)
544 {
545 ud_info->sgpr_idx = *sgpr_idx;
546 ud_info->num_sgprs = num_sgprs;
547 ud_info->indirect = indirect;
548 *sgpr_idx += num_sgprs;
549 }
550
551 static void
552 set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
553 uint8_t num_sgprs)
554 {
555 struct radv_userdata_info *ud_info =
556 &ctx->shader_info->user_sgprs_locs.shader_data[idx];
557 assert(ud_info);
558
559 set_loc(ud_info, sgpr_idx, num_sgprs, false);
560 }
561
562 static void
563 set_loc_shader_ptr(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
564 {
565 bool use_32bit_pointers = HAVE_32BIT_POINTERS &&
566 idx != AC_UD_SCRATCH_RING_OFFSETS;
567
568 set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
569 }
570
571 static void
572 set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
573 bool indirect)
574 {
575 struct radv_userdata_locations *locs =
576 &ctx->shader_info->user_sgprs_locs;
577 struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];
578 assert(ud_info);
579
580 set_loc(ud_info, sgpr_idx, HAVE_32BIT_POINTERS ? 1 : 2, indirect);
581
582 if (!indirect)
583 locs->descriptor_sets_enabled |= 1 << idx;
584 }
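/* Bookkeeping example (hypothetical layout, 32-bit pointers): starting from
 * *sgpr_idx = 0, the scratch/ring pointer always takes s0-s1 (64-bit), a
 * directly-bound descriptor set then lands at s2 and the push constant
 * pointer at s3; every set_loc() call records the location and advances
 * *sgpr_idx by num_sgprs.
 */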
585
586 struct user_sgpr_info {
587 bool need_ring_offsets;
588 bool indirect_all_descriptor_sets;
589 };
590
591 static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
592 gl_shader_stage stage)
593 {
594 switch (stage) {
595 case MESA_SHADER_VERTEX:
596 if (ctx->shader_info->info.needs_multiview_view_index ||
597 (!ctx->options->key.vs.as_es && !ctx->options->key.vs.as_ls && ctx->options->key.has_multiview_view_index))
598 return true;
599 break;
600 case MESA_SHADER_TESS_EVAL:
601 if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.tes.as_es && ctx->options->key.has_multiview_view_index))
602 return true;
603 break;
604 case MESA_SHADER_GEOMETRY:
605 case MESA_SHADER_TESS_CTRL:
606 if (ctx->shader_info->info.needs_multiview_view_index)
607 return true;
608 break;
609 default:
610 break;
611 }
612 return false;
613 }
614
615 static uint8_t
616 count_vs_user_sgprs(struct radv_shader_context *ctx)
617 {
618 uint8_t count = 0;
619
620 if (ctx->shader_info->info.vs.has_vertex_buffers)
621 count += HAVE_32BIT_POINTERS ? 1 : 2;
622 count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;
623
624 return count;
625 }
626
627 static void allocate_user_sgprs(struct radv_shader_context *ctx,
628 gl_shader_stage stage,
629 bool has_previous_stage,
630 gl_shader_stage previous_stage,
631 bool needs_view_index,
632 struct user_sgpr_info *user_sgpr_info)
633 {
634 uint8_t user_sgpr_count = 0;
635
636 memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));
637
638 /* until we sort out scratch/global buffers, always assign ring offsets for gs/vs/es */
639 if (stage == MESA_SHADER_GEOMETRY ||
640 stage == MESA_SHADER_VERTEX ||
641 stage == MESA_SHADER_TESS_CTRL ||
642 stage == MESA_SHADER_TESS_EVAL ||
643 ctx->is_gs_copy_shader)
644 user_sgpr_info->need_ring_offsets = true;
645
646 if (stage == MESA_SHADER_FRAGMENT &&
647 ctx->shader_info->info.ps.needs_sample_positions)
648 user_sgpr_info->need_ring_offsets = true;
649
650 /* 2 user sgprs will nearly always be allocated for scratch/rings */
651 if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
652 user_sgpr_count += 2;
653 }
654
655 switch (stage) {
656 case MESA_SHADER_COMPUTE:
657 if (ctx->shader_info->info.cs.uses_grid_size)
658 user_sgpr_count += 3;
659 break;
660 case MESA_SHADER_FRAGMENT:
661 user_sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
662 break;
663 case MESA_SHADER_VERTEX:
664 if (!ctx->is_gs_copy_shader)
665 user_sgpr_count += count_vs_user_sgprs(ctx);
666 break;
667 case MESA_SHADER_TESS_CTRL:
668 if (has_previous_stage) {
669 if (previous_stage == MESA_SHADER_VERTEX)
670 user_sgpr_count += count_vs_user_sgprs(ctx);
671 }
672 break;
673 case MESA_SHADER_TESS_EVAL:
674 break;
675 case MESA_SHADER_GEOMETRY:
676 if (has_previous_stage) {
677 if (previous_stage == MESA_SHADER_VERTEX) {
678 user_sgpr_count += count_vs_user_sgprs(ctx);
679 }
680 }
681 break;
682 default:
683 break;
684 }
685
686 if (needs_view_index)
687 user_sgpr_count++;
688
689 if (ctx->shader_info->info.loads_push_constants)
690 user_sgpr_count += HAVE_32BIT_POINTERS ? 1 : 2;
691
692 uint32_t available_sgprs = ctx->options->chip_class >= GFX9 && stage != MESA_SHADER_COMPUTE ? 32 : 16;
693 uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
694 uint32_t num_desc_set =
695 util_bitcount(ctx->shader_info->info.desc_set_used_mask);
696
697 if (remaining_sgprs / (HAVE_32BIT_POINTERS ? 1 : 2) < num_desc_set) {
698 user_sgpr_info->indirect_all_descriptor_sets = true;
699 }
700 }
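/* Budget example (hypothetical pre-GFX9 vertex shader, 32-bit pointers):
 * of the 16 available SGPRs, 2 (scratch/rings) + 1 (vertex buffers) +
 * 3 (base vertex, start instance, draw id) + 1 (view index) +
 * 1 (push constants) = 8 are used, leaving 8 one-SGPR slots for
 * descriptor sets; only with more bound sets than that do we fall back
 * to the indirect path.
 */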
701
702 static void
703 declare_global_input_sgprs(struct radv_shader_context *ctx,
704 gl_shader_stage stage,
705 bool has_previous_stage,
706 gl_shader_stage previous_stage,
707 const struct user_sgpr_info *user_sgpr_info,
708 struct arg_info *args,
709 LLVMValueRef *desc_sets)
710 {
711 LLVMTypeRef type = ac_array_in_const32_addr_space(ctx->ac.i8);
712 unsigned num_sets = ctx->options->layout ?
713 ctx->options->layout->num_sets : 0;
714 unsigned stage_mask = 1 << stage;
715
716 if (has_previous_stage)
717 stage_mask |= 1 << previous_stage;
718
719 /* 1 for each descriptor set */
720 if (!user_sgpr_info->indirect_all_descriptor_sets) {
721 for (unsigned i = 0; i < num_sets; ++i) {
722 if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
723 ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
724 add_array_arg(args, type,
725 &ctx->descriptor_sets[i]);
726 }
727 }
728 } else {
729 add_array_arg(args, ac_array_in_const32_addr_space(type), desc_sets);
730 }
731
732 if (ctx->shader_info->info.loads_push_constants) {
733 /* 1 for push constants and dynamic descriptors */
734 add_array_arg(args, type, &ctx->abi.push_constants);
735 }
736 }
737
738 static void
739 declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
740 gl_shader_stage stage,
741 bool has_previous_stage,
742 gl_shader_stage previous_stage,
743 struct arg_info *args)
744 {
745 if (!ctx->is_gs_copy_shader &&
746 (stage == MESA_SHADER_VERTEX ||
747 (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
748 if (ctx->shader_info->info.vs.has_vertex_buffers) {
749 add_arg(args, ARG_SGPR,
750 ac_array_in_const32_addr_space(ctx->ac.v4i32),
751 &ctx->vertex_buffers);
752 }
753 add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
754 add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
755 if (ctx->shader_info->info.vs.needs_draw_id) {
756 add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
757 }
758 }
759 }
760
761 static void
762 declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
763 {
764 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
765 if (!ctx->is_gs_copy_shader) {
766 if (ctx->options->key.vs.as_ls) {
767 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
768 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
769 } else {
770 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
771 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
772 }
773 add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
774 }
775 }
776
777 static void
778 declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
779 {
780 add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_u);
781 add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_v);
782 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->tes_rel_patch_id);
783 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.tes_patch_id);
784 }
785
786 static void
787 set_global_input_locs(struct radv_shader_context *ctx, gl_shader_stage stage,
788 bool has_previous_stage, gl_shader_stage previous_stage,
789 const struct user_sgpr_info *user_sgpr_info,
790 LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
791 {
792 unsigned num_sets = ctx->options->layout ?
793 ctx->options->layout->num_sets : 0;
794 unsigned stage_mask = 1 << stage;
795
796 if (has_previous_stage)
797 stage_mask |= 1 << previous_stage;
798
799 if (!user_sgpr_info->indirect_all_descriptor_sets) {
800 for (unsigned i = 0; i < num_sets; ++i) {
801 if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
802 ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
803 set_loc_desc(ctx, i, user_sgpr_idx, false);
804 } else
805 ctx->descriptor_sets[i] = NULL;
806 }
807 } else {
808 set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
809 user_sgpr_idx);
810
811 for (unsigned i = 0; i < num_sets; ++i) {
812 if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
813 ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
814 ctx->descriptor_sets[i] =
815 ac_build_load_to_sgpr(&ctx->ac,
816 desc_sets,
817 LLVMConstInt(ctx->ac.i32, i, false));
818
819 } else
820 ctx->descriptor_sets[i] = NULL;
821 }
822 ctx->shader_info->need_indirect_descriptor_sets = true;
823 }
824
825 if (ctx->shader_info->info.loads_push_constants) {
826 set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx);
827 }
828 }
829
830 static void
831 set_vs_specific_input_locs(struct radv_shader_context *ctx,
832 gl_shader_stage stage, bool has_previous_stage,
833 gl_shader_stage previous_stage,
834 uint8_t *user_sgpr_idx)
835 {
836 if (!ctx->is_gs_copy_shader &&
837 (stage == MESA_SHADER_VERTEX ||
838 (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
839 if (ctx->shader_info->info.vs.has_vertex_buffers) {
840 set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS,
841 user_sgpr_idx);
842 }
843
844 unsigned vs_num = 2;
845 if (ctx->shader_info->info.vs.needs_draw_id)
846 vs_num++;
847
848 set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
849 user_sgpr_idx, vs_num);
850 }
851 }
852
853 static void set_llvm_calling_convention(LLVMValueRef func,
854 gl_shader_stage stage)
855 {
856 enum radeon_llvm_calling_convention calling_conv;
857
858 switch (stage) {
859 case MESA_SHADER_VERTEX:
860 case MESA_SHADER_TESS_EVAL:
861 calling_conv = RADEON_LLVM_AMDGPU_VS;
862 break;
863 case MESA_SHADER_GEOMETRY:
864 calling_conv = RADEON_LLVM_AMDGPU_GS;
865 break;
866 case MESA_SHADER_TESS_CTRL:
867 calling_conv = RADEON_LLVM_AMDGPU_HS;
868 break;
869 case MESA_SHADER_FRAGMENT:
870 calling_conv = RADEON_LLVM_AMDGPU_PS;
871 break;
872 case MESA_SHADER_COMPUTE:
873 calling_conv = RADEON_LLVM_AMDGPU_CS;
874 break;
875 default:
876 unreachable("Unhandled shader type");
877 }
878
879 LLVMSetFunctionCallConv(func, calling_conv);
880 }
881
882 static void create_function(struct radv_shader_context *ctx,
883 gl_shader_stage stage,
884 bool has_previous_stage,
885 gl_shader_stage previous_stage)
886 {
887 uint8_t user_sgpr_idx;
888 struct user_sgpr_info user_sgpr_info;
889 struct arg_info args = {};
890 LLVMValueRef desc_sets;
891 bool needs_view_index = needs_view_index_sgpr(ctx, stage);
892 allocate_user_sgprs(ctx, stage, has_previous_stage,
893 previous_stage, needs_view_index, &user_sgpr_info);
894
895 if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
896 add_arg(&args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
897 &ctx->ring_offsets);
898 }
899
900 switch (stage) {
901 case MESA_SHADER_COMPUTE:
902 declare_global_input_sgprs(ctx, stage, has_previous_stage,
903 previous_stage, &user_sgpr_info,
904 &args, &desc_sets);
905
906 if (ctx->shader_info->info.cs.uses_grid_size) {
907 add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
908 &ctx->abi.num_work_groups);
909 }
910
911 for (int i = 0; i < 3; i++) {
912 ctx->abi.workgroup_ids[i] = NULL;
913 if (ctx->shader_info->info.cs.uses_block_id[i]) {
914 add_arg(&args, ARG_SGPR, ctx->ac.i32,
915 &ctx->abi.workgroup_ids[i]);
916 }
917 }
918
919 if (ctx->shader_info->info.cs.uses_local_invocation_idx)
920 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
921 add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
922 &ctx->abi.local_invocation_ids);
923 break;
924 case MESA_SHADER_VERTEX:
925 declare_global_input_sgprs(ctx, stage, has_previous_stage,
926 previous_stage, &user_sgpr_info,
927 &args, &desc_sets);
928 declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
929 previous_stage, &args);
930
931 if (needs_view_index)
932 add_arg(&args, ARG_SGPR, ctx->ac.i32,
933 &ctx->abi.view_index);
934 if (ctx->options->key.vs.as_es)
935 add_arg(&args, ARG_SGPR, ctx->ac.i32,
936 &ctx->es2gs_offset);
937
938 declare_vs_input_vgprs(ctx, &args);
939 break;
940 case MESA_SHADER_TESS_CTRL:
941 if (has_previous_stage) {
942 // First 6 system regs
943 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
944 add_arg(&args, ARG_SGPR, ctx->ac.i32,
945 &ctx->merged_wave_info);
946 add_arg(&args, ARG_SGPR, ctx->ac.i32,
947 &ctx->tess_factor_offset);
948
949 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
950 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
951 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
952
953 declare_global_input_sgprs(ctx, stage,
954 has_previous_stage,
955 previous_stage,
956 &user_sgpr_info, &args,
957 &desc_sets);
958 declare_vs_specific_input_sgprs(ctx, stage,
959 has_previous_stage,
960 previous_stage, &args);
961
962 if (needs_view_index)
963 add_arg(&args, ARG_SGPR, ctx->ac.i32,
964 &ctx->abi.view_index);
965
966 add_arg(&args, ARG_VGPR, ctx->ac.i32,
967 &ctx->abi.tcs_patch_id);
968 add_arg(&args, ARG_VGPR, ctx->ac.i32,
969 &ctx->abi.tcs_rel_ids);
970
971 declare_vs_input_vgprs(ctx, &args);
972 } else {
973 declare_global_input_sgprs(ctx, stage,
974 has_previous_stage,
975 previous_stage,
976 &user_sgpr_info, &args,
977 &desc_sets);
978
979 if (needs_view_index)
980 add_arg(&args, ARG_SGPR, ctx->ac.i32,
981 &ctx->abi.view_index);
982
983 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
984 add_arg(&args, ARG_SGPR, ctx->ac.i32,
985 &ctx->tess_factor_offset);
986 add_arg(&args, ARG_VGPR, ctx->ac.i32,
987 &ctx->abi.tcs_patch_id);
988 add_arg(&args, ARG_VGPR, ctx->ac.i32,
989 &ctx->abi.tcs_rel_ids);
990 }
991 break;
992 case MESA_SHADER_TESS_EVAL:
993 declare_global_input_sgprs(ctx, stage, has_previous_stage,
994 previous_stage, &user_sgpr_info,
995 &args, &desc_sets);
996
997 if (needs_view_index)
998 add_arg(&args, ARG_SGPR, ctx->ac.i32,
999 &ctx->abi.view_index);
1000
1001 if (ctx->options->key.tes.as_es) {
1002 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
1003 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
1004 add_arg(&args, ARG_SGPR, ctx->ac.i32,
1005 &ctx->es2gs_offset);
1006 } else {
1007 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
1008 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
1009 }
1010 declare_tes_input_vgprs(ctx, &args);
1011 break;
1012 case MESA_SHADER_GEOMETRY:
1013 if (has_previous_stage) {
1014 // First 6 system regs
1015 add_arg(&args, ARG_SGPR, ctx->ac.i32,
1016 &ctx->gs2vs_offset);
1017 add_arg(&args, ARG_SGPR, ctx->ac.i32,
1018 &ctx->merged_wave_info);
1019 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
1020
1021 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
1022 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
1023 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
1024
1025 declare_global_input_sgprs(ctx, stage,
1026 has_previous_stage,
1027 previous_stage,
1028 &user_sgpr_info, &args,
1029 &desc_sets);
1030
1031 if (previous_stage != MESA_SHADER_TESS_EVAL) {
1032 declare_vs_specific_input_sgprs(ctx, stage,
1033 has_previous_stage,
1034 previous_stage,
1035 &args);
1036 }
1037
1038 if (needs_view_index)
1039 add_arg(&args, ARG_SGPR, ctx->ac.i32,
1040 &ctx->abi.view_index);
1041
1042 add_arg(&args, ARG_VGPR, ctx->ac.i32,
1043 &ctx->gs_vtx_offset[0]);
1044 add_arg(&args, ARG_VGPR, ctx->ac.i32,
1045 &ctx->gs_vtx_offset[2]);
1046 add_arg(&args, ARG_VGPR, ctx->ac.i32,
1047 &ctx->abi.gs_prim_id);
1048 add_arg(&args, ARG_VGPR, ctx->ac.i32,
1049 &ctx->abi.gs_invocation_id);
1050 add_arg(&args, ARG_VGPR, ctx->ac.i32,
1051 &ctx->gs_vtx_offset[4]);
1052
1053 if (previous_stage == MESA_SHADER_VERTEX) {
1054 declare_vs_input_vgprs(ctx, &args);
1055 } else {
1056 declare_tes_input_vgprs(ctx, &args);
1057 }
1058 } else {
1059 declare_global_input_sgprs(ctx, stage,
1060 has_previous_stage,
1061 previous_stage,
1062 &user_sgpr_info, &args,
1063 &desc_sets);
1064
1065 if (needs_view_index)
1066 add_arg(&args, ARG_SGPR, ctx->ac.i32,
1067 &ctx->abi.view_index);
1068
1069 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs2vs_offset);
1070 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs_wave_id);
1071 add_arg(&args, ARG_VGPR, ctx->ac.i32,
1072 &ctx->gs_vtx_offset[0]);
1073 add_arg(&args, ARG_VGPR, ctx->ac.i32,
1074 &ctx->gs_vtx_offset[1]);
1075 add_arg(&args, ARG_VGPR, ctx->ac.i32,
1076 &ctx->abi.gs_prim_id);
1077 add_arg(&args, ARG_VGPR, ctx->ac.i32,
1078 &ctx->gs_vtx_offset[2]);
1079 add_arg(&args, ARG_VGPR, ctx->ac.i32,
1080 &ctx->gs_vtx_offset[3]);
1081 add_arg(&args, ARG_VGPR, ctx->ac.i32,
1082 &ctx->gs_vtx_offset[4]);
1083 add_arg(&args, ARG_VGPR, ctx->ac.i32,
1084 &ctx->gs_vtx_offset[5]);
1085 add_arg(&args, ARG_VGPR, ctx->ac.i32,
1086 &ctx->abi.gs_invocation_id);
1087 }
1088 break;
1089 case MESA_SHADER_FRAGMENT:
1090 declare_global_input_sgprs(ctx, stage, has_previous_stage,
1091 previous_stage, &user_sgpr_info,
1092 &args, &desc_sets);
1093
1094 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
1095 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_sample);
1096 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_center);
1097 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_centroid);
1098 add_arg(&args, ARG_VGPR, ctx->ac.v3i32, NULL); /* persp pull model */
1099 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_sample);
1100 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_center);
1101 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_centroid);
1102 add_arg(&args, ARG_VGPR, ctx->ac.f32, NULL); /* line stipple tex */
1103 add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[0]);
1104 add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[1]);
1105 add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[2]);
1106 add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[3]);
1107 add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.front_face);
1108 add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.ancillary);
1109 add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.sample_coverage);
1110 add_arg(&args, ARG_VGPR, ctx->ac.i32, NULL); /* fixed pt */
1111 break;
1112 default:
1113 unreachable("Shader stage not implemented");
1114 }
1115
1116 ctx->main_function = create_llvm_function(
1117 ctx->context, ctx->ac.module, ctx->ac.builder, NULL, 0, &args,
1118 ctx->max_workgroup_size, ctx->options);
1119 set_llvm_calling_convention(ctx->main_function, stage);
1120
1121
1122 ctx->shader_info->num_input_vgprs = 0;
1123 ctx->shader_info->num_input_sgprs = ctx->options->supports_spill ? 2 : 0;
1124
1125 ctx->shader_info->num_input_sgprs += args.num_sgprs_used;
1126
1127 if (ctx->stage != MESA_SHADER_FRAGMENT)
1128 ctx->shader_info->num_input_vgprs = args.num_vgprs_used;
1129
1130 assign_arguments(ctx->main_function, &args);
1131
1132 user_sgpr_idx = 0;
1133
1134 if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
1135 set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS,
1136 &user_sgpr_idx);
1137 if (ctx->options->supports_spill) {
1138 ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
1139 LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_CONST),
1140 NULL, 0, AC_FUNC_ATTR_READNONE);
1141 ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
1142 ac_array_in_const_addr_space(ctx->ac.v4i32), "");
1143 }
1144 }
1145
1146 /* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in front (including
1147 * the rw_buffers at s0/s1). With user SGPR0 = s8, let's restart the count from 0. */
1148 if (has_previous_stage)
1149 user_sgpr_idx = 0;
1150
1151 set_global_input_locs(ctx, stage, has_previous_stage, previous_stage,
1152 &user_sgpr_info, desc_sets, &user_sgpr_idx);
1153
1154 switch (stage) {
1155 case MESA_SHADER_COMPUTE:
1156 if (ctx->shader_info->info.cs.uses_grid_size) {
1157 set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
1158 &user_sgpr_idx, 3);
1159 }
1160 break;
1161 case MESA_SHADER_VERTEX:
1162 set_vs_specific_input_locs(ctx, stage, has_previous_stage,
1163 previous_stage, &user_sgpr_idx);
1164 if (ctx->abi.view_index)
1165 set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
1166 break;
1167 case MESA_SHADER_TESS_CTRL:
1168 set_vs_specific_input_locs(ctx, stage, has_previous_stage,
1169 previous_stage, &user_sgpr_idx);
1170 if (ctx->abi.view_index)
1171 set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
1172 break;
1173 case MESA_SHADER_TESS_EVAL:
1174 if (ctx->abi.view_index)
1175 set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
1176 break;
1177 case MESA_SHADER_GEOMETRY:
1178 if (has_previous_stage) {
1179 if (previous_stage == MESA_SHADER_VERTEX)
1180 set_vs_specific_input_locs(ctx, stage,
1181 has_previous_stage,
1182 previous_stage,
1183 &user_sgpr_idx);
1184 }
1185 if (ctx->abi.view_index)
1186 set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
1187 break;
1188 case MESA_SHADER_FRAGMENT:
1189 break;
1190 default:
1191 unreachable("Shader stage not implemented");
1192 }
1193
1194 if (stage == MESA_SHADER_TESS_CTRL ||
1195 (stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_ls) ||
1196 /* GFX9 has the ESGS ring buffer in LDS. */
1197 (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
1198 ac_declare_lds_as_pointer(&ctx->ac);
1199 }
1200
1201 ctx->shader_info->num_user_sgprs = user_sgpr_idx;
1202 }
1203
1204
1205 static LLVMValueRef
1206 radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
1207 unsigned desc_set, unsigned binding)
1208 {
1209 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1210 LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
1211 struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
1212 struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
1213 unsigned base_offset = layout->binding[binding].offset;
1214 LLVMValueRef offset, stride;
1215
1216 if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
1217 layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
1218 unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
1219 layout->binding[binding].dynamic_offset_offset;
1220 desc_ptr = ctx->abi.push_constants;
1221 base_offset = pipeline_layout->push_constant_size + 16 * idx;
1222 stride = LLVMConstInt(ctx->ac.i32, 16, false);
1223 } else
1224 stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);
1225
1226 offset = ac_build_imad(&ctx->ac, index, stride,
1227 LLVMConstInt(ctx->ac.i32, base_offset, false));
1228
1229 desc_ptr = ac_build_gep0(&ctx->ac, desc_ptr, offset);
1230 desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
1231 LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
1232
1233 return desc_ptr;
1234 }
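/* Addressing example (hypothetical binding): a descriptor array with a
 * 32-byte stride at base offset 64 resolves element `index` to
 * index * 32 + 64 bytes within the set, while dynamic buffers live after
 * the push constants at push_constant_size + 16 * dyn_idx with a fixed
 * 16-byte stride.
 */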
1235
1236
1237 /* The offchip buffer layout for TCS->TES is
1238 *
1239 * - attribute 0 of patch 0 vertex 0
1240 * - attribute 0 of patch 0 vertex 1
1241 * - attribute 0 of patch 0 vertex 2
1242 * ...
1243 * - attribute 0 of patch 1 vertex 0
1244 * - attribute 0 of patch 1 vertex 1
1245 * ...
1246 * - attribute 1 of patch 0 vertex 0
1247 * - attribute 1 of patch 0 vertex 1
1248 * ...
1249 * - per patch attribute 0 of patch 0
1250 * - per patch attribute 0 of patch 1
1251 * ...
1252 *
1253 * Note that every attribute has 4 components.
1254 */
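/* For illustration: with 4 vertices per patch and 40 patches, attribute k
 * of patch p, vertex v starts at ((p * 4 + v) + k * 4 * 40) * 16 bytes,
 * and per-patch attribute k of patch p at
 * 4 * 40 * output_vertex_size + (p + k * 40) * 16 bytes.
 */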
1255 static LLVMValueRef get_non_vertex_index_offset(struct radv_shader_context *ctx)
1256 {
1257 uint32_t num_patches = ctx->tcs_num_patches;
1258 uint32_t num_tcs_outputs;
1259 if (ctx->stage == MESA_SHADER_TESS_CTRL)
1260 num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
1261 else
1262 num_tcs_outputs = ctx->options->key.tes.tcs_num_outputs;
1263
1264 uint32_t output_vertex_size = num_tcs_outputs * 16;
1265 uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
1266
1267 return LLVMConstInt(ctx->ac.i32, pervertex_output_patch_size * num_patches, false);
1268 }
1269
1270 static LLVMValueRef calc_param_stride(struct radv_shader_context *ctx,
1271 LLVMValueRef vertex_index)
1272 {
1273 LLVMValueRef param_stride;
1274 if (vertex_index)
1275 param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch * ctx->tcs_num_patches, false);
1276 else
1277 param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_num_patches, false);
1278 return param_stride;
1279 }
1280
1281 static LLVMValueRef get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
1282 LLVMValueRef vertex_index,
1283 LLVMValueRef param_index)
1284 {
1285 LLVMValueRef base_addr;
1286 LLVMValueRef param_stride, constant16;
1287 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
1288 LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch, false);
1289 constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
1290 param_stride = calc_param_stride(ctx, vertex_index);
1291 if (vertex_index) {
1292 base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
1293 vertices_per_patch, vertex_index);
1294 } else {
1295 base_addr = rel_patch_id;
1296 }
1297
1298 base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
1299 LLVMBuildMul(ctx->ac.builder, param_index,
1300 param_stride, ""), "");
1301
1302 base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");
1303
1304 if (!vertex_index) {
1305 LLVMValueRef patch_data_offset = get_non_vertex_index_offset(ctx);
1306
1307 base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
1308 patch_data_offset, "");
1309 }
1310 return base_addr;
1311 }
1312
1313 static LLVMValueRef get_tcs_tes_buffer_address_params(struct radv_shader_context *ctx,
1314 unsigned param,
1315 unsigned const_index,
1316 bool is_compact,
1317 LLVMValueRef vertex_index,
1318 LLVMValueRef indir_index)
1319 {
1320 LLVMValueRef param_index;
1321
1322 if (indir_index)
1323 param_index = LLVMBuildAdd(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, param, false),
1324 indir_index, "");
1325 else {
1326 if (const_index && !is_compact)
1327 param += const_index;
1328 param_index = LLVMConstInt(ctx->ac.i32, param, false);
1329 }
1330 return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
1331 }
1332
1333 static LLVMValueRef
1334 get_dw_address(struct radv_shader_context *ctx,
1335 LLVMValueRef dw_addr,
1336 unsigned param,
1337 unsigned const_index,
1338 bool compact_const_index,
1339 LLVMValueRef vertex_index,
1340 LLVMValueRef stride,
1341 LLVMValueRef indir_index)
1342
1343 {
1344
1345 if (vertex_index) {
1346 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1347 LLVMBuildMul(ctx->ac.builder,
1348 vertex_index,
1349 stride, ""), "");
1350 }
1351
1352 if (indir_index)
1353 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1354 LLVMBuildMul(ctx->ac.builder, indir_index,
1355 LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
1356 else if (const_index && !compact_const_index)
1357 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1358 LLVMConstInt(ctx->ac.i32, const_index * 4, false), "");
1359
1360 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1361 LLVMConstInt(ctx->ac.i32, param * 4, false), "");
1362
1363 if (const_index && compact_const_index)
1364 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1365 LLVMConstInt(ctx->ac.i32, const_index, false), "");
1366 return dw_addr;
1367 }
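/* Worked example (hypothetical values): for param 1, const_index 2 (not
 * compact) of vertex 3 with an 8-dword vertex stride, this computes
 * dw_addr = base + 3 * 8 + 2 * 4 + 1 * 4 = base + 36 dwords.
 */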
1368
1369 static LLVMValueRef
1370 load_tcs_varyings(struct ac_shader_abi *abi,
1371 LLVMTypeRef type,
1372 LLVMValueRef vertex_index,
1373 LLVMValueRef indir_index,
1374 unsigned const_index,
1375 unsigned location,
1376 unsigned driver_location,
1377 unsigned component,
1378 unsigned num_components,
1379 bool is_patch,
1380 bool is_compact,
1381 bool load_input)
1382 {
1383 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1384 LLVMValueRef dw_addr, stride;
1385 LLVMValueRef value[4], result;
1386 unsigned param = shader_io_get_unique_index(location);
1387
1388 if (load_input) {
1389 uint32_t input_vertex_size = (ctx->tcs_num_inputs * 16) / 4;
1390 stride = LLVMConstInt(ctx->ac.i32, input_vertex_size, false);
1391 dw_addr = get_tcs_in_current_patch_offset(ctx);
1392 } else {
1393 if (!is_patch) {
1394 stride = get_tcs_out_vertex_stride(ctx);
1395 dw_addr = get_tcs_out_current_patch_offset(ctx);
1396 } else {
1397 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1398 stride = NULL;
1399 }
1400 }
1401
1402 dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
1403 indir_index);
1404
1405 for (unsigned i = 0; i < num_components + component; i++) {
1406 value[i] = ac_lds_load(&ctx->ac, dw_addr);
1407 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1408 ctx->ac.i32_1, "");
1409 }
1410 result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
1411 return result;
1412 }
1413
1414 static void
1415 store_tcs_output(struct ac_shader_abi *abi,
1416 const nir_variable *var,
1417 LLVMValueRef vertex_index,
1418 LLVMValueRef param_index,
1419 unsigned const_index,
1420 LLVMValueRef src,
1421 unsigned writemask)
1422 {
1423 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1424 const unsigned location = var->data.location;
1425 const unsigned component = var->data.location_frac;
1426 const bool is_patch = var->data.patch;
1427 const bool is_compact = var->data.compact;
1428 LLVMValueRef dw_addr;
1429 LLVMValueRef stride = NULL;
1430 LLVMValueRef buf_addr = NULL;
1431 unsigned param;
1432 bool store_lds = true;
1433
1434 if (is_patch) {
1435 if (!(ctx->tcs_patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
1436 store_lds = false;
1437 } else {
1438 if (!(ctx->tcs_outputs_read & (1ULL << location)))
1439 store_lds = false;
1440 }
1441
1442 param = shader_io_get_unique_index(location);
1443 if (location == VARYING_SLOT_CLIP_DIST0 &&
1444 is_compact && const_index > 3) {
1445 const_index -= 3;
1446 param++;
1447 }
1448
1449 if (!is_patch) {
1450 stride = get_tcs_out_vertex_stride(ctx);
1451 dw_addr = get_tcs_out_current_patch_offset(ctx);
1452 } else {
1453 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1454 }
1455
1456 dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
1457 param_index);
1458 buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
1459 vertex_index, param_index);
1460
1461 bool is_tess_factor = false;
1462 if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
1463 location == VARYING_SLOT_TESS_LEVEL_OUTER)
1464 is_tess_factor = true;
1465
1466 unsigned base = is_compact ? const_index : 0;
1467 for (unsigned chan = 0; chan < 8; chan++) {
1468 if (!(writemask & (1 << chan)))
1469 continue;
1470 LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
1471 value = ac_to_integer(&ctx->ac, value);
1472 value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
1473
1474 if (store_lds || is_tess_factor) {
1475 LLVMValueRef dw_addr_chan =
1476 LLVMBuildAdd(ctx->ac.builder, dw_addr,
1477 LLVMConstInt(ctx->ac.i32, chan, false), "");
1478 ac_lds_store(&ctx->ac, dw_addr_chan, value);
1479 }
1480
1481 if (!is_tess_factor && writemask != 0xF)
1482 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
1483 buf_addr, ctx->oc_lds,
1484 4 * (base + chan), 1, 0, true, false);
1485 }
1486
1487 if (writemask == 0xF) {
1488 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
1489 buf_addr, ctx->oc_lds,
1490 (base * 4), 1, 0, true, false);
1491 }
1492 }
1493
1494 static LLVMValueRef
1495 load_tes_input(struct ac_shader_abi *abi,
1496 LLVMTypeRef type,
1497 LLVMValueRef vertex_index,
1498 LLVMValueRef param_index,
1499 unsigned const_index,
1500 unsigned location,
1501 unsigned driver_location,
1502 unsigned component,
1503 unsigned num_components,
1504 bool is_patch,
1505 bool is_compact,
1506 bool load_input)
1507 {
1508 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1509 LLVMValueRef buf_addr;
1510 LLVMValueRef result;
1511 unsigned param = shader_io_get_unique_index(location);
1512
1513 if (location == VARYING_SLOT_CLIP_DIST0 && is_compact && const_index > 3) {
1514 const_index -= 3;
1515 param++;
1516 }
1517
1518 buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
1519 is_compact, vertex_index, param_index);
1520
1521 LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
1522 buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");
1523
1524 result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
1525 buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, 1, 0, true, false);
1526 result = ac_trim_vector(&ctx->ac, result, num_components);
1527 return result;
1528 }
1529
1530 static LLVMValueRef
1531 load_gs_input(struct ac_shader_abi *abi,
1532 unsigned location,
1533 unsigned driver_location,
1534 unsigned component,
1535 unsigned num_components,
1536 unsigned vertex_index,
1537 unsigned const_index,
1538 LLVMTypeRef type)
1539 {
1540 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1541 LLVMValueRef vtx_offset;
1542 unsigned param, vtx_offset_param;
1543 LLVMValueRef value[4], result;
1544
1545 vtx_offset_param = vertex_index;
1546 assert(vtx_offset_param < 6);
1547 vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
1548 LLVMConstInt(ctx->ac.i32, 4, false), "");
1549
1550 param = shader_io_get_unique_index(location);
1551
1552 for (unsigned i = component; i < num_components + component; i++) {
1553 if (ctx->ac.chip_class >= GFX9) {
1554 LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
1555 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1556 LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
1557 value[i] = ac_lds_load(&ctx->ac, dw_addr);
1558 } else {
1559 LLVMValueRef soffset =
1560 LLVMConstInt(ctx->ac.i32,
1561 (param * 4 + i + const_index) * 256,
1562 false);
1563
1564 value[i] = ac_build_buffer_load(&ctx->ac,
1565 ctx->esgs_ring, 1,
1566 ctx->ac.i32_0,
1567 vtx_offset, soffset,
1568 0, 1, 0, true, false);
1569 }
1570
1571 if (ac_get_type_size(type) == 2) {
1572 value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], ctx->ac.i32, "");
1573 value[i] = LLVMBuildTrunc(ctx->ac.builder, value[i], ctx->ac.i16, "");
1574 }
1575 value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], type, "");
1576 }
1577 result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
1578 result = ac_to_integer(&ctx->ac, result);
1579 return result;
1580 }
1581
1582
1583 static void radv_emit_kill(struct ac_shader_abi *abi, LLVMValueRef visible)
1584 {
1585 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1586 ac_build_kill_if_false(&ctx->ac, visible);
1587 }
1588
1589 static LLVMValueRef lookup_interp_param(struct ac_shader_abi *abi,
1590 enum glsl_interp_mode interp, unsigned location)
1591 {
1592 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1593
1594 switch (interp) {
1595 case INTERP_MODE_FLAT:
1596 default:
1597 return NULL;
1598 case INTERP_MODE_SMOOTH:
1599 case INTERP_MODE_NONE:
1600 if (location == INTERP_CENTER)
1601 return ctx->persp_center;
1602 else if (location == INTERP_CENTROID)
1603 return ctx->persp_centroid;
1604 else if (location == INTERP_SAMPLE)
1605 return ctx->persp_sample;
1606 break;
1607 case INTERP_MODE_NOPERSPECTIVE:
1608 if (location == INTERP_CENTER)
1609 return ctx->linear_center;
1610 else if (location == INTERP_CENTROID)
1611 return ctx->linear_centroid;
1612 else if (location == INTERP_SAMPLE)
1613 return ctx->linear_sample;
1614 break;
1615 }
1616 return NULL;
1617 }
1618
1619 static uint32_t
1620 radv_get_sample_pos_offset(uint32_t num_samples)
1621 {
1622 uint32_t sample_pos_offset = 0;
1623
1624 switch (num_samples) {
1625 case 2:
1626 sample_pos_offset = 1;
1627 break;
1628 case 4:
1629 sample_pos_offset = 3;
1630 break;
1631 case 8:
1632 sample_pos_offset = 7;
1633 break;
1634 case 16:
1635 sample_pos_offset = 15;
1636 break;
1637 default:
1638 break;
1639 }
1640 return sample_pos_offset;
1641 }
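/* These offsets index the sample-positions ring, where the layouts are
 * stored back to back: 1x at slot 0, 2x at slots 1-2, 4x at slots 3-6,
 * 8x at slots 7-14 and so on, hence offset = num_samples - 1.
 */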
1642
1643 static LLVMValueRef load_sample_position(struct ac_shader_abi *abi,
1644 LLVMValueRef sample_id)
1645 {
1646 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1647
1648 LLVMValueRef result;
1649 LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false));
1650
1651 ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
1652 ac_array_in_const_addr_space(ctx->ac.v2f32), "");
1653
1654 uint32_t sample_pos_offset =
1655 radv_get_sample_pos_offset(ctx->options->key.fs.num_samples);
1656
1657 sample_id =
1658 LLVMBuildAdd(ctx->ac.builder, sample_id,
1659 LLVMConstInt(ctx->ac.i32, sample_pos_offset, false), "");
1660 result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);
1661
1662 return result;
1663 }
1664
1665
1666 static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
1667 {
1668 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1669 uint8_t log2_ps_iter_samples;
1670
1671 if (ctx->shader_info->info.ps.force_persample) {
1672 log2_ps_iter_samples =
1673 util_logbase2(ctx->options->key.fs.num_samples);
1674 } else {
1675 log2_ps_iter_samples = ctx->options->key.fs.log2_ps_iter_samples;
1676 }
1677
1678 /* The bit pattern matches that used by fixed function fragment
1679 * processing. */
1680 static const uint16_t ps_iter_masks[] = {
1681 0xffff, /* not used */
1682 0x5555,
1683 0x1111,
1684 0x0101,
1685 0x0001,
1686 };
1687 assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));
1688
1689 uint32_t ps_iter_mask = ps_iter_masks[log2_ps_iter_samples];
1690
1691 LLVMValueRef result, sample_id;
1692 sample_id = ac_unpack_param(&ctx->ac, abi->ancillary, 8, 4);
1693 sample_id = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, ps_iter_mask, false), sample_id, "");
1694 result = LLVMBuildAnd(ctx->ac.builder, sample_id, abi->sample_coverage, "");
1695 return result;
1696 }
1697
1698
1699 static void
1700 visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
1701 {
1702 LLVMValueRef gs_next_vertex;
1703 LLVMValueRef can_emit;
1704 unsigned offset = 0;
1705 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1706
1707 /* Write vertex attribute values to GSVS ring */
1708 gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
1709 ctx->gs_next_vertex[stream],
1710 "");
1711
1712 /* If this thread has already emitted the declared maximum number of
1713 * vertices, kill it: excessive vertex emissions are not supposed to
1714 * have any effect, and GS threads have no externally observable
1715 * effects other than emitting vertices.
1716 */
1717 can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
1718 LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
1719 ac_build_kill_if_false(&ctx->ac, can_emit);
1720
1721 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
1722 unsigned output_usage_mask =
1723 ctx->shader_info->info.gs.output_usage_mask[i];
1724 uint8_t output_stream =
1725 ctx->shader_info->info.gs.output_streams[i];
1726 LLVMValueRef *out_ptr = &addrs[i * 4];
1727 int length = util_last_bit(output_usage_mask);
1728
1729 if (!(ctx->output_mask & (1ull << i)) ||
1730 output_stream != stream)
1731 continue;
1732
1733 for (unsigned j = 0; j < length; j++) {
1734 if (!(output_usage_mask & (1 << j)))
1735 continue;
1736
1737 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
1738 out_ptr[j], "");
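/* The GSVS ring is written component-major: each used (slot, channel)
* pair occupies a run of gs_max_out_vertices dwords, so the byte offset
* is (offset * gs_max_out_vertices + gs_next_vertex) * 4. */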
1739 LLVMValueRef voffset =
1740 LLVMConstInt(ctx->ac.i32, offset *
1741 ctx->gs_max_out_vertices, false);
1742
1743 offset++;
1744
1745 voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
1746 voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");
1747
1748 out_val = ac_to_integer(&ctx->ac, out_val);
1749 out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");
1750
1751 ac_build_buffer_store_dword(&ctx->ac,
1752 ctx->gsvs_ring[stream],
1753 out_val, 1,
1754 voffset, ctx->gs2vs_offset, 0,
1755 1, 1, true, true);
1756 }
1757 }
1758
1759 gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
1760 ctx->ac.i32_1, "");
1761 LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);
1762
1763 ac_build_sendmsg(&ctx->ac,
1764 AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
1765 ctx->gs_wave_id);
1766 }
1767
1768 static void
1769 visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
1770 {
1771 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1772 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
1773 }
1774
1775 static LLVMValueRef
1776 load_tess_coord(struct ac_shader_abi *abi)
1777 {
1778 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1779
1780 LLVMValueRef coord[4] = {
1781 ctx->tes_u,
1782 ctx->tes_v,
1783 ctx->ac.f32_0,
1784 ctx->ac.f32_0,
1785 };
1786
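/* The hardware only supplies u and v; for triangles derive the third
* barycentric coordinate as w = 1 - u - v. */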
1787 if (ctx->tes_primitive_mode == GL_TRIANGLES)
1788 coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
1789 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");
1790
1791 return ac_build_gather_values(&ctx->ac, coord, 3);
1792 }
1793
1794 static LLVMValueRef
1795 load_patch_vertices_in(struct ac_shader_abi *abi)
1796 {
1797 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1798 return LLVMConstInt(ctx->ac.i32, ctx->options->key.tcs.input_vertices, false);
1799 }
1800
1801
1802 static LLVMValueRef radv_load_base_vertex(struct ac_shader_abi *abi)
1803 {
1804 return abi->base_vertex;
1805 }
1806
1807 static LLVMValueRef radv_load_ssbo(struct ac_shader_abi *abi,
1808 LLVMValueRef buffer_ptr, bool write)
1809 {
1810 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1811 LLVMValueRef result;
1812
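/* Mark the pointer as uniform and the load as invariant so the backend
* can use scalar loads and CSE repeated descriptor fetches. */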
1813 LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
1814
1815 result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
1816 LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);
1817
1818 return result;
1819 }
1820
1821 static LLVMValueRef radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer_ptr)
1822 {
1823 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1824 LLVMValueRef result;
1825
1826 LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
1827
1828 result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
1829 LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);
1830
1831 return result;
1832 }
1833
1834 static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
1835 unsigned descriptor_set,
1836 unsigned base_index,
1837 unsigned constant_index,
1838 LLVMValueRef index,
1839 enum ac_descriptor_type desc_type,
1840 bool image, bool write,
1841 bool bindless)
1842 {
1843 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1844 LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
1845 struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
1846 struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
1847 unsigned offset = binding->offset;
1848 unsigned stride = binding->size;
1849 unsigned type_size;
1850 LLVMBuilderRef builder = ctx->ac.builder;
1851 LLVMTypeRef type;
1852
1853 assert(base_index < layout->binding_count);
1854
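/* The fixed offsets below come from radv's descriptor layout: a 32-byte
* image descriptor, then a 32-byte FMASK descriptor, with the 16-byte
* sampler of a combined image+sampler binding at byte 64. */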
1855 switch (desc_type) {
1856 case AC_DESC_IMAGE:
1857 type = ctx->ac.v8i32;
1858 type_size = 32;
1859 break;
1860 case AC_DESC_FMASK:
1861 type = ctx->ac.v8i32;
1862 offset += 32;
1863 type_size = 32;
1864 break;
1865 case AC_DESC_SAMPLER:
1866 type = ctx->ac.v4i32;
1867 if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
1868 offset += 64;
1869
1870 type_size = 16;
1871 break;
1872 case AC_DESC_BUFFER:
1873 type = ctx->ac.v4i32;
1874 type_size = 16;
1875 break;
1876 default:
1877 unreachable("invalid desc_type\n");
1878 }
1879
1880 offset += constant_index * stride;
1881
1882 if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
1883 (!index || binding->immutable_samplers_equal)) {
1884 if (binding->immutable_samplers_equal)
1885 constant_index = 0;
1886
1887 const uint32_t *samplers = radv_immutable_samplers(layout, binding);
1888
1889 LLVMValueRef constants[] = {
1890 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
1891 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
1892 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
1893 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
1894 };
1895 return ac_build_gather_values(&ctx->ac, constants, 4);
1896 }
1897
1898 assert(stride % type_size == 0);
1899
1900 if (!index)
1901 index = ctx->ac.i32_0;
1902
1903 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");
1904
1905 list = ac_build_gep0(&ctx->ac, list, LLVMConstInt(ctx->ac.i32, offset, 0));
1906 list = LLVMBuildPointerCast(builder, list,
1907 ac_array_in_const32_addr_space(type), "");
1908
1909 return ac_build_load_to_sgpr(&ctx->ac, list, index);
1910 }
1911
1912 /* For 2_10_10_10 formats the alpha is handled as unsigned by pre-Vega HW,
1913 * so we may need to fix it up. */
1914 static LLVMValueRef
1915 adjust_vertex_fetch_alpha(struct radv_shader_context *ctx,
1916 unsigned adjustment,
1917 LLVMValueRef alpha)
1918 {
1919 if (adjustment == RADV_ALPHA_ADJUST_NONE)
1920 return alpha;
1921
1922 LLVMValueRef c30 = LLVMConstInt(ctx->ac.i32, 30, 0);
1923
1924 if (adjustment == RADV_ALPHA_ADJUST_SSCALED)
1925 alpha = LLVMBuildFPToUI(ctx->ac.builder, alpha, ctx->ac.i32, "");
1926 else
1927 alpha = ac_to_integer(&ctx->ac, alpha);
1928
1929 /* For the integer-like cases, do a natural sign extension.
1930 *
1931 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
1932 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
1933 * exponent.
1934 */
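/* For example, for SINT the 2-bit alpha sits in bits [1:0]: shl by 30
* then ashr by 30 sign-extends it in place. For SNORM the 0..3 pattern
* sits in the exponent LSBs at bits [24:23], so shl by 7 moves it to
* bits [31:30] before the same ashr by 30. */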
1935 alpha = LLVMBuildShl(ctx->ac.builder, alpha,
1936 adjustment == RADV_ALPHA_ADJUST_SNORM ?
1937 LLVMConstInt(ctx->ac.i32, 7, 0) : c30, "");
1938 alpha = LLVMBuildAShr(ctx->ac.builder, alpha, c30, "");
1939
1940 /* Convert back to the right type. */
1941 if (adjustment == RADV_ALPHA_ADJUST_SNORM) {
1942 LLVMValueRef clamp;
1943 LLVMValueRef neg_one = LLVMConstReal(ctx->ac.f32, -1.0);
1944 alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
1945 clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, alpha, neg_one, "");
1946 alpha = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, alpha, "");
1947 } else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) {
1948 alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
1949 }
1950
1951 return alpha;
1952 }
1953
1954 static void
1955 handle_vs_input_decl(struct radv_shader_context *ctx,
1956 struct nir_variable *variable)
1957 {
1958 LLVMValueRef t_list_ptr = ctx->vertex_buffers;
1959 LLVMValueRef t_offset;
1960 LLVMValueRef t_list;
1961 LLVMValueRef input;
1962 LLVMValueRef buffer_index;
1963 unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
1964 uint8_t input_usage_mask =
1965 ctx->shader_info->info.vs.input_usage_mask[variable->data.location];
1966 unsigned num_channels = util_last_bit(input_usage_mask);
1967
1968 variable->data.driver_location = variable->data.location * 4;
1969
1970 enum glsl_base_type type = glsl_get_base_type(variable->type);
1971 for (unsigned i = 0; i < attrib_count; ++i) {
1972 LLVMValueRef output[4];
1973 unsigned attrib_index = variable->data.location + i - VERT_ATTRIB_GENERIC0;
1974
1975 if (ctx->options->key.vs.instance_rate_inputs & (1u << attrib_index)) {
1976 uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[attrib_index];
1977
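/* Per-instance attribute: a divisor of 0 makes every instance read the
* same element (start_instance), otherwise the element is
* start_instance + instance_id / divisor. */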
1978 if (divisor) {
1979 buffer_index = ctx->abi.instance_id;
1980
1981 if (divisor != 1) {
1982 buffer_index = LLVMBuildUDiv(ctx->ac.builder, buffer_index,
1983 LLVMConstInt(ctx->ac.i32, divisor, 0), "");
1984 }
1985
1986 if (ctx->options->key.vs.as_ls) {
1987 ctx->shader_info->vs.vgpr_comp_cnt =
1988 MAX2(2, ctx->shader_info->vs.vgpr_comp_cnt);
1989 } else {
1990 ctx->shader_info->vs.vgpr_comp_cnt =
1991 MAX2(1, ctx->shader_info->vs.vgpr_comp_cnt);
1992 }
1993 } else {
1994 buffer_index = ctx->ac.i32_0;
1995 }
1996
1997 buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.start_instance, buffer_index, "");
1998 } else
1999 buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
2000 ctx->abi.base_vertex, "");
2001 t_offset = LLVMConstInt(ctx->ac.i32, attrib_index, false);
2002
2003 t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);
2004
2005 input = ac_build_buffer_load_format(&ctx->ac, t_list,
2006 buffer_index,
2007 ctx->ac.i32_0,
2008 num_channels, false, true);
2009
2010 input = ac_build_expand_to_vec4(&ctx->ac, input, num_channels);
2011
2012 for (unsigned chan = 0; chan < 4; chan++) {
2013 LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
2014 output[chan] = LLVMBuildExtractElement(ctx->ac.builder, input, llvm_chan, "");
2015 if (type == GLSL_TYPE_FLOAT16) {
2016 output[chan] = LLVMBuildBitCast(ctx->ac.builder, output[chan], ctx->ac.f32, "");
2017 output[chan] = LLVMBuildFPTrunc(ctx->ac.builder, output[chan], ctx->ac.f16, "");
2018 }
2019 }
2020
2021 unsigned alpha_adjust = (ctx->options->key.vs.alpha_adjust >> (attrib_index * 2)) & 3;
2022 output[3] = adjust_vertex_fetch_alpha(ctx, alpha_adjust, output[3]);
2023
2024 for (unsigned chan = 0; chan < 4; chan++) {
2025 output[chan] = ac_to_integer(&ctx->ac, output[chan]);
2026 if (type == GLSL_TYPE_UINT16 || type == GLSL_TYPE_INT16)
2027 output[chan] = LLVMBuildTrunc(ctx->ac.builder, output[chan], ctx->ac.i16, "");
2028
2029 ctx->inputs[ac_llvm_reg_index_soa(variable->data.location + i, chan)] = output[chan];
2030 }
2031 }
2032 }
2033
2034 static void interp_fs_input(struct radv_shader_context *ctx,
2035 unsigned attr,
2036 LLVMValueRef interp_param,
2037 LLVMValueRef prim_mask,
2038 LLVMValueRef result[4])
2039 {
2040 LLVMValueRef attr_number;
2041 unsigned chan;
2042 LLVMValueRef i, j;
2043 bool interp = !LLVMIsUndef(interp_param);
2044
2045 attr_number = LLVMConstInt(ctx->ac.i32, attr, false);
2046
2047 /* fs.constant returns the param from the middle vertex, so it's not
2048 * really useful for flat shading. It's meant to be used for custom
2049 * interpolation (but the intrinsic can't fetch from the other two
2050 * vertices).
2051 *
2052 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
2053 * to do the right thing. The only reason we use fs.constant is that
2054 * fs.interp cannot be used on integers, because they can be equal
2055 * to NaN.
2056 */
2057 if (interp) {
2058 interp_param = LLVMBuildBitCast(ctx->ac.builder, interp_param,
2059 ctx->ac.v2f32, "");
2060
2061 i = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
2062 ctx->ac.i32_0, "");
2063 j = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
2064 ctx->ac.i32_1, "");
2065 }
2066
2067 for (chan = 0; chan < 4; chan++) {
2068 LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
2069
2070 if (interp) {
2071 result[chan] = ac_build_fs_interp(&ctx->ac,
2072 llvm_chan,
2073 attr_number,
2074 prim_mask, i, j);
2075 } else {
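/* Flat input: read the attribute straight from the provoking vertex
* (operand 2 selects P0 in the interp mov encoding). */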
2076 result[chan] = ac_build_fs_interp_mov(&ctx->ac,
2077 LLVMConstInt(ctx->ac.i32, 2, false),
2078 llvm_chan,
2079 attr_number,
2080 prim_mask);
2081 result[chan] = LLVMBuildBitCast(ctx->ac.builder, result[chan], ctx->ac.i32, "");
2082 result[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, result[chan], LLVMTypeOf(interp_param), "");
2083 }
2084 }
2085 }
2086
2087 static void
2088 handle_fs_input_decl(struct radv_shader_context *ctx,
2089 struct nir_variable *variable)
2090 {
2091 int idx = variable->data.location;
2092 unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
2093 LLVMValueRef interp = NULL;
2094 uint64_t mask;
2095
2096 variable->data.driver_location = idx * 4;
2097 mask = ((1ull << attrib_count) - 1) << variable->data.location;
2098
2099 if (glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_FLOAT) {
2100 unsigned interp_type;
2101 if (variable->data.sample)
2102 interp_type = INTERP_SAMPLE;
2103 else if (variable->data.centroid)
2104 interp_type = INTERP_CENTROID;
2105 else
2106 interp_type = INTERP_CENTER;
2107
2108 interp = lookup_interp_param(&ctx->abi, variable->data.interpolation, interp_type);
2109 }
2110 bool is_16bit = glsl_type_is_16bit(variable->type);
2111 LLVMTypeRef type = is_16bit ? ctx->ac.i16 : ctx->ac.i32;
2112 if (interp == NULL)
2113 interp = LLVMGetUndef(type);
2114
2115 for (unsigned i = 0; i < attrib_count; ++i)
2116 ctx->inputs[ac_llvm_reg_index_soa(idx + i, 0)] = interp;
2117
2118 if (idx == VARYING_SLOT_CLIP_DIST0) {
2119 /* Do not account for the number of components inside the array
2120 * of clip/cull distances because this might wrongly set other
2121 * bits like primitive ID or layer.
2122 */
2123 mask = 1ull << VARYING_SLOT_CLIP_DIST0;
2124 }
2125
2126 ctx->input_mask |= mask;
2127 }
2128
2129 static void
2130 handle_vs_inputs(struct radv_shader_context *ctx,
2131 struct nir_shader *nir) {
2132 nir_foreach_variable(variable, &nir->inputs)
2133 handle_vs_input_decl(ctx, variable);
2134 }
2135
2136 static void
2137 prepare_interp_optimize(struct radv_shader_context *ctx,
2138 struct nir_shader *nir)
2139 {
2140 bool uses_center = false;
2141 bool uses_centroid = false;
2142 nir_foreach_variable(variable, &nir->inputs) {
2143 if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
2144 variable->data.sample)
2145 continue;
2146
2147 if (variable->data.centroid)
2148 uses_centroid = true;
2149 else
2150 uses_center = true;
2151 }
2152
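/* bc_optimize: when both center and centroid are used, the hardware sets
* the MSB of prim_mask for fully covered pixels, where centroid and
* center coincide; select the center barycentrics in that case. */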
2153 if (uses_center && uses_centroid) {
2154 LLVMValueRef sel = LLVMBuildICmp(ctx->ac.builder, LLVMIntSLT, ctx->abi.prim_mask, ctx->ac.i32_0, "");
2155 ctx->persp_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->persp_center, ctx->persp_centroid, "");
2156 ctx->linear_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->linear_center, ctx->linear_centroid, "");
2157 }
2158 }
2159
2160 static void
2161 handle_fs_inputs(struct radv_shader_context *ctx,
2162 struct nir_shader *nir)
2163 {
2164 prepare_interp_optimize(ctx, nir);
2165
2166 nir_foreach_variable(variable, &nir->inputs)
2167 handle_fs_input_decl(ctx, variable);
2168
2169 unsigned index = 0;
2170
2171 if (ctx->shader_info->info.ps.uses_input_attachments ||
2172 ctx->shader_info->info.needs_multiview_view_index) {
2173 ctx->input_mask |= 1ull << VARYING_SLOT_LAYER;
2174 ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)] = LLVMGetUndef(ctx->ac.i32);
2175 }
2176
2177 for (unsigned i = 0; i < RADEON_LLVM_MAX_INPUTS; ++i) {
2178 LLVMValueRef interp_param;
2179 LLVMValueRef *inputs = ctx->inputs + ac_llvm_reg_index_soa(i, 0);
2180
2181 if (!(ctx->input_mask & (1ull << i)))
2182 continue;
2183
2184 if (i >= VARYING_SLOT_VAR0 || i == VARYING_SLOT_PNTC ||
2185 i == VARYING_SLOT_PRIMITIVE_ID || i == VARYING_SLOT_LAYER) {
2186 interp_param = *inputs;
2187 interp_fs_input(ctx, index, interp_param, ctx->abi.prim_mask,
2188 inputs);
2189
2190 if (LLVMIsUndef(interp_param))
2191 ctx->shader_info->fs.flat_shaded_mask |= 1u << index;
2192 ++index;
2193 } else if (i == VARYING_SLOT_CLIP_DIST0) {
2194 int length = ctx->shader_info->info.ps.num_input_clips_culls;
2195
2196 for (unsigned j = 0; j < length; j += 4) {
2197 inputs = ctx->inputs + ac_llvm_reg_index_soa(i, j);
2198
2199 interp_param = *inputs;
2200 interp_fs_input(ctx, index, interp_param,
2201 ctx->abi.prim_mask, inputs);
2202 ++index;
2203 }
2204 } else if (i == VARYING_SLOT_POS) {
2205 for (int j = 0; j < 3; ++j)
2206 inputs[j] = ctx->abi.frag_pos[j];
2207
2208 inputs[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
2209 ctx->abi.frag_pos[3]);
2210 }
2211 }
2212 ctx->shader_info->fs.num_interp = index;
2213 ctx->shader_info->fs.input_mask = ctx->input_mask >> VARYING_SLOT_VAR0;
2214
2215 if (ctx->shader_info->info.needs_multiview_view_index)
2216 ctx->abi.view_index = ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
2217 }
2218
2219 static void
2220 scan_shader_output_decl(struct radv_shader_context *ctx,
2221 struct nir_variable *variable,
2222 struct nir_shader *shader,
2223 gl_shader_stage stage)
2224 {
2225 int idx = variable->data.location + variable->data.index;
2226 unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
2227 uint64_t mask_attribs;
2228
2229 variable->data.driver_location = idx * 4;
2230
2231 /* tess ctrl has its own load/store paths for outputs */
2232 if (stage == MESA_SHADER_TESS_CTRL)
2233 return;
2234
2235 mask_attribs = ((1ull << attrib_count) - 1) << idx;
2236 if (stage == MESA_SHADER_VERTEX ||
2237 stage == MESA_SHADER_TESS_EVAL ||
2238 stage == MESA_SHADER_GEOMETRY) {
2239 if (idx == VARYING_SLOT_CLIP_DIST0) {
2240 if (stage == MESA_SHADER_VERTEX) {
2241 ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
2242 ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
2243 ctx->shader_info->vs.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
2244 }
2245 if (stage == MESA_SHADER_TESS_EVAL) {
2246 ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
2247 ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
2248 ctx->shader_info->tes.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
2249 }
2250
2251 mask_attribs = 1ull << idx;
2252 }
2253 }
2254
2255 ctx->output_mask |= mask_attribs;
2256 }
2257
2258
2259 /* Initialize arguments for the shader export intrinsic */
2260 static void
2261 si_llvm_init_export_args(struct radv_shader_context *ctx,
2262 LLVMValueRef *values,
2263 unsigned enabled_channels,
2264 unsigned target,
2265 struct ac_export_args *args)
2266 {
2267 /* Specify the channels that are enabled. */
2268 args->enabled_channels = enabled_channels;
2269
2270 /* Specify whether the EXEC mask represents the valid mask */
2271 args->valid_mask = 0;
2272
2273 /* Specify whether this is the last export */
2274 args->done = 0;
2275
2276 /* Specify the target we are exporting */
2277 args->target = target;
2278
2279 args->compr = false;
2280 args->out[0] = LLVMGetUndef(ctx->ac.f32);
2281 args->out[1] = LLVMGetUndef(ctx->ac.f32);
2282 args->out[2] = LLVMGetUndef(ctx->ac.f32);
2283 args->out[3] = LLVMGetUndef(ctx->ac.f32);
2284
2285 if (!values)
2286 return;
2287
2288 bool is_16bit = ac_get_type_size(LLVMTypeOf(values[0])) == 2;
2289 if (ctx->stage == MESA_SHADER_FRAGMENT) {
2290 unsigned index = target - V_008DFC_SQ_EXP_MRT;
2291 unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
2292 bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
2293 bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;
2294 unsigned chan;
2295
2296 LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
2297 LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
2298 unsigned bits, bool hi) = NULL;
2299
2300 switch (col_format) {
2301 case V_028714_SPI_SHADER_ZERO:
2302 args->enabled_channels = 0; /* writemask */
2303 args->target = V_008DFC_SQ_EXP_NULL;
2304 break;
2305
2306 case V_028714_SPI_SHADER_32_R:
2307 args->enabled_channels = 1;
2308 args->out[0] = values[0];
2309 break;
2310
2311 case V_028714_SPI_SHADER_32_GR:
2312 args->enabled_channels = 0x3;
2313 args->out[0] = values[0];
2314 args->out[1] = values[1];
2315 break;
2316
2317 case V_028714_SPI_SHADER_32_AR:
2318 args->enabled_channels = 0x9;
2319 args->out[0] = values[0];
2320 args->out[3] = values[3];
2321 break;
2322
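/* The packed 16-bit formats export four channels as two dwords with the
* COMPR flag; for compressed exports, writemask bits 0 and 2 enable the
* XY and ZW pairs respectively, hence 0x5. */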
2323 case V_028714_SPI_SHADER_FP16_ABGR:
2324 args->enabled_channels = 0x5;
2325 packf = ac_build_cvt_pkrtz_f16;
2326 if (is_16bit) {
2327 for (unsigned chan = 0; chan < 4; chan++)
2328 values[chan] = LLVMBuildFPExt(ctx->ac.builder,
2329 values[chan],
2330 ctx->ac.f32, "");
2331 }
2332 break;
2333
2334 case V_028714_SPI_SHADER_UNORM16_ABGR:
2335 args->enabled_channels = 0x5;
2336 packf = ac_build_cvt_pknorm_u16;
2337 break;
2338
2339 case V_028714_SPI_SHADER_SNORM16_ABGR:
2340 args->enabled_channels = 0x5;
2341 packf = ac_build_cvt_pknorm_i16;
2342 break;
2343
2344 case V_028714_SPI_SHADER_UINT16_ABGR:
2345 args->enabled_channels = 0x5;
2346 packi = ac_build_cvt_pk_u16;
2347 if (is_16bit) {
2348 for (unsigned chan = 0; chan < 4; chan++)
2349 values[chan] = LLVMBuildZExt(ctx->ac.builder,
2350 values[chan],
2351 ctx->ac.i32, "");
2352 }
2353 break;
2354
2355 case V_028714_SPI_SHADER_SINT16_ABGR:
2356 args->enabled_channels = 0x5;
2357 packi = ac_build_cvt_pk_i16;
2358 if (is_16bit) {
2359 for (unsigned chan = 0; chan < 4; chan++)
2360 values[chan] = LLVMBuildSExt(ctx->ac.builder,
2361 values[chan],
2362 ctx->ac.i32, "");
2363 }
2364 break;
2365
2366 default:
2367 case V_028714_SPI_SHADER_32_ABGR:
2368 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
2369 break;
2370 }
2371
2372 /* Pack f16 or norm_i16/u16. */
2373 if (packf) {
2374 for (chan = 0; chan < 2; chan++) {
2375 LLVMValueRef pack_args[2] = {
2376 values[2 * chan],
2377 values[2 * chan + 1]
2378 };
2379 LLVMValueRef packed;
2380
2381 packed = packf(&ctx->ac, pack_args);
2382 args->out[chan] = ac_to_float(&ctx->ac, packed);
2383 }
2384 args->compr = 1; /* COMPR flag */
2385 }
2386
2387 /* Pack i16/u16. */
2388 if (packi) {
2389 for (chan = 0; chan < 2; chan++) {
2390 LLVMValueRef pack_args[2] = {
2391 ac_to_integer(&ctx->ac, values[2 * chan]),
2392 ac_to_integer(&ctx->ac, values[2 * chan + 1])
2393 };
2394 LLVMValueRef packed;
2395
2396 packed = packi(&ctx->ac, pack_args,
2397 is_int8 ? 8 : is_int10 ? 10 : 16,
2398 chan == 1);
2399 args->out[chan] = ac_to_float(&ctx->ac, packed);
2400 }
2401 args->compr = 1; /* COMPR flag */
2402 }
2403 return;
2404 }
2405
2406 if (is_16bit) {
2407 for (unsigned chan = 0; chan < 4; chan++) {
2408 values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i16, "");
2409 args->out[chan] = LLVMBuildZExt(ctx->ac.builder, values[chan], ctx->ac.i32, "");
2410 }
2411 } else
2412 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
2413
2414 for (unsigned i = 0; i < 4; ++i) {
2415 if (!(args->enabled_channels & (1 << i)))
2416 continue;
2417
2418 args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
2419 }
2420 }
2421
2422 static void
2423 radv_export_param(struct radv_shader_context *ctx, unsigned index,
2424 LLVMValueRef *values, unsigned enabled_channels)
2425 {
2426 struct ac_export_args args;
2427
2428 si_llvm_init_export_args(ctx, values, enabled_channels,
2429 V_008DFC_SQ_EXP_PARAM + index, &args);
2430 ac_build_export(&ctx->ac, &args);
2431 }
2432
2433 static LLVMValueRef
2434 radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
2435 {
2436 LLVMValueRef output =
2437 ctx->abi.outputs[ac_llvm_reg_index_soa(index, chan)];
2438
2439 return LLVMBuildLoad(ctx->ac.builder, output, "");
2440 }
2441
2442 static void
2443 handle_vs_outputs_post(struct radv_shader_context *ctx,
2444 bool export_prim_id, bool export_layer_id,
2445 struct radv_vs_output_info *outinfo)
2446 {
2447 uint32_t param_count = 0;
2448 unsigned target;
2449 unsigned pos_idx, num_pos_exports = 0;
2450 struct ac_export_args args, pos_args[4] = {};
2451 LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_index_value = NULL;
2452 int i;
2453
2454 if (ctx->options->key.has_multiview_view_index) {
2455 LLVMValueRef *tmp_out = &ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
2456 if (!*tmp_out) {
2457 for (unsigned i = 0; i < 4; ++i)
2458 ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
2459 ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
2460 }
2461
2462 LLVMBuildStore(ctx->ac.builder, ac_to_float(&ctx->ac, ctx->abi.view_index), *tmp_out);
2463 ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
2464 }
2465
2466 memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
2467 sizeof(outinfo->vs_output_param_offset));
2468
2469 if (ctx->output_mask & (1ull << VARYING_SLOT_CLIP_DIST0)) {
2470 unsigned output_usage_mask, length;
2471 LLVMValueRef slots[8];
2472 unsigned j;
2473
2474 if (ctx->stage == MESA_SHADER_VERTEX &&
2475 !ctx->is_gs_copy_shader) {
2476 output_usage_mask =
2477 ctx->shader_info->info.vs.output_usage_mask[VARYING_SLOT_CLIP_DIST0];
2478 } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
2479 output_usage_mask =
2480 ctx->shader_info->info.tes.output_usage_mask[VARYING_SLOT_CLIP_DIST0];
2481 } else {
2482 assert(ctx->is_gs_copy_shader);
2483 output_usage_mask =
2484 ctx->shader_info->info.gs.output_usage_mask[VARYING_SLOT_CLIP_DIST0];
2485 }
2486
2487 length = util_last_bit(output_usage_mask);
2488
2489 i = VARYING_SLOT_CLIP_DIST0;
2490 for (j = 0; j < length; j++)
2491 slots[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
2492
2493 for (i = length; i < 8; i++)
2494 slots[i] = LLVMGetUndef(ctx->ac.f32);
2495
2496 if (length > 4) {
2497 target = V_008DFC_SQ_EXP_POS + 3;
2498 si_llvm_init_export_args(ctx, &slots[4], 0xf, target, &args);
2499 memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
2500 &args, sizeof(args));
2501 }
2502
2503 target = V_008DFC_SQ_EXP_POS + 2;
2504 si_llvm_init_export_args(ctx, &slots[0], 0xf, target, &args);
2505 memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
2506 &args, sizeof(args));
2507
2508 /* Export the clip/cull distances values to the next stage. */
2509 radv_export_param(ctx, param_count, &slots[0], 0xf);
2510 outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST0] = param_count++;
2511 if (length > 4) {
2512 radv_export_param(ctx, param_count, &slots[4], 0xf);
2513 outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST1] = param_count++;
2514 }
2515 }
2516
2517 LLVMValueRef pos_values[4] = {ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_1};
2518 if (ctx->output_mask & (1ull << VARYING_SLOT_POS)) {
2519 for (unsigned j = 0; j < 4; j++)
2520 pos_values[j] = radv_load_output(ctx, VARYING_SLOT_POS, j);
2521 }
2522 si_llvm_init_export_args(ctx, pos_values, 0xf, V_008DFC_SQ_EXP_POS, &pos_args[0]);
2523
2524 if (ctx->output_mask & (1ull << VARYING_SLOT_PSIZ)) {
2525 outinfo->writes_pointsize = true;
2526 psize_value = radv_load_output(ctx, VARYING_SLOT_PSIZ, 0);
2527 }
2528
2529 if (ctx->output_mask & (1ull << VARYING_SLOT_LAYER)) {
2530 outinfo->writes_layer = true;
2531 layer_value = radv_load_output(ctx, VARYING_SLOT_LAYER, 0);
2532 }
2533
2534 if (ctx->output_mask & (1ull << VARYING_SLOT_VIEWPORT)) {
2535 outinfo->writes_viewport_index = true;
2536 viewport_index_value = radv_load_output(ctx, VARYING_SLOT_VIEWPORT, 0);
2537 }
2538
2539 if (outinfo->writes_pointsize ||
2540 outinfo->writes_layer ||
2541 outinfo->writes_viewport_index) {
2542 pos_args[1].enabled_channels = ((outinfo->writes_pointsize ? 1 : 0) |
2543 (outinfo->writes_layer ? 4 : 0));
2544 pos_args[1].valid_mask = 0;
2545 pos_args[1].done = 0;
2546 pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
2547 pos_args[1].compr = 0;
2548 pos_args[1].out[0] = ctx->ac.f32_0; /* X */
2549 pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
2550 pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
2551 pos_args[1].out[3] = ctx->ac.f32_0; /* W */
2552
2553 if (outinfo->writes_pointsize)
2554 pos_args[1].out[0] = psize_value;
2555 if (outinfo->writes_layer)
2556 pos_args[1].out[2] = layer_value;
2557 if (outinfo->writes_viewport_index) {
2558 if (ctx->options->chip_class >= GFX9) {
2559 /* GFX9 has the layer in out.z[10:0] and the viewport
2560 * index in out.z[19:16].
2561 */
2562 LLVMValueRef v = viewport_index_value;
2563 v = ac_to_integer(&ctx->ac, v);
2564 v = LLVMBuildShl(ctx->ac.builder, v,
2565 LLVMConstInt(ctx->ac.i32, 16, false),
2566 "");
2567 v = LLVMBuildOr(ctx->ac.builder, v,
2568 ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
2569
2570 pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
2571 pos_args[1].enabled_channels |= 1 << 2;
2572 } else {
2573 pos_args[1].out[3] = viewport_index_value;
2574 pos_args[1].enabled_channels |= 1 << 3;
2575 }
2576 }
2577 }
2578 for (i = 0; i < 4; i++) {
2579 if (pos_args[i].out[0])
2580 num_pos_exports++;
2581 }
2582
2583 pos_idx = 0;
2584 for (i = 0; i < 4; i++) {
2585 if (!pos_args[i].out[0])
2586 continue;
2587
2588 /* Specify the target we are exporting */
2589 pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
2590 if (pos_idx == num_pos_exports)
2591 pos_args[i].done = 1;
2592 ac_build_export(&ctx->ac, &pos_args[i]);
2593 }
2594
2595 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2596 LLVMValueRef values[4];
2597 if (!(ctx->output_mask & (1ull << i)))
2598 continue;
2599
2600 if (i != VARYING_SLOT_LAYER &&
2601 i != VARYING_SLOT_PRIMITIVE_ID &&
2602 i < VARYING_SLOT_VAR0)
2603 continue;
2604
2605 for (unsigned j = 0; j < 4; j++)
2606 values[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
2607
2608 unsigned output_usage_mask;
2609
2610 if (ctx->stage == MESA_SHADER_VERTEX &&
2611 !ctx->is_gs_copy_shader) {
2612 output_usage_mask =
2613 ctx->shader_info->info.vs.output_usage_mask[i];
2614 } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
2615 output_usage_mask =
2616 ctx->shader_info->info.tes.output_usage_mask[i];
2617 } else {
2618 assert(ctx->is_gs_copy_shader);
2619 output_usage_mask =
2620 ctx->shader_info->info.gs.output_usage_mask[i];
2621 }
2622
2623 radv_export_param(ctx, param_count, values, output_usage_mask);
2624
2625 outinfo->vs_output_param_offset[i] = param_count++;
2626 }
2627
2628 if (export_prim_id) {
2629 LLVMValueRef values[4];
2630
2631 values[0] = ctx->vs_prim_id;
2632 ctx->shader_info->vs.vgpr_comp_cnt = MAX2(2,
2633 ctx->shader_info->vs.vgpr_comp_cnt);
2634 for (unsigned j = 1; j < 4; j++)
2635 values[j] = ctx->ac.f32_0;
2636
2637 radv_export_param(ctx, param_count, values, 0x1);
2638
2639 outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
2640 outinfo->export_prim_id = true;
2641 }
2642
2643 if (export_layer_id && layer_value) {
2644 LLVMValueRef values[4];
2645
2646 values[0] = layer_value;
2647 for (unsigned j = 1; j < 4; j++)
2648 values[j] = ctx->ac.f32_0;
2649
2650 radv_export_param(ctx, param_count, values, 0x1);
2651
2652 outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = param_count++;
2653 }
2654
2655 outinfo->pos_exports = num_pos_exports;
2656 outinfo->param_exports = param_count;
2657 }
2658
2659 static void
2660 handle_es_outputs_post(struct radv_shader_context *ctx,
2661 struct radv_es_output_info *outinfo)
2662 {
2663 int j;
2664 uint64_t max_output_written = 0;
2665 LLVMValueRef lds_base = NULL;
2666
2667 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2668 unsigned output_usage_mask;
2669 int param_index;
2670 int length = 4;
2671
2672 if (!(ctx->output_mask & (1ull << i)))
2673 continue;
2674
2675 if (ctx->stage == MESA_SHADER_VERTEX) {
2676 output_usage_mask =
2677 ctx->shader_info->info.vs.output_usage_mask[i];
2678 } else {
2679 assert(ctx->stage == MESA_SHADER_TESS_EVAL);
2680 output_usage_mask =
2681 ctx->shader_info->info.tes.output_usage_mask[i];
2682 }
2683
2684 if (i == VARYING_SLOT_CLIP_DIST0)
2685 length = util_last_bit(output_usage_mask);
2686
2687 param_index = shader_io_get_unique_index(i);
2688
2689 max_output_written = MAX2(param_index + (length > 4), max_output_written);
2690 }
2691
2692 outinfo->esgs_itemsize = (max_output_written + 1) * 16;
2693
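/* On GFX9 the merged ES/GS stages pass ES outputs through LDS instead of
* the ESGS ring: each vertex gets esgs_itemsize bytes, addressed by its
* global thread index (wave index * 64 lanes + lane id). */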
2694 if (ctx->ac.chip_class >= GFX9) {
2695 unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
2696 LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
2697 LLVMValueRef wave_idx = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 24, 4);
2698 vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
2699 LLVMBuildMul(ctx->ac.builder, wave_idx,
2700 LLVMConstInt(ctx->ac.i32, 64, false), ""), "");
2701 lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
2702 LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
2703 }
2704
2705 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2706 LLVMValueRef dw_addr = NULL;
2707 LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
2708 unsigned output_usage_mask;
2709 int param_index;
2710 int length = 4;
2711
2712 if (!(ctx->output_mask & (1ull << i)))
2713 continue;
2714
2715 if (ctx->stage == MESA_SHADER_VERTEX) {
2716 output_usage_mask =
2717 ctx->shader_info->info.vs.output_usage_mask[i];
2718 } else {
2719 assert(ctx->stage == MESA_SHADER_TESS_EVAL);
2720 output_usage_mask =
2721 ctx->shader_info->info.tes.output_usage_mask[i];
2722 }
2723
2724 if (i == VARYING_SLOT_CLIP_DIST0)
2725 length = util_last_bit(output_usage_mask);
2726
2727 param_index = shader_io_get_unique_index(i);
2728
2729 if (lds_base) {
2730 dw_addr = LLVMBuildAdd(ctx->ac.builder, lds_base,
2731 LLVMConstInt(ctx->ac.i32, param_index * 4, false),
2732 "");
2733 }
2734
2735 for (j = 0; j < length; j++) {
2736 if (!(output_usage_mask & (1 << j)))
2737 continue;
2738
2739 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
2740 out_val = ac_to_integer(&ctx->ac, out_val);
2741 out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");
2742
2743 if (ctx->ac.chip_class >= GFX9) {
2744 LLVMValueRef dw_addr_offset =
2745 LLVMBuildAdd(ctx->ac.builder, dw_addr,
2746 LLVMConstInt(ctx->ac.i32,
2747 j, false), "");
2748
2749 ac_lds_store(&ctx->ac, dw_addr_offset, out_val);
2750 } else {
2751 ac_build_buffer_store_dword(&ctx->ac,
2752 ctx->esgs_ring,
2753 out_val, 1,
2754 NULL, ctx->es2gs_offset,
2755 (4 * param_index + j) * 4,
2756 1, 1, true, true);
2757 }
2758 }
2759 }
2760 }
2761
2762 static void
2763 handle_ls_outputs_post(struct radv_shader_context *ctx)
2764 {
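/* LS outputs reach the HS stage through LDS: each vertex occupies
* num_tcs_inputs * 4 dwords, indexed by rel_auto_id. */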
2765 LLVMValueRef vertex_id = ctx->rel_auto_id;
2766 uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->info.vs.ls_outputs_written);
2767 LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
2768 LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
2769 vertex_dw_stride, "");
2770
2771 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2772 unsigned output_usage_mask =
2773 ctx->shader_info->info.vs.output_usage_mask[i];
2774 LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
2775 int length = 4;
2776
2777 if (!(ctx->output_mask & (1ull << i)))
2778 continue;
2779
2780 if (i == VARYING_SLOT_CLIP_DIST0)
2781 length = util_last_bit(output_usage_mask);
2782
2783 int param = shader_io_get_unique_index(i);
2784 LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
2785 LLVMConstInt(ctx->ac.i32, param * 4, false),
2786 "");
2787 for (unsigned j = 0; j < length; j++) {
2788 LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
2789 value = ac_to_integer(&ctx->ac, value);
2790 value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
2791 ac_lds_store(&ctx->ac, dw_addr, value);
2792 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
2793 }
2794 }
2795 }
2796
2797 static void
2798 write_tess_factors(struct radv_shader_context *ctx)
2799 {
2800 unsigned stride, outer_comps, inner_comps;
2801 struct ac_build_if_state if_ctx, inner_if_ctx;
2802 LLVMValueRef invocation_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 8, 5);
2803 LLVMValueRef rel_patch_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
2804 unsigned tess_inner_index = 0, tess_outer_index;
2805 LLVMValueRef lds_base, lds_inner = NULL, lds_outer, byteoffset, buffer;
2806 LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
2807 int i;
2808 ac_emit_barrier(&ctx->ac, ctx->stage);
2809
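/* Tess factor counts per domain: isolines have 2 outer factors,
* triangles 3 outer + 1 inner, quads 4 outer + 2 inner. */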
2810 switch (ctx->options->key.tcs.primitive_mode) {
2811 case GL_ISOLINES:
2812 stride = 2;
2813 outer_comps = 2;
2814 inner_comps = 0;
2815 break;
2816 case GL_TRIANGLES:
2817 stride = 4;
2818 outer_comps = 3;
2819 inner_comps = 1;
2820 break;
2821 case GL_QUADS:
2822 stride = 6;
2823 outer_comps = 4;
2824 inner_comps = 2;
2825 break;
2826 default:
2827 return;
2828 }
2829
2830 ac_nir_build_if(&if_ctx, ctx,
2831 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2832 invocation_id, ctx->ac.i32_0, ""));
2833
2834 lds_base = get_tcs_out_current_patch_data_offset(ctx);
2835
2836 if (inner_comps) {
2837 tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
2838 lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
2839 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
2840 }
2841
2842 tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
2843 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
2844 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");
2845
2846 for (i = 0; i < 4; i++) {
2847 inner[i] = LLVMGetUndef(ctx->ac.i32);
2848 outer[i] = LLVMGetUndef(ctx->ac.i32);
2849 }
2850
2851 /* LINES reversal: the two outer isoline factors are stored in reversed order. */
2852 if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
2853 outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
2854 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
2855 ctx->ac.i32_1, "");
2856 outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
2857 } else {
2858 for (i = 0; i < outer_comps; i++) {
2859 outer[i] = out[i] =
2860 ac_lds_load(&ctx->ac, lds_outer);
2861 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
2862 ctx->ac.i32_1, "");
2863 }
2864 for (i = 0; i < inner_comps; i++) {
2865 inner[i] = out[outer_comps+i] =
2866 ac_lds_load(&ctx->ac, lds_inner);
2867 lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_inner,
2868 ctx->ac.i32_1, "");
2869 }
2870 }
2871
2872 /* Convert the outputs to vectors for stores. */
2873 vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
2874 vec1 = NULL;
2875
2876 if (stride > 4)
2877 vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);
2878
2879
2880 buffer = ctx->hs_ring_tess_factor;
2881 tf_base = ctx->tess_factor_offset;
2882 byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
2883 LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
2884 unsigned tf_offset = 0;
2885
2886 if (ctx->options->chip_class <= VI) {
2887 ac_nir_build_if(&inner_if_ctx, ctx,
2888 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2889 rel_patch_id, ctx->ac.i32_0, ""));
2890
2891 /* Store the dynamic HS control word. */
2892 ac_build_buffer_store_dword(&ctx->ac, buffer,
2893 LLVMConstInt(ctx->ac.i32, 0x80000000, false),
2894 1, ctx->ac.i32_0, tf_base,
2895 0, 1, 0, true, false);
2896 tf_offset += 4;
2897
2898 ac_nir_build_endif(&inner_if_ctx);
2899 }
2900
2901 /* Store the tessellation factors. */
2902 ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
2903 MIN2(stride, 4), byteoffset, tf_base,
2904 tf_offset, 1, 0, true, false);
2905 if (vec1)
2906 ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
2907 stride - 4, byteoffset, tf_base,
2908 16 + tf_offset, 1, 0, true, false);
2909
2910 /* Store to the off-chip buffer for TES to read, but only if TES reads them. */
2911 if (ctx->options->key.tcs.tes_reads_tess_factors) {
2912 LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
2913 LLVMValueRef tf_inner_offset;
2914 unsigned param_outer, param_inner;
2915
2916 param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
2917 tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
2918 LLVMConstInt(ctx->ac.i32, param_outer, 0));
2919
2920 outer_vec = ac_build_gather_values(&ctx->ac, outer,
2921 util_next_power_of_two(outer_comps));
2922
2923 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
2924 outer_comps, tf_outer_offset,
2925 ctx->oc_lds, 0, 1, 0, true, false);
2926 if (inner_comps) {
2927 param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
2928 tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
2929 LLVMConstInt(ctx->ac.i32, param_inner, 0));
2930
2931 inner_vec = inner_comps == 1 ? inner[0] :
2932 ac_build_gather_values(&ctx->ac, inner, inner_comps);
2933 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
2934 inner_comps, tf_inner_offset,
2935 ctx->oc_lds, 0, 1, 0, true, false);
2936 }
2937 }
2938 ac_nir_build_endif(&if_ctx);
2939 }
2940
2941 static void
2942 handle_tcs_outputs_post(struct radv_shader_context *ctx)
2943 {
2944 write_tess_factors(ctx);
2945 }
2946
2947 static bool
2948 si_export_mrt_color(struct radv_shader_context *ctx,
2949 LLVMValueRef *color, unsigned index,
2950 struct ac_export_args *args)
2951 {
2952 /* Export */
2953 si_llvm_init_export_args(ctx, color, 0xf,
2954 V_008DFC_SQ_EXP_MRT + index, args);
2955 if (!args->enabled_channels)
2956 return false; /* unnecessary NULL export */
2957
2958 return true;
2959 }
2960
2961 static void
2962 radv_export_mrt_z(struct radv_shader_context *ctx,
2963 LLVMValueRef depth, LLVMValueRef stencil,
2964 LLVMValueRef samplemask)
2965 {
2966 struct ac_export_args args;
2967
2968 ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);
2969
2970 ac_build_export(&ctx->ac, &args);
2971 }
2972
2973 static void
2974 handle_fs_outputs_post(struct radv_shader_context *ctx)
2975 {
2976 unsigned index = 0;
2977 LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
2978 struct ac_export_args color_args[8];
2979
2980 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2981 LLVMValueRef values[4];
2982
2983 if (!(ctx->output_mask & (1ull << i)))
2984 continue;
2985
2986 if (i < FRAG_RESULT_DATA0)
2987 continue;
2988
2989 for (unsigned j = 0; j < 4; j++)
2990 values[j] = ac_to_float(&ctx->ac,
2991 radv_load_output(ctx, i, j));
2992
2993 bool ret = si_export_mrt_color(ctx, values,
2994 i - FRAG_RESULT_DATA0,
2995 &color_args[index]);
2996 if (ret)
2997 index++;
2998 }
2999
3000 /* Process depth, stencil, samplemask. */
3001 if (ctx->shader_info->info.ps.writes_z) {
3002 depth = ac_to_float(&ctx->ac,
3003 radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
3004 }
3005 if (ctx->shader_info->info.ps.writes_stencil) {
3006 stencil = ac_to_float(&ctx->ac,
3007 radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
3008 }
3009 if (ctx->shader_info->info.ps.writes_sample_mask) {
3010 samplemask = ac_to_float(&ctx->ac,
3011 radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
3012 }
3013
3014 /* Set the DONE bit on last non-null color export only if Z isn't
3015 * exported.
3016 */
3017 if (index > 0 &&
3018 !ctx->shader_info->info.ps.writes_z &&
3019 !ctx->shader_info->info.ps.writes_stencil &&
3020 !ctx->shader_info->info.ps.writes_sample_mask) {
3021 unsigned last = index - 1;
3022
3023 color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
3024 color_args[last].done = 1; /* DONE bit */
3025 }
3026
3027 /* Export PS outputs. */
3028 for (unsigned i = 0; i < index; i++)
3029 ac_build_export(&ctx->ac, &color_args[i]);
3030
3031 if (depth || stencil || samplemask)
3032 radv_export_mrt_z(ctx, depth, stencil, samplemask);
3033 else if (!index)
3034 ac_build_export_null(&ctx->ac);
3035 }
3036
3037 static void
3038 emit_gs_epilogue(struct radv_shader_context *ctx)
3039 {
3040 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
3041 }
3042
3043 static void
3044 handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
3045 LLVMValueRef *addrs)
3046 {
3047 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
3048
3049 switch (ctx->stage) {
3050 case MESA_SHADER_VERTEX:
3051 if (ctx->options->key.vs.as_ls)
3052 handle_ls_outputs_post(ctx);
3053 else if (ctx->options->key.vs.as_es)
3054 handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
3055 else
3056 handle_vs_outputs_post(ctx, ctx->options->key.vs.export_prim_id,
3057 ctx->options->key.vs.export_layer_id,
3058 &ctx->shader_info->vs.outinfo);
3059 break;
3060 case MESA_SHADER_FRAGMENT:
3061 handle_fs_outputs_post(ctx);
3062 break;
3063 case MESA_SHADER_GEOMETRY:
3064 emit_gs_epilogue(ctx);
3065 break;
3066 case MESA_SHADER_TESS_CTRL:
3067 handle_tcs_outputs_post(ctx);
3068 break;
3069 case MESA_SHADER_TESS_EVAL:
3070 if (ctx->options->key.tes.as_es)
3071 handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
3072 else
3073 handle_vs_outputs_post(ctx, ctx->options->key.tes.export_prim_id,
3074 ctx->options->key.tes.export_layer_id,
3075 &ctx->shader_info->tes.outinfo);
3076 break;
3077 default:
3078 break;
3079 }
3080 }
3081
3082 static void ac_llvm_finalize_module(struct radv_shader_context *ctx,
3083 LLVMPassManagerRef passmgr,
3084 const struct radv_nir_compiler_options *options)
3085 {
3086 LLVMRunPassManager(passmgr, ctx->ac.module);
3087 LLVMDisposeBuilder(ctx->ac.builder);
3088
3089 ac_llvm_context_dispose(&ctx->ac);
3090 }
3091
3092 static void
3093 ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
3094 {
3095 struct radv_vs_output_info *outinfo;
3096
3097 switch (ctx->stage) {
3098 case MESA_SHADER_FRAGMENT:
3099 case MESA_SHADER_COMPUTE:
3100 case MESA_SHADER_TESS_CTRL:
3101 case MESA_SHADER_GEOMETRY:
3102 return;
3103 case MESA_SHADER_VERTEX:
3104 if (ctx->options->key.vs.as_ls ||
3105 ctx->options->key.vs.as_es)
3106 return;
3107 outinfo = &ctx->shader_info->vs.outinfo;
3108 break;
3109 case MESA_SHADER_TESS_EVAL:
3110 if (ctx->options->key.tes.as_es)
3111 return;
3112 outinfo = &ctx->shader_info->tes.outinfo;
3113 break;
3114 default:
3115 unreachable("Unhandled shader type");
3116 }
3117
3118 ac_optimize_vs_outputs(&ctx->ac,
3119 ctx->main_function,
3120 outinfo->vs_output_param_offset,
3121 VARYING_SLOT_MAX,
3122 &outinfo->param_exports);
3123 }
3124
3125 static void
3126 ac_setup_rings(struct radv_shader_context *ctx)
3127 {
3128 if (ctx->options->chip_class <= VI &&
3129 (ctx->stage == MESA_SHADER_GEOMETRY ||
3130 ctx->options->key.vs.as_es || ctx->options->key.tes.as_es)) {
3131 unsigned ring = ctx->stage == MESA_SHADER_GEOMETRY ? RING_ESGS_GS
3132 : RING_ESGS_VS;
3133 LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, ring, false);
3134
3135 ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac,
3136 ctx->ring_offsets,
3137 offset);
3138 }
3139
3140 if (ctx->is_gs_copy_shader) {
3141 ctx->gsvs_ring[0] =
3142 ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
3143 LLVMConstInt(ctx->ac.i32,
3144 RING_GSVS_VS, false));
3145 }
3146
3147 if (ctx->stage == MESA_SHADER_GEOMETRY) {
3148 /* The conceptual layout of the GSVS ring is
3149 * v0c0 .. vLc0 v0c1 .. vLc1 ..
3150 * but the real memory layout is swizzled across
3151 * threads:
3152 * t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
3153 * t16v0c0 ..
3154 * Override the buffer descriptor accordingly.
3155 */
3156 LLVMTypeRef v2i64 = LLVMVectorType(ctx->ac.i64, 2);
3157 uint64_t stream_offset = 0;
3158 unsigned num_records = 64;
3159 LLVMValueRef base_ring;
3160
3161 base_ring =
3162 ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
3163 LLVMConstInt(ctx->ac.i32,
3164 RING_GSVS_GS, false));
3165
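/* Build one descriptor per stream: add the accumulated stream offset to
* the base address in the low two dwords, OR the per-thread stride into
* dword1 and set the record count in dword2. The offset advances by
* stride * 64 because the ring is swizzled across a 64-thread wave. */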
3166 for (unsigned stream = 0; stream < 4; stream++) {
3167 unsigned num_components, stride;
3168 LLVMValueRef ring, tmp;
3169
3170 num_components =
3171 ctx->shader_info->info.gs.num_stream_output_components[stream];
3172
3173 if (!num_components)
3174 continue;
3175
3176 stride = 4 * num_components * ctx->gs_max_out_vertices;
3177
3178 /* Limit on the stride field for <= CIK. */
3179 assert(stride < (1 << 14));
3180
3181 ring = LLVMBuildBitCast(ctx->ac.builder,
3182 base_ring, v2i64, "");
3183 tmp = LLVMBuildExtractElement(ctx->ac.builder,
3184 ring, ctx->ac.i32_0, "");
3185 tmp = LLVMBuildAdd(ctx->ac.builder, tmp,
3186 LLVMConstInt(ctx->ac.i64,
3187 stream_offset, 0), "");
3188 ring = LLVMBuildInsertElement(ctx->ac.builder,
3189 ring, tmp, ctx->ac.i32_0, "");
3190
3191 stream_offset += stride * 64;
3192
3193 ring = LLVMBuildBitCast(ctx->ac.builder, ring,
3194 ctx->ac.v4i32, "");
3195
3196 tmp = LLVMBuildExtractElement(ctx->ac.builder, ring,
3197 ctx->ac.i32_1, "");
3198 tmp = LLVMBuildOr(ctx->ac.builder, tmp,
3199 LLVMConstInt(ctx->ac.i32,
3200 S_008F04_STRIDE(stride), false), "");
3201 ring = LLVMBuildInsertElement(ctx->ac.builder, ring, tmp,
3202 ctx->ac.i32_1, "");
3203
3204 ring = LLVMBuildInsertElement(ctx->ac.builder, ring,
3205 LLVMConstInt(ctx->ac.i32,
3206 num_records, false),
3207 LLVMConstInt(ctx->ac.i32, 2, false), "");
3208
3209 ctx->gsvs_ring[stream] = ring;
3210 }
3211 }
3212
3213 if (ctx->stage == MESA_SHADER_TESS_CTRL ||
3214 ctx->stage == MESA_SHADER_TESS_EVAL) {
3215 ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
3216 ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
3217 }
3218 }
3219
3220 static unsigned
3221 ac_nir_get_max_workgroup_size(enum chip_class chip_class,
3222 const struct nir_shader *nir)
3223 {
3224 switch (nir->info.stage) {
3225 case MESA_SHADER_TESS_CTRL:
3226 return chip_class >= CIK ? 128 : 64;
3227 case MESA_SHADER_GEOMETRY:
3228 return chip_class >= GFX9 ? 128 : 64;
3229 case MESA_SHADER_COMPUTE:
3230 break;
3231 default:
3232 return 0;
3233 }
3234
3235 unsigned max_workgroup_size = nir->info.cs.local_size[0] *
3236 nir->info.cs.local_size[1] *
3237 nir->info.cs.local_size[2];
3238 return max_workgroup_size;
3239 }
3240
3241 /* Fixup the HW not emitting the TCS regs if there are no HS threads. */
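/* merged_wave_info[15:8] holds the HS thread count. When it is zero the
* hardware skips the TCS input VGPRs and the LS inputs arrive in the
* registers declared for the TCS system values, so select them from
* there. */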
3242 static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
3243 {
3244 LLVMValueRef count = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 8, 8);
3245 LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
3246 ctx->ac.i32_0, "");
3247 ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
3248 ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_rel_ids, ctx->rel_auto_id, "");
3249 ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_patch_id, ctx->abi.vertex_id, "");
3250 }
3251
3252 static void prepare_gs_input_vgprs(struct radv_shader_context *ctx)
3253 {
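/* On merged GS each input VGPR packs two 16-bit ES vertex offsets;
* unpack them into the six separate offsets the GS code expects. */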
3254 for (int i = 5; i >= 0; --i) {
3255 ctx->gs_vtx_offset[i] = ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
3256 (i & 1) * 16, 16);
3257 }
3258
3259 ctx->gs_wave_id = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 16, 8);
3260 }
3261
3262
3263 static
3264 LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
3265 struct nir_shader *const *shaders,
3266 int shader_count,
3267 struct radv_shader_variant_info *shader_info,
3268 const struct radv_nir_compiler_options *options)
3269 {
3270 struct radv_shader_context ctx = {0};
3271 unsigned i;
3272 ctx.options = options;
3273 ctx.shader_info = shader_info;
3274
3275 ac_llvm_context_init(&ctx.ac, options->chip_class, options->family);
3276 ctx.context = ctx.ac.context;
3277 ctx.ac.module = ac_create_module(ac_llvm->tm, ctx.context);
3278
3279 enum ac_float_mode float_mode =
3280 options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
3281 AC_FLOAT_MODE_DEFAULT;
3282
3283 ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
3284
3285 memset(shader_info, 0, sizeof(*shader_info));
3286
3287 for (int i = 0; i < shader_count; ++i)
3288 radv_nir_shader_info_pass(shaders[i], options, &shader_info->info);
3289
3290 for (i = 0; i < RADV_UD_MAX_SETS; i++)
3291 shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
3292 for (i = 0; i < AC_UD_MAX_UD; i++)
3293 shader_info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;
3294
3295 ctx.max_workgroup_size = 0;
3296 for (int i = 0; i < shader_count; ++i) {
3297 ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
3298 ac_nir_get_max_workgroup_size(ctx.options->chip_class,
3299 shaders[i]));
3300 }
3301
3302 create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
3303 shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);
3304
3305 ctx.abi.inputs = &ctx.inputs[0];
3306 ctx.abi.emit_outputs = handle_shader_outputs_post;
3307 ctx.abi.emit_vertex = visit_emit_vertex;
3308 ctx.abi.load_ubo = radv_load_ubo;
3309 ctx.abi.load_ssbo = radv_load_ssbo;
3310 ctx.abi.load_sampler_desc = radv_get_sampler_desc;
3311 ctx.abi.load_resource = radv_load_resource;
3312 ctx.abi.clamp_shadow_reference = false;
3313 ctx.abi.gfx9_stride_size_workaround = ctx.ac.chip_class == GFX9;
3314
3315 if (shader_count >= 2)
3316 ac_init_exec_full_mask(&ctx.ac);
3317
3318 if (ctx.ac.chip_class == GFX9 &&
3319 shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
3320 ac_nir_fixup_ls_hs_input_vgprs(&ctx);
3321
3322 for (int i = 0; i < shader_count; ++i) {
3323 ctx.stage = shaders[i]->info.stage;
3324 ctx.output_mask = 0;
3325
3326 if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
3327 for (int j = 0; j < 4; j++) {
3328 ctx.gs_next_vertex[j] =
3329 ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
3330 }
3331 ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
3332 ctx.abi.load_inputs = load_gs_input;
3333 ctx.abi.emit_primitive = visit_end_primitive;
3334 } else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
3335 ctx.tcs_outputs_read = shaders[i]->info.outputs_read;
3336 ctx.tcs_patch_outputs_read = shaders[i]->info.patch_outputs_read;
3337 ctx.abi.load_tess_varyings = load_tcs_varyings;
3338 ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
3339 ctx.abi.store_tcs_outputs = store_tcs_output;
3340 ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
3341 if (shader_count == 1)
3342 ctx.tcs_num_inputs = ctx.options->key.tcs.num_inputs;
3343 else
3344 ctx.tcs_num_inputs = util_last_bit64(shader_info->info.vs.ls_outputs_written);
3345 ctx.tcs_num_patches = get_tcs_num_patches(&ctx);
3346 } else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
3347 ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
3348 ctx.abi.load_tess_varyings = load_tes_input;
3349 ctx.abi.load_tess_coord = load_tess_coord;
3350 ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
3351 ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
3352 ctx.tcs_num_patches = ctx.options->key.tes.num_patches;
3353 } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
3354 if (shader_info->info.vs.needs_instance_id) {
3355 if (ctx.options->key.vs.as_ls) {
3356 ctx.shader_info->vs.vgpr_comp_cnt =
3357 MAX2(2, ctx.shader_info->vs.vgpr_comp_cnt);
3358 } else {
3359 ctx.shader_info->vs.vgpr_comp_cnt =
3360 MAX2(1, ctx.shader_info->vs.vgpr_comp_cnt);
3361 }
3362 }
3363 ctx.abi.load_base_vertex = radv_load_base_vertex;
3364 } else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
3365 shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
3366 ctx.abi.lookup_interp_param = lookup_interp_param;
3367 ctx.abi.load_sample_position = load_sample_position;
3368 ctx.abi.load_sample_mask_in = load_sample_mask_in;
3369 ctx.abi.emit_kill = radv_emit_kill;
3370 }
3371
3372 if (i)
3373 ac_emit_barrier(&ctx.ac, ctx.stage);
3374
3375 nir_foreach_variable(variable, &shaders[i]->outputs)
3376 scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);
3377
		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			unsigned addclip = shaders[i]->info.clip_distance_array_size +
					shaders[i]->info.cull_distance_array_size > 4;
			ctx.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
			ctx.max_gsvs_emit_size = ctx.gsvs_vertex_size *
				shaders[i]->info.gs.vertices_out;
		}

		ac_setup_rings(&ctx);

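		/* merged_wave_info packs an 8-bit thread count for each merged
		 * stage; only the lanes belonging to this stage execute its
		 * code, the rest branch straight to the merge block. */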
		LLVMBasicBlockRef merge_block;
		if (shader_count >= 2) {
			LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
			LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
			merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");

			LLVMValueRef count = ac_unpack_param(&ctx.ac, ctx.merged_wave_info, 8 * i, 8);
			LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
			LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
							  thread_id, count, "");
			LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);

			LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
		}

		if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
			handle_fs_inputs(&ctx, shaders[i]);
		else if (shaders[i]->info.stage == MESA_SHADER_VERTEX)
			handle_vs_inputs(&ctx, shaders[i]);
		else if (shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
			prepare_gs_input_vgprs(&ctx);

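		/* Emit the LLVM IR for this stage's NIR through the common
		 * ABI callbacks set up above. */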
		ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i]);

		if (shader_count >= 2) {
			LLVMBuildBr(ctx.ac.builder, merge_block);
			LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
		}

		if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.gsvs_vertex_size = ctx.gsvs_vertex_size;
			shader_info->gs.max_gsvs_emit_size = ctx.max_gsvs_emit_size;
		} else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
			shader_info->tcs.num_patches = ctx.tcs_num_patches;
			shader_info->tcs.lds_size = calculate_tess_lds_size(&ctx);
		}
	}

	LLVMBuildRetVoid(ctx.ac.builder);

	if (options->dump_preoptir)
		ac_dump_module(ctx.ac.module);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	if (shader_count == 1)
		ac_nir_eliminate_const_vs_outputs(&ctx);

	if (options->dump_shader) {
		ctx.shader_info->private_mem_vgprs =
			ac_count_scratch_private_memory(ctx.main_function);
	}

	return ctx.ac.module;
}

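/* LLVM diagnostic callback: print errors to stderr and flag them in the
 * caller-provided return value. */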
static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
	unsigned *retval = (unsigned *)context;
	LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
	char *description = LLVMGetDiagInfoDescription(di);

	if (severity == LLVMDSError) {
		*retval = 1;
		fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n",
			description);
	}

	LLVMDisposeMessage(description);
}

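/* Compile an LLVM module to a shader binary. Returns 0 on success and
 * non-zero if LLVM reported an error. */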
static unsigned ac_llvm_compile(LLVMModuleRef M,
				struct ac_shader_binary *binary,
				struct ac_llvm_compiler *ac_llvm)
{
	unsigned retval = 0;
	LLVMContextRef llvm_ctx;

	/* Set up the diagnostic handler. */
	llvm_ctx = LLVMGetModuleContext(M);

	LLVMContextSetDiagnosticHandler(llvm_ctx, ac_diagnostic_handler,
					&retval);

	/* Compile the IR. */
	if (!radv_compile_to_binary(ac_llvm, M, binary))
		retval = 1;
	return retval;
}

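/* Compile the LLVM module, read back the shader config and, for fragment
 * shaders, derive the number of input VGPRs from the enabled PS inputs. */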
static void ac_compile_llvm_module(struct ac_llvm_compiler *ac_llvm,
				   LLVMModuleRef llvm_module,
				   struct ac_shader_binary *binary,
				   struct ac_shader_config *config,
				   struct radv_shader_variant_info *shader_info,
				   gl_shader_stage stage,
				   const struct radv_nir_compiler_options *options)
{
	if (options->dump_shader)
		ac_dump_module(llvm_module);

	memset(binary, 0, sizeof(*binary));

	if (options->record_llvm_ir) {
		char *llvm_ir = LLVMPrintModuleToString(llvm_module);
		binary->llvm_ir_string = strdup(llvm_ir);
		LLVMDisposeMessage(llvm_ir);
	}

	int v = ac_llvm_compile(llvm_module, binary, ac_llvm);
	if (v) {
		fprintf(stderr, "compile failed\n");
	}

	if (options->dump_shader)
		fprintf(stderr, "disasm:\n%s\n", binary->disasm_string);

	ac_shader_binary_read_config(binary, config, 0, options->supports_spill);

	LLVMContextRef ctx = LLVMGetModuleContext(llvm_module);
	LLVMDisposeModule(llvm_module);
	LLVMContextDispose(ctx);

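	/* Count the input VGPRs the hardware loads for this fragment shader:
	 * each enabled barycentric set costs two VGPRs, the pull-model
	 * barycentrics three, and the remaining PS inputs one each. */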
	if (stage == MESA_SHADER_FRAGMENT) {
		shader_info->num_input_vgprs = 0;
		if (G_0286CC_PERSP_SAMPLE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTER_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTROID_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_PERSP_PULL_MODEL_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 3;
		if (G_0286CC_LINEAR_SAMPLE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTER_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTROID_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_LINE_STIPPLE_TEX_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_X_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_Y_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_Z_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_W_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_FRONT_FACE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_ANCILLARY_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_SAMPLE_COVERAGE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_FIXED_PT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
	}
	config->num_vgprs = MAX2(config->num_vgprs, shader_info->num_input_vgprs);

	/* +3 for scratch wave offset and VCC */
	config->num_sgprs = MAX2(config->num_sgprs,
				 shader_info->num_input_sgprs + 3);

	/* Enable 64-bit and 16-bit denormals, because there is no performance
	 * cost.
	 *
	 * If denormals are enabled, all floating-point output modifiers are
	 * ignored.
	 *
	 * Don't enable denormals for 32-bit floats, because:
	 * - Floating-point output modifiers would be ignored by the hw.
	 * - Some opcodes don't support denormals, such as v_mad_f32. We would
	 *   have to stop using those.
	 * - SI & CI would be very slow.
	 */
	config->float_mode |= V_00B028_FP_64_DENORMS;
}

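/* Copy the stage-specific execution parameters from the NIR shader info
 * into the radv shader variant info. */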
static void
ac_fill_shader_info(struct radv_shader_variant_info *shader_info,
		    struct nir_shader *nir,
		    const struct radv_nir_compiler_options *options)
{
	switch (nir->info.stage) {
	case MESA_SHADER_COMPUTE:
		for (int i = 0; i < 3; ++i)
			shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
		break;
	case MESA_SHADER_FRAGMENT:
		shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
		break;
	case MESA_SHADER_GEOMETRY:
		shader_info->gs.vertices_in = nir->info.gs.vertices_in;
		shader_info->gs.vertices_out = nir->info.gs.vertices_out;
		shader_info->gs.output_prim = nir->info.gs.output_primitive;
		shader_info->gs.invocations = nir->info.gs.invocations;
		break;
	case MESA_SHADER_TESS_EVAL:
		shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
		shader_info->tes.spacing = nir->info.tess.spacing;
		shader_info->tes.ccw = nir->info.tess.ccw;
		shader_info->tes.point_mode = nir->info.tess.point_mode;
		shader_info->tes.as_es = options->key.tes.as_es;
		break;
	case MESA_SHADER_TESS_CTRL:
		shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
		break;
	case MESA_SHADER_VERTEX:
		shader_info->vs.as_es = options->key.vs.as_es;
		shader_info->vs.as_ls = options->key.vs.as_ls;
		/* In LS mode we need at least 1; the invocation ID needs 2 and
		 * is handled elsewhere. */
		if (options->key.vs.as_ls)
			shader_info->vs.vgpr_comp_cnt = MAX2(1, shader_info->vs.vgpr_comp_cnt);
		break;
	default:
		break;
	}
}

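/* Entry point: translate one or more NIR shaders (two when stages are
 * merged on GFX9) into a single LLVM module, compile it and fill in the
 * per-stage variant info. */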
void
radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
			struct ac_shader_binary *binary,
			struct ac_shader_config *config,
			struct radv_shader_variant_info *shader_info,
			struct nir_shader *const *nir,
			int nir_count,
			const struct radv_nir_compiler_options *options)
{
	LLVMModuleRef llvm_module;

	llvm_module = ac_translate_nir_to_llvm(ac_llvm, nir, nir_count, shader_info,
					       options);

	ac_compile_llvm_module(ac_llvm, llvm_module, binary, config, shader_info,
			       nir[0]->info.stage, options);

	for (int i = 0; i < nir_count; ++i)
		ac_fill_shader_info(shader_info, nir[i], options);

	/* Determine the ES type (VS or TES) for the GS on GFX9. */
	if (options->chip_class == GFX9) {
		if (nir_count == 2 &&
		    nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.es_type = nir[0]->info.stage;
		}
	}
}

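/* Body of the GS copy shader: for every exported GS output, load the
 * value written for this vertex back from the GSVS ring buffer and
 * re-export it like a regular VS output. */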
static void
ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
{
	LLVMValueRef vtx_offset =
		LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
			     LLVMConstInt(ctx->ac.i32, 4, false), "");
	unsigned offset = 0;

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->shader_info->info.gs.output_usage_mask[i];
		int length = util_last_bit(output_usage_mask);

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		for (unsigned j = 0; j < length; j++) {
			LLVMValueRef value, soffset;

			if (!(output_usage_mask & (1 << j)))
				continue;

			/* Each used output component occupies its own
			 * contiguous slice of the GSVS ring. */
			soffset = LLVMConstInt(ctx->ac.i32,
					       offset *
					       ctx->gs_max_out_vertices * 16 * 4, false);

			offset++;

			value = ac_build_buffer_load(&ctx->ac,
						     ctx->gsvs_ring[0],
						     1, ctx->ac.i32_0,
						     vtx_offset, soffset,
						     0, 1, 1, true, false);

			/* 16-bit outputs are stored widened to 32 bits in the
			 * ring; truncate them back to their declared type. */
			LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
			if (ac_get_type_size(type) == 2) {
				value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
				value = LLVMBuildTrunc(ctx->ac.builder, value, ctx->ac.i16, "");
			}

			LLVMBuildStore(ctx->ac.builder,
				       ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
		}
	}
	handle_vs_outputs_post(ctx, false, false, &ctx->shader_info->vs.outinfo);
}

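/* Build and compile the GS copy shader: a small shader that runs on the
 * hardware VS stage after geometry shading and copies the GS results from
 * the GSVS ring to the parameter and position exports. */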
void
radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
			    struct nir_shader *geom_shader,
			    struct ac_shader_binary *binary,
			    struct ac_shader_config *config,
			    struct radv_shader_variant_info *shader_info,
			    const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};
	ctx.options = options;
	ctx.shader_info = shader_info;

	ac_llvm_context_init(&ctx.ac, options->chip_class, options->family);
	ctx.context = ctx.ac.context;
	ctx.ac.module = ac_create_module(ac_llvm->tm, ctx.context);

	ctx.is_gs_copy_shader = true;

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
	ctx.stage = MESA_SHADER_VERTEX;

	radv_nir_shader_info_pass(geom_shader, options, &shader_info->info);

	create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);

	ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
	ac_setup_rings(&ctx);

	nir_foreach_variable(variable, &geom_shader->outputs) {
		scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
		ac_handle_shader_output_decl(&ctx.ac, &ctx.abi, geom_shader,
					     variable, MESA_SHADER_VERTEX);
	}

	ac_gs_copy_shader_emit(&ctx);

	LLVMBuildRetVoid(ctx.ac.builder);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	ac_compile_llvm_module(ac_llvm, ctx.ac.module, binary, config, shader_info,
			       MESA_SHADER_VERTEX, options);
}