/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"
#include "nir/nir.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Transforms/Scalar.h>
#if HAVE_LLVM >= 0x0700
#include <llvm-c/Transforms/Utils.h>
#endif

#include "sid.h"
#include "gfx9d.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_llvm_build.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
#include "ac_exp_param.h"

#define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)

struct radv_shader_context {
	struct ac_llvm_context ac;
	const struct radv_nir_compiler_options *options;
	struct radv_shader_variant_info *shader_info;
	struct ac_shader_abi abi;

	unsigned max_workgroup_size;
	LLVMContextRef context;
	LLVMValueRef main_function;

	LLVMValueRef descriptor_sets[RADV_UD_MAX_SETS];
	LLVMValueRef ring_offsets;

	LLVMValueRef vertex_buffers;
	LLVMValueRef rel_auto_id;
	LLVMValueRef vs_prim_id;
	LLVMValueRef es2gs_offset;

	LLVMValueRef oc_lds;
	LLVMValueRef merged_wave_info;
	LLVMValueRef tess_factor_offset;
	LLVMValueRef tes_rel_patch_id;
	LLVMValueRef tes_u;
	LLVMValueRef tes_v;

	LLVMValueRef gs2vs_offset;
	LLVMValueRef gs_wave_id;
	LLVMValueRef gs_vtx_offset[6];

	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring;
	LLVMValueRef hs_ring_tess_offchip;
	LLVMValueRef hs_ring_tess_factor;

	LLVMValueRef persp_sample, persp_center, persp_centroid;
	LLVMValueRef linear_sample, linear_center, linear_centroid;

	gl_shader_stage stage;

	LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];

	uint64_t input_mask;
	uint64_t output_mask;
	uint8_t num_output_clips;
	uint8_t num_output_culls;

	bool is_gs_copy_shader;
	LLVMValueRef gs_next_vertex;
	unsigned gs_max_out_vertices;

	unsigned tes_primitive_mode;

	uint32_t tcs_patch_outputs_read;
	uint64_t tcs_outputs_read;
	uint32_t tcs_vertices_per_patch;
	uint32_t tcs_num_inputs;
	uint32_t tcs_num_patches;
	uint32_t max_gsvs_emit_size;
	uint32_t gsvs_vertex_size;
};

enum radeon_llvm_calling_convention {
	RADEON_LLVM_AMDGPU_VS = 87,
	RADEON_LLVM_AMDGPU_GS = 88,
	RADEON_LLVM_AMDGPU_PS = 89,
	RADEON_LLVM_AMDGPU_CS = 90,
	RADEON_LLVM_AMDGPU_HS = 93,
};

static inline struct radv_shader_context *
radv_shader_context_from_abi(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = NULL;
	return container_of(abi, ctx, abi);
}

struct ac_build_if_state
{
	struct radv_shader_context *ctx;
	LLVMValueRef condition;
	LLVMBasicBlockRef entry_block;
	LLVMBasicBlockRef true_block;
	LLVMBasicBlockRef false_block;
	LLVMBasicBlockRef merge_block;
};

static LLVMBasicBlockRef
ac_build_insert_new_block(struct radv_shader_context *ctx, const char *name)
{
	LLVMBasicBlockRef current_block;
	LLVMBasicBlockRef next_block;
	LLVMBasicBlockRef new_block;

	/* get current basic block */
	current_block = LLVMGetInsertBlock(ctx->ac.builder);

	/* check whether there's another block after this one */
	next_block = LLVMGetNextBasicBlock(current_block);
	if (next_block) {
		/* insert the new block before the next block */
		new_block = LLVMInsertBasicBlockInContext(ctx->context, next_block, name);
	}
	else {
		/* append new block after current block */
		LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
		new_block = LLVMAppendBasicBlockInContext(ctx->context, function, name);
	}
	return new_block;
}

static void
ac_nir_build_if(struct ac_build_if_state *ifthen,
		struct radv_shader_context *ctx,
		LLVMValueRef condition)
{
	LLVMBasicBlockRef block = LLVMGetInsertBlock(ctx->ac.builder);

	memset(ifthen, 0, sizeof *ifthen);
	ifthen->ctx = ctx;
	ifthen->condition = condition;
	ifthen->entry_block = block;

	/* create endif/merge basic block for the phi functions */
	ifthen->merge_block = ac_build_insert_new_block(ctx, "endif-block");

	/* create/insert true_block before merge_block */
	ifthen->true_block =
		LLVMInsertBasicBlockInContext(ctx->context,
					      ifthen->merge_block,
					      "if-true-block");

	/* successive code goes into the true block */
	LLVMPositionBuilderAtEnd(ctx->ac.builder, ifthen->true_block);
}

/**
 * End a conditional.
 */
static void
ac_nir_build_endif(struct ac_build_if_state *ifthen)
{
	LLVMBuilderRef builder = ifthen->ctx->ac.builder;

	/* Insert branch to the merge block from current block */
	LLVMBuildBr(builder, ifthen->merge_block);

	/*
	 * Now patch in the various branch instructions.
	 */

	/* Insert the conditional branch instruction at the end of entry_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->entry_block);
	if (ifthen->false_block) {
		/* we have an else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->false_block);
	}
	else {
		/* no else clause */
		LLVMBuildCondBr(builder, ifthen->condition,
				ifthen->true_block, ifthen->merge_block);
	}

	/* Resume building code at end of the ifthen->merge_block */
	LLVMPositionBuilderAtEnd(builder, ifthen->merge_block);
}
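
/* Typical usage of the if/endif helpers above (a sketch, not code from
 * this file): build the condition first, then bracket the conditional IR.
 *
 *	struct ac_build_if_state ifthen;
 *	LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, a, b, "");
 *	ac_nir_build_if(&ifthen, ctx, cond);
 *	... emit IR for the then-side here ...
 *	ac_nir_build_endif(&ifthen);
 *
 * ac_nir_build_endif() patches the conditional branch into the entry
 * block and leaves the builder positioned at the merge block.
 */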

static LLVMValueRef get_rel_patch_id(struct radv_shader_context *ctx)
{
	switch (ctx->stage) {
	case MESA_SHADER_TESS_CTRL:
		return ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	case MESA_SHADER_TESS_EVAL:
		return ctx->tes_rel_patch_id;
	default:
		unreachable("Illegal stage");
	}
}

static unsigned
get_tcs_num_patches(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	unsigned num_patches;
	unsigned hardware_lds_size;

	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. This also ensures that the number of TCS input and
	 * output vertices per threadgroup is at most 256.
	 */
	num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 */
	hardware_lds_size = ctx->options->chip_class >= CIK ? 65536 : 32768;
	num_patches = MIN2(num_patches, hardware_lds_size / (input_patch_size + output_patch_size));
	/* Make sure the output data fits in the offchip buffer */
	num_patches = MIN2(num_patches, (ctx->options->tess_offchip_block_dw_size * 4) / output_patch_size);
	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	num_patches = MIN2(num_patches, 40);

	/* SI bug workaround - limit LS-HS threadgroups to only one wave. */
	if (ctx->options->chip_class == SI) {
		unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
		num_patches = MIN2(num_patches, one_wave);
	}
	return num_patches;
}
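
/* Worked example for the sizing above (hypothetical numbers, not taken
 * from any particular pipeline): with 3 input and 3 output control
 * points, 8 TCS inputs and outputs, and 2 per-patch outputs:
 *
 *	input_patch_size  = 3 * 8 * 16          = 384 bytes
 *	output_patch_size = 3 * 8 * 16 + 2 * 16 = 416 bytes
 *
 *	one-wave bound:      64 / 3 * 4          = 84 patches
 *	LDS bound (>= CIK):  65536 / (384 + 416) = 81 patches
 *	offchip bound:       8192 * 4 / 416      = 78 patches
 *	performance clamp:                         40 patches
 *
 * so the function would return 40 (assuming a tess_offchip_block_dw_size
 * of 8192 dwords).
 */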

static unsigned
calculate_tess_lds_size(struct radv_shader_context *ctx)
{
	unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
	unsigned num_tcs_output_cp;
	unsigned num_tcs_outputs, num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size;
	unsigned input_patch_size, output_patch_size;
	unsigned pervertex_output_patch_size;
	unsigned output_patch0_offset;
	unsigned num_patches;
	unsigned lds_size;

	num_tcs_output_cp = ctx->tcs_vertices_per_patch;
	num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);

	input_vertex_size = ctx->tcs_num_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	num_patches = ctx->tcs_num_patches;
	output_patch0_offset = input_patch_size * num_patches;

	lds_size = output_patch0_offset + output_patch_size * num_patches;
	return lds_size;
}
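
/* Continuing the hypothetical example from get_tcs_num_patches() with
 * num_patches = 40:
 *
 *	output_patch0_offset = 384 * 40         = 15360 bytes
 *	lds_size             = 15360 + 416 * 40 = 32000 bytes
 *
 * comfortably inside the 64 KiB LDS budget that get_tcs_num_patches()
 * assumes for CIK and newer.
 */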

/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
static LLVMValueRef
get_tcs_in_patch_stride(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;

	input_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, input_patch_size, false);
}

static LLVMValueRef
get_tcs_out_patch_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	output_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
}

static LLVMValueRef
get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	output_vertex_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
}

static LLVMValueRef
get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}

static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;

	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset += pervertex_output_patch_size;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}

static LLVMValueRef
get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}

static LLVMValueRef
get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildAdd(ctx->ac.builder, patch0_offset,
			    LLVMBuildMul(ctx->ac.builder, patch_stride,
					 rel_patch_id, ""),
			    "");
}

static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildAdd(ctx->ac.builder, patch0_patch_data_offset,
			    LLVMBuildMul(ctx->ac.builder, patch_stride,
					 rel_patch_id, ""),
			    "");
}
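
/* The helpers above return LDS addresses in dwords, hence the /= 4 on the
 * byte sizes. In the hypothetical configuration used earlier (384-byte
 * input patches, 416-byte output patches, 40 patches), patch 2 would see:
 *
 *	get_tcs_in_patch_stride()          = 384 / 4        = 96 dwords
 *	get_tcs_in_current_patch_offset()  = 96 * 2         = 192
 *	get_tcs_out_patch0_offset()        = 384 * 40 / 4   = 3840
 *	get_tcs_out_current_patch_offset() = 3840 + 104 * 2 = 4048
 *
 * with get_tcs_out_patch_stride() = 416 / 4 = 104 dwords.
 */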

#define MAX_ARGS 23
struct arg_info {
	LLVMTypeRef types[MAX_ARGS];
	LLVMValueRef *assign[MAX_ARGS];
	unsigned array_params_mask;
	uint8_t count;
	uint8_t sgpr_count;
	uint8_t num_sgprs_used;
	uint8_t num_vgprs_used;
};

enum ac_arg_regfile {
	ARG_SGPR,
	ARG_VGPR,
};

static void
add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
	LLVMValueRef *param_ptr)
{
	assert(info->count < MAX_ARGS);

	info->assign[info->count] = param_ptr;
	info->types[info->count] = type;
	info->count++;

	if (regfile == ARG_SGPR) {
		info->num_sgprs_used += ac_get_type_size(type) / 4;
		info->sgpr_count++;
	} else {
		assert(regfile == ARG_VGPR);
		info->num_vgprs_used += ac_get_type_size(type) / 4;
	}
}
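
/* Register accounting happens in 4-byte units: an i32 argument occupies
 * one SGPR/VGPR, while e.g. the v3i32 grid-size SGPR argument occupies
 * three. Pointer arguments count as one or two SGPRs depending on whether
 * 32-bit pointers are in use (see HAVE_32BIT_POINTERS below).
 */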

static inline void
add_array_arg(struct arg_info *info, LLVMTypeRef type, LLVMValueRef *param_ptr)
{
	info->array_params_mask |= (1 << info->count);
	add_arg(info, ARG_SGPR, type, param_ptr);
}

static void assign_arguments(LLVMValueRef main_function,
			     struct arg_info *info)
{
	unsigned i;
	for (i = 0; i < info->count; i++) {
		if (info->assign[i])
			*info->assign[i] = LLVMGetParam(main_function, i);
	}
}

static LLVMValueRef
create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
		     LLVMBuilderRef builder, LLVMTypeRef *return_types,
		     unsigned num_return_elems,
		     struct arg_info *args,
		     unsigned max_workgroup_size,
		     const struct radv_nir_compiler_options *options)
{
	LLVMTypeRef main_function_type, ret_type;
	LLVMBasicBlockRef main_function_body;

	if (num_return_elems)
		ret_type = LLVMStructTypeInContext(ctx, return_types,
						   num_return_elems, true);
	else
		ret_type = LLVMVoidTypeInContext(ctx);

	/* Set up the function */
	main_function_type =
		LLVMFunctionType(ret_type, args->types, args->count, 0);
	LLVMValueRef main_function =
		LLVMAddFunction(module, "main", main_function_type);
	main_function_body =
		LLVMAppendBasicBlockInContext(ctx, main_function, "main_body");
	LLVMPositionBuilderAtEnd(builder, main_function_body);

	LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
	for (unsigned i = 0; i < args->sgpr_count; ++i) {
		ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);

		if (args->array_params_mask & (1 << i)) {
			LLVMValueRef P = LLVMGetParam(main_function, i);
			ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		}
	}

	if (options->address32_hi) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-32bit-address-high-bits",
						     options->address32_hi);
	}

	if (max_workgroup_size) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-max-work-group-size",
						     max_workgroup_size);
	}
	if (options->unsafe_math) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "unsafe-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-signed-zeros-fp-math",
						   "true");
	}
	return main_function;
}

static void
set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx, uint8_t num_sgprs,
	uint32_t indirect_offset)
{
	ud_info->sgpr_idx = *sgpr_idx;
	ud_info->num_sgprs = num_sgprs;
	ud_info->indirect = indirect_offset > 0;
	ud_info->indirect_offset = indirect_offset;
	*sgpr_idx += num_sgprs;
}

static void
set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	       uint8_t num_sgprs)
{
	struct radv_userdata_info *ud_info =
		&ctx->shader_info->user_sgprs_locs.shader_data[idx];
	assert(ud_info);

	set_loc(ud_info, sgpr_idx, num_sgprs, 0);
}

static void
set_loc_shader_ptr(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
	bool use_32bit_pointers = HAVE_32BIT_POINTERS &&
				  idx != AC_UD_SCRATCH_RING_OFFSETS;

	set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
}

static void
set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	     uint32_t indirect_offset)
{
	struct radv_userdata_locations *locs =
		&ctx->shader_info->user_sgprs_locs;
	struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];
	assert(ud_info);

	set_loc(ud_info, sgpr_idx, HAVE_32BIT_POINTERS ? 1 : 2, indirect_offset);
	if (indirect_offset == 0)
		locs->descriptor_sets_enabled |= 1 << idx;
}

struct user_sgpr_info {
	bool need_ring_offsets;
	bool indirect_all_descriptor_sets;
};

static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
				  gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->shader_info->info.needs_multiview_view_index ||
		    (!ctx->options->key.vs.as_es && !ctx->options->key.vs.as_ls && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->shader_info->info.needs_multiview_view_index ||
		    (!ctx->options->key.tes.as_es && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_GEOMETRY:
	case MESA_SHADER_TESS_CTRL:
		if (ctx->shader_info->info.needs_multiview_view_index)
			return true;
		break;
	default:
		break;
	}
	return false;
}

static uint8_t
count_vs_user_sgprs(struct radv_shader_context *ctx)
{
	uint8_t count = 0;

	if (ctx->shader_info->info.vs.has_vertex_buffers)
		count += HAVE_32BIT_POINTERS ? 1 : 2;
	count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;

	return count;
}
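
/* For example, assuming 32-bit pointers: a vertex shader that uses vertex
 * buffers and needs gl_DrawID consumes 1 (vertex-buffer pointer) +
 * 3 (base vertex, start instance, draw id) = 4 user SGPRs here.
 */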

static void allocate_user_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				bool needs_view_index,
				struct user_sgpr_info *user_sgpr_info)
{
	uint8_t user_sgpr_count = 0;

	memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));

	/* until we sort out scratch/global buffers, always assign ring offsets for gs/vs/es */
	if (stage == MESA_SHADER_GEOMETRY ||
	    stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_CTRL ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    ctx->is_gs_copy_shader)
		user_sgpr_info->need_ring_offsets = true;

	if (stage == MESA_SHADER_FRAGMENT &&
	    ctx->shader_info->info.ps.needs_sample_positions)
		user_sgpr_info->need_ring_offsets = true;

	/* 2 user sgprs will nearly always be allocated for scratch/rings */
	if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
		user_sgpr_count += 2;
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size)
			user_sgpr_count += 3;
		break;
	case MESA_SHADER_FRAGMENT:
		user_sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
		break;
	case MESA_SHADER_VERTEX:
		if (!ctx->is_gs_copy_shader)
			user_sgpr_count += count_vs_user_sgprs(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				user_sgpr_count += count_vs_user_sgprs(ctx);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX) {
				user_sgpr_count += count_vs_user_sgprs(ctx);
			}
		}
		break;
	default:
		break;
	}

	if (needs_view_index)
		user_sgpr_count++;

	if (ctx->shader_info->info.loads_push_constants)
		user_sgpr_count += HAVE_32BIT_POINTERS ? 1 : 2;

	uint32_t available_sgprs = ctx->options->chip_class >= GFX9 ? 32 : 16;
	uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
	uint32_t num_desc_set =
		util_bitcount(ctx->shader_info->info.desc_set_used_mask);

	if (remaining_sgprs / (HAVE_32BIT_POINTERS ? 1 : 2) < num_desc_set) {
		user_sgpr_info->indirect_all_descriptor_sets = true;
	}
}
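
/* Sketch of the indirection decision (hypothetical numbers, 64-bit
 * pointers on pre-GFX9): 16 available SGPRs - 2 (rings) - 2 (push
 * constants) - 4 (VS vertex buffers + base vertex + start instance)
 * leaves 8, i.e. room for 8 / 2 = 4 descriptor-set pointers; a pipeline
 * layout using 5 sets would then fall back to one indirectly-loaded
 * table of sets.
 */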

static void
declare_global_input_sgprs(struct radv_shader_context *ctx,
			   gl_shader_stage stage,
			   bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   const struct user_sgpr_info *user_sgpr_info,
			   struct arg_info *args,
			   LLVMValueRef *desc_sets)
{
	LLVMTypeRef type = ac_array_in_const32_addr_space(ctx->ac.i8);
	unsigned num_sets = ctx->options->layout ?
			    ctx->options->layout->num_sets : 0;
	unsigned stage_mask = 1 << stage;

	if (has_previous_stage)
		stage_mask |= 1 << previous_stage;

	/* 1 for each descriptor set */
	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				add_array_arg(args, type,
					      &ctx->descriptor_sets[i]);
			}
		}
	} else {
		add_array_arg(args, ac_array_in_const32_addr_space(type), desc_sets);
	}

	if (ctx->shader_info->info.loads_push_constants) {
		/* 1 for push constants and dynamic descriptors */
		add_array_arg(args, type, &ctx->abi.push_constants);
	}
}

static void
declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				struct arg_info *args)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			add_arg(args, ARG_SGPR,
				ac_array_in_const32_addr_space(ctx->ac.v4i32),
				&ctx->vertex_buffers);
		}
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
		if (ctx->shader_info->info.vs.needs_draw_id) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
		}
	}
}

static void
declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
	if (!ctx->is_gs_copy_shader) {
		if (ctx->options->key.vs.as_ls) {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
		} else {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
		}
		add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
	}
}

static void
declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_u);
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_v);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->tes_rel_patch_id);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.tes_patch_id);
}

static void
set_global_input_locs(struct radv_shader_context *ctx, gl_shader_stage stage,
		      bool has_previous_stage, gl_shader_stage previous_stage,
		      const struct user_sgpr_info *user_sgpr_info,
		      LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
{
	unsigned num_sets = ctx->options->layout ?
			    ctx->options->layout->num_sets : 0;
	unsigned stage_mask = 1 << stage;

	if (has_previous_stage)
		stage_mask |= 1 << previous_stage;

	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				set_loc_desc(ctx, i, user_sgpr_idx, 0);
			} else
				ctx->descriptor_sets[i] = NULL;
		}
	} else {
		set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
				   user_sgpr_idx);

		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				set_loc_desc(ctx, i, user_sgpr_idx, i * 8);
				ctx->descriptor_sets[i] =
					ac_build_load_to_sgpr(&ctx->ac,
							      desc_sets,
							      LLVMConstInt(ctx->ac.i32, i, false));

			} else
				ctx->descriptor_sets[i] = NULL;
		}
		ctx->shader_info->need_indirect_descriptor_sets = true;
	}

	if (ctx->shader_info->info.loads_push_constants) {
		set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx);
	}
}

static void
set_vs_specific_input_locs(struct radv_shader_context *ctx,
			   gl_shader_stage stage, bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   uint8_t *user_sgpr_idx)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS,
					   user_sgpr_idx);
		}

		unsigned vs_num = 2;
		if (ctx->shader_info->info.vs.needs_draw_id)
			vs_num++;

		set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
			       user_sgpr_idx, vs_num);
	}
}

static void set_llvm_calling_convention(LLVMValueRef func,
					gl_shader_stage stage)
{
	enum radeon_llvm_calling_convention calling_conv;

	switch (stage) {
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_TESS_EVAL:
		calling_conv = RADEON_LLVM_AMDGPU_VS;
		break;
	case MESA_SHADER_GEOMETRY:
		calling_conv = RADEON_LLVM_AMDGPU_GS;
		break;
	case MESA_SHADER_TESS_CTRL:
		calling_conv = RADEON_LLVM_AMDGPU_HS;
		break;
	case MESA_SHADER_FRAGMENT:
		calling_conv = RADEON_LLVM_AMDGPU_PS;
		break;
	case MESA_SHADER_COMPUTE:
		calling_conv = RADEON_LLVM_AMDGPU_CS;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	LLVMSetFunctionCallConv(func, calling_conv);
}

static void create_function(struct radv_shader_context *ctx,
			    gl_shader_stage stage,
			    bool has_previous_stage,
			    gl_shader_stage previous_stage)
{
	uint8_t user_sgpr_idx;
	struct user_sgpr_info user_sgpr_info;
	struct arg_info args = {};
	LLVMValueRef desc_sets;
	bool needs_view_index = needs_view_index_sgpr(ctx, stage);
	allocate_user_sgprs(ctx, stage, has_previous_stage,
			    previous_stage, needs_view_index, &user_sgpr_info);

	if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
		add_arg(&args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
			&ctx->ring_offsets);
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);

		if (ctx->shader_info->info.cs.uses_grid_size) {
			add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
				&ctx->abi.num_work_groups);
		}

		for (int i = 0; i < 3; i++) {
			ctx->abi.workgroup_ids[i] = NULL;
			if (ctx->shader_info->info.cs.uses_block_id[i]) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.workgroup_ids[i]);
			}
		}

		if (ctx->shader_info->info.cs.uses_local_invocation_idx)
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
			&ctx->abi.local_invocation_ids);
		break;
	case MESA_SHADER_VERTEX:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);
		declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
						previous_stage, &args);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);
		if (ctx->options->key.vs.as_es)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);

		declare_vs_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);
			declare_vs_specific_input_sgprs(ctx, stage,
							has_previous_stage,
							previous_stage, &args);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);

			declare_vs_input_vgprs(ctx, &args);
		} else {
			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);

		if (ctx->options->key.tes.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
		}
		declare_tes_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);

			if (previous_stage != MESA_SHADER_TESS_EVAL) {
				declare_vs_specific_input_sgprs(ctx, stage,
								has_previous_stage,
								previous_stage,
								&args);
			}

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);

			if (previous_stage == MESA_SHADER_VERTEX) {
				declare_vs_input_vgprs(ctx, &args);
			} else {
				declare_tes_input_vgprs(ctx, &args);
			}
		} else {
			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);

			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs_wave_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[1]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[3]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[5]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
		}
		break;
	case MESA_SHADER_FRAGMENT:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);

		add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32, NULL); /* persp pull model */
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, NULL); /* line stipple tex */
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[0]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[1]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[2]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[3]);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.front_face);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.ancillary);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.sample_coverage);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, NULL); /* fixed pt */
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	ctx->main_function = create_llvm_function(
	    ctx->context, ctx->ac.module, ctx->ac.builder, NULL, 0, &args,
	    ctx->max_workgroup_size, ctx->options);
	set_llvm_calling_convention(ctx->main_function, stage);

	ctx->shader_info->num_input_vgprs = 0;
	ctx->shader_info->num_input_sgprs = ctx->options->supports_spill ? 2 : 0;

	ctx->shader_info->num_input_sgprs += args.num_sgprs_used;

	if (ctx->stage != MESA_SHADER_FRAGMENT)
		ctx->shader_info->num_input_vgprs = args.num_vgprs_used;

	assign_arguments(ctx->main_function, &args);

	user_sgpr_idx = 0;

	if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
		set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS,
				   &user_sgpr_idx);
		if (ctx->options->supports_spill) {
			ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
							       LLVMPointerType(ctx->ac.i8, AC_CONST_ADDR_SPACE),
							       NULL, 0, AC_FUNC_ATTR_READNONE);
			ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
							     ac_array_in_const_addr_space(ctx->ac.v4i32), "");
		}
	}

	/* For merged shaders the user SGPRs start at 8, with 8 system SGPRs
	 * in front (including the rw_buffers at s0/s1). With user SGPR0 = s8,
	 * let's restart the count from 0. */
	if (has_previous_stage)
		user_sgpr_idx = 0;

	set_global_input_locs(ctx, stage, has_previous_stage, previous_stage,
			      &user_sgpr_info, desc_sets, &user_sgpr_idx);

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size) {
			set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
				       &user_sgpr_idx, 3);
		}
		break;
	case MESA_SHADER_VERTEX:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_CTRL:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				set_vs_specific_input_locs(ctx, stage,
							   has_previous_stage,
							   previous_stage,
							   &user_sgpr_idx);
		}
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_FRAGMENT:
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	if (stage == MESA_SHADER_TESS_CTRL ||
	    (stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_ls) ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
		ac_declare_lds_as_pointer(&ctx->ac);
	}

	ctx->shader_info->num_user_sgprs = user_sgpr_idx;
}

static LLVMValueRef
radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
		   unsigned desc_set, unsigned binding)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
	struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
	struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
	unsigned base_offset = layout->binding[binding].offset;
	LLVMValueRef offset, stride;

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
	    layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
		unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
			layout->binding[binding].dynamic_offset_offset;
		desc_ptr = ctx->abi.push_constants;
		base_offset = pipeline_layout->push_constant_size + 16 * idx;
		stride = LLVMConstInt(ctx->ac.i32, 16, false);
	} else
		stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);

	offset = LLVMConstInt(ctx->ac.i32, base_offset, false);
	index = LLVMBuildMul(ctx->ac.builder, index, stride, "");
	offset = LLVMBuildAdd(ctx->ac.builder, offset, index, "");

	desc_ptr = ac_build_gep0(&ctx->ac, desc_ptr, offset);
	desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
	LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	return desc_ptr;
}
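
/* For a dynamic uniform/storage buffer, the 16-byte descriptor therefore
 * lives in the push-constant area: e.g. with a hypothetical
 * push_constant_size of 64 and dynamic-offset index 2, array element 0
 * of the binding reads the descriptor at offset 64 + 16 * 2 = 96.
 */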

/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 * ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 * ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 * ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 * ...
 *
 * Note that every attribute has 4 components.
 */
static LLVMValueRef get_non_vertex_index_offset(struct radv_shader_context *ctx)
{
	uint32_t num_patches = ctx->tcs_num_patches;
	uint32_t num_tcs_outputs;
	if (ctx->stage == MESA_SHADER_TESS_CTRL)
		num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	else
		num_tcs_outputs = ctx->options->key.tes.tcs_num_outputs;

	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;

	return LLVMConstInt(ctx->ac.i32, pervertex_output_patch_size * num_patches, false);
}

static LLVMValueRef calc_param_stride(struct radv_shader_context *ctx,
				      LLVMValueRef vertex_index)
{
	LLVMValueRef param_stride;
	if (vertex_index)
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch * ctx->tcs_num_patches, false);
	else
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_num_patches, false);
	return param_stride;
}

static LLVMValueRef get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	LLVMValueRef base_addr;
	LLVMValueRef param_stride, constant16;
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
	LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch, false);
	constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
	param_stride = calc_param_stride(ctx, vertex_index);
	if (vertex_index) {
		base_addr = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
					 vertices_per_patch, "");

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 vertex_index, "");
	} else {
		base_addr = rel_patch_id;
	}

	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
				 LLVMBuildMul(ctx->ac.builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		LLVMValueRef patch_data_offset = get_non_vertex_index_offset(ctx);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}
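
/* Address sketch for the layout comment above: for a per-vertex attribute
 * the base address in bytes is
 *
 *	((rel_patch_id * verts_per_patch + vertex_index)
 *	 + param_index * verts_per_patch * num_patches) * 16
 *
 * while per-patch attributes use
 * (rel_patch_id + param_index * num_patches) * 16 plus
 * get_non_vertex_index_offset(), the byte size of the whole per-vertex
 * region that precedes the per-patch data.
 */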

static LLVMValueRef get_tcs_tes_buffer_address_params(struct radv_shader_context *ctx,
						      unsigned param,
						      unsigned const_index,
						      bool is_compact,
						      LLVMValueRef vertex_index,
						      LLVMValueRef indir_index)
{
	LLVMValueRef param_index;

	if (indir_index)
		param_index = LLVMBuildAdd(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, param, false),
					   indir_index, "");
	else {
		if (const_index && !is_compact)
			param += const_index;
		param_index = LLVMConstInt(ctx->ac.i32, param, false);
	}
	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}

static LLVMValueRef
get_dw_address(struct radv_shader_context *ctx,
	       LLVMValueRef dw_addr,
	       unsigned param,
	       unsigned const_index,
	       bool compact_const_index,
	       LLVMValueRef vertex_index,
	       LLVMValueRef stride,
	       LLVMValueRef indir_index)
{
	if (vertex_index) {
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder,
						    vertex_index,
						    stride, ""), "");
	}

	if (indir_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder, indir_index,
						    LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
	else if (const_index && !compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index * 4, false), "");

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->ac.i32, param * 4, false), "");

	if (const_index && compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index, false), "");
	return dw_addr;
}

static LLVMValueRef
load_tcs_varyings(struct ac_shader_abi *abi,
		  LLVMTypeRef type,
		  LLVMValueRef vertex_index,
		  LLVMValueRef indir_index,
		  unsigned const_index,
		  unsigned location,
		  unsigned driver_location,
		  unsigned component,
		  unsigned num_components,
		  bool is_patch,
		  bool is_compact,
		  bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef dw_addr, stride;
	LLVMValueRef value[4], result;
	unsigned param = shader_io_get_unique_index(location);

	if (load_input) {
		uint32_t input_vertex_size = (ctx->tcs_num_inputs * 16) / 4;
		stride = LLVMConstInt(ctx->ac.i32, input_vertex_size, false);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (!is_patch) {
			stride = get_tcs_out_vertex_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		} else {
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
			stride = NULL;
		}
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 indir_index);

	for (unsigned i = 0; i < num_components + component; i++) {
		value[i] = ac_lds_load(&ctx->ac, dw_addr);
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       ctx->ac.i32_1, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	return result;
}

static void
store_tcs_output(struct ac_shader_abi *abi,
		 const nir_variable *var,
		 LLVMValueRef vertex_index,
		 LLVMValueRef param_index,
		 unsigned const_index,
		 LLVMValueRef src,
		 unsigned writemask)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	const unsigned location = var->data.location;
	const unsigned component = var->data.location_frac;
	const bool is_patch = var->data.patch;
	const bool is_compact = var->data.compact;
	LLVMValueRef dw_addr;
	LLVMValueRef stride = NULL;
	LLVMValueRef buf_addr = NULL;
	unsigned param;
	bool store_lds = true;

	if (is_patch) {
		if (!(ctx->tcs_patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
			store_lds = false;
	} else {
		if (!(ctx->tcs_outputs_read & (1ULL << location)))
			store_lds = false;
	}

	param = shader_io_get_unique_index(location);
	if (location == VARYING_SLOT_CLIP_DIST0 &&
	    is_compact && const_index > 3) {
		const_index -= 3;
		param++;
	}

	if (!is_patch) {
		stride = get_tcs_out_vertex_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 param_index);
	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
						     vertex_index, param_index);

	bool is_tess_factor = false;
	if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
	    location == VARYING_SLOT_TESS_LEVEL_OUTER)
		is_tess_factor = true;

	unsigned base = is_compact ? const_index : 0;
	for (unsigned chan = 0; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
		value = ac_to_integer(&ctx->ac, value);
		value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");

		if (store_lds || is_tess_factor) {
			LLVMValueRef dw_addr_chan =
				LLVMBuildAdd(ctx->ac.builder, dw_addr,
					     LLVMConstInt(ctx->ac.i32, chan, false), "");
			ac_lds_store(&ctx->ac, dw_addr_chan, value);
		}

		if (!is_tess_factor && writemask != 0xF)
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
						    buf_addr, ctx->oc_lds,
						    4 * (base + chan), 1, 0, true, false);
	}

	if (writemask == 0xF) {
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
					    buf_addr, ctx->oc_lds,
					    (base * 4), 1, 0, true, false);
	}
}

static LLVMValueRef
load_tes_input(struct ac_shader_abi *abi,
	       LLVMTypeRef type,
	       LLVMValueRef vertex_index,
	       LLVMValueRef param_index,
	       unsigned const_index,
	       unsigned location,
	       unsigned driver_location,
	       unsigned component,
	       unsigned num_components,
	       bool is_patch,
	       bool is_compact,
	       bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef buf_addr;
	LLVMValueRef result;
	unsigned param = shader_io_get_unique_index(location);

	if (location == VARYING_SLOT_CLIP_DIST0 && is_compact && const_index > 3) {
		const_index -= 3;
		param++;
	}

	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
						     is_compact, vertex_index, param_index);

	LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
	buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");

	result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
				      buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, 1, 0, true, false);
	result = ac_trim_vector(&ctx->ac, result, num_components);
	return result;
}

static LLVMValueRef
load_gs_input(struct ac_shader_abi *abi,
	      unsigned location,
	      unsigned driver_location,
	      unsigned component,
	      unsigned num_components,
	      unsigned vertex_index,
	      unsigned const_index,
	      LLVMTypeRef type)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef vtx_offset;
	unsigned param, vtx_offset_param;
	LLVMValueRef value[4], result;

	vtx_offset_param = vertex_index;
	assert(vtx_offset_param < 6);
	vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
				  LLVMConstInt(ctx->ac.i32, 4, false), "");

	param = shader_io_get_unique_index(location);

	for (unsigned i = component; i < num_components + component; i++) {
		if (ctx->ac.chip_class >= GFX9) {
			LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
					       LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
			value[i] = ac_lds_load(&ctx->ac, dw_addr);
		} else {
			LLVMValueRef soffset =
				LLVMConstInt(ctx->ac.i32,
					     (param * 4 + i + const_index) * 256,
					     false);

			value[i] = ac_build_buffer_load(&ctx->ac,
							ctx->esgs_ring, 1,
							ctx->ac.i32_0,
							vtx_offset, soffset,
							0, 1, 0, true, false);
		}

		if (ac_get_type_size(type) == 2) {
			value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], ctx->ac.i32, "");
			value[i] = LLVMBuildTrunc(ctx->ac.builder, value[i], ctx->ac.i16, "");
		}
		value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], type, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	result = ac_to_integer(&ctx->ac, result);
	return result;
}

static void radv_emit_kill(struct ac_shader_abi *abi, LLVMValueRef visible)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_kill_if_false(&ctx->ac, visible);
}

static LLVMValueRef lookup_interp_param(struct ac_shader_abi *abi,
					enum glsl_interp_mode interp, unsigned location)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (interp) {
	case INTERP_MODE_FLAT:
	default:
		return NULL;
	case INTERP_MODE_SMOOTH:
	case INTERP_MODE_NONE:
		if (location == INTERP_CENTER)
			return ctx->persp_center;
		else if (location == INTERP_CENTROID)
			return ctx->persp_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->persp_sample;
		break;
	case INTERP_MODE_NOPERSPECTIVE:
		if (location == INTERP_CENTER)
			return ctx->linear_center;
		else if (location == INTERP_CENTROID)
			return ctx->linear_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->linear_sample;
		break;
	}
	return NULL;
}

static uint32_t
radv_get_sample_pos_offset(uint32_t num_samples)
{
	uint32_t sample_pos_offset = 0;

	switch (num_samples) {
	case 2:
		sample_pos_offset = 1;
		break;
	case 4:
		sample_pos_offset = 3;
		break;
	case 8:
		sample_pos_offset = 7;
		break;
	case 16:
		sample_pos_offset = 15;
		break;
	default:
		break;
	}
	return sample_pos_offset;
}
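
/* The offsets above are the cumulative starting entries of the
 * per-sample-count position tables in the RING_PS_SAMPLE_POSITIONS
 * buffer: 1x at entry 0, 2x at entries 1-2, 4x at 3-6, 8x at 7-14 and
 * 16x at 15-30 (assuming the ring is filled in that order at device
 * initialization).
 */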
1653
1654 static LLVMValueRef load_sample_position(struct ac_shader_abi *abi,
1655 LLVMValueRef sample_id)
1656 {
1657 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1658
1659 LLVMValueRef result;
1660 LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false));
1661
1662 ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
1663 ac_array_in_const_addr_space(ctx->ac.v2f32), "");
1664
1665 uint32_t sample_pos_offset =
1666 radv_get_sample_pos_offset(ctx->options->key.fs.num_samples);
1667
1668 sample_id =
1669 LLVMBuildAdd(ctx->ac.builder, sample_id,
1670 LLVMConstInt(ctx->ac.i32, sample_pos_offset, false), "");
1671 result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);
1672
1673 return result;
1674 }
1675
1676
1677 static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
1678 {
1679 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1680 uint8_t log2_ps_iter_samples;
1681
1682 if (ctx->shader_info->info.ps.force_persample) {
1683 log2_ps_iter_samples =
1684 util_logbase2(ctx->options->key.fs.num_samples);
1685 } else {
1686 log2_ps_iter_samples = ctx->options->key.fs.log2_ps_iter_samples;
1687 }
1688
1689 /* The bit pattern matches that used by fixed function fragment
1690 * processing. */
1691 static const uint16_t ps_iter_masks[] = {
1692 0xffff, /* not used */
1693 0x5555,
1694 0x1111,
1695 0x0101,
1696 0x0001,
1697 };
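/* E.g. with log2_ps_iter_samples == 1, 0x5555 shifted left by the sample ID
 * claims every second coverage bit, so each PS iteration owns half of the
 * samples; the 0xffff entry keeps the full coverage mask. */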
1698 assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));
1699
1700 uint32_t ps_iter_mask = ps_iter_masks[log2_ps_iter_samples];
1701
1702 LLVMValueRef result, sample_id;
1703 sample_id = ac_unpack_param(&ctx->ac, abi->ancillary, 8, 4);
1704 sample_id = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, ps_iter_mask, false), sample_id, "");
1705 result = LLVMBuildAnd(ctx->ac.builder, sample_id, abi->sample_coverage, "");
1706 return result;
1707 }
1708
1709
1710 static void
1711 visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
1712 {
1713 LLVMValueRef gs_next_vertex;
1714 LLVMValueRef can_emit;
1715 int idx;
1716 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1717
1718 assert(stream == 0);
1719
1720 /* Write vertex attribute values to GSVS ring */
1721 gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
1722 ctx->gs_next_vertex,
1723 "");
1724
1725 /* If this thread has already emitted the declared maximum number of
1726 * vertices, kill it: excessive vertex emissions are not supposed to
1727 * have any effect, and GS threads have no externally observable
1728 * effects other than emitting vertices.
1729 */
1730 can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
1731 LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
1732 ac_build_kill_if_false(&ctx->ac, can_emit);
1733
1734 /* Loop over the outputs and store each enabled component of this vertex to the GSVS ring. */
1735 idx = 0;
1736 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
1737 unsigned output_usage_mask =
1738 ctx->shader_info->info.gs.output_usage_mask[i];
1739 LLVMValueRef *out_ptr = &addrs[i * 4];
1740 int length = 4;
1741 int slot = idx;
1742 int slot_inc = 1;
1743
1744 if (!(ctx->output_mask & (1ull << i)))
1745 continue;
1746
1747 if (i == VARYING_SLOT_CLIP_DIST0) {
1748 /* pack clip and cull into a single set of slots */
1749 length = ctx->num_output_clips + ctx->num_output_culls;
1750 if (length > 4)
1751 slot_inc = 2;
1752 output_usage_mask = (1 << length) - 1;
1753 }
1754
1755 for (unsigned j = 0; j < length; j++) {
1756 if (!(output_usage_mask & (1 << j)))
1757 continue;
1758
1759 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
1760 out_ptr[j], "");
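/* The GSVS ring is laid out component-major: the dwords for component
 * (slot * 4 + j) of all emitted vertices are contiguous, hence the scale
 * by gs_max_out_vertices before adding the per-vertex index. */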
1761 LLVMValueRef voffset = LLVMConstInt(ctx->ac.i32, (slot * 4 + j) * ctx->gs_max_out_vertices, false);
1762 voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
1763 voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");
1764
1765 out_val = ac_to_integer(&ctx->ac, out_val);
1766 out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");
1767
1768 ac_build_buffer_store_dword(&ctx->ac, ctx->gsvs_ring,
1769 out_val, 1,
1770 voffset, ctx->gs2vs_offset, 0,
1771 1, 1, true, true);
1772 }
1773 idx += slot_inc;
1774 }
1775
1776 gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
1777 ctx->ac.i32_1, "");
1778 LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex);
1779
1780 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (0 << 8), ctx->gs_wave_id);
1781 }
1782
1783 static void
1784 visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
1785 {
1786 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
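/* The vertex stream index is carried in the GS message at bit 8 (stream << 8). */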
1787 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
1788 }
1789
1790 static LLVMValueRef
1791 load_tess_coord(struct ac_shader_abi *abi)
1792 {
1793 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1794
1795 LLVMValueRef coord[4] = {
1796 ctx->tes_u,
1797 ctx->tes_v,
1798 ctx->ac.f32_0,
1799 ctx->ac.f32_0,
1800 };
1801
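/* For triangles the three barycentric coordinates sum to 1, so reconstruct
 * the third one as 1 - u - v; quads and isolines only use u and v. */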
1802 if (ctx->tes_primitive_mode == GL_TRIANGLES)
1803 coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
1804 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");
1805
1806 return ac_build_gather_values(&ctx->ac, coord, 3);
1807 }
1808
1809 static LLVMValueRef
1810 load_patch_vertices_in(struct ac_shader_abi *abi)
1811 {
1812 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1813 return LLVMConstInt(ctx->ac.i32, ctx->options->key.tcs.input_vertices, false);
1814 }
1815
1816
1817 static LLVMValueRef radv_load_base_vertex(struct ac_shader_abi *abi)
1818 {
1819 return abi->base_vertex;
1820 }
1821
1822 static LLVMValueRef radv_load_ssbo(struct ac_shader_abi *abi,
1823 LLVMValueRef buffer_ptr, bool write)
1824 {
1825 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1826 LLVMValueRef result;
1827
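/* Marking the pointer uniform and the load invariant lets the backend keep
 * the descriptor in SGPRs and freely hoist/CSE the load. */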
1828 LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
1829
1830 result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
1831 LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);
1832
1833 return result;
1834 }
1835
1836 static LLVMValueRef radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer_ptr)
1837 {
1838 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1839 LLVMValueRef result;
1840
1841 LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
1842
1843 result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
1844 LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);
1845
1846 return result;
1847 }
1848
1849 static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
1850 unsigned descriptor_set,
1851 unsigned base_index,
1852 unsigned constant_index,
1853 LLVMValueRef index,
1854 enum ac_descriptor_type desc_type,
1855 bool image, bool write,
1856 bool bindless)
1857 {
1858 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1859 LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
1860 struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
1861 struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
1862 unsigned offset = binding->offset;
1863 unsigned stride = binding->size;
1864 unsigned type_size;
1865 LLVMBuilderRef builder = ctx->ac.builder;
1866 LLVMTypeRef type;
1867
1868 assert(base_index < layout->binding_count);
1869
1870 switch (desc_type) {
1871 case AC_DESC_IMAGE:
1872 type = ctx->ac.v8i32;
1873 type_size = 32;
1874 break;
1875 case AC_DESC_FMASK:
1876 type = ctx->ac.v8i32;
1877 offset += 32;
1878 type_size = 32;
1879 break;
1880 case AC_DESC_SAMPLER:
1881 type = ctx->ac.v4i32;
1882 if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
1883 offset += 64;
1884
1885 type_size = 16;
1886 break;
1887 case AC_DESC_BUFFER:
1888 type = ctx->ac.v4i32;
1889 type_size = 16;
1890 break;
1891 default:
1892 unreachable("invalid desc_type");
1893 }
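/* offset and stride are byte offsets into the descriptor set, while
 * type_size is the size of one descriptor of 'type' (32 bytes for v8i32,
 * 16 for v4i32); index is scaled below to units of such descriptors. */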
1894
1895 offset += constant_index * stride;
1896
1897 if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
1898 (!index || binding->immutable_samplers_equal)) {
1899 if (binding->immutable_samplers_equal)
1900 constant_index = 0;
1901
1902 const uint32_t *samplers = radv_immutable_samplers(layout, binding);
1903
1904 LLVMValueRef constants[] = {
1905 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
1906 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
1907 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
1908 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
1909 };
1910 return ac_build_gather_values(&ctx->ac, constants, 4);
1911 }
1912
1913 assert(stride % type_size == 0);
1914
1915 if (!index)
1916 index = ctx->ac.i32_0;
1917
1918 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");
1919
1920 list = ac_build_gep0(&ctx->ac, list, LLVMConstInt(ctx->ac.i32, offset, 0));
1921 list = LLVMBuildPointerCast(builder, list,
1922 ac_array_in_const32_addr_space(type), "");
1923
1924 return ac_build_load_to_sgpr(&ctx->ac, list, index);
1925 }
1926
1927 /* For 2_10_10_10 formats the alpha is handled as unsigned by pre-Vega HW,
1928 * so we may need to fix it up. */
1929 static LLVMValueRef
1930 adjust_vertex_fetch_alpha(struct radv_shader_context *ctx,
1931 unsigned adjustment,
1932 LLVMValueRef alpha)
1933 {
1934 if (adjustment == RADV_ALPHA_ADJUST_NONE)
1935 return alpha;
1936
1937 LLVMValueRef c30 = LLVMConstInt(ctx->ac.i32, 30, 0);
1938
1939 if (adjustment == RADV_ALPHA_ADJUST_SSCALED)
1940 alpha = LLVMBuildFPToUI(ctx->ac.builder, alpha, ctx->ac.i32, "");
1941 else
1942 alpha = ac_to_integer(&ctx->ac, alpha);
1943
1944 /* For the integer-like cases, do a natural sign extension.
1945 *
1946 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
1947 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
1948 * exponent.
1949 */
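/* Concretely: the shift left by 30 (or by 7 for SNORM, moving the two
 * exponent LSBs) places the 2-bit alpha at bits [31:30], and the arithmetic
 * shift right by 30 sign-extends it to the range [-2, 1]. */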
1950 alpha = LLVMBuildShl(ctx->ac.builder, alpha,
1951 adjustment == RADV_ALPHA_ADJUST_SNORM ?
1952 LLVMConstInt(ctx->ac.i32, 7, 0) : c30, "");
1953 alpha = LLVMBuildAShr(ctx->ac.builder, alpha, c30, "");
1954
1955 /* Convert back to the right type. */
1956 if (adjustment == RADV_ALPHA_ADJUST_SNORM) {
1957 LLVMValueRef clamp;
1958 LLVMValueRef neg_one = LLVMConstReal(ctx->ac.f32, -1.0);
1959 alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
1960 clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, alpha, neg_one, "");
1961 alpha = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, alpha, "");
1962 } else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) {
1963 alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
1964 }
1965
1966 return alpha;
1967 }
1968
1969 static void
1970 handle_vs_input_decl(struct radv_shader_context *ctx,
1971 struct nir_variable *variable)
1972 {
1973 LLVMValueRef t_list_ptr = ctx->vertex_buffers;
1974 LLVMValueRef t_offset;
1975 LLVMValueRef t_list;
1976 LLVMValueRef input;
1977 LLVMValueRef buffer_index;
1978 unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
1979 uint8_t input_usage_mask =
1980 ctx->shader_info->info.vs.input_usage_mask[variable->data.location];
1981 unsigned num_channels = util_last_bit(input_usage_mask);
1982
1983 variable->data.driver_location = variable->data.location * 4;
1984
1985 enum glsl_base_type type = glsl_get_base_type(variable->type);
1986 for (unsigned i = 0; i < attrib_count; ++i) {
1987 LLVMValueRef output[4];
1988 unsigned attrib_index = variable->data.location + i - VERT_ATTRIB_GENERIC0;
1989
1990 if (ctx->options->key.vs.instance_rate_inputs & (1u << attrib_index)) {
1991 uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[attrib_index];
1992
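/* Per the vertex attribute divisor rules, a divisor of 0 means every
 * instance reads the same element (only start_instance applies); otherwise
 * the element advances once per 'divisor' instances. */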
1993 if (divisor) {
1994 buffer_index = ctx->abi.instance_id;
1995
1996 if (divisor != 1) {
1997 buffer_index = LLVMBuildUDiv(ctx->ac.builder, buffer_index,
1998 LLVMConstInt(ctx->ac.i32, divisor, 0), "");
1999 }
2000
2001 if (ctx->options->key.vs.as_ls) {
2002 ctx->shader_info->vs.vgpr_comp_cnt =
2003 MAX2(2, ctx->shader_info->vs.vgpr_comp_cnt);
2004 } else {
2005 ctx->shader_info->vs.vgpr_comp_cnt =
2006 MAX2(1, ctx->shader_info->vs.vgpr_comp_cnt);
2007 }
2008 } else {
2009 buffer_index = ctx->ac.i32_0;
2010 }
2011
2012 buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.start_instance, buffer_index, "");
2013 } else
2014 buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
2015 ctx->abi.base_vertex, "");
2016 t_offset = LLVMConstInt(ctx->ac.i32, attrib_index, false);
2017
2018 t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);
2019
2020 input = ac_build_buffer_load_format(&ctx->ac, t_list,
2021 buffer_index,
2022 ctx->ac.i32_0,
2023 num_channels, false, true);
2024
2025 input = ac_build_expand_to_vec4(&ctx->ac, input, num_channels);
2026
2027 for (unsigned chan = 0; chan < 4; chan++) {
2028 LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
2029 output[chan] = LLVMBuildExtractElement(ctx->ac.builder, input, llvm_chan, "");
2030 if (type == GLSL_TYPE_FLOAT16) {
2031 output[chan] = LLVMBuildBitCast(ctx->ac.builder, output[chan], ctx->ac.f32, "");
2032 output[chan] = LLVMBuildFPTrunc(ctx->ac.builder, output[chan], ctx->ac.f16, "");
2033 }
2034 }
2035
2036 unsigned alpha_adjust = (ctx->options->key.vs.alpha_adjust >> (attrib_index * 2)) & 3;
2037 output[3] = adjust_vertex_fetch_alpha(ctx, alpha_adjust, output[3]);
2038
2039 for (unsigned chan = 0; chan < 4; chan++) {
2040 output[chan] = ac_to_integer(&ctx->ac, output[chan]);
2041 if (type == GLSL_TYPE_UINT16 || type == GLSL_TYPE_INT16)
2042 output[chan] = LLVMBuildTrunc(ctx->ac.builder, output[chan], ctx->ac.i16, "");
2043
2044 ctx->inputs[ac_llvm_reg_index_soa(variable->data.location + i, chan)] = output[chan];
2045 }
2046 }
2047 }
2048
2049 static void interp_fs_input(struct radv_shader_context *ctx,
2050 unsigned attr,
2051 LLVMValueRef interp_param,
2052 LLVMValueRef prim_mask,
2053 LLVMValueRef result[4])
2054 {
2055 LLVMValueRef attr_number;
2056 unsigned chan;
2057 LLVMValueRef i, j;
2058 bool interp = !LLVMIsUndef(interp_param);
2059
2060 attr_number = LLVMConstInt(ctx->ac.i32, attr, false);
2061
2062 /* fs.constant returns the param from the middle vertex, so it's not
2063 * really useful for flat shading. It's meant to be used for custom
2064 * interpolation (but the intrinsic can't fetch from the other two
2065 * vertices).
2066 *
2067 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
2068 * to do the right thing. The only reason we use fs.constant is that
2069 * fs.interp cannot be used on integers, because they can be equal
2070 * to NaN.
2071 */
2072 if (interp) {
2073 interp_param = LLVMBuildBitCast(ctx->ac.builder, interp_param,
2074 ctx->ac.v2f32, "");
2075
2076 i = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
2077 ctx->ac.i32_0, "");
2078 j = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
2079 ctx->ac.i32_1, "");
2080 }
2081
2082 for (chan = 0; chan < 4; chan++) {
2083 LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
2084
2085 if (interp) {
2086 result[chan] = ac_build_fs_interp(&ctx->ac,
2087 llvm_chan,
2088 attr_number,
2089 prim_mask, i, j);
2090 } else {
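/* For v_interp_mov, the constant 2 selects parameter P0, i.e. the
 * flat-shaded value from the provoking vertex. */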
2091 result[chan] = ac_build_fs_interp_mov(&ctx->ac,
2092 LLVMConstInt(ctx->ac.i32, 2, false),
2093 llvm_chan,
2094 attr_number,
2095 prim_mask);
2096 result[chan] = LLVMBuildBitCast(ctx->ac.builder, result[chan], ctx->ac.i32, "");
2097 result[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, result[chan], LLVMTypeOf(interp_param), "");
2098 }
2099 }
2100 }
2101
2102 static void
2103 handle_fs_input_decl(struct radv_shader_context *ctx,
2104 struct nir_variable *variable)
2105 {
2106 int idx = variable->data.location;
2107 unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
2108 LLVMValueRef interp = NULL;
2109
2110 variable->data.driver_location = idx * 4;
2111 ctx->input_mask |= ((1ull << attrib_count) - 1) << variable->data.location;
2112
2113 if (glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_FLOAT) {
2114 unsigned interp_type;
2115 if (variable->data.sample)
2116 interp_type = INTERP_SAMPLE;
2117 else if (variable->data.centroid)
2118 interp_type = INTERP_CENTROID;
2119 else
2120 interp_type = INTERP_CENTER;
2121
2122 interp = lookup_interp_param(&ctx->abi, variable->data.interpolation, interp_type);
2123 }
2124 bool is_16bit = glsl_type_is_16bit(variable->type);
2125 LLVMTypeRef type = is_16bit ? ctx->ac.i16 : ctx->ac.i32;
2126 if (interp == NULL)
2127 interp = LLVMGetUndef(type);
2128
2129 for (unsigned i = 0; i < attrib_count; ++i)
2130 ctx->inputs[ac_llvm_reg_index_soa(idx + i, 0)] = interp;
2131
2132 }
2133
2134 static void
2135 handle_vs_inputs(struct radv_shader_context *ctx,
2136 struct nir_shader *nir) {
2137 nir_foreach_variable(variable, &nir->inputs)
2138 handle_vs_input_decl(ctx, variable);
2139 }
2140
2141 static void
2142 prepare_interp_optimize(struct radv_shader_context *ctx,
2143 struct nir_shader *nir)
2144 {
2145 bool uses_center = false;
2146 bool uses_centroid = false;
2147 nir_foreach_variable(variable, &nir->inputs) {
2148 if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
2149 variable->data.sample)
2150 continue;
2151
2152 if (variable->data.centroid)
2153 uses_centroid = true;
2154 else
2155 uses_center = true;
2156 }
2157
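/* This appears to rely on the BC_OPTIMIZE mechanism: the hardware sets the
 * sign bit of PRIM_MASK when the wave only contains fully covered quads, in
 * which case the centroid VGPRs are not computed and center must be used
 * (the two are identical there anyway). */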
2158 if (uses_center && uses_centroid) {
2159 LLVMValueRef sel = LLVMBuildICmp(ctx->ac.builder, LLVMIntSLT, ctx->abi.prim_mask, ctx->ac.i32_0, "");
2160 ctx->persp_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->persp_center, ctx->persp_centroid, "");
2161 ctx->linear_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->linear_center, ctx->linear_centroid, "");
2162 }
2163 }
2164
2165 static void
2166 handle_fs_inputs(struct radv_shader_context *ctx,
2167 struct nir_shader *nir)
2168 {
2169 prepare_interp_optimize(ctx, nir);
2170
2171 nir_foreach_variable(variable, &nir->inputs)
2172 handle_fs_input_decl(ctx, variable);
2173
2174 unsigned index = 0;
2175
2176 if (ctx->shader_info->info.ps.uses_input_attachments ||
2177 ctx->shader_info->info.needs_multiview_view_index) {
2178 ctx->input_mask |= 1ull << VARYING_SLOT_LAYER;
2179 ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)] = LLVMGetUndef(ctx->ac.i32);
2180 }
2181
2182 for (unsigned i = 0; i < RADEON_LLVM_MAX_INPUTS; ++i) {
2183 LLVMValueRef interp_param;
2184 LLVMValueRef *inputs = ctx->inputs + ac_llvm_reg_index_soa(i, 0);
2185
2186 if (!(ctx->input_mask & (1ull << i)))
2187 continue;
2188
2189 if (i >= VARYING_SLOT_VAR0 || i == VARYING_SLOT_PNTC ||
2190 i == VARYING_SLOT_PRIMITIVE_ID || i == VARYING_SLOT_LAYER) {
2191 interp_param = *inputs;
2192 interp_fs_input(ctx, index, interp_param, ctx->abi.prim_mask,
2193 inputs);
2194
2195 if (LLVMIsUndef(interp_param))
2196 ctx->shader_info->fs.flat_shaded_mask |= 1u << index;
2197 ++index;
2198 } else if (i == VARYING_SLOT_POS) {
2199 for (int chan = 0; chan < 3; ++chan)
2200 inputs[chan] = ctx->abi.frag_pos[chan];
2201
2202 inputs[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
2203 ctx->abi.frag_pos[3]);
2204 }
2205 }
2206 ctx->shader_info->fs.num_interp = index;
2207 ctx->shader_info->fs.input_mask = ctx->input_mask >> VARYING_SLOT_VAR0;
2208
2209 if (ctx->shader_info->info.needs_multiview_view_index)
2210 ctx->abi.view_index = ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
2211 }
2212
2213 static void
2214 scan_shader_output_decl(struct radv_shader_context *ctx,
2215 struct nir_variable *variable,
2216 struct nir_shader *shader,
2217 gl_shader_stage stage)
2218 {
2219 int idx = variable->data.location + variable->data.index;
2220 unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
2221 uint64_t mask_attribs;
2222
2223 variable->data.driver_location = idx * 4;
2224
2225 /* tess ctrl has its own load/store paths for outputs */
2226 if (stage == MESA_SHADER_TESS_CTRL)
2227 return;
2228
2229 mask_attribs = ((1ull << attrib_count) - 1) << idx;
2230 if (stage == MESA_SHADER_VERTEX ||
2231 stage == MESA_SHADER_TESS_EVAL ||
2232 stage == MESA_SHADER_GEOMETRY) {
2233 if (idx == VARYING_SLOT_CLIP_DIST0) {
2234 int length = shader->info.clip_distance_array_size +
2235 shader->info.cull_distance_array_size;
2236 if (stage == MESA_SHADER_VERTEX) {
2237 ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
2238 ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
2239 }
2240 if (stage == MESA_SHADER_TESS_EVAL) {
2241 ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
2242 ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
2243 }
2244
2245 if (length > 4)
2246 attrib_count = 2;
2247 else
2248 attrib_count = 1;
2249 mask_attribs = 1ull << idx;
2250 }
2251 }
2252
2253 ctx->output_mask |= mask_attribs;
2254 }
2255
2256
2257 /* Initialize arguments for the shader export intrinsic */
2258 static void
2259 si_llvm_init_export_args(struct radv_shader_context *ctx,
2260 LLVMValueRef *values,
2261 unsigned enabled_channels,
2262 unsigned target,
2263 struct ac_export_args *args)
2264 {
2265 /* Specify the channels that are enabled. */
2266 args->enabled_channels = enabled_channels;
2267
2268 /* Specify whether the EXEC mask represents the valid mask */
2269 args->valid_mask = 0;
2270
2271 /* Specify whether this is the last export */
2272 args->done = 0;
2273
2274 /* Specify the target we are exporting */
2275 args->target = target;
2276
2277 args->compr = false;
2278 args->out[0] = LLVMGetUndef(ctx->ac.f32);
2279 args->out[1] = LLVMGetUndef(ctx->ac.f32);
2280 args->out[2] = LLVMGetUndef(ctx->ac.f32);
2281 args->out[3] = LLVMGetUndef(ctx->ac.f32);
2282
2283 if (!values)
2284 return;
2285
2286 bool is_16bit = ac_get_type_size(LLVMTypeOf(values[0])) == 2;
2287 if (ctx->stage == MESA_SHADER_FRAGMENT && target >= V_008DFC_SQ_EXP_MRT) {
2288 unsigned index = target - V_008DFC_SQ_EXP_MRT;
2289 unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
2290 bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
2291 bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;
2292 unsigned chan;
2293
2294 LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
2295 LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
2296 unsigned bits, bool hi) = NULL;
2297
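/* For the compressed formats below, two 16-bit values are packed into each
 * 32-bit lane; enabled_channels = 0x5 presumably gates the two packed
 * dwords via enable bits 0 and 2 of the export. */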
2298 switch(col_format) {
2299 case V_028714_SPI_SHADER_ZERO:
2300 args->enabled_channels = 0; /* writemask */
2301 args->target = V_008DFC_SQ_EXP_NULL;
2302 break;
2303
2304 case V_028714_SPI_SHADER_32_R:
2305 args->enabled_channels = 1;
2306 args->out[0] = values[0];
2307 break;
2308
2309 case V_028714_SPI_SHADER_32_GR:
2310 args->enabled_channels = 0x3;
2311 args->out[0] = values[0];
2312 args->out[1] = values[1];
2313 break;
2314
2315 case V_028714_SPI_SHADER_32_AR:
2316 args->enabled_channels = 0x9;
2317 args->out[0] = values[0];
2318 args->out[3] = values[3];
2319 break;
2320
2321 case V_028714_SPI_SHADER_FP16_ABGR:
2322 args->enabled_channels = 0x5;
2323 packf = ac_build_cvt_pkrtz_f16;
2324 if (is_16bit) {
2325 for (unsigned chan = 0; chan < 4; chan++)
2326 values[chan] = LLVMBuildFPExt(ctx->ac.builder,
2327 values[chan],
2328 ctx->ac.f32, "");
2329 }
2330 break;
2331
2332 case V_028714_SPI_SHADER_UNORM16_ABGR:
2333 args->enabled_channels = 0x5;
2334 packf = ac_build_cvt_pknorm_u16;
2335 break;
2336
2337 case V_028714_SPI_SHADER_SNORM16_ABGR:
2338 args->enabled_channels = 0x5;
2339 packf = ac_build_cvt_pknorm_i16;
2340 break;
2341
2342 case V_028714_SPI_SHADER_UINT16_ABGR:
2343 args->enabled_channels = 0x5;
2344 packi = ac_build_cvt_pk_u16;
2345 if (is_16bit) {
2346 for (unsigned chan = 0; chan < 4; chan++)
2347 values[chan] = LLVMBuildZExt(ctx->ac.builder,
2348 values[chan],
2349 ctx->ac.i32, "");
2350 }
2351 break;
2352
2353 case V_028714_SPI_SHADER_SINT16_ABGR:
2354 args->enabled_channels = 0x5;
2355 packi = ac_build_cvt_pk_i16;
2356 if (is_16bit) {
2357 for (unsigned chan = 0; chan < 4; chan++)
2358 values[chan] = LLVMBuildSExt(ctx->ac.builder,
2359 values[chan],
2360 ctx->ac.i32, "");
2361 }
2362 break;
2363
2364 default:
2365 case V_028714_SPI_SHADER_32_ABGR:
2366 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
2367 break;
2368 }
2369
2370 /* Pack f16 or norm_i16/u16. */
2371 if (packf) {
2372 for (chan = 0; chan < 2; chan++) {
2373 LLVMValueRef pack_args[2] = {
2374 values[2 * chan],
2375 values[2 * chan + 1]
2376 };
2377 LLVMValueRef packed;
2378
2379 packed = packf(&ctx->ac, pack_args);
2380 args->out[chan] = ac_to_float(&ctx->ac, packed);
2381 }
2382 args->compr = 1; /* COMPR flag */
2383 }
2384
2385 /* Pack i16/u16. */
2386 if (packi) {
2387 for (chan = 0; chan < 2; chan++) {
2388 LLVMValueRef pack_args[2] = {
2389 ac_to_integer(&ctx->ac, values[2 * chan]),
2390 ac_to_integer(&ctx->ac, values[2 * chan + 1])
2391 };
2392 LLVMValueRef packed;
2393
2394 packed = packi(&ctx->ac, pack_args,
2395 is_int8 ? 8 : is_int10 ? 10 : 16,
2396 chan == 1);
2397 args->out[chan] = ac_to_float(&ctx->ac, packed);
2398 }
2399 args->compr = 1; /* COMPR flag */
2400 }
2401 return;
2402 }
2403
2404 if (is_16bit) {
2405 for (unsigned chan = 0; chan < 4; chan++) {
2406 values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i16, "");
2407 args->out[chan] = LLVMBuildZExt(ctx->ac.builder, values[chan], ctx->ac.i32, "");
2408 }
2409 } else
2410 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
2411
2412 for (unsigned i = 0; i < 4; ++i) {
2413 if (!(args->enabled_channels & (1 << i)))
2414 continue;
2415
2416 args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
2417 }
2418 }
2419
2420 static void
2421 radv_export_param(struct radv_shader_context *ctx, unsigned index,
2422 LLVMValueRef *values, unsigned enabled_channels)
2423 {
2424 struct ac_export_args args;
2425
2426 si_llvm_init_export_args(ctx, values, enabled_channels,
2427 V_008DFC_SQ_EXP_PARAM + index, &args);
2428 ac_build_export(&ctx->ac, &args);
2429 }
2430
2431 static LLVMValueRef
2432 radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
2433 {
2434 LLVMValueRef output =
2435 ctx->abi.outputs[ac_llvm_reg_index_soa(index, chan)];
2436
2437 return LLVMBuildLoad(ctx->ac.builder, output, "");
2438 }
2439
2440 static void
2441 handle_vs_outputs_post(struct radv_shader_context *ctx,
2442 bool export_prim_id, bool export_layer_id,
2443 struct radv_vs_output_info *outinfo)
2444 {
2445 uint32_t param_count = 0;
2446 unsigned target;
2447 unsigned pos_idx, num_pos_exports = 0;
2448 struct ac_export_args args, pos_args[4] = {};
2449 LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_index_value = NULL;
2450 int i;
2451
2452 if (ctx->options->key.has_multiview_view_index) {
2453 LLVMValueRef* tmp_out = &ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
2454 if (!*tmp_out) {
2455 for (unsigned i = 0; i < 4; ++i)
2456 ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
2457 ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
2458 }
2459
2460 LLVMBuildStore(ctx->ac.builder, ac_to_float(&ctx->ac, ctx->abi.view_index), *tmp_out);
2461 ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
2462 }
2463
2464 memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
2465 sizeof(outinfo->vs_output_param_offset));
2466
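/* Clip/cull distances are exported as positions: components 0-3 go to
 * POS+2 and, when more than four distances are written, 4-7 go to POS+3. */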
2467 if (ctx->output_mask & (1ull << VARYING_SLOT_CLIP_DIST0)) {
2468 LLVMValueRef slots[8];
2469 unsigned j;
2470
2471 if (outinfo->cull_dist_mask)
2472 outinfo->cull_dist_mask <<= ctx->num_output_clips;
2473
2474 i = VARYING_SLOT_CLIP_DIST0;
2475 for (j = 0; j < ctx->num_output_clips + ctx->num_output_culls; j++)
2476 slots[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
2477
2478 for (i = ctx->num_output_clips + ctx->num_output_culls; i < 8; i++)
2479 slots[i] = LLVMGetUndef(ctx->ac.f32);
2480
2481 if (ctx->num_output_clips + ctx->num_output_culls > 4) {
2482 target = V_008DFC_SQ_EXP_POS + 3;
2483 si_llvm_init_export_args(ctx, &slots[4], 0xf, target, &args);
2484 memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
2485 &args, sizeof(args));
2486 }
2487
2488 target = V_008DFC_SQ_EXP_POS + 2;
2489 si_llvm_init_export_args(ctx, &slots[0], 0xf, target, &args);
2490 memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
2491 &args, sizeof(args));
2492
2493 }
2494
2495 LLVMValueRef pos_values[4] = {ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_1};
2496 if (ctx->output_mask & (1ull << VARYING_SLOT_POS)) {
2497 for (unsigned j = 0; j < 4; j++)
2498 pos_values[j] = radv_load_output(ctx, VARYING_SLOT_POS, j);
2499 }
2500 si_llvm_init_export_args(ctx, pos_values, 0xf, V_008DFC_SQ_EXP_POS, &pos_args[0]);
2501
2502 if (ctx->output_mask & (1ull << VARYING_SLOT_PSIZ)) {
2503 outinfo->writes_pointsize = true;
2504 psize_value = radv_load_output(ctx, VARYING_SLOT_PSIZ, 0);
2505 }
2506
2507 if (ctx->output_mask & (1ull << VARYING_SLOT_LAYER)) {
2508 outinfo->writes_layer = true;
2509 layer_value = radv_load_output(ctx, VARYING_SLOT_LAYER, 0);
2510 }
2511
2512 if (ctx->output_mask & (1ull << VARYING_SLOT_VIEWPORT)) {
2513 outinfo->writes_viewport_index = true;
2514 viewport_index_value = radv_load_output(ctx, VARYING_SLOT_VIEWPORT, 0);
2515 }
2516
2517 if (outinfo->writes_pointsize ||
2518 outinfo->writes_layer ||
2519 outinfo->writes_viewport_index) {
2520 pos_args[1].enabled_channels = ((outinfo->writes_pointsize ? 1 : 0) |
2521 (outinfo->writes_layer ? 4 : 0));
2522 pos_args[1].valid_mask = 0;
2523 pos_args[1].done = 0;
2524 pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
2525 pos_args[1].compr = 0;
2526 pos_args[1].out[0] = ctx->ac.f32_0; /* X */
2527 pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
2528 pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
2529 pos_args[1].out[3] = ctx->ac.f32_0; /* W */
2530
2531 if (outinfo->writes_pointsize)
2532 pos_args[1].out[0] = psize_value;
2533 if (outinfo->writes_layer)
2534 pos_args[1].out[2] = layer_value;
2535 if (outinfo->writes_viewport_index) {
2536 if (ctx->options->chip_class >= GFX9) {
2537 /* GFX9 has the layer in out.z[10:0] and the viewport
2538 * index in out.z[19:16].
2539 */
2540 LLVMValueRef v = viewport_index_value;
2541 v = ac_to_integer(&ctx->ac, v);
2542 v = LLVMBuildShl(ctx->ac.builder, v,
2543 LLVMConstInt(ctx->ac.i32, 16, false),
2544 "");
2545 v = LLVMBuildOr(ctx->ac.builder, v,
2546 ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
2547
2548 pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
2549 pos_args[1].enabled_channels |= 1 << 2;
2550 } else {
2551 pos_args[1].out[3] = viewport_index_value;
2552 pos_args[1].enabled_channels |= 1 << 3;
2553 }
2554 }
2555 }
2556 for (i = 0; i < 4; i++) {
2557 if (pos_args[i].out[0])
2558 num_pos_exports++;
2559 }
2560
2561 pos_idx = 0;
2562 for (i = 0; i < 4; i++) {
2563 if (!pos_args[i].out[0])
2564 continue;
2565
2566 /* Specify the target we are exporting */
2567 pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
2568 if (pos_idx == num_pos_exports)
2569 pos_args[i].done = 1;
2570 ac_build_export(&ctx->ac, &pos_args[i]);
2571 }
2572
2573 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2574 LLVMValueRef values[4];
2575 if (!(ctx->output_mask & (1ull << i)))
2576 continue;
2577
2578 if (i != VARYING_SLOT_LAYER &&
2579 i != VARYING_SLOT_PRIMITIVE_ID &&
2580 i < VARYING_SLOT_VAR0)
2581 continue;
2582
2583 for (unsigned j = 0; j < 4; j++)
2584 values[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
2585
2586 unsigned output_usage_mask;
2587
2588 if (ctx->stage == MESA_SHADER_VERTEX &&
2589 !ctx->is_gs_copy_shader) {
2590 output_usage_mask =
2591 ctx->shader_info->info.vs.output_usage_mask[i];
2592 } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
2593 output_usage_mask =
2594 ctx->shader_info->info.tes.output_usage_mask[i];
2595 } else {
2596 assert(ctx->is_gs_copy_shader);
2597 output_usage_mask =
2598 ctx->shader_info->info.gs.output_usage_mask[i];
2599 }
2600
2601 radv_export_param(ctx, param_count, values, output_usage_mask);
2602
2603 outinfo->vs_output_param_offset[i] = param_count++;
2604 }
2605
2606 if (export_prim_id) {
2607 LLVMValueRef values[4];
2608
2609 values[0] = ctx->vs_prim_id;
2610 ctx->shader_info->vs.vgpr_comp_cnt = MAX2(2,
2611 ctx->shader_info->vs.vgpr_comp_cnt);
2612 for (unsigned j = 1; j < 4; j++)
2613 values[j] = ctx->ac.f32_0;
2614
2615 radv_export_param(ctx, param_count, values, 0x1);
2616
2617 outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
2618 outinfo->export_prim_id = true;
2619 }
2620
2621 if (export_layer_id && layer_value) {
2622 LLVMValueRef values[4];
2623
2624 values[0] = layer_value;
2625 for (unsigned j = 1; j < 4; j++)
2626 values[j] = ctx->ac.f32_0;
2627
2628 radv_export_param(ctx, param_count, values, 0x1);
2629
2630 outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = param_count++;
2631 }
2632
2633 outinfo->pos_exports = num_pos_exports;
2634 outinfo->param_exports = param_count;
2635 }
2636
2637 static void
2638 handle_es_outputs_post(struct radv_shader_context *ctx,
2639 struct radv_es_output_info *outinfo)
2640 {
2641 int j;
2642 uint64_t max_output_written = 0;
2643 LLVMValueRef lds_base = NULL;
2644
2645 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2646 int param_index;
2647 int length = 4;
2648
2649 if (!(ctx->output_mask & (1ull << i)))
2650 continue;
2651
2652 if (i == VARYING_SLOT_CLIP_DIST0)
2653 length = ctx->num_output_clips + ctx->num_output_culls;
2654
2655 param_index = shader_io_get_unique_index(i);
2656
2657 max_output_written = MAX2(param_index + (length > 4), max_output_written);
2658 }
2659
2660 outinfo->esgs_itemsize = (max_output_written + 1) * 16;
2661
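/* On GFX9 the ES and GS stages are merged, so ES outputs are passed through
 * LDS instead of the ESGS ring; derive this vertex's LDS slot from its
 * global thread index (wave index * 64 + lane). */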
2662 if (ctx->ac.chip_class >= GFX9) {
2663 unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
2664 LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
2665 LLVMValueRef wave_idx = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
2666 LLVMConstInt(ctx->ac.i32, 24, false),
2667 LLVMConstInt(ctx->ac.i32, 4, false), false);
2668 vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
2669 LLVMBuildMul(ctx->ac.builder, wave_idx,
2670 LLVMConstInt(ctx->ac.i32, 64, false), ""), "");
2671 lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
2672 LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
2673 }
2674
2675 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2676 LLVMValueRef dw_addr = NULL;
2677 LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
2678 unsigned output_usage_mask;
2679 int param_index;
2680 int length = 4;
2681
2682 if (!(ctx->output_mask & (1ull << i)))
2683 continue;
2684
2685 if (ctx->stage == MESA_SHADER_VERTEX) {
2686 output_usage_mask =
2687 ctx->shader_info->info.vs.output_usage_mask[i];
2688 } else {
2689 assert(ctx->stage == MESA_SHADER_TESS_EVAL);
2690 output_usage_mask =
2691 ctx->shader_info->info.tes.output_usage_mask[i];
2692 }
2693
2694 if (i == VARYING_SLOT_CLIP_DIST0) {
2695 length = ctx->num_output_clips + ctx->num_output_culls;
2696 output_usage_mask = (1 << length) - 1;
2697 }
2698
2699 param_index = shader_io_get_unique_index(i);
2700
2701 if (lds_base) {
2702 dw_addr = LLVMBuildAdd(ctx->ac.builder, lds_base,
2703 LLVMConstInt(ctx->ac.i32, param_index * 4, false),
2704 "");
2705 }
2706
2707 for (j = 0; j < length; j++) {
2708 if (!(output_usage_mask & (1 << j)))
2709 continue;
2710
2711 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
2712 out_val = ac_to_integer(&ctx->ac, out_val);
2713 out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");
2714
2715 if (ctx->ac.chip_class >= GFX9) {
2716 LLVMValueRef dw_addr_offset =
2717 LLVMBuildAdd(ctx->ac.builder, dw_addr,
2718 LLVMConstInt(ctx->ac.i32,
2719 j, false), "");
2720
2721 ac_lds_store(&ctx->ac, dw_addr_offset, out_val);
2722 } else {
2723 ac_build_buffer_store_dword(&ctx->ac,
2724 ctx->esgs_ring,
2725 out_val, 1,
2726 NULL, ctx->es2gs_offset,
2727 (4 * param_index + j) * 4,
2728 1, 1, true, true);
2729 }
2730 }
2731 }
2732 }
2733
2734 static void
2735 handle_ls_outputs_post(struct radv_shader_context *ctx)
2736 {
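/* LS outputs are written to LDS for the HS stage to read; each vertex gets
 * num_tcs_inputs * 4 dwords, i.e. one vec4 per input slot. */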
2737 LLVMValueRef vertex_id = ctx->rel_auto_id;
2738 uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->info.vs.ls_outputs_written);
2739 LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
2740 LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
2741 vertex_dw_stride, "");
2742
2743 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2744 LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
2745 int length = 4;
2746
2747 if (!(ctx->output_mask & (1ull << i)))
2748 continue;
2749
2750 if (i == VARYING_SLOT_CLIP_DIST0)
2751 length = ctx->num_output_clips + ctx->num_output_culls;
2752 int param = shader_io_get_unique_index(i);
2753 LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
2754 LLVMConstInt(ctx->ac.i32, param * 4, false),
2755 "");
2756 for (unsigned j = 0; j < length; j++) {
2757 LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
2758 value = ac_to_integer(&ctx->ac, value);
2759 value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
2760 ac_lds_store(&ctx->ac, dw_addr, value);
2761 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
2762 }
2763 }
2764 }
2765
2766 static void
2767 write_tess_factors(struct radv_shader_context *ctx)
2768 {
2769 unsigned stride, outer_comps, inner_comps;
2770 struct ac_build_if_state if_ctx, inner_if_ctx;
2771 LLVMValueRef invocation_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 8, 5);
2772 LLVMValueRef rel_patch_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
2773 unsigned tess_inner_index = 0, tess_outer_index;
2774 LLVMValueRef lds_base, lds_inner = NULL, lds_outer, byteoffset, buffer;
2775 LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
2776 int i;
2777 ac_emit_barrier(&ctx->ac, ctx->stage);
2778
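/* stride is the number of tess factor dwords per patch: 2 outer for
 * isolines, 3 outer + 1 inner for triangles, 4 outer + 2 inner for quads. */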
2779 switch (ctx->options->key.tcs.primitive_mode) {
2780 case GL_ISOLINES:
2781 stride = 2;
2782 outer_comps = 2;
2783 inner_comps = 0;
2784 break;
2785 case GL_TRIANGLES:
2786 stride = 4;
2787 outer_comps = 3;
2788 inner_comps = 1;
2789 break;
2790 case GL_QUADS:
2791 stride = 6;
2792 outer_comps = 4;
2793 inner_comps = 2;
2794 break;
2795 default:
2796 return;
2797 }
2798
2799 ac_nir_build_if(&if_ctx, ctx,
2800 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2801 invocation_id, ctx->ac.i32_0, ""));
2802
2803 lds_base = get_tcs_out_current_patch_data_offset(ctx);
2804
2805 if (inner_comps) {
2806 tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
2807 lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
2808 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
2809 }
2810
2811 tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
2812 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
2813 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");
2814
2815 for (i = 0; i < 4; i++) {
2816 inner[i] = LLVMGetUndef(ctx->ac.i32);
2817 outer[i] = LLVMGetUndef(ctx->ac.i32);
2818 }
2819
2820 /* LINES reversal: the HW consumes the two isoline outer factors in reverse order, so swap them when building the TF output vector. */
2821 if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
2822 outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
2823 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
2824 ctx->ac.i32_1, "");
2825 outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
2826 } else {
2827 for (i = 0; i < outer_comps; i++) {
2828 outer[i] = out[i] =
2829 ac_lds_load(&ctx->ac, lds_outer);
2830 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
2831 ctx->ac.i32_1, "");
2832 }
2833 for (i = 0; i < inner_comps; i++) {
2834 inner[i] = out[outer_comps+i] =
2835 ac_lds_load(&ctx->ac, lds_inner);
2836 lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_inner,
2837 ctx->ac.i32_1, "");
2838 }
2839 }
2840
2841 /* Convert the outputs to vectors for stores. */
2842 vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
2843 vec1 = NULL;
2844
2845 if (stride > 4)
2846 vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);
2847
2848
2849 buffer = ctx->hs_ring_tess_factor;
2850 tf_base = ctx->tess_factor_offset;
2851 byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
2852 LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
2853 unsigned tf_offset = 0;
2854
2855 if (ctx->options->chip_class <= VI) {
2856 ac_nir_build_if(&inner_if_ctx, ctx,
2857 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2858 rel_patch_id, ctx->ac.i32_0, ""));
2859
2860 /* Store the dynamic HS control word. */
2861 ac_build_buffer_store_dword(&ctx->ac, buffer,
2862 LLVMConstInt(ctx->ac.i32, 0x80000000, false),
2863 1, ctx->ac.i32_0, tf_base,
2864 0, 1, 0, true, false);
2865 tf_offset += 4;
2866
2867 ac_nir_build_endif(&inner_if_ctx);
2868 }
2869
2870 /* Store the tessellation factors. */
2871 ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
2872 MIN2(stride, 4), byteoffset, tf_base,
2873 tf_offset, 1, 0, true, false);
2874 if (vec1)
2875 ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
2876 stride - 4, byteoffset, tf_base,
2877 16 + tf_offset, 1, 0, true, false);
2878
2879 /* Store to offchip for TES to read - only if TES reads them. */
2880 if (ctx->options->key.tcs.tes_reads_tess_factors) {
2881 LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
2882 LLVMValueRef tf_inner_offset;
2883 unsigned param_outer, param_inner;
2884
2885 param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
2886 tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
2887 LLVMConstInt(ctx->ac.i32, param_outer, 0));
2888
2889 outer_vec = ac_build_gather_values(&ctx->ac, outer,
2890 util_next_power_of_two(outer_comps));
2891
2892 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
2893 outer_comps, tf_outer_offset,
2894 ctx->oc_lds, 0, 1, 0, true, false);
2895 if (inner_comps) {
2896 param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
2897 tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
2898 LLVMConstInt(ctx->ac.i32, param_inner, 0));
2899
2900 inner_vec = inner_comps == 1 ? inner[0] :
2901 ac_build_gather_values(&ctx->ac, inner, inner_comps);
2902 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
2903 inner_comps, tf_inner_offset,
2904 ctx->oc_lds, 0, 1, 0, true, false);
2905 }
2906 }
2907 ac_nir_build_endif(&if_ctx);
2908 }
2909
2910 static void
2911 handle_tcs_outputs_post(struct radv_shader_context *ctx)
2912 {
2913 write_tess_factors(ctx);
2914 }
2915
2916 static bool
2917 si_export_mrt_color(struct radv_shader_context *ctx,
2918 LLVMValueRef *color, unsigned index,
2919 struct ac_export_args *args)
2920 {
2921 /* Export */
2922 si_llvm_init_export_args(ctx, color, 0xf,
2923 V_008DFC_SQ_EXP_MRT + index, args);
2924 if (!args->enabled_channels)
2925 return false; /* unnecessary NULL export */
2926
2927 return true;
2928 }
2929
2930 static void
2931 radv_export_mrt_z(struct radv_shader_context *ctx,
2932 LLVMValueRef depth, LLVMValueRef stencil,
2933 LLVMValueRef samplemask)
2934 {
2935 struct ac_export_args args;
2936
2937 ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);
2938
2939 ac_build_export(&ctx->ac, &args);
2940 }
2941
2942 static void
2943 handle_fs_outputs_post(struct radv_shader_context *ctx)
2944 {
2945 unsigned index = 0;
2946 LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
2947 struct ac_export_args color_args[8];
2948
2949 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2950 LLVMValueRef values[4];
2951
2952 if (!(ctx->output_mask & (1ull << i)))
2953 continue;
2954
2955 if (i < FRAG_RESULT_DATA0)
2956 continue;
2957
2958 for (unsigned j = 0; j < 4; j++)
2959 values[j] = ac_to_float(&ctx->ac,
2960 radv_load_output(ctx, i, j));
2961
2962 bool ret = si_export_mrt_color(ctx, values,
2963 i - FRAG_RESULT_DATA0,
2964 &color_args[index]);
2965 if (ret)
2966 index++;
2967 }
2968
2969 /* Process depth, stencil, samplemask. */
2970 if (ctx->shader_info->info.ps.writes_z) {
2971 depth = ac_to_float(&ctx->ac,
2972 radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
2973 }
2974 if (ctx->shader_info->info.ps.writes_stencil) {
2975 stencil = ac_to_float(&ctx->ac,
2976 radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
2977 }
2978 if (ctx->shader_info->info.ps.writes_sample_mask) {
2979 samplemask = ac_to_float(&ctx->ac,
2980 radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
2981 }
2982
2983 /* Set the DONE bit on last non-null color export only if Z isn't
2984 * exported.
2985 */
2986 if (index > 0 &&
2987 !ctx->shader_info->info.ps.writes_z &&
2988 !ctx->shader_info->info.ps.writes_stencil &&
2989 !ctx->shader_info->info.ps.writes_sample_mask) {
2990 unsigned last = index - 1;
2991
2992 color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
2993 color_args[last].done = 1; /* DONE bit */
2994 }
2995
2996 /* Export PS outputs. */
2997 for (unsigned i = 0; i < index; i++)
2998 ac_build_export(&ctx->ac, &color_args[i]);
2999
3000 if (depth || stencil || samplemask)
3001 radv_export_mrt_z(ctx, depth, stencil, samplemask);
3002 else if (!index)
3003 ac_build_export_null(&ctx->ac);
3004 }
3005
3006 static void
3007 emit_gs_epilogue(struct radv_shader_context *ctx)
3008 {
3009 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
3010 }
3011
3012 static void
3013 handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
3014 LLVMValueRef *addrs)
3015 {
3016 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
3017
3018 switch (ctx->stage) {
3019 case MESA_SHADER_VERTEX:
3020 if (ctx->options->key.vs.as_ls)
3021 handle_ls_outputs_post(ctx);
3022 else if (ctx->options->key.vs.as_es)
3023 handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
3024 else
3025 handle_vs_outputs_post(ctx, ctx->options->key.vs.export_prim_id,
3026 ctx->options->key.vs.export_layer_id,
3027 &ctx->shader_info->vs.outinfo);
3028 break;
3029 case MESA_SHADER_FRAGMENT:
3030 handle_fs_outputs_post(ctx);
3031 break;
3032 case MESA_SHADER_GEOMETRY:
3033 emit_gs_epilogue(ctx);
3034 break;
3035 case MESA_SHADER_TESS_CTRL:
3036 handle_tcs_outputs_post(ctx);
3037 break;
3038 case MESA_SHADER_TESS_EVAL:
3039 if (ctx->options->key.tes.as_es)
3040 handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
3041 else
3042 handle_vs_outputs_post(ctx, ctx->options->key.tes.export_prim_id,
3043 ctx->options->key.tes.export_layer_id,
3044 &ctx->shader_info->tes.outinfo);
3045 break;
3046 default:
3047 break;
3048 }
3049 }
3050
3051 static void ac_llvm_finalize_module(struct radv_shader_context *ctx,
3052 LLVMPassManagerRef passmgr,
3053 const struct radv_nir_compiler_options *options)
3054 {
3055 LLVMRunPassManager(passmgr, ctx->ac.module);
3056 LLVMDisposeBuilder(ctx->ac.builder);
3057
3058 ac_llvm_context_dispose(&ctx->ac);
3059 }
3060
3061 static void
3062 ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
3063 {
3064 struct radv_vs_output_info *outinfo;
3065
3066 switch (ctx->stage) {
3067 case MESA_SHADER_FRAGMENT:
3068 case MESA_SHADER_COMPUTE:
3069 case MESA_SHADER_TESS_CTRL:
3070 case MESA_SHADER_GEOMETRY:
3071 return;
3072 case MESA_SHADER_VERTEX:
3073 if (ctx->options->key.vs.as_ls ||
3074 ctx->options->key.vs.as_es)
3075 return;
3076 outinfo = &ctx->shader_info->vs.outinfo;
3077 break;
3078 case MESA_SHADER_TESS_EVAL:
3079 if (ctx->options->key.tes.as_es)
3080 return;
3081 outinfo = &ctx->shader_info->tes.outinfo;
3082 break;
3083 default:
3084 unreachable("Unhandled shader type");
3085 }
3086
3087 ac_optimize_vs_outputs(&ctx->ac,
3088 ctx->main_function,
3089 outinfo->vs_output_param_offset,
3090 VARYING_SLOT_MAX,
3091 &outinfo->param_exports);
3092 }
3093
3094 static void
3095 ac_setup_rings(struct radv_shader_context *ctx)
3096 {
3097 if (ctx->options->chip_class <= VI &&
3098 (ctx->stage == MESA_SHADER_GEOMETRY ||
3099 ctx->options->key.vs.as_es || ctx->options->key.tes.as_es)) {
3100 unsigned ring = ctx->stage == MESA_SHADER_GEOMETRY ? RING_ESGS_GS
3101 : RING_ESGS_VS;
3102 LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, ring, false);
3103
3104 ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac,
3105 ctx->ring_offsets,
3106 offset);
3107 }
3108
3109 if (ctx->is_gs_copy_shader) {
3110 ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_VS, false));
3111 }
3112 if (ctx->stage == MESA_SHADER_GEOMETRY) {
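/* Patch the GSVS ring descriptor: the vertex stride is OR'ed into dword1
 * (the stride field of a buffer resource lives in bits [29:16]), and dword2
 * (num_records) is set to the entry count, scaled by the stride on VI+,
 * apparently because num_records is counted in bytes there. */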
3113 LLVMValueRef tmp;
3114 uint32_t num_entries = 64;
3115 LLVMValueRef gsvs_ring_stride = LLVMConstInt(ctx->ac.i32, ctx->max_gsvs_emit_size, false);
3116 LLVMValueRef gsvs_ring_desc = LLVMConstInt(ctx->ac.i32, ctx->max_gsvs_emit_size << 16, false);
3117 ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_GS, false));
3118
3119 ctx->gsvs_ring = LLVMBuildBitCast(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.v4i32, "");
3120
3121 tmp = LLVMConstInt(ctx->ac.i32, num_entries, false);
3122 if (ctx->options->chip_class >= VI)
3123 tmp = LLVMBuildMul(ctx->ac.builder, gsvs_ring_stride, tmp, "");
3124 ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, tmp, LLVMConstInt(ctx->ac.i32, 2, false), "");
3125 tmp = LLVMBuildExtractElement(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.i32_1, "");
3126 tmp = LLVMBuildOr(ctx->ac.builder, tmp, gsvs_ring_desc, "");
3127 ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, tmp, ctx->ac.i32_1, "");
3128 }
3129
3130 if (ctx->stage == MESA_SHADER_TESS_CTRL ||
3131 ctx->stage == MESA_SHADER_TESS_EVAL) {
3132 ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
3133 ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
3134 }
3135 }
3136
3137 static unsigned
3138 ac_nir_get_max_workgroup_size(enum chip_class chip_class,
3139 const struct nir_shader *nir)
3140 {
3141 switch (nir->info.stage) {
3142 case MESA_SHADER_TESS_CTRL:
3143 return chip_class >= CIK ? 128 : 64;
3144 case MESA_SHADER_GEOMETRY:
3145 return chip_class >= GFX9 ? 128 : 64;
3146 case MESA_SHADER_COMPUTE:
3147 break;
3148 default:
3149 return 0;
3150 }
3151
3152 unsigned max_workgroup_size = nir->info.cs.local_size[0] *
3153 nir->info.cs.local_size[1] *
3154 nir->info.cs.local_size[2];
3155 return max_workgroup_size;
3156 }
3157
3158 /* Work around the HW not emitting the TCS input VGPRs when there are no HS
 * threads (merged_wave_info bits [15:8]): the LS inputs then arrive two
 * VGPRs earlier than declared, so shift them back into place. */
3159 static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
3160 {
3161 LLVMValueRef count = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
3162 LLVMConstInt(ctx->ac.i32, 8, false),
3163 LLVMConstInt(ctx->ac.i32, 8, false), false);
3164 LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
3165 ctx->ac.i32_0, "");
3166 ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
3167 ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_rel_ids, ctx->rel_auto_id, "");
3168 ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_patch_id, ctx->abi.vertex_id, "");
3169 }
3170
3171 static void prepare_gs_input_vgprs(struct radv_shader_context *ctx)
3172 {
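/* With merged shaders on GFX9 the six GS vertex offsets arrive packed as
 * 16-bit pairs, two per VGPR; unpack them with BFE. The GS wave id lives in
 * merged_wave_info bits [23:16]. */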
3173 for (int i = 5; i >= 0; --i) {
3174 ctx->gs_vtx_offset[i] = ac_build_bfe(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
3175 LLVMConstInt(ctx->ac.i32, (i & 1) * 16, false),
3176 LLVMConstInt(ctx->ac.i32, 16, false), false);
3177 }
3178
3179 ctx->gs_wave_id = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
3180 LLVMConstInt(ctx->ac.i32, 16, false),
3181 LLVMConstInt(ctx->ac.i32, 8, false), false);
3182 }
3183
3184
3185 static
3186 LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
3187 struct nir_shader *const *shaders,
3188 int shader_count,
3189 struct radv_shader_variant_info *shader_info,
3190 const struct radv_nir_compiler_options *options)
3191 {
3192 struct radv_shader_context ctx = {0};
3193 unsigned i;
3194 ctx.options = options;
3195 ctx.shader_info = shader_info;
3196
3197 ac_llvm_context_init(&ctx.ac, options->chip_class, options->family);
3198 ctx.context = ctx.ac.context;
3199 ctx.ac.module = ac_create_module(ac_llvm->tm, ctx.context);
3200
3201 enum ac_float_mode float_mode =
3202 options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
3203 AC_FLOAT_MODE_DEFAULT;
3204
3205 ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
3206
3207 memset(shader_info, 0, sizeof(*shader_info));
3208
3209 for (int i = 0; i < shader_count; ++i)
3210 radv_nir_shader_info_pass(shaders[i], options, &shader_info->info);
3211
3212 for (i = 0; i < RADV_UD_MAX_SETS; i++)
3213 shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
3214 for (i = 0; i < AC_UD_MAX_UD; i++)
3215 shader_info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;
3216
3217 ctx.max_workgroup_size = 0;
3218 for (int i = 0; i < shader_count; ++i) {
3219 ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
3220 ac_nir_get_max_workgroup_size(ctx.options->chip_class,
3221 shaders[i]));
3222 }
3223
3224 create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
3225 shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);
3226
3227 ctx.abi.inputs = &ctx.inputs[0];
3228 ctx.abi.emit_outputs = handle_shader_outputs_post;
3229 ctx.abi.emit_vertex = visit_emit_vertex;
3230 ctx.abi.load_ubo = radv_load_ubo;
3231 ctx.abi.load_ssbo = radv_load_ssbo;
3232 ctx.abi.load_sampler_desc = radv_get_sampler_desc;
3233 ctx.abi.load_resource = radv_load_resource;
3234 ctx.abi.clamp_shadow_reference = false;
3235 ctx.abi.gfx9_stride_size_workaround = ctx.ac.chip_class == GFX9;
3236
3237 if (shader_count >= 2)
3238 ac_init_exec_full_mask(&ctx.ac);
3239
3240 if (ctx.ac.chip_class == GFX9 &&
3241 shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
3242 ac_nir_fixup_ls_hs_input_vgprs(&ctx);
3243
3244 for (int i = 0; i < shader_count; ++i) {
3245 ctx.stage = shaders[i]->info.stage;
3246 ctx.output_mask = 0;
3247 ctx.num_output_clips = shaders[i]->info.clip_distance_array_size;
3248 ctx.num_output_culls = shaders[i]->info.cull_distance_array_size;
3249
3250 if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
3251 ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.ac.i32, "gs_next_vertex");
3252 ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
3253 ctx.abi.load_inputs = load_gs_input;
3254 ctx.abi.emit_primitive = visit_end_primitive;
3255 } else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
3256 ctx.tcs_outputs_read = shaders[i]->info.outputs_read;
3257 ctx.tcs_patch_outputs_read = shaders[i]->info.patch_outputs_read;
3258 ctx.abi.load_tess_varyings = load_tcs_varyings;
3259 ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
3260 ctx.abi.store_tcs_outputs = store_tcs_output;
3261 ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
3262 if (shader_count == 1)
3263 ctx.tcs_num_inputs = ctx.options->key.tcs.num_inputs;
3264 else
3265 ctx.tcs_num_inputs = util_last_bit64(shader_info->info.vs.ls_outputs_written);
3266 ctx.tcs_num_patches = get_tcs_num_patches(&ctx);
3267 } else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
3268 ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
3269 ctx.abi.load_tess_varyings = load_tes_input;
3270 ctx.abi.load_tess_coord = load_tess_coord;
3271 ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
3272 ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
3273 ctx.tcs_num_patches = ctx.options->key.tes.num_patches;
3274 } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
3275 if (shader_info->info.vs.needs_instance_id) {
3276 if (ctx.options->key.vs.as_ls) {
3277 ctx.shader_info->vs.vgpr_comp_cnt =
3278 MAX2(2, ctx.shader_info->vs.vgpr_comp_cnt);
3279 } else {
3280 ctx.shader_info->vs.vgpr_comp_cnt =
3281 MAX2(1, ctx.shader_info->vs.vgpr_comp_cnt);
3282 }
3283 }
3284 ctx.abi.load_base_vertex = radv_load_base_vertex;
3285 } else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
3286 shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
3287 ctx.abi.lookup_interp_param = lookup_interp_param;
3288 ctx.abi.load_sample_position = load_sample_position;
3289 ctx.abi.load_sample_mask_in = load_sample_mask_in;
3290 ctx.abi.emit_kill = radv_emit_kill;
3291 }
3292
3293 if (i)
3294 ac_emit_barrier(&ctx.ac, ctx.stage);
3295
3296 nir_foreach_variable(variable, &shaders[i]->outputs)
3297 scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);
3298
3299 if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
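/* Each output slot takes one vec4 (16 bytes) per emitted vertex in the
 * GSVS ring; more than four clip/cull distances spill into a second slot,
 * hence the addclip adjustment. */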
3300 unsigned addclip = shaders[i]->info.clip_distance_array_size +
3301 shaders[i]->info.cull_distance_array_size > 4;
3302 ctx.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
3303 ctx.max_gsvs_emit_size = ctx.gsvs_vertex_size *
3304 shaders[i]->info.gs.vertices_out;
3305 }
3306
3307 ac_setup_rings(&ctx);
3308
3309 LLVMBasicBlockRef merge_block;
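/* For merged shaders, merged_wave_info packs an 8-bit thread count per
 * stage; wrap each stage's body in an if (thread_id < count) so only the
 * threads that belong to stage i execute it. */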
3310 if (shader_count >= 2) {
3311 LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
3312 LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
3313 merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
3314
3315 LLVMValueRef count = ac_build_bfe(&ctx.ac, ctx.merged_wave_info,
3316 LLVMConstInt(ctx.ac.i32, 8 * i, false),
3317 LLVMConstInt(ctx.ac.i32, 8, false), false);
3318 LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
3319 LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
3320 thread_id, count, "");
3321 LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);
3322
3323 LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
3324 }
3325
3326 if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
3327 handle_fs_inputs(&ctx, shaders[i]);
3328 else if (shaders[i]->info.stage == MESA_SHADER_VERTEX)
3329 handle_vs_inputs(&ctx, shaders[i]);
3330 else if (shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
3331 prepare_gs_input_vgprs(&ctx);
3332
3333 ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i]);
3334
3335 if (shader_count >= 2) {
3336 LLVMBuildBr(ctx.ac.builder, merge_block);
3337 LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
3338 }
3339
3340 if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
3341 shader_info->gs.gsvs_vertex_size = ctx.gsvs_vertex_size;
3342 shader_info->gs.max_gsvs_emit_size = ctx.max_gsvs_emit_size;
3343 } else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
3344 shader_info->tcs.num_patches = ctx.tcs_num_patches;
3345 shader_info->tcs.lds_size = calculate_tess_lds_size(&ctx);
3346 }
3347 }
3348
3349 LLVMBuildRetVoid(ctx.ac.builder);
3350
3351 if (options->dump_preoptir)
3352 ac_dump_module(ctx.ac.module);
3353
3354 ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);
3355
3356 if (shader_count == 1)
3357 ac_nir_eliminate_const_vs_outputs(&ctx);
3358
3359 if (options->dump_shader) {
3360 ctx.shader_info->private_mem_vgprs =
3361 ac_count_scratch_private_memory(ctx.main_function);
3362 }
3363
3364 return ctx.ac.module;
3365 }
3366
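/* Diagnostic handler for LLVM: record in *context whether an error was
 * reported and print its description to stderr.
 */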
static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
	unsigned *retval = (unsigned *)context;
	LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
	char *description = LLVMGetDiagInfoDescription(di);

	if (severity == LLVMDSError) {
		*retval = 1;
		fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n",
			description);
	}

	LLVMDisposeMessage(description);
}

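/* Compile the module to a shader binary. Returns 0 on success, non-zero if
 * LLVM reported an error or the binary could not be produced.
 */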
static unsigned ac_llvm_compile(LLVMModuleRef M,
				struct ac_shader_binary *binary,
				struct ac_llvm_compiler *ac_llvm)
{
	unsigned retval = 0;
	LLVMContextRef llvm_ctx;

	/* Set up the diagnostic handler. */
	llvm_ctx = LLVMGetModuleContext(M);

	LLVMContextSetDiagnosticHandler(llvm_ctx, ac_diagnostic_handler,
					&retval);

	/* Compile the IR. */
	if (!radv_compile_to_binary(ac_llvm, M, binary))
		retval = 1;
	return retval;
}

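/* Run the LLVM backend on the module, read the hardware shader config from
 * the resulting binary and derive the stage-specific register counts. The
 * module and its LLVM context are disposed here.
 */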
static void ac_compile_llvm_module(struct ac_llvm_compiler *ac_llvm,
				   LLVMModuleRef llvm_module,
				   struct ac_shader_binary *binary,
				   struct ac_shader_config *config,
				   struct radv_shader_variant_info *shader_info,
				   gl_shader_stage stage,
				   const struct radv_nir_compiler_options *options)
{
	if (options->dump_shader)
		ac_dump_module(llvm_module);

	memset(binary, 0, sizeof(*binary));

	if (options->record_llvm_ir) {
		char *llvm_ir = LLVMPrintModuleToString(llvm_module);
		binary->llvm_ir_string = strdup(llvm_ir);
		LLVMDisposeMessage(llvm_ir);
	}

	int v = ac_llvm_compile(llvm_module, binary, ac_llvm);
	if (v) {
		fprintf(stderr, "compile failed\n");
	}

	if (options->dump_shader)
		fprintf(stderr, "disasm:\n%s\n", binary->disasm_string);

	ac_shader_binary_read_config(binary, config, 0, options->supports_spill);

	LLVMContextRef ctx = LLVMGetModuleContext(llvm_module);
	LLVMDisposeModule(llvm_module);
	LLVMContextDispose(ctx);

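	/* Count the input VGPRs the hardware preloads for the fragment shader:
	 * each enabled barycentric pair takes 2 VGPRs, the pull model takes 3,
	 * and the remaining inputs take 1 each.
	 */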
	if (stage == MESA_SHADER_FRAGMENT) {
		shader_info->num_input_vgprs = 0;
		if (G_0286CC_PERSP_SAMPLE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTER_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_PERSP_CENTROID_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_PERSP_PULL_MODEL_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 3;
		if (G_0286CC_LINEAR_SAMPLE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTER_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_LINEAR_CENTROID_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 2;
		if (G_0286CC_LINE_STIPPLE_TEX_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_X_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_Y_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_Z_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_W_FLOAT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_FRONT_FACE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_ANCILLARY_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_SAMPLE_COVERAGE_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
		if (G_0286CC_POS_FIXED_PT_ENA(config->spi_ps_input_addr))
			shader_info->num_input_vgprs += 1;
	}
	config->num_vgprs = MAX2(config->num_vgprs, shader_info->num_input_vgprs);

	/* +3 for the scratch wave offset and VCC (2 SGPRs). */
	config->num_sgprs = MAX2(config->num_sgprs,
				 shader_info->num_input_sgprs + 3);

	/* Enable 64-bit and 16-bit denormals, because there is no performance
	 * cost.
	 *
	 * If denormals are enabled, all floating-point output modifiers are
	 * ignored.
	 *
	 * Don't enable denormals for 32-bit floats, because:
	 * - Floating-point output modifiers would be ignored by the hw.
	 * - Some opcodes don't support denormals, such as v_mad_f32. We would
	 *   have to stop using those.
	 * - SI & CI would be very slow.
	 */
	config->float_mode |= V_00B028_FP_64_DENORMS;
}

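/* Copy the stage-specific information needed by the rest of the driver out
 * of the NIR shader and the compile options.
 */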
static void
ac_fill_shader_info(struct radv_shader_variant_info *shader_info, struct nir_shader *nir, const struct radv_nir_compiler_options *options)
{
	switch (nir->info.stage) {
	case MESA_SHADER_COMPUTE:
		for (int i = 0; i < 3; ++i)
			shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
		break;
	case MESA_SHADER_FRAGMENT:
		shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
		break;
	case MESA_SHADER_GEOMETRY:
		shader_info->gs.vertices_in = nir->info.gs.vertices_in;
		shader_info->gs.vertices_out = nir->info.gs.vertices_out;
		shader_info->gs.output_prim = nir->info.gs.output_primitive;
		shader_info->gs.invocations = nir->info.gs.invocations;
		break;
	case MESA_SHADER_TESS_EVAL:
		shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
		shader_info->tes.spacing = nir->info.tess.spacing;
		shader_info->tes.ccw = nir->info.tess.ccw;
		shader_info->tes.point_mode = nir->info.tess.point_mode;
		shader_info->tes.as_es = options->key.tes.as_es;
		break;
	case MESA_SHADER_TESS_CTRL:
		shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
		break;
	case MESA_SHADER_VERTEX:
		shader_info->vs.as_es = options->key.vs.as_es;
		shader_info->vs.as_ls = options->key.vs.as_ls;
		/* In LS mode we need at least 1; the invocation ID needs 2,
		 * which is handled elsewhere. */
		if (options->key.vs.as_ls)
			shader_info->vs.vgpr_comp_cnt = MAX2(1, shader_info->vs.vgpr_comp_cnt);
		break;
	default:
		break;
	}
}

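/* Translate the given NIR shaders (a single stage, or two stages merged on
 * GFX9) to LLVM IR and compile the result to a shader binary.
 */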
void
radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
			struct ac_shader_binary *binary,
			struct ac_shader_config *config,
			struct radv_shader_variant_info *shader_info,
			struct nir_shader *const *nir,
			int nir_count,
			const struct radv_nir_compiler_options *options)
{
	LLVMModuleRef llvm_module;

	llvm_module = ac_translate_nir_to_llvm(ac_llvm, nir, nir_count, shader_info,
					       options);

	ac_compile_llvm_module(ac_llvm, llvm_module, binary, config, shader_info,
			       nir[0]->info.stage, options);

	for (int i = 0; i < nir_count; ++i)
		ac_fill_shader_info(shader_info, nir[i], options);

	/* Determine the ES type (VS or TES) for the GS on GFX9. */
	if (options->chip_class == GFX9) {
		if (nir_count == 2 &&
		    nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
			shader_info->gs.es_type = nir[0]->info.stage;
		}
	}
}

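/* Emit the body of the GS copy shader: for the vertex selected by vertex_id,
 * load each component of every written output back from the GSVS ring and
 * export it like a regular VS output.
 */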
static void
ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
{
	LLVMValueRef vtx_offset =
		LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
			     LLVMConstInt(ctx->ac.i32, 4, false), "");
	int idx = 0;

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		int length = 4;
		int slot = idx;
		int slot_inc = 1;
		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i == VARYING_SLOT_CLIP_DIST0) {
			/* Unpack clip and cull distances from a single set of slots. */
			length = ctx->num_output_clips + ctx->num_output_culls;
			if (length > 4)
				slot_inc = 2;
		}

		for (int j = 0; j < length; j++) {
			LLVMValueRef value, soffset;

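			/* Address component j of this output slot in the GSVS
			 * ring: each component is stored for all emitted
			 * vertices in turn, and vtx_offset selects the current
			 * vertex.
			 */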
			soffset = LLVMConstInt(ctx->ac.i32,
					       (slot * 4 + j) *
					       ctx->gs_max_out_vertices * 16 * 4, false);

			value = ac_build_buffer_load(&ctx->ac, ctx->gsvs_ring,
						     1, ctx->ac.i32_0,
						     vtx_offset, soffset,
						     0, 1, 1, true, false);

			LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
			if (ac_get_type_size(type) == 2) {
				value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
				value = LLVMBuildTrunc(ctx->ac.builder, value, ctx->ac.i16, "");
			}

			LLVMBuildStore(ctx->ac.builder,
				       ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
		}
		idx += slot_inc;
	}
	handle_vs_outputs_post(ctx, false, false, &ctx->shader_info->vs.outinfo);
}

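/* Build and compile the GS copy shader, the hardware VS that runs after the
 * geometry shader to fetch vertices from the GSVS ring and export them to
 * the fixed-function pipeline.
 */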
void
radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
			    struct nir_shader *geom_shader,
			    struct ac_shader_binary *binary,
			    struct ac_shader_config *config,
			    struct radv_shader_variant_info *shader_info,
			    const struct radv_nir_compiler_options *options)
{
	struct radv_shader_context ctx = {0};
	ctx.options = options;
	ctx.shader_info = shader_info;

	ac_llvm_context_init(&ctx.ac, options->chip_class, options->family);
	ctx.context = ctx.ac.context;
	ctx.ac.module = ac_create_module(ac_llvm->tm, ctx.context);

	ctx.is_gs_copy_shader = true;

	enum ac_float_mode float_mode =
		options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
				       AC_FLOAT_MODE_DEFAULT;

	ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
	ctx.stage = MESA_SHADER_VERTEX;

	radv_nir_shader_info_pass(geom_shader, options, &shader_info->info);

	create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);

	ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
	ac_setup_rings(&ctx);

	ctx.num_output_clips = geom_shader->info.clip_distance_array_size;
	ctx.num_output_culls = geom_shader->info.cull_distance_array_size;

	nir_foreach_variable(variable, &geom_shader->outputs) {
		scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
		ac_handle_shader_output_decl(&ctx.ac, &ctx.abi, geom_shader,
					     variable, MESA_SHADER_VERTEX);
	}

	ac_gs_copy_shader_emit(&ctx);

	LLVMBuildRetVoid(ctx.ac.builder);

	ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);

	ac_compile_llvm_module(ac_llvm, ctx.ac.module, binary, config, shader_info,
			       MESA_SHADER_VERTEX, options);
}