radv: get rid of geometry user sgpr for num entries.
src/amd/vulkan/radv_nir_to_llvm.c (mesa.git)
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include "radv_private.h"
29 #include "radv_shader.h"
30 #include "nir/nir.h"
31
32 #include <llvm-c/Core.h>
33 #include <llvm-c/TargetMachine.h>
34 #include <llvm-c/Transforms/Scalar.h>
35
36 #include "sid.h"
37 #include "gfx9d.h"
38 #include "ac_binary.h"
39 #include "ac_llvm_util.h"
40 #include "ac_llvm_build.h"
41 #include "ac_shader_abi.h"
42 #include "ac_shader_util.h"
43 #include "ac_exp_param.h"
44
45 #define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)
46
47 struct radv_shader_context {
48 struct ac_llvm_context ac;
49 const struct radv_nir_compiler_options *options;
50 struct radv_shader_variant_info *shader_info;
51 struct ac_shader_abi abi;
52
53 unsigned max_workgroup_size;
54 LLVMContextRef context;
55 LLVMValueRef main_function;
56
57 LLVMValueRef descriptor_sets[RADV_UD_MAX_SETS];
58 LLVMValueRef ring_offsets;
59
60 LLVMValueRef vertex_buffers;
61 LLVMValueRef rel_auto_id;
62 LLVMValueRef vs_prim_id;
63 LLVMValueRef es2gs_offset;
64
65 LLVMValueRef oc_lds;
66 LLVMValueRef merged_wave_info;
67 LLVMValueRef tess_factor_offset;
68 LLVMValueRef tes_rel_patch_id;
69 LLVMValueRef tes_u;
70 LLVMValueRef tes_v;
71
72 LLVMValueRef gsvs_ring_stride;
73 LLVMValueRef gs2vs_offset;
74 LLVMValueRef gs_wave_id;
75 LLVMValueRef gs_vtx_offset[6];
76
77 LLVMValueRef esgs_ring;
78 LLVMValueRef gsvs_ring;
79 LLVMValueRef hs_ring_tess_offchip;
80 LLVMValueRef hs_ring_tess_factor;
81
82 LLVMValueRef sample_pos_offset;
83 LLVMValueRef persp_sample, persp_center, persp_centroid;
84 LLVMValueRef linear_sample, linear_center, linear_centroid;
85
86 gl_shader_stage stage;
87
88 LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];
89
90 uint64_t input_mask;
91 uint64_t output_mask;
92 uint8_t num_output_clips;
93 uint8_t num_output_culls;
94
95 bool is_gs_copy_shader;
96 LLVMValueRef gs_next_vertex;
97 unsigned gs_max_out_vertices;
98
99 unsigned tes_primitive_mode;
100
101 uint32_t tcs_patch_outputs_read;
102 uint64_t tcs_outputs_read;
103 uint32_t tcs_vertices_per_patch;
104 uint32_t tcs_num_inputs;
105 uint32_t tcs_num_patches;
106 };
107
108 enum radeon_llvm_calling_convention {
109 RADEON_LLVM_AMDGPU_VS = 87,
110 RADEON_LLVM_AMDGPU_GS = 88,
111 RADEON_LLVM_AMDGPU_PS = 89,
112 RADEON_LLVM_AMDGPU_CS = 90,
113 RADEON_LLVM_AMDGPU_HS = 93,
114 };
115
116 static inline struct radv_shader_context *
117 radv_shader_context_from_abi(struct ac_shader_abi *abi)
118 {
119 struct radv_shader_context *ctx = NULL;
120 return container_of(abi, ctx, abi);
121 }
122
123 static LLVMValueRef get_rel_patch_id(struct radv_shader_context *ctx)
124 {
125 switch (ctx->stage) {
126 case MESA_SHADER_TESS_CTRL:
127 return ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
128 case MESA_SHADER_TESS_EVAL:
129 return ctx->tes_rel_patch_id;
131 default:
132 unreachable("Illegal stage");
133 }
134 }
135
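/* Determines how many patches one TCS threadgroup processes. Worked
 * example with hypothetical counts (not taken from a real pipeline):
 * 2 TCS inputs, 3 input and 3 output vertices, 4 per-vertex and 2
 * per-patch outputs give input_patch_size = 3 * 32 = 96 bytes and
 * output_patch_size = 3 * 64 + 32 = 224 bytes; the one-wave limit yields
 * 64 / 3 * 4 = 84 patches, the 64 KiB LDS limit 65536 / 320 = 204, and
 * the final cap of 40 wins.
 */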
136 static unsigned
137 get_tcs_num_patches(struct radv_shader_context *ctx)
138 {
139 unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
140 unsigned num_tcs_output_cp = ctx->tcs_vertices_per_patch;
141 uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
142 uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
143 uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
144 uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
145 uint32_t output_vertex_size = num_tcs_outputs * 16;
146 uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
147 uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
148 unsigned num_patches;
149 unsigned hardware_lds_size;
150
151 /* Ensure that we only need one wave per SIMD so we don't need to check
152 * resource usage. Also ensures that the number of TCS input and output
153 * vertices per threadgroup is at most 256.
154 */
155 num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
156 /* Make sure that the data fits in LDS. This assumes the shaders only
157 * use LDS for the inputs and outputs.
158 */
159 hardware_lds_size = ctx->options->chip_class >= CIK ? 65536 : 32768;
160 num_patches = MIN2(num_patches, hardware_lds_size / (input_patch_size + output_patch_size));
161 /* Make sure the output data fits in the offchip buffer */
162 num_patches = MIN2(num_patches, (ctx->options->tess_offchip_block_dw_size * 4) / output_patch_size);
163 /* Not necessary for correctness, but improves performance. The
164 * specific value is taken from the proprietary driver.
165 */
166 num_patches = MIN2(num_patches, 40);
167
168 /* SI bug workaround - limit LS-HS threadgroups to only one wave. */
169 if (ctx->options->chip_class == SI) {
170 unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
171 num_patches = MIN2(num_patches, one_wave);
172 }
173 return num_patches;
174 }
175
176 static unsigned
177 calculate_tess_lds_size(struct radv_shader_context *ctx)
178 {
179 unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
180 unsigned num_tcs_output_cp;
181 unsigned num_tcs_outputs, num_tcs_patch_outputs;
182 unsigned input_vertex_size, output_vertex_size;
183 unsigned input_patch_size, output_patch_size;
184 unsigned pervertex_output_patch_size;
185 unsigned output_patch0_offset;
186 unsigned num_patches;
187 unsigned lds_size;
188
189 num_tcs_output_cp = ctx->tcs_vertices_per_patch;
190 num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
191 num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
192
193 input_vertex_size = ctx->tcs_num_inputs * 16;
194 output_vertex_size = num_tcs_outputs * 16;
195
196 input_patch_size = num_tcs_input_cp * input_vertex_size;
197
198 pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
199 output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
200
201 num_patches = ctx->tcs_num_patches;
202 output_patch0_offset = input_patch_size * num_patches;
203
204 lds_size = output_patch0_offset + output_patch_size * num_patches;
205 return lds_size;
206 }
207
208 /* Tessellation shaders pass outputs to the next shader using LDS.
209 *
210 * LS outputs = TCS inputs
211 * TCS outputs = TES inputs
212 *
213 * The LDS layout is:
214 * - TCS inputs for patch 0
215 * - TCS inputs for patch 1
216 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
217 * - ...
218 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
219 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
220 * - TCS outputs for patch 1
221 * - Per-patch TCS outputs for patch 1
222 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
223 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
224 * - ...
225 *
226 * All three shaders VS(LS), TCS, TES share the same LDS space.
227 */
228 static LLVMValueRef
229 get_tcs_in_patch_stride(struct radv_shader_context *ctx)
230 {
231 assert(ctx->stage == MESA_SHADER_TESS_CTRL);
232 uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
233 uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
234
235 input_patch_size /= 4;
236 return LLVMConstInt(ctx->ac.i32, input_patch_size, false);
237 }
238
239 static LLVMValueRef
240 get_tcs_out_patch_stride(struct radv_shader_context *ctx)
241 {
242 uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
243 uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
244 uint32_t output_vertex_size = num_tcs_outputs * 16;
245 uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
246 uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
247 output_patch_size /= 4;
248 return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
249 }
250
251 static LLVMValueRef
252 get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
253 {
254 uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
255 uint32_t output_vertex_size = num_tcs_outputs * 16;
256 output_vertex_size /= 4;
257 return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
258 }
259
260 static LLVMValueRef
261 get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
262 {
263 assert(ctx->stage == MESA_SHADER_TESS_CTRL);
264 uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
265 uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
266 uint32_t output_patch0_offset = input_patch_size;
267 unsigned num_patches = ctx->tcs_num_patches;
268
269 output_patch0_offset *= num_patches;
270 output_patch0_offset /= 4;
271 return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
272 }
273
274 static LLVMValueRef
275 get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
276 {
277 assert(ctx->stage == MESA_SHADER_TESS_CTRL);
278 uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
279 uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
280 uint32_t output_patch0_offset = input_patch_size;
281
282 uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
283 uint32_t output_vertex_size = num_tcs_outputs * 16;
284 uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
285 unsigned num_patches = ctx->tcs_num_patches;
286
287 output_patch0_offset *= num_patches;
288 output_patch0_offset += pervertex_output_patch_size;
289 output_patch0_offset /= 4;
290 return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
291 }
292
293 static LLVMValueRef
294 get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
295 {
296 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
297 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
298
299 return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
300 }
301
302 static LLVMValueRef
303 get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
304 {
305 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
306 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
307 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
308
309 return LLVMBuildAdd(ctx->ac.builder, patch0_offset,
310 LLVMBuildMul(ctx->ac.builder, patch_stride,
311 rel_patch_id, ""),
312 "");
313 }
314
315 static LLVMValueRef
316 get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
317 {
318 LLVMValueRef patch0_patch_data_offset =
319 get_tcs_out_patch0_patch_data_offset(ctx);
320 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
321 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
322
323 return LLVMBuildAdd(ctx->ac.builder, patch0_patch_data_offset,
324 LLVMBuildMul(ctx->ac.builder, patch_stride,
325 rel_patch_id, ""),
326 "");
327 }
328
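/* Argument plumbing: add_arg() records each parameter's LLVM type plus a
 * destination pointer, and assign_arguments() later stores the matching
 * LLVMGetParam() value through that pointer. SGPR arguments are counted
 * separately so create_llvm_function() can mark them "inreg"; entries in
 * array_params_mask additionally get noalias/dereferenceable attributes.
 */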
329 #define MAX_ARGS 23
330 struct arg_info {
331 LLVMTypeRef types[MAX_ARGS];
332 LLVMValueRef *assign[MAX_ARGS];
333 unsigned array_params_mask;
334 uint8_t count;
335 uint8_t sgpr_count;
336 uint8_t num_sgprs_used;
337 uint8_t num_vgprs_used;
338 };
339
340 enum ac_arg_regfile {
341 ARG_SGPR,
342 ARG_VGPR,
343 };
344
345 static void
346 add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
347 LLVMValueRef *param_ptr)
348 {
349 assert(info->count < MAX_ARGS);
350
351 info->assign[info->count] = param_ptr;
352 info->types[info->count] = type;
353 info->count++;
354
355 if (regfile == ARG_SGPR) {
356 info->num_sgprs_used += ac_get_type_size(type) / 4;
357 info->sgpr_count++;
358 } else {
359 assert(regfile == ARG_VGPR);
360 info->num_vgprs_used += ac_get_type_size(type) / 4;
361 }
362 }
363
364 static inline void
365 add_array_arg(struct arg_info *info, LLVMTypeRef type, LLVMValueRef *param_ptr)
366 {
367 info->array_params_mask |= (1 << info->count);
368 add_arg(info, ARG_SGPR, type, param_ptr);
369 }
370
371 static void assign_arguments(LLVMValueRef main_function,
372 struct arg_info *info)
373 {
374 unsigned i;
375 for (i = 0; i < info->count; i++) {
376 if (info->assign[i])
377 *info->assign[i] = LLVMGetParam(main_function, i);
378 }
379 }
380
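/* Builds the single "main" function for the shader. The calling
 * convention set here (compute) is a placeholder; create_function()
 * overrides it per stage via set_llvm_calling_convention() afterwards.
 */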
381 static LLVMValueRef
382 create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
383 LLVMBuilderRef builder, LLVMTypeRef *return_types,
384 unsigned num_return_elems,
385 struct arg_info *args,
386 unsigned max_workgroup_size,
387 bool unsafe_math)
388 {
389 LLVMTypeRef main_function_type, ret_type;
390 LLVMBasicBlockRef main_function_body;
391
392 if (num_return_elems)
393 ret_type = LLVMStructTypeInContext(ctx, return_types,
394 num_return_elems, true);
395 else
396 ret_type = LLVMVoidTypeInContext(ctx);
397
398 /* Setup the function */
399 main_function_type =
400 LLVMFunctionType(ret_type, args->types, args->count, 0);
401 LLVMValueRef main_function =
402 LLVMAddFunction(module, "main", main_function_type);
403 main_function_body =
404 LLVMAppendBasicBlockInContext(ctx, main_function, "main_body");
405 LLVMPositionBuilderAtEnd(builder, main_function_body);
406
407 LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
408 for (unsigned i = 0; i < args->sgpr_count; ++i) {
409 ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);
410
411 if (args->array_params_mask & (1 << i)) {
412 LLVMValueRef P = LLVMGetParam(main_function, i);
413 ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
414 ac_add_attr_dereferenceable(P, UINT64_MAX);
415 }
416 }
417
418 if (max_workgroup_size) {
419 ac_llvm_add_target_dep_function_attr(main_function,
420 "amdgpu-max-work-group-size",
421 max_workgroup_size);
422 }
423 if (unsafe_math) {
424 /* These were copied from some LLVM test. */
425 LLVMAddTargetDependentFunctionAttr(main_function,
426 "less-precise-fpmad",
427 "true");
428 LLVMAddTargetDependentFunctionAttr(main_function,
429 "no-infs-fp-math",
430 "true");
431 LLVMAddTargetDependentFunctionAttr(main_function,
432 "no-nans-fp-math",
433 "true");
434 LLVMAddTargetDependentFunctionAttr(main_function,
435 "unsafe-fp-math",
436 "true");
437 LLVMAddTargetDependentFunctionAttr(main_function,
438 "no-signed-zeros-fp-math",
439 "true");
440 }
441 return main_function;
442 }
443
444
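/* User SGPR bookkeeping: records at which SGPR index each piece of user
 * data lives and how many SGPRs it spans, so the driver side (outside
 * this file) knows where to upload it. indirect_offset > 0 marks data
 * reached through a pointer instead of being loaded directly into SGPRs.
 */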
445 static void
446 set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx, uint8_t num_sgprs,
447 uint32_t indirect_offset)
448 {
449 ud_info->sgpr_idx = *sgpr_idx;
450 ud_info->num_sgprs = num_sgprs;
451 ud_info->indirect = indirect_offset > 0;
452 ud_info->indirect_offset = indirect_offset;
453 *sgpr_idx += num_sgprs;
454 }
455
456 static void
457 set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
458 uint8_t num_sgprs)
459 {
460 struct radv_userdata_info *ud_info =
461 &ctx->shader_info->user_sgprs_locs.shader_data[idx];
462 assert(ud_info);
463
464 set_loc(ud_info, sgpr_idx, num_sgprs, 0);
465 }
466
467 static void
468 set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
469 uint32_t indirect_offset)
470 {
471 struct radv_userdata_info *ud_info =
472 &ctx->shader_info->user_sgprs_locs.descriptor_sets[idx];
473 assert(ud_info);
474
475 set_loc(ud_info, sgpr_idx, 2, indirect_offset);
476 }
477
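/* Result of allocate_user_sgprs(): whether the ring-offset pointer is
 * needed, how many user SGPRs the stage consumes, and whether descriptor
 * sets exceeded the budget and must all go through one indirect pointer.
 */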
478 struct user_sgpr_info {
479 bool need_ring_offsets;
480 uint8_t sgpr_count;
481 bool indirect_all_descriptor_sets;
482 };
483
484 static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
485 gl_shader_stage stage)
486 {
487 switch (stage) {
488 case MESA_SHADER_VERTEX:
489 if (ctx->shader_info->info.needs_multiview_view_index ||
490 (!ctx->options->key.vs.as_es && !ctx->options->key.vs.as_ls && ctx->options->key.has_multiview_view_index))
491 return true;
492 break;
493 case MESA_SHADER_TESS_EVAL:
494 if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.tes.as_es && ctx->options->key.has_multiview_view_index))
495 return true;
496 break;
497 case MESA_SHADER_GEOMETRY:
498 case MESA_SHADER_TESS_CTRL:
499 if (ctx->shader_info->info.needs_multiview_view_index)
500 return true;
501 break;
502 default:
503 break;
504 }
505 return false;
506 }
507
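/* VS user SGPRs: 2 for the 64-bit vertex-buffer descriptor pointer (when
 * used), plus base_vertex and start_instance, plus draw_id if needed.
 */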
508 static uint8_t
509 count_vs_user_sgprs(struct radv_shader_context *ctx)
510 {
511 uint8_t count = 0;
512
513 count += ctx->shader_info->info.vs.has_vertex_buffers ? 2 : 0;
514 count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;
515
516 return count;
517 }
518
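/* Chooses the user SGPR layout within the hardware budget: 32 user SGPRs
 * on GFX9+, 16 on older chips. If two SGPRs per used descriptor set do
 * not fit into what remains, every set is demoted to a single 64-bit
 * pointer and loaded indirectly instead (see set_global_input_locs()).
 */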
519 static void allocate_user_sgprs(struct radv_shader_context *ctx,
520 gl_shader_stage stage,
521 bool has_previous_stage,
522 gl_shader_stage previous_stage,
523 bool needs_view_index,
524 struct user_sgpr_info *user_sgpr_info)
525 {
526 memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));
527
528 /* Until we sort out scratch/global buffers, always assign ring offsets for GS/VS/ES. */
529 if (stage == MESA_SHADER_GEOMETRY ||
530 stage == MESA_SHADER_VERTEX ||
531 stage == MESA_SHADER_TESS_CTRL ||
532 stage == MESA_SHADER_TESS_EVAL ||
533 ctx->is_gs_copy_shader)
534 user_sgpr_info->need_ring_offsets = true;
535
536 if (stage == MESA_SHADER_FRAGMENT &&
537 ctx->shader_info->info.ps.needs_sample_positions)
538 user_sgpr_info->need_ring_offsets = true;
539
540 /* 2 user sgprs will nearly always be allocated for scratch/rings */
541 if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
542 user_sgpr_info->sgpr_count += 2;
543 }
544
545 switch (stage) {
546 case MESA_SHADER_COMPUTE:
547 if (ctx->shader_info->info.cs.uses_grid_size)
548 user_sgpr_info->sgpr_count += 3;
549 break;
550 case MESA_SHADER_FRAGMENT:
551 user_sgpr_info->sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
552 break;
553 case MESA_SHADER_VERTEX:
554 if (!ctx->is_gs_copy_shader)
555 user_sgpr_info->sgpr_count += count_vs_user_sgprs(ctx);
556 break;
557 case MESA_SHADER_TESS_CTRL:
558 if (has_previous_stage) {
559 if (previous_stage == MESA_SHADER_VERTEX)
560 user_sgpr_info->sgpr_count += count_vs_user_sgprs(ctx);
561 }
562 break;
563 case MESA_SHADER_TESS_EVAL:
564 break;
565 case MESA_SHADER_GEOMETRY:
566 if (has_previous_stage) {
567 if (previous_stage == MESA_SHADER_VERTEX) {
568 user_sgpr_info->sgpr_count += count_vs_user_sgprs(ctx);
569 }
570 }
571 user_sgpr_info->sgpr_count += 1;
572 break;
573 default:
574 break;
575 }
576
577 if (needs_view_index)
578 user_sgpr_info->sgpr_count++;
579
580 if (ctx->shader_info->info.loads_push_constants)
581 user_sgpr_info->sgpr_count += 2;
582
583 uint32_t available_sgprs = ctx->options->chip_class >= GFX9 ? 32 : 16;
584 uint32_t remaining_sgprs = available_sgprs - user_sgpr_info->sgpr_count;
585
586 if (remaining_sgprs / 2 < util_bitcount(ctx->shader_info->info.desc_set_used_mask)) {
587 user_sgpr_info->sgpr_count += 2;
588 user_sgpr_info->indirect_all_descriptor_sets = true;
589 } else {
590 user_sgpr_info->sgpr_count += util_bitcount(ctx->shader_info->info.desc_set_used_mask) * 2;
591 }
592 }
593
594 static void
595 declare_global_input_sgprs(struct radv_shader_context *ctx,
596 gl_shader_stage stage,
597 bool has_previous_stage,
598 gl_shader_stage previous_stage,
599 const struct user_sgpr_info *user_sgpr_info,
600 struct arg_info *args,
601 LLVMValueRef *desc_sets)
602 {
603 LLVMTypeRef type = ac_array_in_const_addr_space(ctx->ac.i8);
604 unsigned num_sets = ctx->options->layout ?
605 ctx->options->layout->num_sets : 0;
606 unsigned stage_mask = 1 << stage;
607
608 if (has_previous_stage)
609 stage_mask |= 1 << previous_stage;
610
611 /* 1 for each descriptor set */
612 if (!user_sgpr_info->indirect_all_descriptor_sets) {
613 for (unsigned i = 0; i < num_sets; ++i) {
614 if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
615 ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
616 add_array_arg(args, type,
617 &ctx->descriptor_sets[i]);
618 }
619 }
620 } else {
621 add_array_arg(args, ac_array_in_const_addr_space(type), desc_sets);
622 }
623
624 if (ctx->shader_info->info.loads_push_constants) {
625 /* 1 for push constants and dynamic descriptors */
626 add_array_arg(args, type, &ctx->abi.push_constants);
627 }
628 }
629
630 static void
631 declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
632 gl_shader_stage stage,
633 bool has_previous_stage,
634 gl_shader_stage previous_stage,
635 struct arg_info *args)
636 {
637 if (!ctx->is_gs_copy_shader &&
638 (stage == MESA_SHADER_VERTEX ||
639 (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
640 if (ctx->shader_info->info.vs.has_vertex_buffers) {
641 add_arg(args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
642 &ctx->vertex_buffers);
643 }
644 add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
645 add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
646 if (ctx->shader_info->info.vs.needs_draw_id) {
647 add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
648 }
649 }
650 }
651
652 static void
653 declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
654 {
655 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
656 if (!ctx->is_gs_copy_shader) {
657 if (ctx->options->key.vs.as_ls) {
658 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
659 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
660 } else {
661 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
662 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
663 }
664 add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
665 }
666 }
667
668 static void
669 declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
670 {
671 add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_u);
672 add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_v);
673 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->tes_rel_patch_id);
674 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.tes_patch_id);
675 }
676
677 static void
678 set_global_input_locs(struct radv_shader_context *ctx, gl_shader_stage stage,
679 bool has_previous_stage, gl_shader_stage previous_stage,
680 const struct user_sgpr_info *user_sgpr_info,
681 LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
682 {
683 unsigned num_sets = ctx->options->layout ?
684 ctx->options->layout->num_sets : 0;
685 unsigned stage_mask = 1 << stage;
686
687 if (has_previous_stage)
688 stage_mask |= 1 << previous_stage;
689
690 if (!user_sgpr_info->indirect_all_descriptor_sets) {
691 for (unsigned i = 0; i < num_sets; ++i) {
692 if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
693 ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
694 set_loc_desc(ctx, i, user_sgpr_idx, 0);
695 } else
696 ctx->descriptor_sets[i] = NULL;
697 }
698 } else {
699 set_loc_shader(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
700 user_sgpr_idx, 2);
701
702 for (unsigned i = 0; i < num_sets; ++i) {
703 if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
704 ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
705 set_loc_desc(ctx, i, user_sgpr_idx, i * 8);
706 ctx->descriptor_sets[i] =
707 ac_build_load_to_sgpr(&ctx->ac,
708 desc_sets,
709 LLVMConstInt(ctx->ac.i32, i, false));
710
711 } else
712 ctx->descriptor_sets[i] = NULL;
713 }
714 ctx->shader_info->need_indirect_descriptor_sets = true;
715 }
716
717 if (ctx->shader_info->info.loads_push_constants) {
718 set_loc_shader(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx, 2);
719 }
720 }
721
722 static void
723 set_vs_specific_input_locs(struct radv_shader_context *ctx,
724 gl_shader_stage stage, bool has_previous_stage,
725 gl_shader_stage previous_stage,
726 uint8_t *user_sgpr_idx)
727 {
728 if (!ctx->is_gs_copy_shader &&
729 (stage == MESA_SHADER_VERTEX ||
730 (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
731 if (ctx->shader_info->info.vs.has_vertex_buffers) {
732 set_loc_shader(ctx, AC_UD_VS_VERTEX_BUFFERS,
733 user_sgpr_idx, 2);
734 }
735
736 unsigned vs_num = 2;
737 if (ctx->shader_info->info.vs.needs_draw_id)
738 vs_num++;
739
740 set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
741 user_sgpr_idx, vs_num);
742 }
743 }
744
745 static void set_llvm_calling_convention(LLVMValueRef func,
746 gl_shader_stage stage)
747 {
748 enum radeon_llvm_calling_convention calling_conv;
749
750 switch (stage) {
751 case MESA_SHADER_VERTEX:
752 case MESA_SHADER_TESS_EVAL:
753 calling_conv = RADEON_LLVM_AMDGPU_VS;
754 break;
755 case MESA_SHADER_GEOMETRY:
756 calling_conv = RADEON_LLVM_AMDGPU_GS;
757 break;
758 case MESA_SHADER_TESS_CTRL:
759 calling_conv = HAVE_LLVM >= 0x0500 ? RADEON_LLVM_AMDGPU_HS : RADEON_LLVM_AMDGPU_VS;
760 break;
761 case MESA_SHADER_FRAGMENT:
762 calling_conv = RADEON_LLVM_AMDGPU_PS;
763 break;
764 case MESA_SHADER_COMPUTE:
765 calling_conv = RADEON_LLVM_AMDGPU_CS;
766 break;
767 default:
768 unreachable("Unhandled shader type");
769 }
770
771 LLVMSetFunctionCallConv(func, calling_conv);
772 }
773
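/* Declares the full argument list for one hardware stage and records the
 * user SGPR locations. For GFX9 merged stages (has_previous_stage) the
 * first declared SGPRs are fixed system registers, so the user SGPR
 * count restarts at 0 once the arguments have been assigned below.
 */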
774 static void create_function(struct radv_shader_context *ctx,
775 gl_shader_stage stage,
776 bool has_previous_stage,
777 gl_shader_stage previous_stage)
778 {
779 uint8_t user_sgpr_idx;
780 struct user_sgpr_info user_sgpr_info;
781 struct arg_info args = {};
782 LLVMValueRef desc_sets;
783 bool needs_view_index = needs_view_index_sgpr(ctx, stage);
784 allocate_user_sgprs(ctx, stage, has_previous_stage,
785 previous_stage, needs_view_index, &user_sgpr_info);
786
787 if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
788 add_arg(&args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
789 &ctx->ring_offsets);
790 }
791
792 switch (stage) {
793 case MESA_SHADER_COMPUTE:
794 declare_global_input_sgprs(ctx, stage, has_previous_stage,
795 previous_stage, &user_sgpr_info,
796 &args, &desc_sets);
797
798 if (ctx->shader_info->info.cs.uses_grid_size) {
799 add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
800 &ctx->abi.num_work_groups);
801 }
802
803 for (int i = 0; i < 3; i++) {
804 ctx->abi.workgroup_ids[i] = NULL;
805 if (ctx->shader_info->info.cs.uses_block_id[i]) {
806 add_arg(&args, ARG_SGPR, ctx->ac.i32,
807 &ctx->abi.workgroup_ids[i]);
808 }
809 }
810
811 if (ctx->shader_info->info.cs.uses_local_invocation_idx)
812 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
813 add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
814 &ctx->abi.local_invocation_ids);
815 break;
816 case MESA_SHADER_VERTEX:
817 declare_global_input_sgprs(ctx, stage, has_previous_stage,
818 previous_stage, &user_sgpr_info,
819 &args, &desc_sets);
820 declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
821 previous_stage, &args);
822
823 if (needs_view_index)
824 add_arg(&args, ARG_SGPR, ctx->ac.i32,
825 &ctx->abi.view_index);
826 if (ctx->options->key.vs.as_es)
827 add_arg(&args, ARG_SGPR, ctx->ac.i32,
828 &ctx->es2gs_offset);
829
830 declare_vs_input_vgprs(ctx, &args);
831 break;
832 case MESA_SHADER_TESS_CTRL:
833 if (has_previous_stage) {
834 // First 6 system regs
835 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
836 add_arg(&args, ARG_SGPR, ctx->ac.i32,
837 &ctx->merged_wave_info);
838 add_arg(&args, ARG_SGPR, ctx->ac.i32,
839 &ctx->tess_factor_offset);
840
841 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
842 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
843 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
844
845 declare_global_input_sgprs(ctx, stage,
846 has_previous_stage,
847 previous_stage,
848 &user_sgpr_info, &args,
849 &desc_sets);
850 declare_vs_specific_input_sgprs(ctx, stage,
851 has_previous_stage,
852 previous_stage, &args);
853
854 if (needs_view_index)
855 add_arg(&args, ARG_SGPR, ctx->ac.i32,
856 &ctx->abi.view_index);
857
858 add_arg(&args, ARG_VGPR, ctx->ac.i32,
859 &ctx->abi.tcs_patch_id);
860 add_arg(&args, ARG_VGPR, ctx->ac.i32,
861 &ctx->abi.tcs_rel_ids);
862
863 declare_vs_input_vgprs(ctx, &args);
864 } else {
865 declare_global_input_sgprs(ctx, stage,
866 has_previous_stage,
867 previous_stage,
868 &user_sgpr_info, &args,
869 &desc_sets);
870
871 if (needs_view_index)
872 add_arg(&args, ARG_SGPR, ctx->ac.i32,
873 &ctx->abi.view_index);
874
875 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
876 add_arg(&args, ARG_SGPR, ctx->ac.i32,
877 &ctx->tess_factor_offset);
878 add_arg(&args, ARG_VGPR, ctx->ac.i32,
879 &ctx->abi.tcs_patch_id);
880 add_arg(&args, ARG_VGPR, ctx->ac.i32,
881 &ctx->abi.tcs_rel_ids);
882 }
883 break;
884 case MESA_SHADER_TESS_EVAL:
885 declare_global_input_sgprs(ctx, stage, has_previous_stage,
886 previous_stage, &user_sgpr_info,
887 &args, &desc_sets);
888
889 if (needs_view_index)
890 add_arg(&args, ARG_SGPR, ctx->ac.i32,
891 &ctx->abi.view_index);
892
893 if (ctx->options->key.tes.as_es) {
894 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
895 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
896 add_arg(&args, ARG_SGPR, ctx->ac.i32,
897 &ctx->es2gs_offset);
898 } else {
899 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
900 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
901 }
902 declare_tes_input_vgprs(ctx, &args);
903 break;
904 case MESA_SHADER_GEOMETRY:
905 if (has_previous_stage) {
906 // First 6 system regs
907 add_arg(&args, ARG_SGPR, ctx->ac.i32,
908 &ctx->gs2vs_offset);
909 add_arg(&args, ARG_SGPR, ctx->ac.i32,
910 &ctx->merged_wave_info);
911 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
912
913 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
914 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
915 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
916
917 declare_global_input_sgprs(ctx, stage,
918 has_previous_stage,
919 previous_stage,
920 &user_sgpr_info, &args,
921 &desc_sets);
922
923 if (previous_stage != MESA_SHADER_TESS_EVAL) {
924 declare_vs_specific_input_sgprs(ctx, stage,
925 has_previous_stage,
926 previous_stage,
927 &args);
928 }
929
930 add_arg(&args, ARG_SGPR, ctx->ac.i32,
931 &ctx->gsvs_ring_stride);
932 if (needs_view_index)
933 add_arg(&args, ARG_SGPR, ctx->ac.i32,
934 &ctx->abi.view_index);
935
936 add_arg(&args, ARG_VGPR, ctx->ac.i32,
937 &ctx->gs_vtx_offset[0]);
938 add_arg(&args, ARG_VGPR, ctx->ac.i32,
939 &ctx->gs_vtx_offset[2]);
940 add_arg(&args, ARG_VGPR, ctx->ac.i32,
941 &ctx->abi.gs_prim_id);
942 add_arg(&args, ARG_VGPR, ctx->ac.i32,
943 &ctx->abi.gs_invocation_id);
944 add_arg(&args, ARG_VGPR, ctx->ac.i32,
945 &ctx->gs_vtx_offset[4]);
946
947 if (previous_stage == MESA_SHADER_VERTEX) {
948 declare_vs_input_vgprs(ctx, &args);
949 } else {
950 declare_tes_input_vgprs(ctx, &args);
951 }
952 } else {
953 declare_global_input_sgprs(ctx, stage,
954 has_previous_stage,
955 previous_stage,
956 &user_sgpr_info, &args,
957 &desc_sets);
958
959 add_arg(&args, ARG_SGPR, ctx->ac.i32,
960 &ctx->gsvs_ring_stride);
961 if (needs_view_index)
962 add_arg(&args, ARG_SGPR, ctx->ac.i32,
963 &ctx->abi.view_index);
964
965 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs2vs_offset);
966 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs_wave_id);
967 add_arg(&args, ARG_VGPR, ctx->ac.i32,
968 &ctx->gs_vtx_offset[0]);
969 add_arg(&args, ARG_VGPR, ctx->ac.i32,
970 &ctx->gs_vtx_offset[1]);
971 add_arg(&args, ARG_VGPR, ctx->ac.i32,
972 &ctx->abi.gs_prim_id);
973 add_arg(&args, ARG_VGPR, ctx->ac.i32,
974 &ctx->gs_vtx_offset[2]);
975 add_arg(&args, ARG_VGPR, ctx->ac.i32,
976 &ctx->gs_vtx_offset[3]);
977 add_arg(&args, ARG_VGPR, ctx->ac.i32,
978 &ctx->gs_vtx_offset[4]);
979 add_arg(&args, ARG_VGPR, ctx->ac.i32,
980 &ctx->gs_vtx_offset[5]);
981 add_arg(&args, ARG_VGPR, ctx->ac.i32,
982 &ctx->abi.gs_invocation_id);
983 }
984 break;
985 case MESA_SHADER_FRAGMENT:
986 declare_global_input_sgprs(ctx, stage, has_previous_stage,
987 previous_stage, &user_sgpr_info,
988 &args, &desc_sets);
989
990 if (ctx->shader_info->info.ps.needs_sample_positions)
991 add_arg(&args, ARG_SGPR, ctx->ac.i32,
992 &ctx->sample_pos_offset);
993
994 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
995 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_sample);
996 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_center);
997 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_centroid);
998 add_arg(&args, ARG_VGPR, ctx->ac.v3i32, NULL); /* persp pull model */
999 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_sample);
1000 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_center);
1001 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_centroid);
1002 add_arg(&args, ARG_VGPR, ctx->ac.f32, NULL); /* line stipple tex */
1003 add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[0]);
1004 add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[1]);
1005 add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[2]);
1006 add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[3]);
1007 add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.front_face);
1008 add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.ancillary);
1009 add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.sample_coverage);
1010 add_arg(&args, ARG_VGPR, ctx->ac.i32, NULL); /* fixed pt */
1011 break;
1012 default:
1013 unreachable("Shader stage not implemented");
1014 }
1015
1016 ctx->main_function = create_llvm_function(
1017 ctx->context, ctx->ac.module, ctx->ac.builder, NULL, 0, &args,
1018 ctx->max_workgroup_size,
1019 ctx->options->unsafe_math);
1020 set_llvm_calling_convention(ctx->main_function, stage);
1021
1022
1023 ctx->shader_info->num_input_vgprs = 0;
1024 ctx->shader_info->num_input_sgprs = ctx->options->supports_spill ? 2 : 0;
1025
1026 ctx->shader_info->num_input_sgprs += args.num_sgprs_used;
1027
1028 if (ctx->stage != MESA_SHADER_FRAGMENT)
1029 ctx->shader_info->num_input_vgprs = args.num_vgprs_used;
1030
1031 assign_arguments(ctx->main_function, &args);
1032
1033 user_sgpr_idx = 0;
1034
1035 if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
1036 set_loc_shader(ctx, AC_UD_SCRATCH_RING_OFFSETS,
1037 &user_sgpr_idx, 2);
1038 if (ctx->options->supports_spill) {
1039 ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
1040 LLVMPointerType(ctx->ac.i8, AC_CONST_ADDR_SPACE),
1041 NULL, 0, AC_FUNC_ATTR_READNONE);
1042 ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
1043 ac_array_in_const_addr_space(ctx->ac.v4i32), "");
1044 }
1045 }
1046
1047 /* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in front (including
1048 * the rw_buffers at s0/s1). With user SGPR0 = s8, let's restart the count from 0. */
1049 if (has_previous_stage)
1050 user_sgpr_idx = 0;
1051
1052 set_global_input_locs(ctx, stage, has_previous_stage, previous_stage,
1053 &user_sgpr_info, desc_sets, &user_sgpr_idx);
1054
1055 switch (stage) {
1056 case MESA_SHADER_COMPUTE:
1057 if (ctx->shader_info->info.cs.uses_grid_size) {
1058 set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
1059 &user_sgpr_idx, 3);
1060 }
1061 break;
1062 case MESA_SHADER_VERTEX:
1063 set_vs_specific_input_locs(ctx, stage, has_previous_stage,
1064 previous_stage, &user_sgpr_idx);
1065 if (ctx->abi.view_index)
1066 set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
1067 break;
1068 case MESA_SHADER_TESS_CTRL:
1069 set_vs_specific_input_locs(ctx, stage, has_previous_stage,
1070 previous_stage, &user_sgpr_idx);
1071 if (ctx->abi.view_index)
1072 set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
1073 break;
1074 case MESA_SHADER_TESS_EVAL:
1075 if (ctx->abi.view_index)
1076 set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
1077 break;
1078 case MESA_SHADER_GEOMETRY:
1079 if (has_previous_stage) {
1080 if (previous_stage == MESA_SHADER_VERTEX)
1081 set_vs_specific_input_locs(ctx, stage,
1082 has_previous_stage,
1083 previous_stage,
1084 &user_sgpr_idx);
1085 }
1086 set_loc_shader(ctx, AC_UD_GS_VS_RING_STRIDE_ENTRIES,
1087 &user_sgpr_idx, 1);
1088 if (ctx->abi.view_index)
1089 set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
1090 break;
1091 case MESA_SHADER_FRAGMENT:
1092 if (ctx->shader_info->info.ps.needs_sample_positions) {
1093 set_loc_shader(ctx, AC_UD_PS_SAMPLE_POS_OFFSET,
1094 &user_sgpr_idx, 1);
1095 }
1096 break;
1097 default:
1098 unreachable("Shader stage not implemented");
1099 }
1100
1101 if (stage == MESA_SHADER_TESS_CTRL ||
1102 (stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_ls) ||
1103 /* GFX9 has the ESGS ring buffer in LDS. */
1104 (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
1105 ac_declare_lds_as_pointer(&ctx->ac);
1106 }
1107
1108 ctx->shader_info->num_user_sgprs = user_sgpr_idx;
1109 }
1110
1111
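/* Returns a pointer to one descriptor: the set's base plus the binding
 * offset plus index * stride. Dynamic uniform/storage buffers are the
 * exception; their 16-byte descriptors live in the push-constant buffer,
 * directly after the real push constants.
 */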
1112 static LLVMValueRef
1113 radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
1114 unsigned desc_set, unsigned binding)
1115 {
1116 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1117 LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
1118 struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
1119 struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
1120 unsigned base_offset = layout->binding[binding].offset;
1121 LLVMValueRef offset, stride;
1122
1123 if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
1124 layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
1125 unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
1126 layout->binding[binding].dynamic_offset_offset;
1127 desc_ptr = ctx->abi.push_constants;
1128 base_offset = pipeline_layout->push_constant_size + 16 * idx;
1129 stride = LLVMConstInt(ctx->ac.i32, 16, false);
1130 } else
1131 stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);
1132
1133 offset = LLVMConstInt(ctx->ac.i32, base_offset, false);
1134 index = LLVMBuildMul(ctx->ac.builder, index, stride, "");
1135 offset = LLVMBuildAdd(ctx->ac.builder, offset, index, "");
1136
1137 desc_ptr = ac_build_gep0(&ctx->ac, desc_ptr, offset);
1138 desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
1139 LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
1140
1141 return desc_ptr;
1142 }
1143
1144
1145 /* The offchip buffer layout for TCS->TES is
1146 *
1147 * - attribute 0 of patch 0 vertex 0
1148 * - attribute 0 of patch 0 vertex 1
1149 * - attribute 0 of patch 0 vertex 2
1150 * ...
1151 * - attribute 0 of patch 1 vertex 0
1152 * - attribute 0 of patch 1 vertex 1
1153 * ...
1154 * - attribute 1 of patch 0 vertex 0
1155 * - attribute 1 of patch 0 vertex 1
1156 * ...
1157 * - per patch attribute 0 of patch 0
1158 * - per patch attribute 0 of patch 1
1159 * ...
1160 *
1161 * Note that every attribute has 4 components.
1162 */
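/* Per-patch attributes therefore start after all per-vertex data, i.e.
 * at num_patches * tcs_vertices_per_patch * num_tcs_outputs * 16 bytes,
 * which is exactly what get_non_vertex_index_offset() returns below.
 */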
1163 static LLVMValueRef get_non_vertex_index_offset(struct radv_shader_context *ctx)
1164 {
1165 uint32_t num_patches = ctx->tcs_num_patches;
1166 uint32_t num_tcs_outputs;
1167 if (ctx->stage == MESA_SHADER_TESS_CTRL)
1168 num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
1169 else
1170 num_tcs_outputs = ctx->options->key.tes.tcs_num_outputs;
1171
1172 uint32_t output_vertex_size = num_tcs_outputs * 16;
1173 uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
1174
1175 return LLVMConstInt(ctx->ac.i32, pervertex_output_patch_size * num_patches, false);
1176 }
1177
1178 static LLVMValueRef calc_param_stride(struct radv_shader_context *ctx,
1179 LLVMValueRef vertex_index)
1180 {
1181 LLVMValueRef param_stride;
1182 if (vertex_index)
1183 param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch * ctx->tcs_num_patches, false);
1184 else
1185 param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_num_patches, false);
1186 return param_stride;
1187 }
1188
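/* Byte offset of one vec4 attribute in the offchip buffer:
 * ((rel_patch_id * vertices_per_patch + vertex_index) + param_index *
 * param_stride) * 16 for per-vertex data; per-patch data drops the
 * vertex term and adds the per-patch base from the helper above.
 */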
1189 static LLVMValueRef get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
1190 LLVMValueRef vertex_index,
1191 LLVMValueRef param_index)
1192 {
1193 LLVMValueRef base_addr;
1194 LLVMValueRef param_stride, constant16;
1195 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
1196 LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch, false);
1197 constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
1198 param_stride = calc_param_stride(ctx, vertex_index);
1199 if (vertex_index) {
1200 base_addr = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
1201 vertices_per_patch, "");
1202
1203 base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
1204 vertex_index, "");
1205 } else {
1206 base_addr = rel_patch_id;
1207 }
1208
1209 base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
1210 LLVMBuildMul(ctx->ac.builder, param_index,
1211 param_stride, ""), "");
1212
1213 base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");
1214
1215 if (!vertex_index) {
1216 LLVMValueRef patch_data_offset = get_non_vertex_index_offset(ctx);
1217
1218 base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
1219 patch_data_offset, "");
1220 }
1221 return base_addr;
1222 }
1223
1224 static LLVMValueRef get_tcs_tes_buffer_address_params(struct radv_shader_context *ctx,
1225 unsigned param,
1226 unsigned const_index,
1227 bool is_compact,
1228 LLVMValueRef vertex_index,
1229 LLVMValueRef indir_index)
1230 {
1231 LLVMValueRef param_index;
1232
1233 if (indir_index)
1234 param_index = LLVMBuildAdd(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, param, false),
1235 indir_index, "");
1236 else {
1237 if (const_index && !is_compact)
1238 param += const_index;
1239 param_index = LLVMConstInt(ctx->ac.i32, param, false);
1240 }
1241 return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
1242 }
1243
1244 static LLVMValueRef
1245 get_dw_address(struct radv_shader_context *ctx,
1246 LLVMValueRef dw_addr,
1247 unsigned param,
1248 unsigned const_index,
1249 bool compact_const_index,
1250 LLVMValueRef vertex_index,
1251 LLVMValueRef stride,
1252 LLVMValueRef indir_index)
1254 {
1256 if (vertex_index) {
1257 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1258 LLVMBuildMul(ctx->ac.builder,
1259 vertex_index,
1260 stride, ""), "");
1261 }
1262
1263 if (indir_index)
1264 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1265 LLVMBuildMul(ctx->ac.builder, indir_index,
1266 LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
1267 else if (const_index && !compact_const_index)
1268 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1269 LLVMConstInt(ctx->ac.i32, const_index * 4, false), "");
1270
1271 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1272 LLVMConstInt(ctx->ac.i32, param * 4, false), "");
1273
1274 if (const_index && compact_const_index)
1275 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1276 LLVMConstInt(ctx->ac.i32, const_index, false), "");
1277 return dw_addr;
1278 }
1279
1280 static LLVMValueRef
1281 load_tcs_varyings(struct ac_shader_abi *abi,
1282 LLVMTypeRef type,
1283 LLVMValueRef vertex_index,
1284 LLVMValueRef indir_index,
1285 unsigned const_index,
1286 unsigned location,
1287 unsigned driver_location,
1288 unsigned component,
1289 unsigned num_components,
1290 bool is_patch,
1291 bool is_compact,
1292 bool load_input)
1293 {
1294 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1295 LLVMValueRef dw_addr, stride;
1296 LLVMValueRef value[4], result;
1297 unsigned param = shader_io_get_unique_index(location);
1298
1299 if (load_input) {
1300 uint32_t input_vertex_size = (ctx->tcs_num_inputs * 16) / 4;
1301 stride = LLVMConstInt(ctx->ac.i32, input_vertex_size, false);
1302 dw_addr = get_tcs_in_current_patch_offset(ctx);
1303 } else {
1304 if (!is_patch) {
1305 stride = get_tcs_out_vertex_stride(ctx);
1306 dw_addr = get_tcs_out_current_patch_offset(ctx);
1307 } else {
1308 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1309 stride = NULL;
1310 }
1311 }
1312
1313 dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
1314 indir_index);
1315
1316 for (unsigned i = 0; i < num_components + component; i++) {
1317 value[i] = ac_lds_load(&ctx->ac, dw_addr);
1318 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1319 ctx->ac.i32_1, "");
1320 }
1321 result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
1322 return result;
1323 }
1324
1325 static void
1326 store_tcs_output(struct ac_shader_abi *abi,
1327 const nir_variable *var,
1328 LLVMValueRef vertex_index,
1329 LLVMValueRef param_index,
1330 unsigned const_index,
1331 LLVMValueRef src,
1332 unsigned writemask)
1333 {
1334 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1335 const unsigned location = var->data.location;
1336 const unsigned component = var->data.location_frac;
1337 const bool is_patch = var->data.patch;
1338 const bool is_compact = var->data.compact;
1339 LLVMValueRef dw_addr;
1340 LLVMValueRef stride = NULL;
1341 LLVMValueRef buf_addr = NULL;
1342 unsigned param;
1343 bool store_lds = true;
1344
1345 if (is_patch) {
1346 if (!(ctx->tcs_patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
1347 store_lds = false;
1348 } else {
1349 if (!(ctx->tcs_outputs_read & (1ULL << location)))
1350 store_lds = false;
1351 }
1352
1353 param = shader_io_get_unique_index(location);
1354 if (location == VARYING_SLOT_CLIP_DIST0 &&
1355 is_compact && const_index > 3) {
1356 const_index -= 3;
1357 param++;
1358 }
1359
1360 if (!is_patch) {
1361 stride = get_tcs_out_vertex_stride(ctx);
1362 dw_addr = get_tcs_out_current_patch_offset(ctx);
1363 } else {
1364 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1365 }
1366
1367 dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
1368 param_index);
1369 buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
1370 vertex_index, param_index);
1371
1372 bool is_tess_factor = false;
1373 if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
1374 location == VARYING_SLOT_TESS_LEVEL_OUTER)
1375 is_tess_factor = true;
1376
1377 unsigned base = is_compact ? const_index : 0;
1378 for (unsigned chan = 0; chan < 8; chan++) {
1379 if (!(writemask & (1 << chan)))
1380 continue;
1381 LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
1382
1383 if (store_lds || is_tess_factor) {
1384 LLVMValueRef dw_addr_chan =
1385 LLVMBuildAdd(ctx->ac.builder, dw_addr,
1386 LLVMConstInt(ctx->ac.i32, chan, false), "");
1387 ac_lds_store(&ctx->ac, dw_addr_chan, value);
1388 }
1389
1390 if (!is_tess_factor && writemask != 0xF)
1391 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
1392 buf_addr, ctx->oc_lds,
1393 4 * (base + chan), 1, 0, true, false);
1394 }
1395
1396 if (writemask == 0xF) {
1397 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
1398 buf_addr, ctx->oc_lds,
1399 (base * 4), 1, 0, true, false);
1400 }
1401 }
1402
1403 static LLVMValueRef
1404 load_tes_input(struct ac_shader_abi *abi,
1405 LLVMTypeRef type,
1406 LLVMValueRef vertex_index,
1407 LLVMValueRef param_index,
1408 unsigned const_index,
1409 unsigned location,
1410 unsigned driver_location,
1411 unsigned component,
1412 unsigned num_components,
1413 bool is_patch,
1414 bool is_compact,
1415 bool load_input)
1416 {
1417 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1418 LLVMValueRef buf_addr;
1419 LLVMValueRef result;
1420 unsigned param = shader_io_get_unique_index(location);
1421
1422 if (location == VARYING_SLOT_CLIP_DIST0 && is_compact && const_index > 3) {
1423 const_index -= 3;
1424 param++;
1425 }
1426
1427 buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
1428 is_compact, vertex_index, param_index);
1429
1430 LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
1431 buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");
1432
1433 result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
1434 buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, 1, 0, true, false);
1435 result = ac_trim_vector(&ctx->ac, result, num_components);
1436 return result;
1437 }
1438
1439 static LLVMValueRef
1440 load_gs_input(struct ac_shader_abi *abi,
1441 unsigned location,
1442 unsigned driver_location,
1443 unsigned component,
1444 unsigned num_components,
1445 unsigned vertex_index,
1446 unsigned const_index,
1447 LLVMTypeRef type)
1448 {
1449 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1450 LLVMValueRef vtx_offset;
1451 unsigned param, vtx_offset_param;
1452 LLVMValueRef value[4], result;
1453
1454 vtx_offset_param = vertex_index;
1455 assert(vtx_offset_param < 6);
1456 vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
1457 LLVMConstInt(ctx->ac.i32, 4, false), "");
1458
1459 param = shader_io_get_unique_index(location);
1460
1461 for (unsigned i = component; i < num_components + component; i++) {
1462 if (ctx->ac.chip_class >= GFX9) {
1463 LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
1464 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1465 LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
1466 value[i] = ac_lds_load(&ctx->ac, dw_addr);
1467 } else {
1468 LLVMValueRef soffset =
1469 LLVMConstInt(ctx->ac.i32,
1470 (param * 4 + i + const_index) * 256,
1471 false);
1472
1473 value[i] = ac_build_buffer_load(&ctx->ac,
1474 ctx->esgs_ring, 1,
1475 ctx->ac.i32_0,
1476 vtx_offset, soffset,
1477 0, 1, 0, true, false);
1478
1479 value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i],
1480 type, "");
1481 }
1482 }
1483 result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
1484 result = ac_to_integer(&ctx->ac, result);
1485 return result;
1486 }
1487
1488
1489 static void radv_emit_kill(struct ac_shader_abi *abi, LLVMValueRef visible)
1490 {
1491 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1492 ac_build_kill_if_false(&ctx->ac, visible);
1493 }
1494
1495 static LLVMValueRef lookup_interp_param(struct ac_shader_abi *abi,
1496 enum glsl_interp_mode interp, unsigned location)
1497 {
1498 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1499
1500 switch (interp) {
1501 case INTERP_MODE_FLAT:
1502 default:
1503 return NULL;
1504 case INTERP_MODE_SMOOTH:
1505 case INTERP_MODE_NONE:
1506 if (location == INTERP_CENTER)
1507 return ctx->persp_center;
1508 else if (location == INTERP_CENTROID)
1509 return ctx->persp_centroid;
1510 else if (location == INTERP_SAMPLE)
1511 return ctx->persp_sample;
1512 break;
1513 case INTERP_MODE_NOPERSPECTIVE:
1514 if (location == INTERP_CENTER)
1515 return ctx->linear_center;
1516 else if (location == INTERP_CENTROID)
1517 return ctx->linear_centroid;
1518 else if (location == INTERP_SAMPLE)
1519 return ctx->linear_sample;
1520 break;
1521 }
1522 return NULL;
1523 }
1524
1525 static LLVMValueRef load_sample_position(struct ac_shader_abi *abi,
1526 LLVMValueRef sample_id)
1527 {
1528 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1529
1530 LLVMValueRef result;
1531 LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false));
1532
1533 ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
1534 ac_array_in_const_addr_space(ctx->ac.v2f32), "");
1535
1536 sample_id = LLVMBuildAdd(ctx->ac.builder, sample_id, ctx->sample_pos_offset, "");
1537 result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);
1538
1539 return result;
1540 }
1541
1542
1543 static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
1544 {
1545 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1546 uint8_t log2_ps_iter_samples = ctx->shader_info->info.ps.force_persample ?
1547 ctx->options->key.fs.log2_num_samples :
1548 ctx->options->key.fs.log2_ps_iter_samples;
1549
1550 /* The bit pattern matches that used by fixed function fragment
1551 * processing. */
1552 static const uint16_t ps_iter_masks[] = {
1553 0xffff, /* not used */
1554 0x5555,
1555 0x1111,
1556 0x0101,
1557 0x0001,
1558 };
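/* Example: log2_ps_iter_samples = 1 selects 0x5555, i.e. every other
 * sample. Shifting the mask left by this invocation's sample_id and
 * ANDing with the rasterizer coverage yields the samples owned by this
 * iteration.
 */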
1559 assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));
1560
1561 uint32_t ps_iter_mask = ps_iter_masks[log2_ps_iter_samples];
1562
1563 LLVMValueRef result, sample_id;
1564 sample_id = ac_unpack_param(&ctx->ac, abi->ancillary, 8, 4);
1565 sample_id = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, ps_iter_mask, false), sample_id, "");
1566 result = LLVMBuildAnd(ctx->ac.builder, sample_id, abi->sample_coverage, "");
1567 return result;
1568 }
1569
1570
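/* GSVS ring layout is component-major: component j of output slot "slot"
 * for vertex v lands at dword (slot * 4 + j) * gs_max_out_vertices + v.
 * The GS copy shader (not part of this excerpt) reads the ring back with
 * the same addressing.
 */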
1571 static void
1572 visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
1573 {
1574 LLVMValueRef gs_next_vertex;
1575 LLVMValueRef can_emit;
1576 int idx;
1577 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1578
1579 assert(stream == 0);
1580
1581 /* Write vertex attribute values to GSVS ring */
1582 gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
1583 ctx->gs_next_vertex,
1584 "");
1585
1586 /* If this thread has already emitted the declared maximum number of
1587 * vertices, kill it: excessive vertex emissions are not supposed to
1588 * have any effect, and GS threads have no externally observable
1589 * effects other than emitting vertices.
1590 */
1591 can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
1592 LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
1593 ac_build_kill_if_false(&ctx->ac, can_emit);
1594
1595 /* Loop over the declared outputs. */
1596 idx = 0;
1597 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
1598 LLVMValueRef *out_ptr = &addrs[i * 4];
1599 int length = 4;
1600 int slot = idx;
1601 int slot_inc = 1;
1602
1603 if (!(ctx->output_mask & (1ull << i)))
1604 continue;
1605
1606 if (i == VARYING_SLOT_CLIP_DIST0) {
1607 /* pack clip and cull into a single set of slots */
1608 length = ctx->num_output_clips + ctx->num_output_culls;
1609 if (length > 4)
1610 slot_inc = 2;
1611 }
1612 for (unsigned j = 0; j < length; j++) {
1613 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
1614 out_ptr[j], "");
1615 LLVMValueRef voffset = LLVMConstInt(ctx->ac.i32, (slot * 4 + j) * ctx->gs_max_out_vertices, false);
1616 voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
1617 voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");
1618
1619 out_val = LLVMBuildBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");
1620
1621 ac_build_buffer_store_dword(&ctx->ac, ctx->gsvs_ring,
1622 out_val, 1,
1623 voffset, ctx->gs2vs_offset, 0,
1624 1, 1, true, true);
1625 }
1626 idx += slot_inc;
1627 }
1628
1629 gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
1630 ctx->ac.i32_1, "");
1631 LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex);
1632
1633 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (0 << 8), ctx->gs_wave_id);
1634 }
1635
1636 static void
1637 visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
1638 {
1639 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1640 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
1641 }
1642
1643 static LLVMValueRef
1644 load_tess_coord(struct ac_shader_abi *abi)
1645 {
1646 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1647
1648 LLVMValueRef coord[4] = {
1649 ctx->tes_u,
1650 ctx->tes_v,
1651 ctx->ac.f32_0,
1652 ctx->ac.f32_0,
1653 };
1654
1655 if (ctx->tes_primitive_mode == GL_TRIANGLES)
1656 coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
1657 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");
1658
1659 return ac_build_gather_values(&ctx->ac, coord, 3);
1660 }
1661
1662 static LLVMValueRef
1663 load_patch_vertices_in(struct ac_shader_abi *abi)
1664 {
1665 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1666 return LLVMConstInt(ctx->ac.i32, ctx->options->key.tcs.input_vertices, false);
1667 }
1668
1669
1670 static LLVMValueRef radv_load_base_vertex(struct ac_shader_abi *abi)
1671 {
1672 return abi->base_vertex;
1673 }
1674
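/*
 * UBO/SSBO descriptor loads: the pointer is tagged with the
 * "amdgpu.uniform" metadata kind and the load with "invariant.load",
 * so LLVM can keep the descriptor in SGPRs and freely hoist the load.
 */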
1675 static LLVMValueRef radv_load_ssbo(struct ac_shader_abi *abi,
1676 LLVMValueRef buffer_ptr, bool write)
1677 {
1678 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1679 LLVMValueRef result;
1680
1681 LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
1682
1683 result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
1684 LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);
1685
1686 return result;
1687 }
1688
1689 static LLVMValueRef radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer_ptr)
1690 {
1691 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1692 LLVMValueRef result;
1693
1694 LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
1695
1696 result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
1697 LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);
1698
1699 return result;
1700 }
1701
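/*
 * Descriptors are laid out linearly within a binding: image descriptors
 * are 8 dwords, sampler and buffer descriptors 4 dwords. For combined
 * image+sampler bindings the FMASK follows the image at +32 bytes and
 * the sampler at +64 bytes, which is what the offsets below encode.
 */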
1702 static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
1703 unsigned descriptor_set,
1704 unsigned base_index,
1705 unsigned constant_index,
1706 LLVMValueRef index,
1707 enum ac_descriptor_type desc_type,
1708 bool image, bool write)
1709 {
1710 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1711 LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
1712 struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
1713 struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
1714 unsigned offset = binding->offset;
1715 unsigned stride = binding->size;
1716 unsigned type_size;
1717 LLVMBuilderRef builder = ctx->ac.builder;
1718 LLVMTypeRef type;
1719
1720 assert(base_index < layout->binding_count);
1721
1722 switch (desc_type) {
1723 case AC_DESC_IMAGE:
1724 type = ctx->ac.v8i32;
1725 type_size = 32;
1726 break;
1727 case AC_DESC_FMASK:
1728 type = ctx->ac.v8i32;
1729 offset += 32;
1730 type_size = 32;
1731 break;
1732 case AC_DESC_SAMPLER:
1733 type = ctx->ac.v4i32;
1734 if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
1735 offset += 64;
1736
1737 type_size = 16;
1738 break;
1739 case AC_DESC_BUFFER:
1740 type = ctx->ac.v4i32;
1741 type_size = 16;
1742 break;
1743 default:
1744 unreachable("invalid desc_type\n");
1745 }
1746
1747 offset += constant_index * stride;
1748
1749 if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
1750 (!index || binding->immutable_samplers_equal)) {
1751 if (binding->immutable_samplers_equal)
1752 constant_index = 0;
1753
1754 const uint32_t *samplers = radv_immutable_samplers(layout, binding);
1755
1756 LLVMValueRef constants[] = {
1757 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
1758 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
1759 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
1760 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
1761 };
1762 return ac_build_gather_values(&ctx->ac, constants, 4);
1763 }
1764
1765 assert(stride % type_size == 0);
1766
1767 if (!index)
1768 index = ctx->ac.i32_0;
1769
1770 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");
1771
1772 list = ac_build_gep0(&ctx->ac, list, LLVMConstInt(ctx->ac.i32, offset, 0));
1773 list = LLVMBuildPointerCast(builder, list, ac_array_in_const_addr_space(type), "");
1774
1775 return ac_build_load_to_sgpr(&ctx->ac, list, index);
1776 }
1777
1778
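/*
 * Vertex inputs: each attribute is fetched with a typed buffer load,
 * indexed by either vertex_id + base_vertex or, for instanced
 * attributes, instance_id + start_instance. Using the instance id also
 * bumps vgpr_comp_cnt so the hardware actually loads that input VGPR.
 */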
1779 static void
1780 handle_vs_input_decl(struct radv_shader_context *ctx,
1781 struct nir_variable *variable)
1782 {
1783 LLVMValueRef t_list_ptr = ctx->vertex_buffers;
1784 LLVMValueRef t_offset;
1785 LLVMValueRef t_list;
1786 LLVMValueRef input;
1787 LLVMValueRef buffer_index;
1788 int index = variable->data.location - VERT_ATTRIB_GENERIC0;
1789 int idx = variable->data.location;
1790 unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
1791 uint8_t input_usage_mask =
1792 ctx->shader_info->info.vs.input_usage_mask[variable->data.location];
1793 unsigned num_channels = util_last_bit(input_usage_mask);
1794
1795 variable->data.driver_location = idx * 4;
1796
1797 for (unsigned i = 0; i < attrib_count; ++i, ++idx) {
1798 if (ctx->options->key.vs.instance_rate_inputs & (1u << (index + i))) {
1799 buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.instance_id,
1800 ctx->abi.start_instance, "");
1801 if (ctx->options->key.vs.as_ls) {
1802 ctx->shader_info->vs.vgpr_comp_cnt =
1803 MAX2(2, ctx->shader_info->vs.vgpr_comp_cnt);
1804 } else {
1805 ctx->shader_info->vs.vgpr_comp_cnt =
1806 MAX2(1, ctx->shader_info->vs.vgpr_comp_cnt);
1807 }
1808 } else
1809 buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
1810 ctx->abi.base_vertex, "");
1811 t_offset = LLVMConstInt(ctx->ac.i32, index + i, false);
1812
1813 t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);
1814
1815 input = ac_build_buffer_load_format(&ctx->ac, t_list,
1816 buffer_index,
1817 ctx->ac.i32_0,
1818 num_channels, false, true);
1819
1820 input = ac_build_expand_to_vec4(&ctx->ac, input, num_channels);
1821
1822 for (unsigned chan = 0; chan < 4; chan++) {
1823 LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
1824 ctx->inputs[ac_llvm_reg_index_soa(idx, chan)] =
1825 ac_to_integer(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder,
1826 input, llvm_chan, ""));
1827 }
1828 }
1829 }
1830
1831 static void interp_fs_input(struct radv_shader_context *ctx,
1832 unsigned attr,
1833 LLVMValueRef interp_param,
1834 LLVMValueRef prim_mask,
1835 LLVMValueRef result[4])
1836 {
1837 LLVMValueRef attr_number;
1838 unsigned chan;
1839 LLVMValueRef i, j;
1840 bool interp = interp_param != NULL;
1841
1842 attr_number = LLVMConstInt(ctx->ac.i32, attr, false);
1843
1844 /* fs.constant returns the param from the middle vertex, so it's not
1845 * really useful for flat shading. It's meant to be used for custom
1846 * interpolation (but the intrinsic can't fetch from the other two
1847 * vertices).
1848 *
1849 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
1850 * to do the right thing. The only reason we use fs.constant is that
1851 * fs.interp cannot be used on integers, because they can be equal
1852 * to NaN.
1853 */
1854 if (interp) {
1855 interp_param = LLVMBuildBitCast(ctx->ac.builder, interp_param,
1856 ctx->ac.v2f32, "");
1857
1858 i = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
1859 ctx->ac.i32_0, "");
1860 j = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
1861 ctx->ac.i32_1, "");
1862 }
1863
1864 for (chan = 0; chan < 4; chan++) {
1865 LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
1866
1867 if (interp) {
1868 result[chan] = ac_build_fs_interp(&ctx->ac,
1869 llvm_chan,
1870 attr_number,
1871 prim_mask, i, j);
1872 } else {
1873 result[chan] = ac_build_fs_interp_mov(&ctx->ac,
1874 LLVMConstInt(ctx->ac.i32, 2, false), /* P0 */
1875 llvm_chan,
1876 attr_number,
1877 prim_mask);
1878 }
1879 }
1880 }
1881
1882 static void
1883 handle_fs_input_decl(struct radv_shader_context *ctx,
1884 struct nir_variable *variable)
1885 {
1886 int idx = variable->data.location;
1887 unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
1888 LLVMValueRef interp;
1889
1890 variable->data.driver_location = idx * 4;
1891 ctx->input_mask |= ((1ull << attrib_count) - 1) << variable->data.location;
1892
1893 if (glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_FLOAT) {
1894 unsigned interp_type;
1895 if (variable->data.sample)
1896 interp_type = INTERP_SAMPLE;
1897 else if (variable->data.centroid)
1898 interp_type = INTERP_CENTROID;
1899 else
1900 interp_type = INTERP_CENTER;
1901
1902 interp = lookup_interp_param(&ctx->abi, variable->data.interpolation, interp_type);
1903 } else
1904 interp = NULL;
1905
1906 for (unsigned i = 0; i < attrib_count; ++i)
1907 ctx->inputs[ac_llvm_reg_index_soa(idx + i, 0)] = interp;
1908
1909 }
1910
1911 static void
1912 handle_vs_inputs(struct radv_shader_context *ctx,
1913 struct nir_shader *nir) {
1914 nir_foreach_variable(variable, &nir->inputs)
1915 handle_vs_input_decl(ctx, variable);
1916 }
1917
1918 static void
1919 prepare_interp_optimize(struct radv_shader_context *ctx,
1920 struct nir_shader *nir)
1921 {
1922 if (!ctx->options->key.fs.multisample)
1923 return;
1924
1925 bool uses_center = false;
1926 bool uses_centroid = false;
1927 nir_foreach_variable(variable, &nir->inputs) {
1928 if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
1929 variable->data.sample)
1930 continue;
1931
1932 if (variable->data.centroid)
1933 uses_centroid = true;
1934 else
1935 uses_center = true;
1936 }
1937
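/* BC_OPTIMIZE: with this optimization enabled, the hardware sets the
 * MSB of PRIM_MASK when the centroid equals the center (e.g. fully
 * covered pixels), which lets us substitute the cheaper center
 * barycentrics for the centroid ones below.
 */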
1938 if (uses_center && uses_centroid) {
1939 LLVMValueRef sel = LLVMBuildICmp(ctx->ac.builder, LLVMIntSLT, ctx->abi.prim_mask, ctx->ac.i32_0, "");
1940 ctx->persp_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->persp_center, ctx->persp_centroid, "");
1941 ctx->linear_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->linear_center, ctx->linear_centroid, "");
1942 }
1943 }
1944
1945 static void
1946 handle_fs_inputs(struct radv_shader_context *ctx,
1947 struct nir_shader *nir)
1948 {
1949 prepare_interp_optimize(ctx, nir);
1950
1951 nir_foreach_variable(variable, &nir->inputs)
1952 handle_fs_input_decl(ctx, variable);
1953
1954 unsigned index = 0;
1955
1956 if (ctx->shader_info->info.ps.uses_input_attachments ||
1957 ctx->shader_info->info.needs_multiview_view_index)
1958 ctx->input_mask |= 1ull << VARYING_SLOT_LAYER;
1959
1960 for (unsigned i = 0; i < RADEON_LLVM_MAX_INPUTS; ++i) {
1961 LLVMValueRef interp_param;
1962 LLVMValueRef *inputs = ctx->inputs + ac_llvm_reg_index_soa(i, 0);
1963
1964 if (!(ctx->input_mask & (1ull << i)))
1965 continue;
1966
1967 if (i >= VARYING_SLOT_VAR0 || i == VARYING_SLOT_PNTC ||
1968 i == VARYING_SLOT_PRIMITIVE_ID || i == VARYING_SLOT_LAYER) {
1969 interp_param = *inputs;
1970 interp_fs_input(ctx, index, interp_param, ctx->abi.prim_mask,
1971 inputs);
1972
1973 if (!interp_param)
1974 ctx->shader_info->fs.flat_shaded_mask |= 1u << index;
1975 ++index;
1976 } else if (i == VARYING_SLOT_POS) {
1977 for (int j = 0; j < 3; ++j)
1978 inputs[j] = ctx->abi.frag_pos[j];
1979
1980 inputs[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
1981 ctx->abi.frag_pos[3]);
1982 }
1983 }
1984 ctx->shader_info->fs.num_interp = index;
1985 ctx->shader_info->fs.input_mask = ctx->input_mask >> VARYING_SLOT_VAR0;
1986
1987 if (ctx->shader_info->info.needs_multiview_view_index)
1988 ctx->abi.view_index = ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
1989 }
1990
1991 static void
1992 scan_shader_output_decl(struct radv_shader_context *ctx,
1993 struct nir_variable *variable,
1994 struct nir_shader *shader,
1995 gl_shader_stage stage)
1996 {
1997 int idx = variable->data.location + variable->data.index;
1998 unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
1999 uint64_t mask_attribs;
2000
2001 variable->data.driver_location = idx * 4;
2002
2003 /* tess ctrl has its own load/store paths for outputs */
2004 if (stage == MESA_SHADER_TESS_CTRL)
2005 return;
2006
2007 mask_attribs = ((1ull << attrib_count) - 1) << idx;
2008 if (stage == MESA_SHADER_VERTEX ||
2009 stage == MESA_SHADER_TESS_EVAL ||
2010 stage == MESA_SHADER_GEOMETRY) {
2011 if (idx == VARYING_SLOT_CLIP_DIST0) {
2012 int length = shader->info.clip_distance_array_size +
2013 shader->info.cull_distance_array_size;
2014 if (stage == MESA_SHADER_VERTEX) {
2015 ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
2016 ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
2017 }
2018 if (stage == MESA_SHADER_TESS_EVAL) {
2019 ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
2020 ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
2021 }
2022
2023 if (length > 4)
2024 attrib_count = 2;
2025 else
2026 attrib_count = 1;
2027 mask_attribs = 1ull << idx;
2028 }
2029 }
2030
2031 ctx->output_mask |= mask_attribs;
2032 }
2033
2034
2035 /* Initialize arguments for the shader export intrinsic */
2036 static void
2037 si_llvm_init_export_args(struct radv_shader_context *ctx,
2038 LLVMValueRef *values,
2039 unsigned enabled_channels,
2040 unsigned target,
2041 struct ac_export_args *args)
2042 {
2043 /* Specify the channels that are enabled. */
2044 args->enabled_channels = enabled_channels;
2045
2046 /* Specify whether the EXEC mask represents the valid mask */
2047 args->valid_mask = 0;
2048
2049 /* Specify whether this is the last export */
2050 args->done = 0;
2051
2052 /* Specify the target we are exporting */
2053 args->target = target;
2054
2055 args->compr = false;
2056 args->out[0] = LLVMGetUndef(ctx->ac.f32);
2057 args->out[1] = LLVMGetUndef(ctx->ac.f32);
2058 args->out[2] = LLVMGetUndef(ctx->ac.f32);
2059 args->out[3] = LLVMGetUndef(ctx->ac.f32);
2060
2061 if (ctx->stage == MESA_SHADER_FRAGMENT && target >= V_008DFC_SQ_EXP_MRT) {
2062 unsigned index = target - V_008DFC_SQ_EXP_MRT;
2063 unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
2064 bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
2065 bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;
2066 unsigned chan;
2067
2068 LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
2069 LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
2070 unsigned bits, bool hi) = NULL;
2071
2072 switch(col_format) {
2073 case V_028714_SPI_SHADER_ZERO:
2074 args->enabled_channels = 0; /* writemask */
2075 args->target = V_008DFC_SQ_EXP_NULL;
2076 break;
2077
2078 case V_028714_SPI_SHADER_32_R:
2079 args->enabled_channels = 1;
2080 args->out[0] = values[0];
2081 break;
2082
2083 case V_028714_SPI_SHADER_32_GR:
2084 args->enabled_channels = 0x3;
2085 args->out[0] = values[0];
2086 args->out[1] = values[1];
2087 break;
2088
2089 case V_028714_SPI_SHADER_32_AR:
2090 args->enabled_channels = 0x9;
2091 args->out[0] = values[0];
2092 args->out[3] = values[3];
2093 break;
2094
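/* For the packed 16-bit formats below the export is compressed: two
 * 16-bit values per dword, with writemask 0x5 enabling the XY and ZW
 * dword pairs.
 */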
2095 case V_028714_SPI_SHADER_FP16_ABGR:
2096 args->enabled_channels = 0x5;
2097 packf = ac_build_cvt_pkrtz_f16;
2098 break;
2099
2100 case V_028714_SPI_SHADER_UNORM16_ABGR:
2101 args->enabled_channels = 0x5;
2102 packf = ac_build_cvt_pknorm_u16;
2103 break;
2104
2105 case V_028714_SPI_SHADER_SNORM16_ABGR:
2106 args->enabled_channels = 0x5;
2107 packf = ac_build_cvt_pknorm_i16;
2108 break;
2109
2110 case V_028714_SPI_SHADER_UINT16_ABGR:
2111 args->enabled_channels = 0x5;
2112 packi = ac_build_cvt_pk_u16;
2113 break;
2114
2115 case V_028714_SPI_SHADER_SINT16_ABGR:
2116 args->enabled_channels = 0x5;
2117 packi = ac_build_cvt_pk_i16;
2118 break;
2119
2120 default:
2121 case V_028714_SPI_SHADER_32_ABGR:
2122 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
2123 break;
2124 }
2125
2126 /* Pack f16 or norm_i16/u16. */
2127 if (packf) {
2128 for (chan = 0; chan < 2; chan++) {
2129 LLVMValueRef pack_args[2] = {
2130 values[2 * chan],
2131 values[2 * chan + 1]
2132 };
2133 LLVMValueRef packed;
2134
2135 packed = packf(&ctx->ac, pack_args);
2136 args->out[chan] = ac_to_float(&ctx->ac, packed);
2137 }
2138 args->compr = 1; /* COMPR flag */
2139 }
2140
2141 /* Pack i16/u16. */
2142 if (packi) {
2143 for (chan = 0; chan < 2; chan++) {
2144 LLVMValueRef pack_args[2] = {
2145 ac_to_integer(&ctx->ac, values[2 * chan]),
2146 ac_to_integer(&ctx->ac, values[2 * chan + 1])
2147 };
2148 LLVMValueRef packed;
2149
2150 packed = packi(&ctx->ac, pack_args,
2151 is_int8 ? 8 : is_int10 ? 10 : 16,
2152 chan == 1);
2153 args->out[chan] = ac_to_float(&ctx->ac, packed);
2154 }
2155 args->compr = 1; /* COMPR flag */
2156 }
2157 return;
2158 }
2159
2160 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
2161
2162 for (unsigned i = 0; i < 4; ++i) {
2163 if (!(args->enabled_channels & (1 << i)))
2164 continue;
2165
2166 args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
2167 }
2168 }
2169
2170 static void
2171 radv_export_param(struct radv_shader_context *ctx, unsigned index,
2172 LLVMValueRef *values, unsigned enabled_channels)
2173 {
2174 struct ac_export_args args;
2175
2176 si_llvm_init_export_args(ctx, values, enabled_channels,
2177 V_008DFC_SQ_EXP_PARAM + index, &args);
2178 ac_build_export(&ctx->ac, &args);
2179 }
2180
2181 static LLVMValueRef
2182 radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
2183 {
2184 LLVMValueRef output =
2185 ctx->abi.outputs[ac_llvm_reg_index_soa(index, chan)];
2186
2187 return LLVMBuildLoad(ctx->ac.builder, output, "");
2188 }
2189
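/*
 * Export the VS/TES outputs: the position and "misc" values (point
 * size, layer, viewport index) go to the POS exports, clip/cull
 * distances to POS+2/POS+3, and everything read by the fragment shader
 * to the PARAM exports.
 */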
2190 static void
2191 handle_vs_outputs_post(struct radv_shader_context *ctx,
2192 bool export_prim_id,
2193 struct radv_vs_output_info *outinfo)
2194 {
2195 uint32_t param_count = 0;
2196 unsigned target;
2197 unsigned pos_idx, num_pos_exports = 0;
2198 struct ac_export_args args, pos_args[4] = {};
2199 LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_index_value = NULL;
2200 int i;
2201
2202 if (ctx->options->key.has_multiview_view_index) {
2203 LLVMValueRef* tmp_out = &ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
2204 if(!*tmp_out) {
2205 for(unsigned i = 0; i < 4; ++i)
2206 ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
2207 ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
2208 }
2209
2210 LLVMBuildStore(ctx->ac.builder, ac_to_float(&ctx->ac, ctx->abi.view_index), *tmp_out);
2211 ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
2212 }
2213
2214 memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
2215 sizeof(outinfo->vs_output_param_offset));
2216
2217 if (ctx->output_mask & (1ull << VARYING_SLOT_CLIP_DIST0)) {
2218 LLVMValueRef slots[8];
2219 unsigned j;
2220
2221 if (outinfo->cull_dist_mask)
2222 outinfo->cull_dist_mask <<= ctx->num_output_clips;
2223
2224 i = VARYING_SLOT_CLIP_DIST0;
2225 for (j = 0; j < ctx->num_output_clips + ctx->num_output_culls; j++)
2226 slots[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
2227
2228 for (i = ctx->num_output_clips + ctx->num_output_culls; i < 8; i++)
2229 slots[i] = LLVMGetUndef(ctx->ac.f32);
2230
2231 if (ctx->num_output_clips + ctx->num_output_culls > 4) {
2232 target = V_008DFC_SQ_EXP_POS + 3;
2233 si_llvm_init_export_args(ctx, &slots[4], 0xf, target, &args);
2234 memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
2235 &args, sizeof(args));
2236 }
2237
2238 target = V_008DFC_SQ_EXP_POS + 2;
2239 si_llvm_init_export_args(ctx, &slots[0], 0xf, target, &args);
2240 memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
2241 &args, sizeof(args));
2242
2243 }
2244
2245 LLVMValueRef pos_values[4] = {ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_1};
2246 if (ctx->output_mask & (1ull << VARYING_SLOT_POS)) {
2247 for (unsigned j = 0; j < 4; j++)
2248 pos_values[j] = radv_load_output(ctx, VARYING_SLOT_POS, j);
2249 }
2250 si_llvm_init_export_args(ctx, pos_values, 0xf, V_008DFC_SQ_EXP_POS, &pos_args[0]);
2251
2252 if (ctx->output_mask & (1ull << VARYING_SLOT_PSIZ)) {
2253 outinfo->writes_pointsize = true;
2254 psize_value = radv_load_output(ctx, VARYING_SLOT_PSIZ, 0);
2255 }
2256
2257 if (ctx->output_mask & (1ull << VARYING_SLOT_LAYER)) {
2258 outinfo->writes_layer = true;
2259 layer_value = radv_load_output(ctx, VARYING_SLOT_LAYER, 0);
2260 }
2261
2262 if (ctx->output_mask & (1ull << VARYING_SLOT_VIEWPORT)) {
2263 outinfo->writes_viewport_index = true;
2264 viewport_index_value = radv_load_output(ctx, VARYING_SLOT_VIEWPORT, 0);
2265 }
2266
2267 if (outinfo->writes_pointsize ||
2268 outinfo->writes_layer ||
2269 outinfo->writes_viewport_index) {
2270 pos_args[1].enabled_channels = ((outinfo->writes_pointsize ? 1 : 0) |
2271 (outinfo->writes_layer ? 4 : 0));
2272 pos_args[1].valid_mask = 0;
2273 pos_args[1].done = 0;
2274 pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
2275 pos_args[1].compr = 0;
2276 pos_args[1].out[0] = ctx->ac.f32_0; /* X */
2277 pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
2278 pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
2279 pos_args[1].out[3] = ctx->ac.f32_0; /* W */
2280
2281 if (outinfo->writes_pointsize)
2282 pos_args[1].out[0] = psize_value;
2283 if (outinfo->writes_layer)
2284 pos_args[1].out[2] = layer_value;
2285 if (outinfo->writes_viewport_index) {
2286 if (ctx->options->chip_class >= GFX9) {
2287 /* GFX9 has the layer in out.z[10:0] and the viewport
2288 * index in out.z[19:16].
2289 */
2290 LLVMValueRef v = viewport_index_value;
2291 v = ac_to_integer(&ctx->ac, v);
2292 v = LLVMBuildShl(ctx->ac.builder, v,
2293 LLVMConstInt(ctx->ac.i32, 16, false),
2294 "");
2295 v = LLVMBuildOr(ctx->ac.builder, v,
2296 ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
2297
2298 pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
2299 pos_args[1].enabled_channels |= 1 << 2;
2300 } else {
2301 pos_args[1].out[3] = viewport_index_value;
2302 pos_args[1].enabled_channels |= 1 << 3;
2303 }
2304 }
2305 }
2306 for (i = 0; i < 4; i++) {
2307 if (pos_args[i].out[0])
2308 num_pos_exports++;
2309 }
2310
2311 pos_idx = 0;
2312 for (i = 0; i < 4; i++) {
2313 if (!pos_args[i].out[0])
2314 continue;
2315
2316 /* Specify the target we are exporting */
2317 pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
2318 if (pos_idx == num_pos_exports)
2319 pos_args[i].done = 1;
2320 ac_build_export(&ctx->ac, &pos_args[i]);
2321 }
2322
2323 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2324 LLVMValueRef values[4];
2325 if (!(ctx->output_mask & (1ull << i)))
2326 continue;
2327
2328 if (i != VARYING_SLOT_LAYER &&
2329 i != VARYING_SLOT_PRIMITIVE_ID &&
2330 i < VARYING_SLOT_VAR0)
2331 continue;
2332
2333 for (unsigned j = 0; j < 4; j++)
2334 values[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
2335
2336 unsigned output_usage_mask;
2337
2338 if (ctx->stage == MESA_SHADER_VERTEX &&
2339 !ctx->is_gs_copy_shader) {
2340 output_usage_mask =
2341 ctx->shader_info->info.vs.output_usage_mask[i];
2342 } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
2343 output_usage_mask =
2344 ctx->shader_info->info.tes.output_usage_mask[i];
2345 } else {
2346 /* Enable all channels for the GS copy shader because
2347 * we don't know the output usage mask currently.
2348 */
2349 output_usage_mask = 0xf;
2350 }
2351
2352 radv_export_param(ctx, param_count, values, output_usage_mask);
2353
2354 outinfo->vs_output_param_offset[i] = param_count++;
2355 }
2356
2357 if (export_prim_id) {
2358 LLVMValueRef values[4];
2359
2360 values[0] = ctx->vs_prim_id;
2361 ctx->shader_info->vs.vgpr_comp_cnt = MAX2(2,
2362 ctx->shader_info->vs.vgpr_comp_cnt);
2363 for (unsigned j = 1; j < 4; j++)
2364 values[j] = ctx->ac.f32_0;
2365
2366 radv_export_param(ctx, param_count, values, 0xf);
2367
2368 outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
2369 outinfo->export_prim_id = true;
2370 }
2371
2372 outinfo->pos_exports = num_pos_exports;
2373 outinfo->param_exports = param_count;
2374 }
2375
2376 static void
2377 handle_es_outputs_post(struct radv_shader_context *ctx,
2378 struct radv_es_output_info *outinfo)
2379 {
2380 int j;
2381 uint64_t max_output_written = 0;
2382 LLVMValueRef lds_base = NULL;
2383
2384 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2385 int param_index;
2386 int length = 4;
2387
2388 if (!(ctx->output_mask & (1ull << i)))
2389 continue;
2390
2391 if (i == VARYING_SLOT_CLIP_DIST0)
2392 length = ctx->num_output_clips + ctx->num_output_culls;
2393
2394 param_index = shader_io_get_unique_index(i);
2395
2396 max_output_written = MAX2(param_index + (length > 4), max_output_written);
2397 }
2398
2399 outinfo->esgs_itemsize = (max_output_written + 1) * 16;
2400
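/* On GFX9 the ES and GS stages are merged into one shader, so ES
 * outputs are passed through LDS instead of the ESGS ring: compute
 * this thread's LDS base address from its lane and wave index.
 */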
2401 if (ctx->ac.chip_class >= GFX9) {
2402 unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
2403 LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
2404 LLVMValueRef wave_idx = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
2405 LLVMConstInt(ctx->ac.i32, 24, false),
2406 LLVMConstInt(ctx->ac.i32, 4, false), false);
2407 vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
2408 LLVMBuildMul(ctx->ac.builder, wave_idx,
2409 LLVMConstInt(ctx->ac.i32, 64, false), ""), "");
2410 lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
2411 LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
2412 }
2413
2414 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2415 LLVMValueRef dw_addr = NULL;
2416 LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
2417 int param_index;
2418 int length = 4;
2419
2420 if (!(ctx->output_mask & (1ull << i)))
2421 continue;
2422
2423 if (i == VARYING_SLOT_CLIP_DIST0)
2424 length = ctx->num_output_clips + ctx->num_output_culls;
2425
2426 param_index = shader_io_get_unique_index(i);
2427
2428 if (lds_base) {
2429 dw_addr = LLVMBuildAdd(ctx->ac.builder, lds_base,
2430 LLVMConstInt(ctx->ac.i32, param_index * 4, false),
2431 "");
2432 }
2433 for (j = 0; j < length; j++) {
2434 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
2435 out_val = LLVMBuildBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");
2436
2437 if (ctx->ac.chip_class >= GFX9) {
2438 /* Reuse the value loaded above; no need to load it again. */
2439 ac_lds_store(&ctx->ac, dw_addr, out_val);
2440 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
2441 } else {
2442 ac_build_buffer_store_dword(&ctx->ac,
2443 ctx->esgs_ring,
2444 out_val, 1,
2445 NULL, ctx->es2gs_offset,
2446 (4 * param_index + j) * 4,
2447 1, 1, true, true);
2448 }
2449 }
2450 }
2451 }
2452
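/*
 * LS outputs are written to LDS, addressed by the vertex's relative id
 * times the per-vertex dword stride, where the following TCS reads them.
 */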
2453 static void
2454 handle_ls_outputs_post(struct radv_shader_context *ctx)
2455 {
2456 LLVMValueRef vertex_id = ctx->rel_auto_id;
2457 uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->info.vs.ls_outputs_written);
2458 LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
2459 LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
2460 vertex_dw_stride, "");
2461
2462 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2463 LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
2464 int length = 4;
2465
2466 if (!(ctx->output_mask & (1ull << i)))
2467 continue;
2468
2469 if (i == VARYING_SLOT_CLIP_DIST0)
2470 length = ctx->num_output_clips + ctx->num_output_culls;
2471 int param = shader_io_get_unique_index(i);
2472 LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
2473 LLVMConstInt(ctx->ac.i32, param * 4, false),
2474 "");
2475 for (unsigned j = 0; j < length; j++) {
2476 ac_lds_store(&ctx->ac, dw_addr,
2477 LLVMBuildLoad(ctx->ac.builder, out_ptr[j], ""));
2478 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
2479 }
2480 }
2481 }
2482
2483 struct ac_build_if_state
2484 {
2485 struct radv_shader_context *ctx;
2486 LLVMValueRef condition;
2487 LLVMBasicBlockRef entry_block;
2488 LLVMBasicBlockRef true_block;
2489 LLVMBasicBlockRef false_block;
2490 LLVMBasicBlockRef merge_block;
2491 };
2492
2493 static LLVMBasicBlockRef
2494 ac_build_insert_new_block(struct radv_shader_context *ctx, const char *name)
2495 {
2496 LLVMBasicBlockRef current_block;
2497 LLVMBasicBlockRef next_block;
2498 LLVMBasicBlockRef new_block;
2499
2500 /* get current basic block */
2501 current_block = LLVMGetInsertBlock(ctx->ac.builder);
2502
2503 /* check if there's another block after this one */
2504 next_block = LLVMGetNextBasicBlock(current_block);
2505 if (next_block) {
2506 /* insert the new block before the next block */
2507 new_block = LLVMInsertBasicBlockInContext(ctx->context, next_block, name);
2508 }
2509 else {
2510 /* append new block after current block */
2511 LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
2512 new_block = LLVMAppendBasicBlockInContext(ctx->context, function, name);
2513 }
2514 return new_block;
2515 }
2516
2517 static void
2518 ac_nir_build_if(struct ac_build_if_state *ifthen,
2519 struct radv_shader_context *ctx,
2520 LLVMValueRef condition)
2521 {
2522 LLVMBasicBlockRef block = LLVMGetInsertBlock(ctx->ac.builder);
2523
2524 memset(ifthen, 0, sizeof *ifthen);
2525 ifthen->ctx = ctx;
2526 ifthen->condition = condition;
2527 ifthen->entry_block = block;
2528
2529 /* create endif/merge basic block for the phi functions */
2530 ifthen->merge_block = ac_build_insert_new_block(ctx, "endif-block");
2531
2532 /* create/insert true_block before merge_block */
2533 ifthen->true_block =
2534 LLVMInsertBasicBlockInContext(ctx->context,
2535 ifthen->merge_block,
2536 "if-true-block");
2537
2538 /* subsequent code goes into the true block */
2539 LLVMPositionBuilderAtEnd(ctx->ac.builder, ifthen->true_block);
2540 }
2541
2542 /**
2543 * End a conditional.
2544 */
2545 static void
2546 ac_nir_build_endif(struct ac_build_if_state *ifthen)
2547 {
2548 LLVMBuilderRef builder = ifthen->ctx->ac.builder;
2549
2550 /* Insert branch to the merge block from current block */
2551 LLVMBuildBr(builder, ifthen->merge_block);
2552
2553 /*
2554 * Now patch in the various branch instructions.
2555 */
2556
2557 /* Insert the conditional branch instruction at the end of entry_block */
2558 LLVMPositionBuilderAtEnd(builder, ifthen->entry_block);
2559 if (ifthen->false_block) {
2560 /* we have an else clause */
2561 LLVMBuildCondBr(builder, ifthen->condition,
2562 ifthen->true_block, ifthen->false_block);
2563 }
2564 else {
2565 /* no else clause */
2566 LLVMBuildCondBr(builder, ifthen->condition,
2567 ifthen->true_block, ifthen->merge_block);
2568 }
2569
2570 /* Resume building code at end of the ifthen->merge_block */
2571 LLVMPositionBuilderAtEnd(builder, ifthen->merge_block);
2572 }
2573
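/*
 * Write the tessellation factors. Only TCS invocation 0 of each patch
 * does this: the factors are read back from LDS, stored to the TF ring
 * (preceded by the dynamic HS control word on SI-VI), and optionally
 * mirrored to the offchip buffer when TES reads them.
 */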
2574 static void
2575 write_tess_factors(struct radv_shader_context *ctx)
2576 {
2577 unsigned stride, outer_comps, inner_comps;
2578 struct ac_build_if_state if_ctx, inner_if_ctx;
2579 LLVMValueRef invocation_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 8, 5);
2580 LLVMValueRef rel_patch_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
2581 unsigned tess_inner_index = 0, tess_outer_index;
2582 LLVMValueRef lds_base, lds_inner = NULL, lds_outer, byteoffset, buffer;
2583 LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
2584 int i;
2585 ac_emit_barrier(&ctx->ac, ctx->stage);
2586
2587 switch (ctx->options->key.tcs.primitive_mode) {
2588 case GL_ISOLINES:
2589 stride = 2;
2590 outer_comps = 2;
2591 inner_comps = 0;
2592 break;
2593 case GL_TRIANGLES:
2594 stride = 4;
2595 outer_comps = 3;
2596 inner_comps = 1;
2597 break;
2598 case GL_QUADS:
2599 stride = 6;
2600 outer_comps = 4;
2601 inner_comps = 2;
2602 break;
2603 default:
2604 return;
2605 }
2606
2607 ac_nir_build_if(&if_ctx, ctx,
2608 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2609 invocation_id, ctx->ac.i32_0, ""));
2610
2611 lds_base = get_tcs_out_current_patch_data_offset(ctx);
2612
2613 if (inner_comps) {
2614 tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
2615 lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
2616 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
2617 }
2618
2619 tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
2620 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
2621 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");
2622
2623 for (i = 0; i < 4; i++) {
2624 inner[i] = LLVMGetUndef(ctx->ac.i32);
2625 outer[i] = LLVMGetUndef(ctx->ac.i32);
2626 }
2627
2628 /* GL_ISOLINES: the two outer tess factors are stored in reverse order. */
2629 if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
2630 outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
2631 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
2632 ctx->ac.i32_1, "");
2633 outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
2634 } else {
2635 for (i = 0; i < outer_comps; i++) {
2636 outer[i] = out[i] =
2637 ac_lds_load(&ctx->ac, lds_outer);
2638 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
2639 ctx->ac.i32_1, "");
2640 }
2641 for (i = 0; i < inner_comps; i++) {
2642 inner[i] = out[outer_comps+i] =
2643 ac_lds_load(&ctx->ac, lds_inner);
2644 lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_inner,
2645 ctx->ac.i32_1, "");
2646 }
2647 }
2648
2649 /* Convert the outputs to vectors for stores. */
2650 vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
2651 vec1 = NULL;
2652
2653 if (stride > 4)
2654 vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);
2655
2656
2657 buffer = ctx->hs_ring_tess_factor;
2658 tf_base = ctx->tess_factor_offset;
2659 byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
2660 LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
2661 unsigned tf_offset = 0;
2662
2663 if (ctx->options->chip_class <= VI) {
2664 ac_nir_build_if(&inner_if_ctx, ctx,
2665 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2666 rel_patch_id, ctx->ac.i32_0, ""));
2667
2668 /* Store the dynamic HS control word. */
2669 ac_build_buffer_store_dword(&ctx->ac, buffer,
2670 LLVMConstInt(ctx->ac.i32, 0x80000000, false),
2671 1, ctx->ac.i32_0, tf_base,
2672 0, 1, 0, true, false);
2673 tf_offset += 4;
2674
2675 ac_nir_build_endif(&inner_if_ctx);
2676 }
2677
2678 /* Store the tessellation factors. */
2679 ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
2680 MIN2(stride, 4), byteoffset, tf_base,
2681 tf_offset, 1, 0, true, false);
2682 if (vec1)
2683 ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
2684 stride - 4, byteoffset, tf_base,
2685 16 + tf_offset, 1, 0, true, false);
2686
2687 /* Store to the offchip buffer so TES can read the factors, but only if TES actually reads them. */
2688 if (ctx->options->key.tcs.tes_reads_tess_factors) {
2689 LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
2690 LLVMValueRef tf_inner_offset;
2691 unsigned param_outer, param_inner;
2692
2693 param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
2694 tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
2695 LLVMConstInt(ctx->ac.i32, param_outer, 0));
2696
2697 outer_vec = ac_build_gather_values(&ctx->ac, outer,
2698 util_next_power_of_two(outer_comps));
2699
2700 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
2701 outer_comps, tf_outer_offset,
2702 ctx->oc_lds, 0, 1, 0, true, false);
2703 if (inner_comps) {
2704 param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
2705 tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
2706 LLVMConstInt(ctx->ac.i32, param_inner, 0));
2707
2708 inner_vec = inner_comps == 1 ? inner[0] :
2709 ac_build_gather_values(&ctx->ac, inner, inner_comps);
2710 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
2711 inner_comps, tf_inner_offset,
2712 ctx->oc_lds, 0, 1, 0, true, false);
2713 }
2714 }
2715 ac_nir_build_endif(&if_ctx);
2716 }
2717
2718 static void
2719 handle_tcs_outputs_post(struct radv_shader_context *ctx)
2720 {
2721 write_tess_factors(ctx);
2722 }
2723
2724 static bool
2725 si_export_mrt_color(struct radv_shader_context *ctx,
2726 LLVMValueRef *color, unsigned index,
2727 struct ac_export_args *args)
2728 {
2729 /* Set up the color export. */
2730 si_llvm_init_export_args(ctx, color, 0xf,
2731 V_008DFC_SQ_EXP_MRT + index, args);
2732 if (!args->enabled_channels)
2733 return false; /* unnecessary NULL export */
2734
2735 return true;
2736 }
2737
2738 static void
2739 radv_export_mrt_z(struct radv_shader_context *ctx,
2740 LLVMValueRef depth, LLVMValueRef stencil,
2741 LLVMValueRef samplemask)
2742 {
2743 struct ac_export_args args;
2744
2745 ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);
2746
2747 ac_build_export(&ctx->ac, &args);
2748 }
2749
2750 static void
2751 handle_fs_outputs_post(struct radv_shader_context *ctx)
2752 {
2753 unsigned index = 0;
2754 LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
2755 struct ac_export_args color_args[8];
2756
2757 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2758 LLVMValueRef values[4];
2759
2760 if (!(ctx->output_mask & (1ull << i)))
2761 continue;
2762
2763 if (i < FRAG_RESULT_DATA0)
2764 continue;
2765
2766 for (unsigned j = 0; j < 4; j++)
2767 values[j] = ac_to_float(&ctx->ac,
2768 radv_load_output(ctx, i, j));
2769
2770 bool ret = si_export_mrt_color(ctx, values,
2771 i - FRAG_RESULT_DATA0,
2772 &color_args[index]);
2773 if (ret)
2774 index++;
2775 }
2776
2777 /* Process depth, stencil, samplemask. */
2778 if (ctx->shader_info->info.ps.writes_z) {
2779 depth = ac_to_float(&ctx->ac,
2780 radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
2781 }
2782 if (ctx->shader_info->info.ps.writes_stencil) {
2783 stencil = ac_to_float(&ctx->ac,
2784 radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
2785 }
2786 if (ctx->shader_info->info.ps.writes_sample_mask) {
2787 samplemask = ac_to_float(&ctx->ac,
2788 radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
2789 }
2790
2791 /* Set the DONE bit on last non-null color export only if Z isn't
2792 * exported.
2793 */
2794 if (index > 0 &&
2795 !ctx->shader_info->info.ps.writes_z &&
2796 !ctx->shader_info->info.ps.writes_stencil &&
2797 !ctx->shader_info->info.ps.writes_sample_mask) {
2798 unsigned last = index - 1;
2799
2800 color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
2801 color_args[last].done = 1; /* DONE bit */
2802 }
2803
2804 /* Export PS outputs. */
2805 for (unsigned i = 0; i < index; i++)
2806 ac_build_export(&ctx->ac, &color_args[i]);
2807
2808 if (depth || stencil || samplemask)
2809 radv_export_mrt_z(ctx, depth, stencil, samplemask);
2810 else if (!index)
2811 ac_build_export_null(&ctx->ac);
2812 }
2813
2814 static void
2815 emit_gs_epilogue(struct radv_shader_context *ctx)
2816 {
2817 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
2818 }
2819
2820 static void
2821 handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
2822 LLVMValueRef *addrs)
2823 {
2824 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
2825
2826 switch (ctx->stage) {
2827 case MESA_SHADER_VERTEX:
2828 if (ctx->options->key.vs.as_ls)
2829 handle_ls_outputs_post(ctx);
2830 else if (ctx->options->key.vs.as_es)
2831 handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
2832 else
2833 handle_vs_outputs_post(ctx, ctx->options->key.vs.export_prim_id,
2834 &ctx->shader_info->vs.outinfo);
2835 break;
2836 case MESA_SHADER_FRAGMENT:
2837 handle_fs_outputs_post(ctx);
2838 break;
2839 case MESA_SHADER_GEOMETRY:
2840 emit_gs_epilogue(ctx);
2841 break;
2842 case MESA_SHADER_TESS_CTRL:
2843 handle_tcs_outputs_post(ctx);
2844 break;
2845 case MESA_SHADER_TESS_EVAL:
2846 if (ctx->options->key.tes.as_es)
2847 handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
2848 else
2849 handle_vs_outputs_post(ctx, ctx->options->key.tes.export_prim_id,
2850 &ctx->shader_info->tes.outinfo);
2851 break;
2852 default:
2853 break;
2854 }
2855 }
2856
2857 static void ac_llvm_finalize_module(struct radv_shader_context *ctx)
2858 {
2859 LLVMPassManagerRef passmgr;
2860 /* Create the pass manager */
2861 passmgr = LLVMCreateFunctionPassManagerForModule(
2862 ctx->ac.module);
2863
2864 /* mem2reg promotes allocas to SSA values, eliminating most of the loads and stores emitted during translation */
2865 LLVMAddPromoteMemoryToRegisterPass(passmgr);
2866
2867 /* Add some optimization passes */
2868 LLVMAddScalarReplAggregatesPass(passmgr);
2869 LLVMAddLICMPass(passmgr);
2870 LLVMAddAggressiveDCEPass(passmgr);
2871 LLVMAddCFGSimplificationPass(passmgr);
2872 LLVMAddInstructionCombiningPass(passmgr);
2873
2874 /* Run the passes over the main function */
2875 LLVMInitializeFunctionPassManager(passmgr);
2876 LLVMRunFunctionPassManager(passmgr, ctx->main_function);
2877 LLVMFinalizeFunctionPassManager(passmgr);
2878
2879 LLVMDisposeBuilder(ctx->ac.builder);
2880 LLVMDisposePassManager(passmgr);
2881
2882 ac_llvm_context_dispose(&ctx->ac);
2883 }
2884
2885 static void
2886 ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
2887 {
2888 struct radv_vs_output_info *outinfo;
2889
2890 switch (ctx->stage) {
2891 case MESA_SHADER_FRAGMENT:
2892 case MESA_SHADER_COMPUTE:
2893 case MESA_SHADER_TESS_CTRL:
2894 case MESA_SHADER_GEOMETRY:
2895 return;
2896 case MESA_SHADER_VERTEX:
2897 if (ctx->options->key.vs.as_ls ||
2898 ctx->options->key.vs.as_es)
2899 return;
2900 outinfo = &ctx->shader_info->vs.outinfo;
2901 break;
2902 case MESA_SHADER_TESS_EVAL:
2903 if (ctx->options->key.tes.as_es)
2904 return;
2905 outinfo = &ctx->shader_info->tes.outinfo;
2906 break;
2907 default:
2908 unreachable("Unhandled shader type");
2909 }
2910
2911 ac_optimize_vs_outputs(&ctx->ac,
2912 ctx->main_function,
2913 outinfo->vs_output_param_offset,
2914 VARYING_SLOT_MAX,
2915 &outinfo->param_exports);
2916 }
2917
2918 static void
2919 ac_setup_rings(struct radv_shader_context *ctx)
2920 {
2921 if ((ctx->stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_es) ||
2922 (ctx->stage == MESA_SHADER_TESS_EVAL && ctx->options->key.tes.as_es)) {
2923 ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_ESGS_VS, false));
2924 }
2925
2926 if (ctx->is_gs_copy_shader) {
2927 ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_VS, false));
2928 }
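/* The GSVS ring descriptor is partially patched at run time: the
 * number of entries is now fixed at 64 (it no longer comes from a user
 * SGPR), only the vertex stride does. Dword 1 of the descriptor gets
 * the stride field ORed in and dword 2 the record count, which on VI+
 * counts bytes rather than entries (hence entries * stride).
 */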
2929 if (ctx->stage == MESA_SHADER_GEOMETRY) {
2930 LLVMValueRef tmp;
2931 uint32_t num_entries = 64;
2932 ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_ESGS_GS, false));
2933 ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_GS, false));
2934
2935 ctx->gsvs_ring = LLVMBuildBitCast(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.v4i32, "");
2936
2937 tmp = LLVMConstInt(ctx->ac.i32, num_entries, false);
2938 if (ctx->options->chip_class >= VI)
2939 tmp = LLVMBuildMul(ctx->ac.builder, LLVMBuildLShr(ctx->ac.builder, ctx->gsvs_ring_stride, LLVMConstInt(ctx->ac.i32, 16, false), ""), tmp, "");
2940 ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, tmp, LLVMConstInt(ctx->ac.i32, 2, false), "");
2941 tmp = LLVMBuildExtractElement(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.i32_1, "");
2942 tmp = LLVMBuildOr(ctx->ac.builder, tmp, ctx->gsvs_ring_stride, "");
2943 ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, tmp, ctx->ac.i32_1, "");
2944 }
2945
2946 if (ctx->stage == MESA_SHADER_TESS_CTRL ||
2947 ctx->stage == MESA_SHADER_TESS_EVAL) {
2948 ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
2949 ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
2950 }
2951 }
2952
2953 static unsigned
2954 ac_nir_get_max_workgroup_size(enum chip_class chip_class,
2955 const struct nir_shader *nir)
2956 {
2957 switch (nir->info.stage) {
2958 case MESA_SHADER_TESS_CTRL:
2959 return chip_class >= CIK ? 128 : 64;
2960 case MESA_SHADER_GEOMETRY:
2961 return chip_class >= GFX9 ? 128 : 64;
2962 case MESA_SHADER_COMPUTE:
2963 break;
2964 default:
2965 return 0;
2966 }
2967
2968 unsigned max_workgroup_size = nir->info.cs.local_size[0] *
2969 nir->info.cs.local_size[1] *
2970 nir->info.cs.local_size[2];
2971 return max_workgroup_size;
2972 }
2973
2974 /* Fix up the LS input VGPRs: when there are no HS threads, the hardware doesn't load the TCS input registers, so the LS system values arrive in shifted VGPRs. */
2975 static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
2976 {
2977 LLVMValueRef count = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
2978 LLVMConstInt(ctx->ac.i32, 8, false),
2979 LLVMConstInt(ctx->ac.i32, 8, false), false);
2980 LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
2981 ctx->ac.i32_0, "");
2982 ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
2983 ctx->vs_prim_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.vertex_id, ctx->vs_prim_id, "");
2984 ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_rel_ids, ctx->rel_auto_id, "");
2985 ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_patch_id, ctx->abi.vertex_id, "");
2986 }
2987
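/*
 * On GFX9 the six ES vertex offsets reach the merged GS packed as
 * 16-bit pairs in three VGPRs; unpack them, iterating downwards so each
 * packed source register is read before it is overwritten.
 */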
2988 static void prepare_gs_input_vgprs(struct radv_shader_context *ctx)
2989 {
2990 for(int i = 5; i >= 0; --i) {
2991 ctx->gs_vtx_offset[i] = ac_build_bfe(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
2992 LLVMConstInt(ctx->ac.i32, (i & 1) * 16, false),
2993 LLVMConstInt(ctx->ac.i32, 16, false), false);
2994 }
2995
2996 ctx->gs_wave_id = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
2997 LLVMConstInt(ctx->ac.i32, 16, false),
2998 LLVMConstInt(ctx->ac.i32, 8, false), false);
2999 }
3000
3001
3002 static
3003 LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,
3004 struct nir_shader *const *shaders,
3005 int shader_count,
3006 struct radv_shader_variant_info *shader_info,
3007 const struct radv_nir_compiler_options *options)
3008 {
3009 struct radv_shader_context ctx = {0};
3010 unsigned i;
3011 ctx.options = options;
3012 ctx.shader_info = shader_info;
3013 ctx.context = LLVMContextCreate();
3014
3015 ac_llvm_context_init(&ctx.ac, ctx.context, options->chip_class,
3016 options->family);
3017 ctx.ac.module = LLVMModuleCreateWithNameInContext("shader", ctx.context);
3018 LLVMSetTarget(ctx.ac.module, options->supports_spill ? "amdgcn-mesa-mesa3d" : "amdgcn--");
3019
3020 LLVMTargetDataRef data_layout = LLVMCreateTargetDataLayout(tm);
3021 char *data_layout_str = LLVMCopyStringRepOfTargetData(data_layout);
3022 LLVMSetDataLayout(ctx.ac.module, data_layout_str);
3023 LLVMDisposeTargetData(data_layout);
3024 LLVMDisposeMessage(data_layout_str);
3025
3026 enum ac_float_mode float_mode =
3027 options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
3028 AC_FLOAT_MODE_DEFAULT;
3029
3030 ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
3031
3032 memset(shader_info, 0, sizeof(*shader_info));
3033
3034 for(int i = 0; i < shader_count; ++i)
3035 radv_nir_shader_info_pass(shaders[i], options, &shader_info->info);
3036
3037 for (i = 0; i < RADV_UD_MAX_SETS; i++)
3038 shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
3039 for (i = 0; i < AC_UD_MAX_UD; i++)
3040 shader_info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;
3041
3042 ctx.max_workgroup_size = 0;
3043 for (int i = 0; i < shader_count; ++i) {
3044 ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
3045 ac_nir_get_max_workgroup_size(ctx.options->chip_class,
3046 shaders[i]));
3047 }
3048
3049 create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
3050 shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);
3051
3052 ctx.abi.inputs = &ctx.inputs[0];
3053 ctx.abi.emit_outputs = handle_shader_outputs_post;
3054 ctx.abi.emit_vertex = visit_emit_vertex;
3055 ctx.abi.load_ubo = radv_load_ubo;
3056 ctx.abi.load_ssbo = radv_load_ssbo;
3057 ctx.abi.load_sampler_desc = radv_get_sampler_desc;
3058 ctx.abi.load_resource = radv_load_resource;
3059 ctx.abi.clamp_shadow_reference = false;
3060
3061 if (shader_count >= 2)
3062 ac_init_exec_full_mask(&ctx.ac);
3063
3064 if (ctx.ac.chip_class == GFX9 &&
3065 shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
3066 ac_nir_fixup_ls_hs_input_vgprs(&ctx);
3067
3068 for(int i = 0; i < shader_count; ++i) {
3069 ctx.stage = shaders[i]->info.stage;
3070 ctx.output_mask = 0;
3071 ctx.num_output_clips = shaders[i]->info.clip_distance_array_size;
3072 ctx.num_output_culls = shaders[i]->info.cull_distance_array_size;
3073
3074 if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
3075 ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.ac.i32, "gs_next_vertex");
3076 ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
3077 ctx.abi.load_inputs = load_gs_input;
3078 ctx.abi.emit_primitive = visit_end_primitive;
3079 } else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
3080 ctx.tcs_outputs_read = shaders[i]->info.outputs_read;
3081 ctx.tcs_patch_outputs_read = shaders[i]->info.patch_outputs_read;
3082 ctx.abi.load_tess_varyings = load_tcs_varyings;
3083 ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
3084 ctx.abi.store_tcs_outputs = store_tcs_output;
3085 ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
3086 if (shader_count == 1)
3087 ctx.tcs_num_inputs = ctx.options->key.tcs.num_inputs;
3088 else
3089 ctx.tcs_num_inputs = util_last_bit64(shader_info->info.vs.ls_outputs_written);
3090 ctx.tcs_num_patches = get_tcs_num_patches(&ctx);
3091 } else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
3092 ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
3093 ctx.abi.load_tess_varyings = load_tes_input;
3094 ctx.abi.load_tess_coord = load_tess_coord;
3095 ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
3096 ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
3097 ctx.tcs_num_patches = ctx.options->key.tes.num_patches;
3098 } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
3099 if (shader_info->info.vs.needs_instance_id) {
3100 if (ctx.options->key.vs.as_ls) {
3101 ctx.shader_info->vs.vgpr_comp_cnt =
3102 MAX2(2, ctx.shader_info->vs.vgpr_comp_cnt);
3103 } else {
3104 ctx.shader_info->vs.vgpr_comp_cnt =
3105 MAX2(1, ctx.shader_info->vs.vgpr_comp_cnt);
3106 }
3107 }
3108 ctx.abi.load_base_vertex = radv_load_base_vertex;
3109 } else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
3110 shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
3111 ctx.abi.lookup_interp_param = lookup_interp_param;
3112 ctx.abi.load_sample_position = load_sample_position;
3113 ctx.abi.load_sample_mask_in = load_sample_mask_in;
3114 ctx.abi.emit_kill = radv_emit_kill;
3115 }
3116
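/* For merged shaders, make sure the previous stage has finished
 * writing LDS before the next stage starts reading it.
 */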
3117 if (i)
3118 ac_emit_barrier(&ctx.ac, ctx.stage);
3119
3120 ac_setup_rings(&ctx);
3121
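/* For merged shaders, wrap each stage's body in a branch that only
 * runs the threads belonging to that stage; the per-stage thread
 * counts are packed in merged_wave_info, 8 bits each.
 */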
3122 LLVMBasicBlockRef merge_block;
3123 if (shader_count >= 2) {
3124 LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
3125 LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
3126 merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
3127
3128 LLVMValueRef count = ac_build_bfe(&ctx.ac, ctx.merged_wave_info,
3129 LLVMConstInt(ctx.ac.i32, 8 * i, false),
3130 LLVMConstInt(ctx.ac.i32, 8, false), false);
3131 LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
3132 LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
3133 thread_id, count, "");
3134 LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);
3135
3136 LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
3137 }
3138
3139 if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
3140 handle_fs_inputs(&ctx, shaders[i]);
3141 else if(shaders[i]->info.stage == MESA_SHADER_VERTEX)
3142 handle_vs_inputs(&ctx, shaders[i]);
3143 else if(shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
3144 prepare_gs_input_vgprs(&ctx);
3145
3146 nir_foreach_variable(variable, &shaders[i]->outputs)
3147 scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);
3148
3149 ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i]);
3150
3151 if (shader_count >= 2) {
3152 LLVMBuildBr(ctx.ac.builder, merge_block);
3153 LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
3154 }
3155
3156 if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
3157 unsigned addclip = shaders[i]->info.clip_distance_array_size +
3158 shaders[i]->info.cull_distance_array_size > 4;
3159 shader_info->gs.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
3160 shader_info->gs.max_gsvs_emit_size = shader_info->gs.gsvs_vertex_size *
3161 shaders[i]->info.gs.vertices_out;
3162 } else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
3163 shader_info->tcs.num_patches = ctx.tcs_num_patches;
3164 shader_info->tcs.lds_size = calculate_tess_lds_size(&ctx);
3165 }
3166 }
3167
3168 LLVMBuildRetVoid(ctx.ac.builder);
3169
3170 if (options->dump_preoptir)
3171 ac_dump_module(ctx.ac.module);
3172
3173 ac_llvm_finalize_module(&ctx);
3174
3175 if (shader_count == 1)
3176 ac_nir_eliminate_const_vs_outputs(&ctx);
3177
3178 if (options->dump_shader) {
3179 ctx.shader_info->private_mem_vgprs =
3180 ac_count_scratch_private_memory(ctx.main_function);
3181 }
3182
3183 return ctx.ac.module;
3184 }
3185
3186 static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
3187 {
3188 unsigned *retval = (unsigned *)context;
3189 LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
3190 char *description = LLVMGetDiagInfoDescription(di);
3191
3192 if (severity == LLVMDSError) {
3193 *retval = 1;
3194 fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n",
3195 description);
3196 }
3197
3198 LLVMDisposeMessage(description);
3199 }
3200
3201 static unsigned ac_llvm_compile(LLVMModuleRef M,
3202 struct ac_shader_binary *binary,
3203 LLVMTargetMachineRef tm)
3204 {
3205 unsigned retval = 0;
3206 char *err;
3207 LLVMContextRef llvm_ctx;
3208 LLVMMemoryBufferRef out_buffer;
3209 unsigned buffer_size;
3210 const char *buffer_data;
3211 LLVMBool mem_err;
3212
3213 /* Set up the diagnostic handler */
3214 llvm_ctx = LLVMGetModuleContext(M);
3215
3216 LLVMContextSetDiagnosticHandler(llvm_ctx, ac_diagnostic_handler,
3217 &retval);
3218
3219 /* Compile the IR */
3220 mem_err = LLVMTargetMachineEmitToMemoryBuffer(tm, M, LLVMObjectFile,
3221 &err, &out_buffer);
3222
3223 /* Process Errors/Warnings */
3224 if (mem_err) {
3225 fprintf(stderr, "%s: %s", __FUNCTION__, err);
3226 free(err);
3227 retval = 1;
3228 goto out;
3229 }
3230
3231 /* Extract the shader code */
3232 buffer_size = LLVMGetBufferSize(out_buffer);
3233 buffer_data = LLVMGetBufferStart(out_buffer);
3234
3235 ac_elf_read(buffer_data, buffer_size, binary);
3236
3237 /* Clean up */
3238 LLVMDisposeMemoryBuffer(out_buffer);
3239
3240 out:
3241 return retval;
3242 }
3243
3244 static void ac_compile_llvm_module(LLVMTargetMachineRef tm,
3245 LLVMModuleRef llvm_module,
3246 struct ac_shader_binary *binary,
3247 struct ac_shader_config *config,
3248 struct radv_shader_variant_info *shader_info,
3249 gl_shader_stage stage,
3250 const struct radv_nir_compiler_options *options)
3251 {
3252 if (options->dump_shader)
3253 ac_dump_module(llvm_module);
3254
3255 memset(binary, 0, sizeof(*binary));
3256
3257 if (options->record_llvm_ir) {
3258 char *llvm_ir = LLVMPrintModuleToString(llvm_module);
3259 binary->llvm_ir_string = strdup(llvm_ir);
3260 LLVMDisposeMessage(llvm_ir);
3261 }
3262
3263 int v = ac_llvm_compile(llvm_module, binary, tm);
3264 if (v) {
3265 fprintf(stderr, "compile failed\n");
3266 }
3267
3268 if (options->dump_shader)
3269 fprintf(stderr, "disasm:\n%s\n", binary->disasm_string);
3270
3271 ac_shader_binary_read_config(binary, config, 0, options->supports_spill);
3272
3273 LLVMContextRef ctx = LLVMGetModuleContext(llvm_module);
3274 LLVMDisposeModule(llvm_module);
3275 LLVMContextDispose(ctx);
3276
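/* Derive the number of PS input VGPRs from the SPI input enables:
 * each enabled barycentric pair costs two VGPRs, the pull model three,
 * and the remaining system values one each.
 */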
3277 if (stage == MESA_SHADER_FRAGMENT) {
3278 shader_info->num_input_vgprs = 0;
3279 if (G_0286CC_PERSP_SAMPLE_ENA(config->spi_ps_input_addr))
3280 shader_info->num_input_vgprs += 2;
3281 if (G_0286CC_PERSP_CENTER_ENA(config->spi_ps_input_addr))
3282 shader_info->num_input_vgprs += 2;
3283 if (G_0286CC_PERSP_CENTROID_ENA(config->spi_ps_input_addr))
3284 shader_info->num_input_vgprs += 2;
3285 if (G_0286CC_PERSP_PULL_MODEL_ENA(config->spi_ps_input_addr))
3286 shader_info->num_input_vgprs += 3;
3287 if (G_0286CC_LINEAR_SAMPLE_ENA(config->spi_ps_input_addr))
3288 shader_info->num_input_vgprs += 2;
3289 if (G_0286CC_LINEAR_CENTER_ENA(config->spi_ps_input_addr))
3290 shader_info->num_input_vgprs += 2;
3291 if (G_0286CC_LINEAR_CENTROID_ENA(config->spi_ps_input_addr))
3292 shader_info->num_input_vgprs += 2;
3293 if (G_0286CC_LINE_STIPPLE_TEX_ENA(config->spi_ps_input_addr))
3294 shader_info->num_input_vgprs += 1;
3295 if (G_0286CC_POS_X_FLOAT_ENA(config->spi_ps_input_addr))
3296 shader_info->num_input_vgprs += 1;
3297 if (G_0286CC_POS_Y_FLOAT_ENA(config->spi_ps_input_addr))
3298 shader_info->num_input_vgprs += 1;
3299 if (G_0286CC_POS_Z_FLOAT_ENA(config->spi_ps_input_addr))
3300 shader_info->num_input_vgprs += 1;
3301 if (G_0286CC_POS_W_FLOAT_ENA(config->spi_ps_input_addr))
3302 shader_info->num_input_vgprs += 1;
3303 if (G_0286CC_FRONT_FACE_ENA(config->spi_ps_input_addr))
3304 shader_info->num_input_vgprs += 1;
3305 if (G_0286CC_ANCILLARY_ENA(config->spi_ps_input_addr))
3306 shader_info->num_input_vgprs += 1;
3307 if (G_0286CC_SAMPLE_COVERAGE_ENA(config->spi_ps_input_addr))
3308 shader_info->num_input_vgprs += 1;
3309 if (G_0286CC_POS_FIXED_PT_ENA(config->spi_ps_input_addr))
3310 shader_info->num_input_vgprs += 1;
3311 }
3312 config->num_vgprs = MAX2(config->num_vgprs, shader_info->num_input_vgprs);
3313
3314 /* +3 for scratch wave offset and VCC */
3315 config->num_sgprs = MAX2(config->num_sgprs,
3316 shader_info->num_input_sgprs + 3);
3317
3318 /* Enable 64-bit and 16-bit denormals, because there is no performance
3319 * cost.
3320 *
3321 * If denormals are enabled, all floating-point output modifiers are
3322 * ignored.
3323 *
3324 * Don't enable denormals for 32-bit floats, because:
3325 * - Floating-point output modifiers would be ignored by the hw.
3326 * - Some opcodes don't support denormals, such as v_mad_f32. We would
3327 * have to stop using those.
3328 * - SI & CI would be very slow.
3329 */
3330 config->float_mode |= V_00B028_FP_64_DENORMS;
3331 }
3332
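/* Copy the per-stage parameters that pipeline creation needs (block
 * size, GS vertex counts, tessellation modes, ES/LS configuration)
 * from the NIR info into the variant info.
 */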
3333 static void
3334 ac_fill_shader_info(struct radv_shader_variant_info *shader_info,
                         struct nir_shader *nir,
                         const struct radv_nir_compiler_options *options)
3335 {
3336 switch (nir->info.stage) {
3337 case MESA_SHADER_COMPUTE:
3338 for (int i = 0; i < 3; ++i)
3339 shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
3340 break;
3341 case MESA_SHADER_FRAGMENT:
3342 shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
3343 break;
3344 case MESA_SHADER_GEOMETRY:
3345 shader_info->gs.vertices_in = nir->info.gs.vertices_in;
3346 shader_info->gs.vertices_out = nir->info.gs.vertices_out;
3347 shader_info->gs.output_prim = nir->info.gs.output_primitive;
3348 shader_info->gs.invocations = nir->info.gs.invocations;
3349 break;
3350 case MESA_SHADER_TESS_EVAL:
3351 shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
3352 shader_info->tes.spacing = nir->info.tess.spacing;
3353 shader_info->tes.ccw = nir->info.tess.ccw;
3354 shader_info->tes.point_mode = nir->info.tess.point_mode;
3355 shader_info->tes.as_es = options->key.tes.as_es;
3356 break;
3357 case MESA_SHADER_TESS_CTRL:
3358 shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
3359 break;
3360 case MESA_SHADER_VERTEX:
3361 shader_info->vs.as_es = options->key.vs.as_es;
3362 shader_info->vs.as_ls = options->key.vs.as_ls;
3363 /* In LS mode we need at least 1 VGPR component; the invocation ID needs 2, which is handled elsewhere. */
3364 if (options->key.vs.as_ls)
3365 shader_info->vs.vgpr_comp_cnt = MAX2(1, shader_info->vs.vgpr_comp_cnt);
3366 break;
3367 default:
3368 break;
3369 }
3370 }
3371
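/* Compile one NIR shader, or two merged stages on GFX9, into a single
 * LLVM module and fill in the variant info for every stage involved.
 */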
3372 void
3373 radv_compile_nir_shader(LLVMTargetMachineRef tm,
3374 struct ac_shader_binary *binary,
3375 struct ac_shader_config *config,
3376 struct radv_shader_variant_info *shader_info,
3377 struct nir_shader *const *nir,
3378 int nir_count,
3379 const struct radv_nir_compiler_options *options)
3380 {
3382 LLVMModuleRef llvm_module;
3383
3384 llvm_module = ac_translate_nir_to_llvm(tm, nir, nir_count, shader_info,
3385 options);
3386
3387 ac_compile_llvm_module(tm, llvm_module, binary, config, shader_info,
3388 nir[0]->info.stage, options);
3389
3390 for (int i = 0; i < nir_count; ++i)
3391 ac_fill_shader_info(shader_info, nir[i], options);
3392
3393 /* Determine the ES type (VS or TES) for the GS on GFX9. */
3394 if (options->chip_class == GFX9) {
3395 if (nir_count == 2 &&
3396 nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
3397 shader_info->gs.es_type = nir[0]->info.stage;
3398 }
3399 }
3400 }
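/* A hypothetical call site, for illustration only (the names below are
 * placeholders; the real caller lives in radv_shader.c):
 *
 *   struct ac_shader_binary binary;
 *   struct ac_shader_config config;
 *   struct radv_shader_variant_info info = {0};
 *   struct nir_shader *shaders[] = { vs_nir, gs_nir };
 *
 *   radv_compile_nir_shader(tm, &binary, &config, &info,
 *                           shaders, 2, options);
 */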
3401
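/* Emit the GS copy shader body: read the geometry shader outputs back
 * from the GSVS ring and export them through the standard VS output
 * path.
 */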
3402 static void
3403 ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
3404 {
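/* The GSVS ring stores outputs SoA: all vertices' values for one
 * 32-bit component are contiguous at 4 bytes per vertex, so scaling
 * the vertex id by 4 selects the vertex within each component array.
 */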
3405 LLVMValueRef vtx_offset =
3406 LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
3407 LLVMConstInt(ctx->ac.i32, 4, false), "");
3408 int idx = 0;
3409
3410 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
3411 int length = 4;
3412 int slot = idx;
3413 int slot_inc = 1;
3414 if (!(ctx->output_mask & (1ull << i)))
3415 continue;
3416
3417 if (i == VARYING_SLOT_CLIP_DIST0) {
3418 /* Clip and cull distances are packed into one set of slots; more than four distances spill into a second slot. */
3419 length = ctx->num_output_clips + ctx->num_output_culls;
3420 if (length > 4)
3421 slot_inc = 2;
3422 }
3423
3424 for (int j = 0; j < length; j++) {
3425 LLVMValueRef value, soffset;
3426
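/* Select the (slot, component) array inside the ring; the arrays
 * are spaced gs_max_out_vertices * 16 * 4 bytes apart.
 */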
3427 soffset = LLVMConstInt(ctx->ac.i32,
3428 (slot * 4 + j) *
3429 ctx->gs_max_out_vertices * 16 * 4, false);
3430
3431 value = ac_build_buffer_load(&ctx->ac, ctx->gsvs_ring,
3432 1, ctx->ac.i32_0,
3433 vtx_offset, soffset,
3434 0, 1, 1, true, false);
3435
3436 LLVMBuildStore(ctx->ac.builder,
3437 ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
3438 }
3439 idx += slot_inc;
3440 }
3441 handle_vs_outputs_post(ctx, false, &ctx->shader_info->vs.outinfo);
3442 }
3443
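/* Build and compile the GS copy shader: a small hardware vertex shader
 * that runs after the geometry shader, pulls its outputs out of the
 * GSVS ring and performs the position and parameter exports.
 */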
3444 void
3445 radv_compile_gs_copy_shader(LLVMTargetMachineRef tm,
3446 struct nir_shader *geom_shader,
3447 struct ac_shader_binary *binary,
3448 struct ac_shader_config *config,
3449 struct radv_shader_variant_info *shader_info,
3450 const struct radv_nir_compiler_options *options)
3451 {
3452 struct radv_shader_context ctx = {0};
3453 ctx.context = LLVMContextCreate();
3454 ctx.options = options;
3455 ctx.shader_info = shader_info;
3456
3457 ac_llvm_context_init(&ctx.ac, ctx.context, options->chip_class,
3458 options->family);
3459 ctx.ac.module = LLVMModuleCreateWithNameInContext("shader", ctx.context);
3460
3461 ctx.is_gs_copy_shader = true;
3462 LLVMSetTarget(ctx.ac.module, "amdgcn--");
3463
3464 enum ac_float_mode float_mode =
3465 options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
3466 AC_FLOAT_MODE_DEFAULT;
3467
3468 ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
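/* The copy shader executes on the hardware VS stage, so it is built
 * with the vertex shader ABI and output handling.
 */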
3469 ctx.stage = MESA_SHADER_VERTEX;
3470
3471 create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);
3472
3473 ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
3474 ac_setup_rings(&ctx);
3475
3476 ctx.num_output_clips = geom_shader->info.clip_distance_array_size;
3477 ctx.num_output_culls = geom_shader->info.cull_distance_array_size;
3478
3479 nir_foreach_variable(variable, &geom_shader->outputs) {
3480 scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
3481 ac_handle_shader_output_decl(&ctx.ac, &ctx.abi, geom_shader,
3482 variable, MESA_SHADER_VERTEX);
3483 }
3484
3485 ac_gs_copy_shader_emit(&ctx);
3486
3487 LLVMBuildRetVoid(ctx.ac.builder);
3488
3489 ac_llvm_finalize_module(&ctx);
3490
3491 ac_compile_llvm_module(tm, ctx.ac.module, binary, config, shader_info,
3492 MESA_SHADER_VERTEX, options);
3493 }