[mesa.git] / src / amd / vulkan / radv_nir_to_llvm.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include "radv_private.h"
29 #include "nir/nir.h"
30
31 #include <llvm-c/Core.h>
32 #include <llvm-c/TargetMachine.h>
33 #include <llvm-c/Transforms/Scalar.h>
34
35 #include "sid.h"
36 #include "gfx9d.h"
37 #include "ac_binary.h"
38 #include "ac_llvm_util.h"
39 #include "ac_llvm_build.h"
40 #include "ac_shader_abi.h"
41 #include "ac_shader_util.h"
42 #include "ac_exp_param.h"
43
44 #define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)
45
46 struct radv_shader_context {
47 struct ac_llvm_context ac;
48 const struct ac_nir_compiler_options *options;
49 struct ac_shader_variant_info *shader_info;
50 struct ac_shader_abi abi;
51
52 unsigned max_workgroup_size;
53 LLVMContextRef context;
54 LLVMValueRef main_function;
55
56 LLVMValueRef descriptor_sets[AC_UD_MAX_SETS];
57 LLVMValueRef ring_offsets;
58
59 LLVMValueRef vertex_buffers;
60 LLVMValueRef rel_auto_id;
61 LLVMValueRef vs_prim_id;
62 LLVMValueRef ls_out_layout;
63 LLVMValueRef es2gs_offset;
64
65 LLVMValueRef tcs_offchip_layout;
66 LLVMValueRef tcs_out_offsets;
67 LLVMValueRef tcs_out_layout;
68 LLVMValueRef tcs_in_layout;
69 LLVMValueRef oc_lds;
70 LLVMValueRef merged_wave_info;
71 LLVMValueRef tess_factor_offset;
72 LLVMValueRef tes_rel_patch_id;
73 LLVMValueRef tes_u;
74 LLVMValueRef tes_v;
75
76 LLVMValueRef gsvs_ring_stride;
77 LLVMValueRef gsvs_num_entries;
78 LLVMValueRef gs2vs_offset;
79 LLVMValueRef gs_wave_id;
80 LLVMValueRef gs_vtx_offset[6];
81
82 LLVMValueRef esgs_ring;
83 LLVMValueRef gsvs_ring;
84 LLVMValueRef hs_ring_tess_offchip;
85 LLVMValueRef hs_ring_tess_factor;
86
87 LLVMValueRef sample_pos_offset;
88 LLVMValueRef persp_sample, persp_center, persp_centroid;
89 LLVMValueRef linear_sample, linear_center, linear_centroid;
90
91 gl_shader_stage stage;
92
93 LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];
94
95 uint64_t input_mask;
96 uint64_t output_mask;
97 uint8_t num_output_clips;
98 uint8_t num_output_culls;
99
100 bool is_gs_copy_shader;
101 LLVMValueRef gs_next_vertex;
102 unsigned gs_max_out_vertices;
103
104 unsigned tes_primitive_mode;
105 uint64_t tess_outputs_written;
106 uint64_t tess_patch_outputs_written;
107
108 uint32_t tcs_patch_outputs_read;
109 uint64_t tcs_outputs_read;
110 uint32_t tcs_vertices_per_patch;
111 };
112
113 enum radeon_llvm_calling_convention {
114 RADEON_LLVM_AMDGPU_VS = 87,
115 RADEON_LLVM_AMDGPU_GS = 88,
116 RADEON_LLVM_AMDGPU_PS = 89,
117 RADEON_LLVM_AMDGPU_CS = 90,
118 RADEON_LLVM_AMDGPU_HS = 93,
119 };
120
121 static inline struct radv_shader_context *
122 radv_shader_context_from_abi(struct ac_shader_abi *abi)
123 {
124 struct radv_shader_context *ctx = NULL;
125 return container_of(abi, ctx, abi);
126 }
127
128 static LLVMValueRef get_rel_patch_id(struct radv_shader_context *ctx)
129 {
130 switch (ctx->stage) {
131 case MESA_SHADER_TESS_CTRL:
132 return ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
133 case MESA_SHADER_TESS_EVAL:
134 return ctx->tes_rel_patch_id;
136 default:
137 unreachable("Illegal stage");
138 }
139 }
140
141 /* Tessellation shaders pass outputs to the next shader using LDS.
142 *
143 * LS outputs = TCS inputs
144 * TCS outputs = TES inputs
145 *
146 * The LDS layout is:
147 * - TCS inputs for patch 0
148 * - TCS inputs for patch 1
149 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
150 * - ...
151 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
152 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
153 * - TCS outputs for patch 1
154 * - Per-patch TCS outputs for patch 1
155 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
156 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
157 * - ...
158 *
159 * All three shaders VS(LS), TCS, TES share the same LDS space.
160 */
161 static LLVMValueRef
162 get_tcs_in_patch_stride(struct radv_shader_context *ctx)
163 {
164 if (ctx->stage == MESA_SHADER_VERTEX)
165 return ac_unpack_param(&ctx->ac, ctx->ls_out_layout, 0, 13);
166 else if (ctx->stage == MESA_SHADER_TESS_CTRL)
167 return ac_unpack_param(&ctx->ac, ctx->tcs_in_layout, 0, 13);
168 else {
169 assert(0);
170 return NULL;
171 }
172 }
173
174 static LLVMValueRef
175 get_tcs_out_patch_stride(struct radv_shader_context *ctx)
176 {
177 return ac_unpack_param(&ctx->ac, ctx->tcs_out_layout, 0, 13);
178 }
179
180 static LLVMValueRef
181 get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
182 {
183 return ac_unpack_param(&ctx->ac, ctx->tcs_out_layout, 13, 8);
184 }
185
186 static LLVMValueRef
187 get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
188 {
189 return LLVMBuildMul(ctx->ac.builder,
190 ac_unpack_param(&ctx->ac, ctx->tcs_out_offsets, 0, 16),
191 LLVMConstInt(ctx->ac.i32, 4, false), "");
192 }
193
194 static LLVMValueRef
195 get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
196 {
197 return LLVMBuildMul(ctx->ac.builder,
198 ac_unpack_param(&ctx->ac, ctx->tcs_out_offsets, 16, 16),
199 LLVMConstInt(ctx->ac.i32, 4, false), "");
200 }
201
202 static LLVMValueRef
203 get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
204 {
205 LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
206 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
207
208 return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
209 }
210
211 static LLVMValueRef
212 get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
213 {
214 LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
215 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
216 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
217
218 return LLVMBuildAdd(ctx->ac.builder, patch0_offset,
219 LLVMBuildMul(ctx->ac.builder, patch_stride,
220 rel_patch_id, ""),
221 "");
222 }
223
224 static LLVMValueRef
225 get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
226 {
227 LLVMValueRef patch0_patch_data_offset =
228 get_tcs_out_patch0_patch_data_offset(ctx);
229 LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
230 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
231
232 return LLVMBuildAdd(ctx->ac.builder, patch0_patch_data_offset,
233 LLVMBuildMul(ctx->ac.builder, patch_stride,
234 rel_patch_id, ""),
235 "");
236 }
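/*
 * Illustrative walk-through of the helpers above, with hypothetical values
 * (the real strides and offsets are unpacked from the layout user SGPRs at
 * runtime). All quantities are in dwords, since ac_lds_load/ac_lds_store
 * use dword addressing:
 *
 *   in_patch_stride  = 16   (ls_out_layout / tcs_in_layout bits [12:0])
 *   out_patch_stride = 32   (tcs_out_layout bits [12:0])
 *   patch0_offset    = tcs_out_offsets[15:0] * 4
 *   rel_patch_id     = 2
 *
 *   get_tcs_in_current_patch_offset()  = 2 * 16 = 32
 *   get_tcs_out_current_patch_offset() = patch0_offset + 2 * 32
 */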
237
238 #define MAX_ARGS 23
239 struct arg_info {
240 LLVMTypeRef types[MAX_ARGS];
241 LLVMValueRef *assign[MAX_ARGS];
242 unsigned array_params_mask;
243 uint8_t count;
244 uint8_t sgpr_count;
245 uint8_t num_sgprs_used;
246 uint8_t num_vgprs_used;
247 };
248
249 enum ac_arg_regfile {
250 ARG_SGPR,
251 ARG_VGPR,
252 };
253
254 static void
255 add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
256 LLVMValueRef *param_ptr)
257 {
258 assert(info->count < MAX_ARGS);
259
260 info->assign[info->count] = param_ptr;
261 info->types[info->count] = type;
262 info->count++;
263
264 if (regfile == ARG_SGPR) {
265 info->num_sgprs_used += ac_get_type_size(type) / 4;
266 info->sgpr_count++;
267 } else {
268 assert(regfile == ARG_VGPR);
269 info->num_vgprs_used += ac_get_type_size(type) / 4;
270 }
271 }
272
273 static inline void
274 add_array_arg(struct arg_info *info, LLVMTypeRef type, LLVMValueRef *param_ptr)
275 {
276 info->array_params_mask |= (1 << info->count);
277 add_arg(info, ARG_SGPR, type, param_ptr);
278 }
279
280 static void assign_arguments(LLVMValueRef main_function,
281 struct arg_info *info)
282 {
283 unsigned i;
284 for (i = 0; i < info->count; i++) {
285 if (info->assign[i])
286 *info->assign[i] = LLVMGetParam(main_function, i);
287 }
288 }
289
290 static LLVMValueRef
291 create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
292 LLVMBuilderRef builder, LLVMTypeRef *return_types,
293 unsigned num_return_elems,
294 struct arg_info *args,
295 unsigned max_workgroup_size,
296 bool unsafe_math)
297 {
298 LLVMTypeRef main_function_type, ret_type;
299 LLVMBasicBlockRef main_function_body;
300
301 if (num_return_elems)
302 ret_type = LLVMStructTypeInContext(ctx, return_types,
303 num_return_elems, true);
304 else
305 ret_type = LLVMVoidTypeInContext(ctx);
306
307 /* Setup the function */
308 main_function_type =
309 LLVMFunctionType(ret_type, args->types, args->count, 0);
310 LLVMValueRef main_function =
311 LLVMAddFunction(module, "main", main_function_type);
312 main_function_body =
313 LLVMAppendBasicBlockInContext(ctx, main_function, "main_body");
314 LLVMPositionBuilderAtEnd(builder, main_function_body);
315
316 LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
317 for (unsigned i = 0; i < args->sgpr_count; ++i) {
318 ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);
319
320 if (args->array_params_mask & (1 << i)) {
321 LLVMValueRef P = LLVMGetParam(main_function, i);
322 ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
323 ac_add_attr_dereferenceable(P, UINT64_MAX);
324 }
325 }
326
327 if (max_workgroup_size) {
328 ac_llvm_add_target_dep_function_attr(main_function,
329 "amdgpu-max-work-group-size",
330 max_workgroup_size);
331 }
332 if (unsafe_math) {
333 /* These were copied from some LLVM test. */
334 LLVMAddTargetDependentFunctionAttr(main_function,
335 "less-precise-fpmad",
336 "true");
337 LLVMAddTargetDependentFunctionAttr(main_function,
338 "no-infs-fp-math",
339 "true");
340 LLVMAddTargetDependentFunctionAttr(main_function,
341 "no-nans-fp-math",
342 "true");
343 LLVMAddTargetDependentFunctionAttr(main_function,
344 "unsafe-fp-math",
345 "true");
346 LLVMAddTargetDependentFunctionAttr(main_function,
347 "no-signed-zeros-fp-math",
348 "true");
349 }
350 return main_function;
351 }
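/*
 * A minimal sketch of how the arg_info machinery is driven (hypothetical
 * arguments, not a real stage; create_function() below is the authoritative
 * user): declare each parameter with a pointer to the variable that should
 * receive it, build the function, then let assign_arguments() write the
 * LLVM params back through those pointers.
 *
 *   struct arg_info args = {};
 *   LLVMValueRef ring_offsets, vertex_id;
 *   add_array_arg(&args, ac_array_in_const_addr_space(ctx->ac.v4i32),
 *                 &ring_offsets);          // SGPR, noalias + dereferenceable
 *   add_arg(&args, ARG_VGPR, ctx->ac.i32, &vertex_id);
 *   LLVMValueRef fn = create_llvm_function(ctx->context, ctx->ac.module,
 *                                          ctx->ac.builder, NULL, 0,
 *                                          &args, 64, false);
 *   assign_arguments(fn, &args);
 *   // ring_offsets and vertex_id now hold the function parameters.
 */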
352
353
354 static void
355 set_loc(struct ac_userdata_info *ud_info, uint8_t *sgpr_idx, uint8_t num_sgprs,
356 uint32_t indirect_offset)
357 {
358 ud_info->sgpr_idx = *sgpr_idx;
359 ud_info->num_sgprs = num_sgprs;
360 ud_info->indirect = indirect_offset > 0;
361 ud_info->indirect_offset = indirect_offset;
362 *sgpr_idx += num_sgprs;
363 }
364
365 static void
366 set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
367 uint8_t num_sgprs)
368 {
369 struct ac_userdata_info *ud_info =
370 &ctx->shader_info->user_sgprs_locs.shader_data[idx];
371 assert(ud_info);
372
373 set_loc(ud_info, sgpr_idx, num_sgprs, 0);
374 }
375
376 static void
377 set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
378 uint32_t indirect_offset)
379 {
380 struct ac_userdata_info *ud_info =
381 &ctx->shader_info->user_sgprs_locs.descriptor_sets[idx];
382 assert(ud_info);
383
384 set_loc(ud_info, sgpr_idx, 2, indirect_offset);
385 }
386
387 struct user_sgpr_info {
388 bool need_ring_offsets;
389 uint8_t sgpr_count;
390 bool indirect_all_descriptor_sets;
391 };
392
393 static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
394 gl_shader_stage stage)
395 {
396 switch (stage) {
397 case MESA_SHADER_VERTEX:
398 if (ctx->shader_info->info.needs_multiview_view_index ||
399 (!ctx->options->key.vs.as_es && !ctx->options->key.vs.as_ls && ctx->options->key.has_multiview_view_index))
400 return true;
401 break;
402 case MESA_SHADER_TESS_EVAL:
403 if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.tes.as_es && ctx->options->key.has_multiview_view_index))
404 return true;
405 break;
406 case MESA_SHADER_GEOMETRY:
407 case MESA_SHADER_TESS_CTRL:
408 if (ctx->shader_info->info.needs_multiview_view_index)
409 return true;
410 break;
411 default:
412 break;
413 }
414 return false;
415 }
416
417 static uint8_t
418 count_vs_user_sgprs(struct radv_shader_context *ctx)
419 {
420 uint8_t count = 0;
421
422 count += ctx->shader_info->info.vs.has_vertex_buffers ? 2 : 0;
423 count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;
424
425 return count;
426 }
427
428 static void allocate_user_sgprs(struct radv_shader_context *ctx,
429 gl_shader_stage stage,
430 bool has_previous_stage,
431 gl_shader_stage previous_stage,
432 bool needs_view_index,
433 struct user_sgpr_info *user_sgpr_info)
434 {
435 memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));
436
437 	/* Until we sort out scratch/global buffers, always assign ring offsets for gs/vs/es. */
438 if (stage == MESA_SHADER_GEOMETRY ||
439 stage == MESA_SHADER_VERTEX ||
440 stage == MESA_SHADER_TESS_CTRL ||
441 stage == MESA_SHADER_TESS_EVAL ||
442 ctx->is_gs_copy_shader)
443 user_sgpr_info->need_ring_offsets = true;
444
445 if (stage == MESA_SHADER_FRAGMENT &&
446 ctx->shader_info->info.ps.needs_sample_positions)
447 user_sgpr_info->need_ring_offsets = true;
448
449 /* 2 user sgprs will nearly always be allocated for scratch/rings */
450 if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
451 user_sgpr_info->sgpr_count += 2;
452 }
453
454 switch (stage) {
455 case MESA_SHADER_COMPUTE:
456 if (ctx->shader_info->info.cs.uses_grid_size)
457 user_sgpr_info->sgpr_count += 3;
458 break;
459 case MESA_SHADER_FRAGMENT:
460 user_sgpr_info->sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
461 break;
462 case MESA_SHADER_VERTEX:
463 if (!ctx->is_gs_copy_shader)
464 user_sgpr_info->sgpr_count += count_vs_user_sgprs(ctx);
465 if (ctx->options->key.vs.as_ls)
466 user_sgpr_info->sgpr_count++;
467 break;
468 case MESA_SHADER_TESS_CTRL:
469 if (has_previous_stage) {
470 if (previous_stage == MESA_SHADER_VERTEX)
471 user_sgpr_info->sgpr_count += count_vs_user_sgprs(ctx);
472 user_sgpr_info->sgpr_count++;
473 }
474 user_sgpr_info->sgpr_count += 4;
475 break;
476 case MESA_SHADER_TESS_EVAL:
477 user_sgpr_info->sgpr_count += 1;
478 break;
479 case MESA_SHADER_GEOMETRY:
480 if (has_previous_stage) {
481 if (previous_stage == MESA_SHADER_VERTEX) {
482 user_sgpr_info->sgpr_count += count_vs_user_sgprs(ctx);
483 } else {
484 user_sgpr_info->sgpr_count++;
485 }
486 }
487 user_sgpr_info->sgpr_count += 2;
488 break;
489 default:
490 break;
491 }
492
493 if (needs_view_index)
494 user_sgpr_info->sgpr_count++;
495
496 if (ctx->shader_info->info.loads_push_constants)
497 user_sgpr_info->sgpr_count += 2;
498
499 uint32_t available_sgprs = ctx->options->chip_class >= GFX9 ? 32 : 16;
500 uint32_t remaining_sgprs = available_sgprs - user_sgpr_info->sgpr_count;
501
502 if (remaining_sgprs / 2 < util_bitcount(ctx->shader_info->info.desc_set_used_mask)) {
503 user_sgpr_info->sgpr_count += 2;
504 user_sgpr_info->indirect_all_descriptor_sets = true;
505 } else {
506 user_sgpr_info->sgpr_count += util_bitcount(ctx->shader_info->info.desc_set_used_mask) * 2;
507 }
508 }
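/*
 * Worked example of the budget above, for a hypothetical GFX9 vertex shader
 * with spill support, three descriptor sets, vertex buffers and draw id:
 *
 *   scratch/ring offsets                      2
 *   vertex buffers                            2
 *   base vertex + start instance + draw id    3
 *   push constants                            2
 *                                       sum = 9
 *
 * remaining = 32 - 9 = 23, and 23 / 2 = 11 >= 3 used sets, so every set
 * gets its own 64-bit pointer (3 * 2 = 6 more SGPRs) rather than falling
 * back to the single indirect pointer.
 */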
509
510 static void
511 declare_global_input_sgprs(struct radv_shader_context *ctx,
512 gl_shader_stage stage,
513 bool has_previous_stage,
514 gl_shader_stage previous_stage,
515 const struct user_sgpr_info *user_sgpr_info,
516 struct arg_info *args,
517 LLVMValueRef *desc_sets)
518 {
519 LLVMTypeRef type = ac_array_in_const_addr_space(ctx->ac.i8);
520 unsigned num_sets = ctx->options->layout ?
521 ctx->options->layout->num_sets : 0;
522 unsigned stage_mask = 1 << stage;
523
524 if (has_previous_stage)
525 stage_mask |= 1 << previous_stage;
526
527 /* 1 for each descriptor set */
528 if (!user_sgpr_info->indirect_all_descriptor_sets) {
529 for (unsigned i = 0; i < num_sets; ++i) {
530 if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
531 ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
532 add_array_arg(args, type,
533 &ctx->descriptor_sets[i]);
534 }
535 }
536 } else {
537 add_array_arg(args, ac_array_in_const_addr_space(type), desc_sets);
538 }
539
540 if (ctx->shader_info->info.loads_push_constants) {
541 /* 1 for push constants and dynamic descriptors */
542 add_array_arg(args, type, &ctx->abi.push_constants);
543 }
544 }
545
546 static void
547 declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
548 gl_shader_stage stage,
549 bool has_previous_stage,
550 gl_shader_stage previous_stage,
551 struct arg_info *args)
552 {
553 if (!ctx->is_gs_copy_shader &&
554 (stage == MESA_SHADER_VERTEX ||
555 (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
556 if (ctx->shader_info->info.vs.has_vertex_buffers) {
557 add_arg(args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
558 &ctx->vertex_buffers);
559 }
560 add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
561 add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
562 if (ctx->shader_info->info.vs.needs_draw_id) {
563 add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
564 }
565 }
566 }
567
568 static void
569 declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
570 {
571 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
572 if (!ctx->is_gs_copy_shader) {
573 if (ctx->options->key.vs.as_ls) {
574 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
575 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
576 } else {
577 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
578 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
579 }
580 add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
581 }
582 }
583
584 static void
585 declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
586 {
587 add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_u);
588 add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_v);
589 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->tes_rel_patch_id);
590 add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.tes_patch_id);
591 }
592
593 static void
594 set_global_input_locs(struct radv_shader_context *ctx, gl_shader_stage stage,
595 bool has_previous_stage, gl_shader_stage previous_stage,
596 const struct user_sgpr_info *user_sgpr_info,
597 LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
598 {
599 unsigned num_sets = ctx->options->layout ?
600 ctx->options->layout->num_sets : 0;
601 unsigned stage_mask = 1 << stage;
602
603 if (has_previous_stage)
604 stage_mask |= 1 << previous_stage;
605
606 if (!user_sgpr_info->indirect_all_descriptor_sets) {
607 for (unsigned i = 0; i < num_sets; ++i) {
608 if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
609 ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
610 set_loc_desc(ctx, i, user_sgpr_idx, 0);
611 } else
612 ctx->descriptor_sets[i] = NULL;
613 }
614 } else {
615 set_loc_shader(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
616 user_sgpr_idx, 2);
617
618 for (unsigned i = 0; i < num_sets; ++i) {
619 if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
620 ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
621 set_loc_desc(ctx, i, user_sgpr_idx, i * 8);
622 ctx->descriptor_sets[i] =
623 ac_build_load_to_sgpr(&ctx->ac,
624 desc_sets,
625 LLVMConstInt(ctx->ac.i32, i, false));
626
627 } else
628 ctx->descriptor_sets[i] = NULL;
629 }
630 ctx->shader_info->need_indirect_descriptor_sets = true;
631 }
632
633 if (ctx->shader_info->info.loads_push_constants) {
634 set_loc_shader(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx, 2);
635 }
636 }
637
638 static void
639 set_vs_specific_input_locs(struct radv_shader_context *ctx,
640 gl_shader_stage stage, bool has_previous_stage,
641 gl_shader_stage previous_stage,
642 uint8_t *user_sgpr_idx)
643 {
644 if (!ctx->is_gs_copy_shader &&
645 (stage == MESA_SHADER_VERTEX ||
646 (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
647 if (ctx->shader_info->info.vs.has_vertex_buffers) {
648 set_loc_shader(ctx, AC_UD_VS_VERTEX_BUFFERS,
649 user_sgpr_idx, 2);
650 }
651
652 unsigned vs_num = 2;
653 if (ctx->shader_info->info.vs.needs_draw_id)
654 vs_num++;
655
656 set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
657 user_sgpr_idx, vs_num);
658 }
659 }
660
661 static unsigned shader_io_get_unique_index(gl_varying_slot slot)
662 {
663 	/* handle patch indices separately */
664 if (slot == VARYING_SLOT_TESS_LEVEL_OUTER)
665 return 0;
666 if (slot == VARYING_SLOT_TESS_LEVEL_INNER)
667 return 1;
668 if (slot >= VARYING_SLOT_PATCH0 && slot <= VARYING_SLOT_TESS_MAX)
669 return 2 + (slot - VARYING_SLOT_PATCH0);
670
671 if (slot == VARYING_SLOT_POS)
672 return 0;
673 if (slot == VARYING_SLOT_PSIZ)
674 return 1;
675 if (slot == VARYING_SLOT_CLIP_DIST0)
676 return 2;
677 	/* index 3 is reserved for the second clip-distance slot (CLIP_DIST1) */
678 if (slot >= VARYING_SLOT_VAR0 && slot <= VARYING_SLOT_VAR31)
679 return 4 + (slot - VARYING_SLOT_VAR0);
680 unreachable("illegal slot in get unique index\n");
681 }
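/*
 * Example mappings produced by the rules above:
 *   TESS_LEVEL_OUTER -> 0, TESS_LEVEL_INNER -> 1, PATCH3 -> 5
 *   POS -> 0, PSIZ -> 1, CLIP_DIST0 -> 2 (3 reserved for CLIP_DIST1),
 *   VAR7 -> 11
 * Patch and per-vertex slots deliberately live in separate namespaces,
 * which is why both TESS_LEVEL_OUTER and POS can map to index 0.
 */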
682
683 static void set_llvm_calling_convention(LLVMValueRef func,
684 gl_shader_stage stage)
685 {
686 enum radeon_llvm_calling_convention calling_conv;
687
688 switch (stage) {
689 case MESA_SHADER_VERTEX:
690 case MESA_SHADER_TESS_EVAL:
691 calling_conv = RADEON_LLVM_AMDGPU_VS;
692 break;
693 case MESA_SHADER_GEOMETRY:
694 calling_conv = RADEON_LLVM_AMDGPU_GS;
695 break;
696 case MESA_SHADER_TESS_CTRL:
697 calling_conv = HAVE_LLVM >= 0x0500 ? RADEON_LLVM_AMDGPU_HS : RADEON_LLVM_AMDGPU_VS;
698 break;
699 case MESA_SHADER_FRAGMENT:
700 calling_conv = RADEON_LLVM_AMDGPU_PS;
701 break;
702 case MESA_SHADER_COMPUTE:
703 calling_conv = RADEON_LLVM_AMDGPU_CS;
704 break;
705 default:
706 		unreachable("Unhandled shader type");
707 }
708
709 LLVMSetFunctionCallConv(func, calling_conv);
710 }
711
712 static void create_function(struct radv_shader_context *ctx,
713 gl_shader_stage stage,
714 bool has_previous_stage,
715 gl_shader_stage previous_stage)
716 {
717 uint8_t user_sgpr_idx;
718 struct user_sgpr_info user_sgpr_info;
719 struct arg_info args = {};
720 LLVMValueRef desc_sets;
721 bool needs_view_index = needs_view_index_sgpr(ctx, stage);
722 allocate_user_sgprs(ctx, stage, has_previous_stage,
723 previous_stage, needs_view_index, &user_sgpr_info);
724
725 if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
726 add_arg(&args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
727 &ctx->ring_offsets);
728 }
729
730 switch (stage) {
731 case MESA_SHADER_COMPUTE:
732 declare_global_input_sgprs(ctx, stage, has_previous_stage,
733 previous_stage, &user_sgpr_info,
734 &args, &desc_sets);
735
736 if (ctx->shader_info->info.cs.uses_grid_size) {
737 add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
738 &ctx->abi.num_work_groups);
739 }
740
741 for (int i = 0; i < 3; i++) {
742 ctx->abi.workgroup_ids[i] = NULL;
743 if (ctx->shader_info->info.cs.uses_block_id[i]) {
744 add_arg(&args, ARG_SGPR, ctx->ac.i32,
745 &ctx->abi.workgroup_ids[i]);
746 }
747 }
748
749 if (ctx->shader_info->info.cs.uses_local_invocation_idx)
750 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
751 add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
752 &ctx->abi.local_invocation_ids);
753 break;
754 case MESA_SHADER_VERTEX:
755 declare_global_input_sgprs(ctx, stage, has_previous_stage,
756 previous_stage, &user_sgpr_info,
757 &args, &desc_sets);
758 declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
759 previous_stage, &args);
760
761 if (needs_view_index)
762 add_arg(&args, ARG_SGPR, ctx->ac.i32,
763 &ctx->abi.view_index);
764 if (ctx->options->key.vs.as_es)
765 add_arg(&args, ARG_SGPR, ctx->ac.i32,
766 &ctx->es2gs_offset);
767 else if (ctx->options->key.vs.as_ls)
768 add_arg(&args, ARG_SGPR, ctx->ac.i32,
769 &ctx->ls_out_layout);
770
771 declare_vs_input_vgprs(ctx, &args);
772 break;
773 case MESA_SHADER_TESS_CTRL:
774 if (has_previous_stage) {
775 // First 6 system regs
776 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
777 add_arg(&args, ARG_SGPR, ctx->ac.i32,
778 &ctx->merged_wave_info);
779 add_arg(&args, ARG_SGPR, ctx->ac.i32,
780 &ctx->tess_factor_offset);
781
782 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
783 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
784 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
785
786 declare_global_input_sgprs(ctx, stage,
787 has_previous_stage,
788 previous_stage,
789 &user_sgpr_info, &args,
790 &desc_sets);
791 declare_vs_specific_input_sgprs(ctx, stage,
792 has_previous_stage,
793 previous_stage, &args);
794
795 add_arg(&args, ARG_SGPR, ctx->ac.i32,
796 &ctx->ls_out_layout);
797
798 add_arg(&args, ARG_SGPR, ctx->ac.i32,
799 &ctx->tcs_offchip_layout);
800 add_arg(&args, ARG_SGPR, ctx->ac.i32,
801 &ctx->tcs_out_offsets);
802 add_arg(&args, ARG_SGPR, ctx->ac.i32,
803 &ctx->tcs_out_layout);
804 add_arg(&args, ARG_SGPR, ctx->ac.i32,
805 &ctx->tcs_in_layout);
806 if (needs_view_index)
807 add_arg(&args, ARG_SGPR, ctx->ac.i32,
808 &ctx->abi.view_index);
809
810 add_arg(&args, ARG_VGPR, ctx->ac.i32,
811 &ctx->abi.tcs_patch_id);
812 add_arg(&args, ARG_VGPR, ctx->ac.i32,
813 &ctx->abi.tcs_rel_ids);
814
815 declare_vs_input_vgprs(ctx, &args);
816 } else {
817 declare_global_input_sgprs(ctx, stage,
818 has_previous_stage,
819 previous_stage,
820 &user_sgpr_info, &args,
821 &desc_sets);
822
823 add_arg(&args, ARG_SGPR, ctx->ac.i32,
824 &ctx->tcs_offchip_layout);
825 add_arg(&args, ARG_SGPR, ctx->ac.i32,
826 &ctx->tcs_out_offsets);
827 add_arg(&args, ARG_SGPR, ctx->ac.i32,
828 &ctx->tcs_out_layout);
829 add_arg(&args, ARG_SGPR, ctx->ac.i32,
830 &ctx->tcs_in_layout);
831 if (needs_view_index)
832 add_arg(&args, ARG_SGPR, ctx->ac.i32,
833 &ctx->abi.view_index);
834
835 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
836 add_arg(&args, ARG_SGPR, ctx->ac.i32,
837 &ctx->tess_factor_offset);
838 add_arg(&args, ARG_VGPR, ctx->ac.i32,
839 &ctx->abi.tcs_patch_id);
840 add_arg(&args, ARG_VGPR, ctx->ac.i32,
841 &ctx->abi.tcs_rel_ids);
842 }
843 break;
844 case MESA_SHADER_TESS_EVAL:
845 declare_global_input_sgprs(ctx, stage, has_previous_stage,
846 previous_stage, &user_sgpr_info,
847 &args, &desc_sets);
848
849 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->tcs_offchip_layout);
850 if (needs_view_index)
851 add_arg(&args, ARG_SGPR, ctx->ac.i32,
852 &ctx->abi.view_index);
853
854 if (ctx->options->key.tes.as_es) {
855 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
856 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
857 add_arg(&args, ARG_SGPR, ctx->ac.i32,
858 &ctx->es2gs_offset);
859 } else {
860 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
861 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
862 }
863 declare_tes_input_vgprs(ctx, &args);
864 break;
865 case MESA_SHADER_GEOMETRY:
866 if (has_previous_stage) {
867 // First 6 system regs
868 add_arg(&args, ARG_SGPR, ctx->ac.i32,
869 &ctx->gs2vs_offset);
870 add_arg(&args, ARG_SGPR, ctx->ac.i32,
871 &ctx->merged_wave_info);
872 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
873
874 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
875 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
876 add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
877
878 declare_global_input_sgprs(ctx, stage,
879 has_previous_stage,
880 previous_stage,
881 &user_sgpr_info, &args,
882 &desc_sets);
883
884 if (previous_stage == MESA_SHADER_TESS_EVAL) {
885 add_arg(&args, ARG_SGPR, ctx->ac.i32,
886 &ctx->tcs_offchip_layout);
887 } else {
888 declare_vs_specific_input_sgprs(ctx, stage,
889 has_previous_stage,
890 previous_stage,
891 &args);
892 }
893
894 add_arg(&args, ARG_SGPR, ctx->ac.i32,
895 &ctx->gsvs_ring_stride);
896 add_arg(&args, ARG_SGPR, ctx->ac.i32,
897 &ctx->gsvs_num_entries);
898 if (needs_view_index)
899 add_arg(&args, ARG_SGPR, ctx->ac.i32,
900 &ctx->abi.view_index);
901
902 add_arg(&args, ARG_VGPR, ctx->ac.i32,
903 &ctx->gs_vtx_offset[0]);
904 add_arg(&args, ARG_VGPR, ctx->ac.i32,
905 &ctx->gs_vtx_offset[2]);
906 add_arg(&args, ARG_VGPR, ctx->ac.i32,
907 &ctx->abi.gs_prim_id);
908 add_arg(&args, ARG_VGPR, ctx->ac.i32,
909 &ctx->abi.gs_invocation_id);
910 add_arg(&args, ARG_VGPR, ctx->ac.i32,
911 &ctx->gs_vtx_offset[4]);
912
913 if (previous_stage == MESA_SHADER_VERTEX) {
914 declare_vs_input_vgprs(ctx, &args);
915 } else {
916 declare_tes_input_vgprs(ctx, &args);
917 }
918 } else {
919 declare_global_input_sgprs(ctx, stage,
920 has_previous_stage,
921 previous_stage,
922 &user_sgpr_info, &args,
923 &desc_sets);
924
925 add_arg(&args, ARG_SGPR, ctx->ac.i32,
926 &ctx->gsvs_ring_stride);
927 add_arg(&args, ARG_SGPR, ctx->ac.i32,
928 &ctx->gsvs_num_entries);
929 if (needs_view_index)
930 add_arg(&args, ARG_SGPR, ctx->ac.i32,
931 &ctx->abi.view_index);
932
933 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs2vs_offset);
934 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs_wave_id);
935 add_arg(&args, ARG_VGPR, ctx->ac.i32,
936 &ctx->gs_vtx_offset[0]);
937 add_arg(&args, ARG_VGPR, ctx->ac.i32,
938 &ctx->gs_vtx_offset[1]);
939 add_arg(&args, ARG_VGPR, ctx->ac.i32,
940 &ctx->abi.gs_prim_id);
941 add_arg(&args, ARG_VGPR, ctx->ac.i32,
942 &ctx->gs_vtx_offset[2]);
943 add_arg(&args, ARG_VGPR, ctx->ac.i32,
944 &ctx->gs_vtx_offset[3]);
945 add_arg(&args, ARG_VGPR, ctx->ac.i32,
946 &ctx->gs_vtx_offset[4]);
947 add_arg(&args, ARG_VGPR, ctx->ac.i32,
948 &ctx->gs_vtx_offset[5]);
949 add_arg(&args, ARG_VGPR, ctx->ac.i32,
950 &ctx->abi.gs_invocation_id);
951 }
952 break;
953 case MESA_SHADER_FRAGMENT:
954 declare_global_input_sgprs(ctx, stage, has_previous_stage,
955 previous_stage, &user_sgpr_info,
956 &args, &desc_sets);
957
958 if (ctx->shader_info->info.ps.needs_sample_positions)
959 add_arg(&args, ARG_SGPR, ctx->ac.i32,
960 &ctx->sample_pos_offset);
961
962 add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
963 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_sample);
964 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_center);
965 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_centroid);
966 add_arg(&args, ARG_VGPR, ctx->ac.v3i32, NULL); /* persp pull model */
967 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_sample);
968 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_center);
969 add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_centroid);
970 add_arg(&args, ARG_VGPR, ctx->ac.f32, NULL); /* line stipple tex */
971 add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[0]);
972 add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[1]);
973 add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[2]);
974 add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[3]);
975 add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.front_face);
976 add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.ancillary);
977 add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.sample_coverage);
978 add_arg(&args, ARG_VGPR, ctx->ac.i32, NULL); /* fixed pt */
979 break;
980 default:
981 unreachable("Shader stage not implemented");
982 }
983
984 ctx->main_function = create_llvm_function(
985 ctx->context, ctx->ac.module, ctx->ac.builder, NULL, 0, &args,
986 ctx->max_workgroup_size,
987 ctx->options->unsafe_math);
988 set_llvm_calling_convention(ctx->main_function, stage);
989
990
991 ctx->shader_info->num_input_vgprs = 0;
992 ctx->shader_info->num_input_sgprs = ctx->options->supports_spill ? 2 : 0;
993
994 ctx->shader_info->num_input_sgprs += args.num_sgprs_used;
995
996 if (ctx->stage != MESA_SHADER_FRAGMENT)
997 ctx->shader_info->num_input_vgprs = args.num_vgprs_used;
998
999 assign_arguments(ctx->main_function, &args);
1000
1001 user_sgpr_idx = 0;
1002
1003 if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
1004 set_loc_shader(ctx, AC_UD_SCRATCH_RING_OFFSETS,
1005 &user_sgpr_idx, 2);
1006 if (ctx->options->supports_spill) {
1007 ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
1008 LLVMPointerType(ctx->ac.i8, AC_CONST_ADDR_SPACE),
1009 NULL, 0, AC_FUNC_ATTR_READNONE);
1010 ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
1011 ac_array_in_const_addr_space(ctx->ac.v4i32), "");
1012 }
1013 }
1014
1015 	/* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in front (including
1016 	 * the rw_buffers at s0/s1). With user SGPR0 = s8, let's restart the count from 0. */
1017 if (has_previous_stage)
1018 user_sgpr_idx = 0;
1019
1020 set_global_input_locs(ctx, stage, has_previous_stage, previous_stage,
1021 &user_sgpr_info, desc_sets, &user_sgpr_idx);
1022
1023 switch (stage) {
1024 case MESA_SHADER_COMPUTE:
1025 if (ctx->shader_info->info.cs.uses_grid_size) {
1026 set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
1027 &user_sgpr_idx, 3);
1028 }
1029 break;
1030 case MESA_SHADER_VERTEX:
1031 set_vs_specific_input_locs(ctx, stage, has_previous_stage,
1032 previous_stage, &user_sgpr_idx);
1033 if (ctx->abi.view_index)
1034 set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
1035 if (ctx->options->key.vs.as_ls) {
1036 set_loc_shader(ctx, AC_UD_VS_LS_TCS_IN_LAYOUT,
1037 &user_sgpr_idx, 1);
1038 }
1039 break;
1040 case MESA_SHADER_TESS_CTRL:
1041 set_vs_specific_input_locs(ctx, stage, has_previous_stage,
1042 previous_stage, &user_sgpr_idx);
1043 if (has_previous_stage)
1044 set_loc_shader(ctx, AC_UD_VS_LS_TCS_IN_LAYOUT,
1045 &user_sgpr_idx, 1);
1046 set_loc_shader(ctx, AC_UD_TCS_OFFCHIP_LAYOUT, &user_sgpr_idx, 4);
1047 if (ctx->abi.view_index)
1048 set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
1049 break;
1050 case MESA_SHADER_TESS_EVAL:
1051 set_loc_shader(ctx, AC_UD_TES_OFFCHIP_LAYOUT, &user_sgpr_idx, 1);
1052 if (ctx->abi.view_index)
1053 set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
1054 break;
1055 case MESA_SHADER_GEOMETRY:
1056 if (has_previous_stage) {
1057 if (previous_stage == MESA_SHADER_VERTEX)
1058 set_vs_specific_input_locs(ctx, stage,
1059 has_previous_stage,
1060 previous_stage,
1061 &user_sgpr_idx);
1062 else
1063 set_loc_shader(ctx, AC_UD_TES_OFFCHIP_LAYOUT,
1064 &user_sgpr_idx, 1);
1065 }
1066 set_loc_shader(ctx, AC_UD_GS_VS_RING_STRIDE_ENTRIES,
1067 &user_sgpr_idx, 2);
1068 if (ctx->abi.view_index)
1069 set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
1070 break;
1071 case MESA_SHADER_FRAGMENT:
1072 if (ctx->shader_info->info.ps.needs_sample_positions) {
1073 set_loc_shader(ctx, AC_UD_PS_SAMPLE_POS_OFFSET,
1074 &user_sgpr_idx, 1);
1075 }
1076 break;
1077 default:
1078 unreachable("Shader stage not implemented");
1079 }
1080
1081 if (stage == MESA_SHADER_TESS_CTRL ||
1082 (stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_ls) ||
1083 /* GFX9 has the ESGS ring buffer in LDS. */
1084 (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
1085 ac_declare_lds_as_pointer(&ctx->ac);
1086 }
1087
1088 ctx->shader_info->num_user_sgprs = user_sgpr_idx;
1089 }
1090
1091
1092 static LLVMValueRef
1093 radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
1094 unsigned desc_set, unsigned binding)
1095 {
1096 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1097 LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
1098 struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
1099 struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
1100 unsigned base_offset = layout->binding[binding].offset;
1101 LLVMValueRef offset, stride;
1102
1103 if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
1104 layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
1105 unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
1106 layout->binding[binding].dynamic_offset_offset;
1107 desc_ptr = ctx->abi.push_constants;
1108 base_offset = pipeline_layout->push_constant_size + 16 * idx;
1109 stride = LLVMConstInt(ctx->ac.i32, 16, false);
1110 } else
1111 stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);
1112
1113 offset = LLVMConstInt(ctx->ac.i32, base_offset, false);
1114 index = LLVMBuildMul(ctx->ac.builder, index, stride, "");
1115 offset = LLVMBuildAdd(ctx->ac.builder, offset, index, "");
1116
1117 desc_ptr = ac_build_gep0(&ctx->ac, desc_ptr, offset);
1118 desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
1119 LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
1120
1121 return desc_ptr;
1122 }
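/*
 * Sketch of the resulting address arithmetic, for a hypothetical binding:
 * a storage buffer at binding offset 64 with element size 16 and dynamic
 * index i resolves to
 *
 *   desc_ptr + 64 + i * 16
 *
 * cast to a v4i32 pointer and marked uniform. Dynamic UBO/SSBO descriptors
 * instead live right after the push constants:
 *
 *   push_constants + push_constant_size + 16 * dynamic_offset_idx
 */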
1123
1124
1125 /* The offchip buffer layout for TCS->TES is
1126 *
1127 * - attribute 0 of patch 0 vertex 0
1128 * - attribute 0 of patch 0 vertex 1
1129 * - attribute 0 of patch 0 vertex 2
1130 * ...
1131 * - attribute 0 of patch 1 vertex 0
1132 * - attribute 0 of patch 1 vertex 1
1133 * ...
1134 * - attribute 1 of patch 0 vertex 0
1135 * - attribute 1 of patch 0 vertex 1
1136 * ...
1137 * - per patch attribute 0 of patch 0
1138 * - per patch attribute 0 of patch 1
1139 * ...
1140 *
1141 * Note that every attribute has 4 components.
1142 */
1143 static LLVMValueRef get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
1144 LLVMValueRef vertex_index,
1145 LLVMValueRef param_index)
1146 {
1147 LLVMValueRef base_addr, vertices_per_patch, num_patches;
1148 LLVMValueRef param_stride, constant16;
1149 LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
1150
1151 vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch, false);
1152 num_patches = ac_unpack_param(&ctx->ac, ctx->tcs_offchip_layout, 0, 9);
1153
1154 constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
1155 if (vertex_index) {
1156 base_addr = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
1157 vertices_per_patch, "");
1158
1159 base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
1160 vertex_index, "");
1161
1162 param_stride = LLVMBuildMul(ctx->ac.builder, vertices_per_patch,
1163 num_patches, "");
1164 } else {
1165 base_addr = rel_patch_id;
1166 param_stride = num_patches;
1167 }
1168
1169 base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
1170 LLVMBuildMul(ctx->ac.builder, param_index,
1171 param_stride, ""), "");
1172
1173 base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");
1174
1175 if (!vertex_index) {
1176 LLVMValueRef patch_data_offset =
1177 ac_unpack_param(&ctx->ac, ctx->tcs_offchip_layout, 16, 16);
1178
1179 base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
1180 patch_data_offset, "");
1181 }
1182 return base_addr;
1183 }
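/*
 * The computation above, written out as one formula (all offsets in bytes;
 * every attribute occupies 16 bytes):
 *
 *   per-vertex: addr = 16 * (rel_patch_id * verts_per_patch + vertex_index
 *                            + param_index * verts_per_patch * num_patches)
 *   per-patch:  addr = 16 * (rel_patch_id + param_index * num_patches)
 *                      + patch_data_offset
 *
 * where num_patches comes from tcs_offchip_layout[8:0] and
 * patch_data_offset from tcs_offchip_layout[31:16].
 */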
1184
1185 static LLVMValueRef get_tcs_tes_buffer_address_params(struct radv_shader_context *ctx,
1186 unsigned param,
1187 unsigned const_index,
1188 bool is_compact,
1189 LLVMValueRef vertex_index,
1190 LLVMValueRef indir_index)
1191 {
1192 LLVMValueRef param_index;
1193
1194 if (indir_index)
1195 param_index = LLVMBuildAdd(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, param, false),
1196 indir_index, "");
1197 else {
1198 if (const_index && !is_compact)
1199 param += const_index;
1200 param_index = LLVMConstInt(ctx->ac.i32, param, false);
1201 }
1202 return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
1203 }
1204
1205 static void
1206 mark_tess_output(struct radv_shader_context *ctx,
1207 bool is_patch, uint32_t param)
1208
1209 {
1210 if (is_patch) {
1211 ctx->tess_patch_outputs_written |= (1ull << param);
1212 } else
1213 ctx->tess_outputs_written |= (1ull << param);
1214 }
1215
1216 static LLVMValueRef
1217 get_dw_address(struct radv_shader_context *ctx,
1218 LLVMValueRef dw_addr,
1219 unsigned param,
1220 unsigned const_index,
1221 bool compact_const_index,
1222 LLVMValueRef vertex_index,
1223 LLVMValueRef stride,
1224 LLVMValueRef indir_index)
1225
1226 {
1227
1228 if (vertex_index) {
1229 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1230 LLVMBuildMul(ctx->ac.builder,
1231 vertex_index,
1232 stride, ""), "");
1233 }
1234
1235 if (indir_index)
1236 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1237 LLVMBuildMul(ctx->ac.builder, indir_index,
1238 LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
1239 else if (const_index && !compact_const_index)
1240 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1241 LLVMConstInt(ctx->ac.i32, const_index, false), "");
1242
1243 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1244 LLVMConstInt(ctx->ac.i32, param * 4, false), "");
1245
1246 if (const_index && compact_const_index)
1247 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1248 LLVMConstInt(ctx->ac.i32, const_index, false), "");
1249 return dw_addr;
1250 }
1251
1252 static LLVMValueRef
1253 load_tcs_varyings(struct ac_shader_abi *abi,
1254 LLVMTypeRef type,
1255 LLVMValueRef vertex_index,
1256 LLVMValueRef indir_index,
1257 unsigned const_index,
1258 unsigned location,
1259 unsigned driver_location,
1260 unsigned component,
1261 unsigned num_components,
1262 bool is_patch,
1263 bool is_compact,
1264 bool load_input)
1265 {
1266 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1267 LLVMValueRef dw_addr, stride;
1268 LLVMValueRef value[4], result;
1269 unsigned param = shader_io_get_unique_index(location);
1270
1271 if (load_input) {
1272 stride = ac_unpack_param(&ctx->ac, ctx->tcs_in_layout, 13, 8);
1273 dw_addr = get_tcs_in_current_patch_offset(ctx);
1274 } else {
1275 if (!is_patch) {
1276 stride = get_tcs_out_vertex_stride(ctx);
1277 dw_addr = get_tcs_out_current_patch_offset(ctx);
1278 } else {
1279 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1280 stride = NULL;
1281 }
1282 }
1283
1284 dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
1285 indir_index);
1286
1287 for (unsigned i = 0; i < num_components + component; i++) {
1288 value[i] = ac_lds_load(&ctx->ac, dw_addr);
1289 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1290 ctx->ac.i32_1, "");
1291 }
1292 result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
1293 return result;
1294 }
1295
1296 static void
1297 store_tcs_output(struct ac_shader_abi *abi,
1298 LLVMValueRef vertex_index,
1299 LLVMValueRef param_index,
1300 unsigned const_index,
1301 unsigned location,
1302 unsigned driver_location,
1303 LLVMValueRef src,
1304 unsigned component,
1305 bool is_patch,
1306 bool is_compact,
1307 unsigned writemask)
1308 {
1309 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1310 LLVMValueRef dw_addr;
1311 LLVMValueRef stride = NULL;
1312 LLVMValueRef buf_addr = NULL;
1313 unsigned param;
1314 bool store_lds = true;
1315
1316 if (is_patch) {
1317 if (!(ctx->tcs_patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
1318 store_lds = false;
1319 } else {
1320 if (!(ctx->tcs_outputs_read & (1ULL << location)))
1321 store_lds = false;
1322 }
1323
1324 param = shader_io_get_unique_index(location);
1325 if (location == VARYING_SLOT_CLIP_DIST0 &&
1326 is_compact && const_index > 3) {
1327 const_index -= 3;
1328 param++;
1329 }
1330
1331 if (!is_patch) {
1332 stride = get_tcs_out_vertex_stride(ctx);
1333 dw_addr = get_tcs_out_current_patch_offset(ctx);
1334 } else {
1335 dw_addr = get_tcs_out_current_patch_data_offset(ctx);
1336 }
1337
1338 mark_tess_output(ctx, is_patch, param);
1339
1340 dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
1341 param_index);
1342 buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
1343 vertex_index, param_index);
1344
1345 bool is_tess_factor = false;
1346 if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
1347 location == VARYING_SLOT_TESS_LEVEL_OUTER)
1348 is_tess_factor = true;
1349
1350 unsigned base = is_compact ? const_index : 0;
1351 for (unsigned chan = 0; chan < 8; chan++) {
1352 if (!(writemask & (1 << chan)))
1353 continue;
1354 LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
1355
1356 if (store_lds || is_tess_factor) {
1357 LLVMValueRef dw_addr_chan =
1358 LLVMBuildAdd(ctx->ac.builder, dw_addr,
1359 LLVMConstInt(ctx->ac.i32, chan, false), "");
1360 ac_lds_store(&ctx->ac, dw_addr_chan, value);
1361 }
1362
1363 if (!is_tess_factor && writemask != 0xF)
1364 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
1365 buf_addr, ctx->oc_lds,
1366 4 * (base + chan), 1, 0, true, false);
1367 }
1368
1369 if (writemask == 0xF) {
1370 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
1371 buf_addr, ctx->oc_lds,
1372 (base * 4), 1, 0, true, false);
1373 }
1374 }
1375
1376 static LLVMValueRef
1377 load_tes_input(struct ac_shader_abi *abi,
1378 LLVMTypeRef type,
1379 LLVMValueRef vertex_index,
1380 LLVMValueRef param_index,
1381 unsigned const_index,
1382 unsigned location,
1383 unsigned driver_location,
1384 unsigned component,
1385 unsigned num_components,
1386 bool is_patch,
1387 bool is_compact,
1388 bool load_input)
1389 {
1390 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1391 LLVMValueRef buf_addr;
1392 LLVMValueRef result;
1393 unsigned param = shader_io_get_unique_index(location);
1394
1395 if (location == VARYING_SLOT_CLIP_DIST0 && is_compact && const_index > 3) {
1396 const_index -= 3;
1397 param++;
1398 }
1399
1400 buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
1401 is_compact, vertex_index, param_index);
1402
1403 LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
1404 buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");
1405
1406 result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
1407 buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, 1, 0, true, false);
1408 result = ac_trim_vector(&ctx->ac, result, num_components);
1409 return result;
1410 }
1411
1412 static LLVMValueRef
1413 load_gs_input(struct ac_shader_abi *abi,
1414 unsigned location,
1415 unsigned driver_location,
1416 unsigned component,
1417 unsigned num_components,
1418 unsigned vertex_index,
1419 unsigned const_index,
1420 LLVMTypeRef type)
1421 {
1422 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1423 LLVMValueRef vtx_offset;
1424 unsigned param, vtx_offset_param;
1425 LLVMValueRef value[4], result;
1426
1427 vtx_offset_param = vertex_index;
1428 assert(vtx_offset_param < 6);
1429 vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
1430 LLVMConstInt(ctx->ac.i32, 4, false), "");
1431
1432 param = shader_io_get_unique_index(location);
1433
1434 for (unsigned i = component; i < num_components + component; i++) {
1435 if (ctx->ac.chip_class >= GFX9) {
1436 LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
1437 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
1438 LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
1439 value[i] = ac_lds_load(&ctx->ac, dw_addr);
1440 } else {
1441 LLVMValueRef soffset =
1442 LLVMConstInt(ctx->ac.i32,
1443 (param * 4 + i + const_index) * 256,
1444 false);
1445
1446 value[i] = ac_build_buffer_load(&ctx->ac,
1447 ctx->esgs_ring, 1,
1448 ctx->ac.i32_0,
1449 vtx_offset, soffset,
1450 0, 1, 0, true, false);
1451
1452 value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i],
1453 type, "");
1454 }
1455 }
1456 result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
1457 result = ac_to_integer(&ctx->ac, result);
1458 return result;
1459 }
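/*
 * Note on the two paths above (an inference from the addressing, not
 * authoritative): on GFX9+ the ES outputs sit in LDS and are reached
 * directly via the vertex offset plus a dword index (param * 4 +
 * component). On older chips they sit in the memory-backed ESGS ring,
 * where each output dword appears to occupy a 256-byte slot
 * (64 lanes * 4 bytes), hence the "* 256" scaling of soffset.
 */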
1460
1461
1462 static void radv_emit_kill(struct ac_shader_abi *abi, LLVMValueRef visible)
1463 {
1464 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1465 ac_build_kill_if_false(&ctx->ac, visible);
1466 }
1467
1468 static LLVMValueRef lookup_interp_param(struct ac_shader_abi *abi,
1469 enum glsl_interp_mode interp, unsigned location)
1470 {
1471 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1472
1473 switch (interp) {
1474 case INTERP_MODE_FLAT:
1475 default:
1476 return NULL;
1477 case INTERP_MODE_SMOOTH:
1478 case INTERP_MODE_NONE:
1479 if (location == INTERP_CENTER)
1480 return ctx->persp_center;
1481 else if (location == INTERP_CENTROID)
1482 return ctx->persp_centroid;
1483 else if (location == INTERP_SAMPLE)
1484 return ctx->persp_sample;
1485 break;
1486 case INTERP_MODE_NOPERSPECTIVE:
1487 if (location == INTERP_CENTER)
1488 return ctx->linear_center;
1489 else if (location == INTERP_CENTROID)
1490 return ctx->linear_centroid;
1491 else if (location == INTERP_SAMPLE)
1492 return ctx->linear_sample;
1493 break;
1494 }
1495 return NULL;
1496 }
1497
1498 static LLVMValueRef load_sample_position(struct ac_shader_abi *abi,
1499 LLVMValueRef sample_id)
1500 {
1501 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1502
1503 LLVMValueRef result;
1504 LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false));
1505
1506 ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
1507 ac_array_in_const_addr_space(ctx->ac.v2f32), "");
1508
1509 sample_id = LLVMBuildAdd(ctx->ac.builder, sample_id, ctx->sample_pos_offset, "");
1510 result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);
1511
1512 return result;
1513 }
1514
1515
1516 static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
1517 {
1518 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1519 uint8_t log2_ps_iter_samples = ctx->shader_info->info.ps.force_persample ?
1520 ctx->options->key.fs.log2_num_samples :
1521 ctx->options->key.fs.log2_ps_iter_samples;
1522
1523 /* The bit pattern matches that used by fixed function fragment
1524 * processing. */
1525 static const uint16_t ps_iter_masks[] = {
1526 0xffff, /* not used */
1527 0x5555,
1528 0x1111,
1529 0x0101,
1530 0x0001,
1531 };
1532 assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));
1533
1534 uint32_t ps_iter_mask = ps_iter_masks[log2_ps_iter_samples];
1535
1536 LLVMValueRef result, sample_id;
1537 sample_id = ac_unpack_param(&ctx->ac, abi->ancillary, 8, 4);
1538 sample_id = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, ps_iter_mask, false), sample_id, "");
1539 result = LLVMBuildAnd(ctx->ac.builder, sample_id, abi->sample_coverage, "");
1540 return result;
1541 }
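/*
 * Worked example of the masking above: with log2_ps_iter_samples = 2 the
 * per-pass pattern is 0x1111 (every 4th sample). For the invocation
 * shading sample_id = 1 this becomes 0x1111 << 1 = 0x2222, which is then
 * ANDed with the hardware coverage, so gl_SampleMaskIn only reports the
 * samples this pass is responsible for.
 */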
1542
1543
1544 static void
1545 visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
1546 {
1547 LLVMValueRef gs_next_vertex;
1548 LLVMValueRef can_emit;
1549 int idx;
1550 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1551
1552 assert(stream == 0);
1553
1554 /* Write vertex attribute values to GSVS ring */
1555 gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
1556 ctx->gs_next_vertex,
1557 "");
1558
1559 /* If this thread has already emitted the declared maximum number of
1560 * vertices, kill it: excessive vertex emissions are not supposed to
1561 * have any effect, and GS threads have no externally observable
1562 * effects other than emitting vertices.
1563 */
1564 can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
1565 LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
1566 ac_build_kill_if_false(&ctx->ac, can_emit);
1567
1568 	/* Loop over the declared outputs and write each to the GSVS ring. */
1569 idx = 0;
1570 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
1571 LLVMValueRef *out_ptr = &addrs[i * 4];
1572 int length = 4;
1573 int slot = idx;
1574 int slot_inc = 1;
1575
1576 if (!(ctx->output_mask & (1ull << i)))
1577 continue;
1578
1579 if (i == VARYING_SLOT_CLIP_DIST0) {
1580 /* pack clip and cull into a single set of slots */
1581 length = ctx->num_output_clips + ctx->num_output_culls;
1582 if (length > 4)
1583 slot_inc = 2;
1584 }
1585 for (unsigned j = 0; j < length; j++) {
1586 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
1587 out_ptr[j], "");
1588 LLVMValueRef voffset = LLVMConstInt(ctx->ac.i32, (slot * 4 + j) * ctx->gs_max_out_vertices, false);
1589 voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
1590 voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");
1591
1592 out_val = LLVMBuildBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");
1593
1594 ac_build_buffer_store_dword(&ctx->ac, ctx->gsvs_ring,
1595 out_val, 1,
1596 voffset, ctx->gs2vs_offset, 0,
1597 1, 1, true, true);
1598 }
1599 idx += slot_inc;
1600 }
1601
1602 gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
1603 ctx->ac.i32_1, "");
1604 LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex);
1605
1606 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (0 << 8), ctx->gs_wave_id);
1607 }
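/*
 * On the GSVS ring addressing above: component j of output slot "slot" for
 * the vertex being emitted lands at dword
 *
 *   (slot * 4 + j) * gs_max_out_vertices + gs_next_vertex
 *
 * past gs2vs_offset, i.e. the ring is component-major so the GS copy
 * shader can later read one component for all vertices contiguously.
 */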
1608
1609 static void
1610 visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
1611 {
1612 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1613 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
1614 }
1615
1616 static LLVMValueRef
1617 load_tess_coord(struct ac_shader_abi *abi)
1618 {
1619 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1620
1621 LLVMValueRef coord[4] = {
1622 ctx->tes_u,
1623 ctx->tes_v,
1624 ctx->ac.f32_0,
1625 ctx->ac.f32_0,
1626 };
1627
1628 if (ctx->tes_primitive_mode == GL_TRIANGLES)
1629 coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
1630 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");
1631
1632 return ac_build_gather_values(&ctx->ac, coord, 3);
1633 }
1634
1635 static LLVMValueRef
1636 load_patch_vertices_in(struct ac_shader_abi *abi)
1637 {
1638 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1639 return LLVMConstInt(ctx->ac.i32, ctx->options->key.tcs.input_vertices, false);
1640 }
1641
1642
1643 static LLVMValueRef radv_load_base_vertex(struct ac_shader_abi *abi)
1644 {
1645 return abi->base_vertex;
1646 }
1647
1648 static LLVMValueRef radv_load_ssbo(struct ac_shader_abi *abi,
1649 LLVMValueRef buffer_ptr, bool write)
1650 {
1651 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1652 LLVMValueRef result;
1653
1654 LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
1655
1656 result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
1657 LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);
1658
1659 return result;
1660 }
1661
1662 static LLVMValueRef radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer_ptr)
1663 {
1664 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1665 LLVMValueRef result;
1666
1667 LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
1668
1669 result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
1670 LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);
1671
1672 return result;
1673 }
1674
1675 static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
1676 unsigned descriptor_set,
1677 unsigned base_index,
1678 unsigned constant_index,
1679 LLVMValueRef index,
1680 enum ac_descriptor_type desc_type,
1681 bool image, bool write)
1682 {
1683 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
1684 LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
1685 struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
1686 struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
1687 unsigned offset = binding->offset;
1688 unsigned stride = binding->size;
1689 unsigned type_size;
1690 LLVMBuilderRef builder = ctx->ac.builder;
1691 LLVMTypeRef type;
1692
1693 assert(base_index < layout->binding_count);
1694
1695 switch (desc_type) {
1696 case AC_DESC_IMAGE:
1697 type = ctx->ac.v8i32;
1698 type_size = 32;
1699 break;
1700 case AC_DESC_FMASK:
1701 type = ctx->ac.v8i32;
1702 offset += 32;
1703 type_size = 32;
1704 break;
1705 case AC_DESC_SAMPLER:
1706 type = ctx->ac.v4i32;
1707 if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
1708 offset += 64;
1709
1710 type_size = 16;
1711 break;
1712 case AC_DESC_BUFFER:
1713 type = ctx->ac.v4i32;
1714 type_size = 16;
1715 break;
1716 default:
1717 unreachable("invalid desc_type\n");
1718 }
1719
1720 offset += constant_index * stride;
1721
1722 if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
1723 (!index || binding->immutable_samplers_equal)) {
1724 if (binding->immutable_samplers_equal)
1725 constant_index = 0;
1726
1727 const uint32_t *samplers = radv_immutable_samplers(layout, binding);
1728
1729 LLVMValueRef constants[] = {
1730 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
1731 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
1732 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
1733 LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
1734 };
1735 return ac_build_gather_values(&ctx->ac, constants, 4);
1736 }
1737
1738 assert(stride % type_size == 0);
1739
1740 if (!index)
1741 index = ctx->ac.i32_0;
1742
1743 index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");
1744
1745 list = ac_build_gep0(&ctx->ac, list, LLVMConstInt(ctx->ac.i32, offset, 0));
1746 list = LLVMBuildPointerCast(builder, list, ac_array_in_const_addr_space(type), "");
1747
1748 return ac_build_load_to_sgpr(&ctx->ac, list, index);
1749 }
1750
1751
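/* Declare a VS input: for each attribute slot, fetch the vertex-buffer
 * descriptor and do a typed buffer load. Instanced attributes are indexed
 * with instance_id + start_instance (which keeps instance_id live, hence
 * the vgpr_comp_cnt bump); all others use vertex_id + base_vertex. The
 * result is expanded to a vec4 and stored per channel as integers.
 */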
1752 static void
1753 handle_vs_input_decl(struct radv_shader_context *ctx,
1754 struct nir_variable *variable)
1755 {
1756 LLVMValueRef t_list_ptr = ctx->vertex_buffers;
1757 LLVMValueRef t_offset;
1758 LLVMValueRef t_list;
1759 LLVMValueRef input;
1760 LLVMValueRef buffer_index;
1761 int index = variable->data.location - VERT_ATTRIB_GENERIC0;
1762 int idx = variable->data.location;
1763 unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
1764 uint8_t input_usage_mask =
1765 ctx->shader_info->info.vs.input_usage_mask[variable->data.location];
1766 unsigned num_channels = util_last_bit(input_usage_mask);
1767
1768 variable->data.driver_location = idx * 4;
1769
1770 for (unsigned i = 0; i < attrib_count; ++i, ++idx) {
1771 if (ctx->options->key.vs.instance_rate_inputs & (1u << (index + i))) {
1772 buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.instance_id,
1773 ctx->abi.start_instance, "");
1774 if (ctx->options->key.vs.as_ls) {
1775 ctx->shader_info->vs.vgpr_comp_cnt =
1776 MAX2(2, ctx->shader_info->vs.vgpr_comp_cnt);
1777 } else {
1778 ctx->shader_info->vs.vgpr_comp_cnt =
1779 MAX2(1, ctx->shader_info->vs.vgpr_comp_cnt);
1780 }
1781 } else
1782 buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
1783 ctx->abi.base_vertex, "");
1784 t_offset = LLVMConstInt(ctx->ac.i32, index + i, false);
1785
1786 t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);
1787
1788 input = ac_build_buffer_load_format(&ctx->ac, t_list,
1789 buffer_index,
1790 ctx->ac.i32_0,
1791 num_channels, false, true);
1792
1793 input = ac_build_expand_to_vec4(&ctx->ac, input, num_channels);
1794
1795 for (unsigned chan = 0; chan < 4; chan++) {
1796 LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
1797 ctx->inputs[radeon_llvm_reg_index_soa(idx, chan)] =
1798 ac_to_integer(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder,
1799 input, llvm_chan, ""));
1800 }
1801 }
1802 }
1803
1804 static void interp_fs_input(struct radv_shader_context *ctx,
1805 unsigned attr,
1806 LLVMValueRef interp_param,
1807 LLVMValueRef prim_mask,
1808 LLVMValueRef result[4])
1809 {
1810 LLVMValueRef attr_number;
1811 unsigned chan;
1812 LLVMValueRef i, j;
1813 bool interp = interp_param != NULL;
1814
1815 attr_number = LLVMConstInt(ctx->ac.i32, attr, false);
1816
1817 /* fs.constant returns the param from the middle vertex, so it's not
1818 * really useful for flat shading. It's meant to be used for custom
1819 * interpolation (but the intrinsic can't fetch from the other two
1820 * vertices).
1821 *
1822 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
1823 * to do the right thing. The only reason we use fs.constant is that
1824 * fs.interp cannot be used on integers, because they can be equal
1825 * to NaN.
1826 */
1827 if (interp) {
1828 interp_param = LLVMBuildBitCast(ctx->ac.builder, interp_param,
1829 ctx->ac.v2f32, "");
1830
1831 i = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
1832 ctx->ac.i32_0, "");
1833 j = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
1834 ctx->ac.i32_1, "");
1835 }
1836
1837 for (chan = 0; chan < 4; chan++) {
1838 LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
1839
1840 if (interp) {
1841 result[chan] = ac_build_fs_interp(&ctx->ac,
1842 llvm_chan,
1843 attr_number,
1844 prim_mask, i, j);
1845 } else {
1846 result[chan] = ac_build_fs_interp_mov(&ctx->ac,
1847 LLVMConstInt(ctx->ac.i32, 2, false),
1848 llvm_chan,
1849 attr_number,
1850 prim_mask);
1851 }
1852 }
1853 }
1854
1855 static void
1856 handle_fs_input_decl(struct radv_shader_context *ctx,
1857 struct nir_variable *variable)
1858 {
1859 int idx = variable->data.location;
1860 unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
1861 LLVMValueRef interp;
1862
1863 variable->data.driver_location = idx * 4;
1864 ctx->input_mask |= ((1ull << attrib_count) - 1) << variable->data.location;
1865
1866 if (glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_FLOAT) {
1867 unsigned interp_type;
1868 if (variable->data.sample)
1869 interp_type = INTERP_SAMPLE;
1870 else if (variable->data.centroid)
1871 interp_type = INTERP_CENTROID;
1872 else
1873 interp_type = INTERP_CENTER;
1874
1875 interp = lookup_interp_param(&ctx->abi, variable->data.interpolation, interp_type);
1876 } else
1877 interp = NULL;
1878
1879 for (unsigned i = 0; i < attrib_count; ++i)
1880 ctx->inputs[radeon_llvm_reg_index_soa(idx + i, 0)] = interp;
1881
1882 }
1883
1884 static void
1885 handle_vs_inputs(struct radv_shader_context *ctx,
1886 struct nir_shader *nir) {
1887 nir_foreach_variable(variable, &nir->inputs)
1888 handle_vs_input_decl(ctx, variable);
1889 }
1890
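/* If a multisampled fragment shader uses both center and centroid
 * interpolation, substitute center for centroid whenever the sign bit of
 * PRIM_MASK is set. The hw sets that bit when all quads in the wave are
 * fully covered; centroid then equals center, so the centroid computation
 * can be skipped.
 */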
1891 static void
1892 prepare_interp_optimize(struct radv_shader_context *ctx,
1893 struct nir_shader *nir)
1894 {
1895 if (!ctx->options->key.fs.multisample)
1896 return;
1897
1898 bool uses_center = false;
1899 bool uses_centroid = false;
1900 nir_foreach_variable(variable, &nir->inputs) {
1901 if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
1902 variable->data.sample)
1903 continue;
1904
1905 if (variable->data.centroid)
1906 uses_centroid = true;
1907 else
1908 uses_center = true;
1909 }
1910
1911 if (uses_center && uses_centroid) {
1912 LLVMValueRef sel = LLVMBuildICmp(ctx->ac.builder, LLVMIntSLT, ctx->abi.prim_mask, ctx->ac.i32_0, "");
1913 ctx->persp_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->persp_center, ctx->persp_centroid, "");
1914 ctx->linear_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->linear_center, ctx->linear_centroid, "");
1915 }
1916 }
1917
1918 static void
1919 handle_fs_inputs(struct radv_shader_context *ctx,
1920 struct nir_shader *nir)
1921 {
1922 prepare_interp_optimize(ctx, nir);
1923
1924 nir_foreach_variable(variable, &nir->inputs)
1925 handle_fs_input_decl(ctx, variable);
1926
1927 unsigned index = 0;
1928
1929 if (ctx->shader_info->info.ps.uses_input_attachments ||
1930 ctx->shader_info->info.needs_multiview_view_index)
1931 ctx->input_mask |= 1ull << VARYING_SLOT_LAYER;
1932
1933 for (unsigned i = 0; i < RADEON_LLVM_MAX_INPUTS; ++i) {
1934 LLVMValueRef interp_param;
1935 		LLVMValueRef *inputs = ctx->inputs + radeon_llvm_reg_index_soa(i, 0);
1936
1937 if (!(ctx->input_mask & (1ull << i)))
1938 continue;
1939
1940 if (i >= VARYING_SLOT_VAR0 || i == VARYING_SLOT_PNTC ||
1941 i == VARYING_SLOT_PRIMITIVE_ID || i == VARYING_SLOT_LAYER) {
1942 interp_param = *inputs;
1943 interp_fs_input(ctx, index, interp_param, ctx->abi.prim_mask,
1944 inputs);
1945
1946 if (!interp_param)
1947 ctx->shader_info->fs.flat_shaded_mask |= 1u << index;
1948 ++index;
1949 } else if (i == VARYING_SLOT_POS) {
1950 			for (int j = 0; j < 3; ++j)
1951 				inputs[j] = ctx->abi.frag_pos[j];
1952
1953 inputs[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
1954 ctx->abi.frag_pos[3]);
1955 }
1956 }
1957 ctx->shader_info->fs.num_interp = index;
1958 ctx->shader_info->fs.input_mask = ctx->input_mask >> VARYING_SLOT_VAR0;
1959
1960 if (ctx->shader_info->info.needs_multiview_view_index)
1961 ctx->abi.view_index = ctx->inputs[radeon_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
1962 }
1963
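/* Record an output declaration in ctx->output_mask. Clip and cull
 * distances share the VARYING_SLOT_CLIP_DIST0 location: their combined
 * size decides whether they occupy one or two attribute slots, and the
 * per-stage clip/cull masks are filled in here for VS and TES.
 */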
1964 static void
1965 scan_shader_output_decl(struct radv_shader_context *ctx,
1966 struct nir_variable *variable,
1967 struct nir_shader *shader,
1968 gl_shader_stage stage)
1969 {
1970 int idx = variable->data.location + variable->data.index;
1971 unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
1972 uint64_t mask_attribs;
1973
1974 variable->data.driver_location = idx * 4;
1975
1976 	/* tess ctrl has its own load/store paths for outputs */
1977 if (stage == MESA_SHADER_TESS_CTRL)
1978 return;
1979
1980 mask_attribs = ((1ull << attrib_count) - 1) << idx;
1981 if (stage == MESA_SHADER_VERTEX ||
1982 stage == MESA_SHADER_TESS_EVAL ||
1983 stage == MESA_SHADER_GEOMETRY) {
1984 if (idx == VARYING_SLOT_CLIP_DIST0) {
1985 int length = shader->info.clip_distance_array_size +
1986 shader->info.cull_distance_array_size;
1987 if (stage == MESA_SHADER_VERTEX) {
1988 ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
1989 ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
1990 }
1991 if (stage == MESA_SHADER_TESS_EVAL) {
1992 ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
1993 ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
1994 }
1995
1996 if (length > 4)
1997 attrib_count = 2;
1998 else
1999 attrib_count = 1;
2000 mask_attribs = 1ull << idx;
2001 }
2002 }
2003
2004 ctx->output_mask |= mask_attribs;
2005 }
2006
2007
2008 /* Initialize arguments for the shader export intrinsic */
2009 static void
2010 si_llvm_init_export_args(struct radv_shader_context *ctx,
2011 LLVMValueRef *values,
2012 unsigned enabled_channels,
2013 unsigned target,
2014 struct ac_export_args *args)
2015 {
2016 /* Specify the channels that are enabled. */
2017 args->enabled_channels = enabled_channels;
2018
2019 /* Specify whether the EXEC mask represents the valid mask */
2020 args->valid_mask = 0;
2021
2022 /* Specify whether this is the last export */
2023 args->done = 0;
2024
2025 /* Specify the target we are exporting */
2026 args->target = target;
2027
2028 args->compr = false;
2029 args->out[0] = LLVMGetUndef(ctx->ac.f32);
2030 args->out[1] = LLVMGetUndef(ctx->ac.f32);
2031 args->out[2] = LLVMGetUndef(ctx->ac.f32);
2032 args->out[3] = LLVMGetUndef(ctx->ac.f32);
2033
2034 if (ctx->stage == MESA_SHADER_FRAGMENT && target >= V_008DFC_SQ_EXP_MRT) {
2035 unsigned index = target - V_008DFC_SQ_EXP_MRT;
2036 unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
2037 bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
2038 bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;
2039 unsigned chan;
2040
2041 LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
2042 LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
2043 unsigned bits, bool hi) = NULL;
2044
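		/* For the packed 16-bit formats below, out[0] carries the
		 * packed XY pair and out[1] the packed ZW pair;
		 * enabled_channels 0x5 (bits 0 and 2) enables both dwords of
		 * the compressed export.
		 */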
2045 		switch (col_format) {
2046 case V_028714_SPI_SHADER_ZERO:
2047 args->enabled_channels = 0; /* writemask */
2048 args->target = V_008DFC_SQ_EXP_NULL;
2049 break;
2050
2051 case V_028714_SPI_SHADER_32_R:
2052 args->enabled_channels = 1;
2053 args->out[0] = values[0];
2054 break;
2055
2056 case V_028714_SPI_SHADER_32_GR:
2057 args->enabled_channels = 0x3;
2058 args->out[0] = values[0];
2059 args->out[1] = values[1];
2060 break;
2061
2062 case V_028714_SPI_SHADER_32_AR:
2063 args->enabled_channels = 0x9;
2064 args->out[0] = values[0];
2065 args->out[3] = values[3];
2066 break;
2067
2068 case V_028714_SPI_SHADER_FP16_ABGR:
2069 args->enabled_channels = 0x5;
2070 packf = ac_build_cvt_pkrtz_f16;
2071 break;
2072
2073 case V_028714_SPI_SHADER_UNORM16_ABGR:
2074 args->enabled_channels = 0x5;
2075 packf = ac_build_cvt_pknorm_u16;
2076 break;
2077
2078 case V_028714_SPI_SHADER_SNORM16_ABGR:
2079 args->enabled_channels = 0x5;
2080 packf = ac_build_cvt_pknorm_i16;
2081 break;
2082
2083 case V_028714_SPI_SHADER_UINT16_ABGR:
2084 args->enabled_channels = 0x5;
2085 packi = ac_build_cvt_pk_u16;
2086 break;
2087
2088 case V_028714_SPI_SHADER_SINT16_ABGR:
2089 args->enabled_channels = 0x5;
2090 packi = ac_build_cvt_pk_i16;
2091 break;
2092
2093 default:
2094 case V_028714_SPI_SHADER_32_ABGR:
2095 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
2096 break;
2097 }
2098
2099 /* Pack f16 or norm_i16/u16. */
2100 if (packf) {
2101 for (chan = 0; chan < 2; chan++) {
2102 LLVMValueRef pack_args[2] = {
2103 values[2 * chan],
2104 values[2 * chan + 1]
2105 };
2106 LLVMValueRef packed;
2107
2108 packed = packf(&ctx->ac, pack_args);
2109 args->out[chan] = ac_to_float(&ctx->ac, packed);
2110 }
2111 args->compr = 1; /* COMPR flag */
2112 }
2113
2114 /* Pack i16/u16. */
2115 if (packi) {
2116 for (chan = 0; chan < 2; chan++) {
2117 LLVMValueRef pack_args[2] = {
2118 ac_to_integer(&ctx->ac, values[2 * chan]),
2119 ac_to_integer(&ctx->ac, values[2 * chan + 1])
2120 };
2121 LLVMValueRef packed;
2122
2123 packed = packi(&ctx->ac, pack_args,
2124 is_int8 ? 8 : is_int10 ? 10 : 16,
2125 chan == 1);
2126 args->out[chan] = ac_to_float(&ctx->ac, packed);
2127 }
2128 args->compr = 1; /* COMPR flag */
2129 }
2130 return;
2131 }
2132
2133 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
2134
2135 for (unsigned i = 0; i < 4; ++i) {
2136 if (!(args->enabled_channels & (1 << i)))
2137 continue;
2138
2139 args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
2140 }
2141 }
2142
2143 static void
2144 radv_export_param(struct radv_shader_context *ctx, unsigned index,
2145 LLVMValueRef *values, unsigned enabled_channels)
2146 {
2147 struct ac_export_args args;
2148
2149 si_llvm_init_export_args(ctx, values, enabled_channels,
2150 V_008DFC_SQ_EXP_PARAM + index, &args);
2151 ac_build_export(&ctx->ac, &args);
2152 }
2153
2154 static LLVMValueRef
2155 radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
2156 {
2157 LLVMValueRef output =
2158 ctx->abi.outputs[radeon_llvm_reg_index_soa(index, chan)];
2159
2160 return LLVMBuildLoad(ctx->ac.builder, output, "");
2161 }
2162
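/* Emit the VS/TES position and parameter exports. Position slot 0 is the
 * position itself, slot 1 packs point size / layer / viewport index, and
 * slots 2-3 hold the clip/cull distances; the slots actually written are
 * compacted and the last one gets the DONE bit. Each remaining varying
 * then gets a parameter export, with its offset recorded in outinfo.
 */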
2163 static void
2164 handle_vs_outputs_post(struct radv_shader_context *ctx,
2165 bool export_prim_id,
2166 struct ac_vs_output_info *outinfo)
2167 {
2168 uint32_t param_count = 0;
2169 unsigned target;
2170 unsigned pos_idx, num_pos_exports = 0;
2171 struct ac_export_args args, pos_args[4] = {};
2172 LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_index_value = NULL;
2173 int i;
2174
2175 if (ctx->options->key.has_multiview_view_index) {
2176 LLVMValueRef* tmp_out = &ctx->abi.outputs[radeon_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
2177 		if (!*tmp_out) {
2178 			for (unsigned i = 0; i < 4; ++i)
2179 ctx->abi.outputs[radeon_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
2180 ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
2181 }
2182
2183 LLVMBuildStore(ctx->ac.builder, ac_to_float(&ctx->ac, ctx->abi.view_index), *tmp_out);
2184 ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
2185 }
2186
2187 memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
2188 sizeof(outinfo->vs_output_param_offset));
2189
2190 if (ctx->output_mask & (1ull << VARYING_SLOT_CLIP_DIST0)) {
2191 LLVMValueRef slots[8];
2192 unsigned j;
2193
2194 if (outinfo->cull_dist_mask)
2195 outinfo->cull_dist_mask <<= ctx->num_output_clips;
2196
2197 i = VARYING_SLOT_CLIP_DIST0;
2198 for (j = 0; j < ctx->num_output_clips + ctx->num_output_culls; j++)
2199 slots[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
2200
2201 for (i = ctx->num_output_clips + ctx->num_output_culls; i < 8; i++)
2202 slots[i] = LLVMGetUndef(ctx->ac.f32);
2203
2204 if (ctx->num_output_clips + ctx->num_output_culls > 4) {
2205 target = V_008DFC_SQ_EXP_POS + 3;
2206 si_llvm_init_export_args(ctx, &slots[4], 0xf, target, &args);
2207 memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
2208 &args, sizeof(args));
2209 }
2210
2211 target = V_008DFC_SQ_EXP_POS + 2;
2212 si_llvm_init_export_args(ctx, &slots[0], 0xf, target, &args);
2213 memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
2214 &args, sizeof(args));
2215
2216 }
2217
2218 LLVMValueRef pos_values[4] = {ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_1};
2219 if (ctx->output_mask & (1ull << VARYING_SLOT_POS)) {
2220 for (unsigned j = 0; j < 4; j++)
2221 pos_values[j] = radv_load_output(ctx, VARYING_SLOT_POS, j);
2222 }
2223 si_llvm_init_export_args(ctx, pos_values, 0xf, V_008DFC_SQ_EXP_POS, &pos_args[0]);
2224
2225 if (ctx->output_mask & (1ull << VARYING_SLOT_PSIZ)) {
2226 outinfo->writes_pointsize = true;
2227 psize_value = radv_load_output(ctx, VARYING_SLOT_PSIZ, 0);
2228 }
2229
2230 if (ctx->output_mask & (1ull << VARYING_SLOT_LAYER)) {
2231 outinfo->writes_layer = true;
2232 layer_value = radv_load_output(ctx, VARYING_SLOT_LAYER, 0);
2233 }
2234
2235 if (ctx->output_mask & (1ull << VARYING_SLOT_VIEWPORT)) {
2236 outinfo->writes_viewport_index = true;
2237 viewport_index_value = radv_load_output(ctx, VARYING_SLOT_VIEWPORT, 0);
2238 }
2239
2240 if (outinfo->writes_pointsize ||
2241 outinfo->writes_layer ||
2242 outinfo->writes_viewport_index) {
2243 		pos_args[1].enabled_channels = ((outinfo->writes_pointsize ? 1 : 0) |
2244 						(outinfo->writes_layer ? 4 : 0));
2245 pos_args[1].valid_mask = 0;
2246 pos_args[1].done = 0;
2247 pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
2248 pos_args[1].compr = 0;
2249 pos_args[1].out[0] = ctx->ac.f32_0; /* X */
2250 pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
2251 pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
2252 pos_args[1].out[3] = ctx->ac.f32_0; /* W */
2253
2254 		if (outinfo->writes_pointsize)
2255 			pos_args[1].out[0] = psize_value;
2256 		if (outinfo->writes_layer)
2257 			pos_args[1].out[2] = layer_value;
2258 		if (outinfo->writes_viewport_index) {
2259 if (ctx->options->chip_class >= GFX9) {
2260 /* GFX9 has the layer in out.z[10:0] and the viewport
2261 * index in out.z[19:16].
2262 */
2263 LLVMValueRef v = viewport_index_value;
2264 v = ac_to_integer(&ctx->ac, v);
2265 v = LLVMBuildShl(ctx->ac.builder, v,
2266 LLVMConstInt(ctx->ac.i32, 16, false),
2267 "");
2268 v = LLVMBuildOr(ctx->ac.builder, v,
2269 ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
2270
2271 pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
2272 pos_args[1].enabled_channels |= 1 << 2;
2273 } else {
2274 pos_args[1].out[3] = viewport_index_value;
2275 pos_args[1].enabled_channels |= 1 << 3;
2276 }
2277 }
2278 }
2279 for (i = 0; i < 4; i++) {
2280 if (pos_args[i].out[0])
2281 num_pos_exports++;
2282 }
2283
2284 pos_idx = 0;
2285 for (i = 0; i < 4; i++) {
2286 if (!pos_args[i].out[0])
2287 continue;
2288
2289 /* Specify the target we are exporting */
2290 pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
2291 if (pos_idx == num_pos_exports)
2292 pos_args[i].done = 1;
2293 ac_build_export(&ctx->ac, &pos_args[i]);
2294 }
2295
2296 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2297 LLVMValueRef values[4];
2298 if (!(ctx->output_mask & (1ull << i)))
2299 continue;
2300
2301 if (i != VARYING_SLOT_LAYER &&
2302 i != VARYING_SLOT_PRIMITIVE_ID &&
2303 i < VARYING_SLOT_VAR0)
2304 continue;
2305
2306 for (unsigned j = 0; j < 4; j++)
2307 values[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
2308
2309 unsigned output_usage_mask;
2310
2311 if (ctx->stage == MESA_SHADER_VERTEX &&
2312 !ctx->is_gs_copy_shader) {
2313 output_usage_mask =
2314 ctx->shader_info->info.vs.output_usage_mask[i];
2315 } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
2316 output_usage_mask =
2317 ctx->shader_info->info.tes.output_usage_mask[i];
2318 } else {
2319 /* Enable all channels for the GS copy shader because
2320 * we don't know the output usage mask currently.
2321 */
2322 output_usage_mask = 0xf;
2323 }
2324
2325 radv_export_param(ctx, param_count, values, output_usage_mask);
2326
2327 outinfo->vs_output_param_offset[i] = param_count++;
2328 }
2329
2330 if (export_prim_id) {
2331 LLVMValueRef values[4];
2332
2333 values[0] = ctx->vs_prim_id;
2334 ctx->shader_info->vs.vgpr_comp_cnt = MAX2(2,
2335 ctx->shader_info->vs.vgpr_comp_cnt);
2336 for (unsigned j = 1; j < 4; j++)
2337 values[j] = ctx->ac.f32_0;
2338
2339 radv_export_param(ctx, param_count, values, 0xf);
2340
2341 outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
2342 outinfo->export_prim_id = true;
2343 }
2344
2345 outinfo->pos_exports = num_pos_exports;
2346 outinfo->param_exports = param_count;
2347 }
2348
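/* Emit ES outputs for consumption by the GS. The itemsize is derived from
 * the highest written parameter; on GFX9 (merged ES/GS) the outputs are
 * written to LDS at vertex_idx * itemsize, while on older chips they are
 * stored as dwords to the ESGS ring buffer at es2gs_offset.
 */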
2349 static void
2350 handle_es_outputs_post(struct radv_shader_context *ctx,
2351 struct ac_es_output_info *outinfo)
2352 {
2353 int j;
2354 uint64_t max_output_written = 0;
2355 LLVMValueRef lds_base = NULL;
2356
2357 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2358 int param_index;
2359 int length = 4;
2360
2361 if (!(ctx->output_mask & (1ull << i)))
2362 continue;
2363
2364 if (i == VARYING_SLOT_CLIP_DIST0)
2365 length = ctx->num_output_clips + ctx->num_output_culls;
2366
2367 param_index = shader_io_get_unique_index(i);
2368
2369 max_output_written = MAX2(param_index + (length > 4), max_output_written);
2370 }
2371
2372 outinfo->esgs_itemsize = (max_output_written + 1) * 16;
2373
2374 if (ctx->ac.chip_class >= GFX9) {
2375 unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
2376 LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
2377 LLVMValueRef wave_idx = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
2378 LLVMConstInt(ctx->ac.i32, 24, false),
2379 LLVMConstInt(ctx->ac.i32, 4, false), false);
2380 vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
2381 LLVMBuildMul(ctx->ac.builder, wave_idx,
2382 LLVMConstInt(ctx->ac.i32, 64, false), ""), "");
2383 lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
2384 LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
2385 }
2386
2387 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2388 LLVMValueRef dw_addr = NULL;
2389 LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
2390 int param_index;
2391 int length = 4;
2392
2393 if (!(ctx->output_mask & (1ull << i)))
2394 continue;
2395
2396 if (i == VARYING_SLOT_CLIP_DIST0)
2397 length = ctx->num_output_clips + ctx->num_output_culls;
2398
2399 param_index = shader_io_get_unique_index(i);
2400
2401 if (lds_base) {
2402 dw_addr = LLVMBuildAdd(ctx->ac.builder, lds_base,
2403 LLVMConstInt(ctx->ac.i32, param_index * 4, false),
2404 "");
2405 }
2406 for (j = 0; j < length; j++) {
2407 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
2408 out_val = LLVMBuildBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");
2409
2410 if (ctx->ac.chip_class >= GFX9) {
2411 ac_lds_store(&ctx->ac, dw_addr,
2412 LLVMBuildLoad(ctx->ac.builder, out_ptr[j], ""));
2413 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
2414 } else {
2415 ac_build_buffer_store_dword(&ctx->ac,
2416 ctx->esgs_ring,
2417 out_val, 1,
2418 NULL, ctx->es2gs_offset,
2419 (4 * param_index + j) * 4,
2420 1, 1, true, true);
2421 }
2422 }
2423 }
2424 }
2425
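/* Emit LS outputs to LDS for the HS to read. The per-vertex dword stride
 * is unpacked from bits [13:21) of ls_out_layout and the base address is
 * rel_auto_id * stride.
 */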
2426 static void
2427 handle_ls_outputs_post(struct radv_shader_context *ctx)
2428 {
2429 LLVMValueRef vertex_id = ctx->rel_auto_id;
2430 LLVMValueRef vertex_dw_stride = ac_unpack_param(&ctx->ac, ctx->ls_out_layout, 13, 8);
2431 LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
2432 vertex_dw_stride, "");
2433
2434 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2435 LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
2436 int length = 4;
2437
2438 if (!(ctx->output_mask & (1ull << i)))
2439 continue;
2440
2441 if (i == VARYING_SLOT_CLIP_DIST0)
2442 length = ctx->num_output_clips + ctx->num_output_culls;
2443 int param = shader_io_get_unique_index(i);
2444 mark_tess_output(ctx, false, param);
2445 if (length > 4)
2446 mark_tess_output(ctx, false, param + 1);
2447 LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
2448 LLVMConstInt(ctx->ac.i32, param * 4, false),
2449 "");
2450 for (unsigned j = 0; j < length; j++) {
2451 ac_lds_store(&ctx->ac, dw_addr,
2452 LLVMBuildLoad(ctx->ac.builder, out_ptr[j], ""));
2453 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
2454 }
2455 }
2456 }
2457
2458 struct ac_build_if_state
2459 {
2460 struct radv_shader_context *ctx;
2461 LLVMValueRef condition;
2462 LLVMBasicBlockRef entry_block;
2463 LLVMBasicBlockRef true_block;
2464 LLVMBasicBlockRef false_block;
2465 LLVMBasicBlockRef merge_block;
2466 };
2467
2468 static LLVMBasicBlockRef
2469 ac_build_insert_new_block(struct radv_shader_context *ctx, const char *name)
2470 {
2471 LLVMBasicBlockRef current_block;
2472 LLVMBasicBlockRef next_block;
2473 LLVMBasicBlockRef new_block;
2474
2475 /* get current basic block */
2476 current_block = LLVMGetInsertBlock(ctx->ac.builder);
2477
2478 	/* check if there's another block after this one */
2479 next_block = LLVMGetNextBasicBlock(current_block);
2480 if (next_block) {
2481 /* insert the new block before the next block */
2482 new_block = LLVMInsertBasicBlockInContext(ctx->context, next_block, name);
2483 }
2484 else {
2485 /* append new block after current block */
2486 LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
2487 new_block = LLVMAppendBasicBlockInContext(ctx->context, function, name);
2488 }
2489 return new_block;
2490 }
2491
2492 static void
2493 ac_nir_build_if(struct ac_build_if_state *ifthen,
2494 struct radv_shader_context *ctx,
2495 LLVMValueRef condition)
2496 {
2497 LLVMBasicBlockRef block = LLVMGetInsertBlock(ctx->ac.builder);
2498
2499 memset(ifthen, 0, sizeof *ifthen);
2500 ifthen->ctx = ctx;
2501 ifthen->condition = condition;
2502 ifthen->entry_block = block;
2503
2504 /* create endif/merge basic block for the phi functions */
2505 ifthen->merge_block = ac_build_insert_new_block(ctx, "endif-block");
2506
2507 /* create/insert true_block before merge_block */
2508 ifthen->true_block =
2509 LLVMInsertBasicBlockInContext(ctx->context,
2510 ifthen->merge_block,
2511 "if-true-block");
2512
2513 	/* subsequent code goes into the true block */
2514 LLVMPositionBuilderAtEnd(ctx->ac.builder, ifthen->true_block);
2515 }
2516
2517 /**
2518 * End a conditional.
2519 */
2520 static void
2521 ac_nir_build_endif(struct ac_build_if_state *ifthen)
2522 {
2523 LLVMBuilderRef builder = ifthen->ctx->ac.builder;
2524
2525 /* Insert branch to the merge block from current block */
2526 LLVMBuildBr(builder, ifthen->merge_block);
2527
2528 /*
2529 * Now patch in the various branch instructions.
2530 */
2531
2532 /* Insert the conditional branch instruction at the end of entry_block */
2533 LLVMPositionBuilderAtEnd(builder, ifthen->entry_block);
2534 if (ifthen->false_block) {
2535 /* we have an else clause */
2536 LLVMBuildCondBr(builder, ifthen->condition,
2537 ifthen->true_block, ifthen->false_block);
2538 }
2539 else {
2540 /* no else clause */
2541 LLVMBuildCondBr(builder, ifthen->condition,
2542 ifthen->true_block, ifthen->merge_block);
2543 }
2544
2545 /* Resume building code at end of the ifthen->merge_block */
2546 LLVMPositionBuilderAtEnd(builder, ifthen->merge_block);
2547 }
2548
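/* Write the tessellation factors computed by the TCS. Per primitive mode:
 *
 *   mode       stride  outer  inner
 *   ISOLINES      2      2      0
 *   TRIANGLES     4      3      1
 *   QUADS         6      4      2
 *
 * Only invocation 0 of each patch writes. On VI and older, patch 0 also
 * writes the dynamic HS control word (0x80000000) at offset 0, which
 * shifts the factors by 4 bytes. If the TES reads the factors, they are
 * additionally mirrored to the offchip buffer.
 */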
2549 static void
2550 write_tess_factors(struct radv_shader_context *ctx)
2551 {
2552 unsigned stride, outer_comps, inner_comps;
2553 struct ac_build_if_state if_ctx, inner_if_ctx;
2554 LLVMValueRef invocation_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 8, 5);
2555 LLVMValueRef rel_patch_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
2556 unsigned tess_inner_index = 0, tess_outer_index;
2557 LLVMValueRef lds_base, lds_inner = NULL, lds_outer, byteoffset, buffer;
2558 LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
2559 int i;
2560 ac_emit_barrier(&ctx->ac, ctx->stage);
2561
2562 switch (ctx->options->key.tcs.primitive_mode) {
2563 case GL_ISOLINES:
2564 stride = 2;
2565 outer_comps = 2;
2566 inner_comps = 0;
2567 break;
2568 case GL_TRIANGLES:
2569 stride = 4;
2570 outer_comps = 3;
2571 inner_comps = 1;
2572 break;
2573 case GL_QUADS:
2574 stride = 6;
2575 outer_comps = 4;
2576 inner_comps = 2;
2577 break;
2578 default:
2579 return;
2580 }
2581
2582 ac_nir_build_if(&if_ctx, ctx,
2583 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2584 invocation_id, ctx->ac.i32_0, ""));
2585
2586 lds_base = get_tcs_out_current_patch_data_offset(ctx);
2587
2588 if (inner_comps) {
2589 tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
2590 mark_tess_output(ctx, true, tess_inner_index);
2591 lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
2592 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
2593 }
2594
2595 tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
2596 mark_tess_output(ctx, true, tess_outer_index);
2597 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
2598 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");
2599
2600 for (i = 0; i < 4; i++) {
2601 inner[i] = LLVMGetUndef(ctx->ac.i32);
2602 outer[i] = LLVMGetUndef(ctx->ac.i32);
2603 }
2604
2605 	/* LINES reversal: the two outer factors are swapped in the TF buffer. */
2606 if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
2607 outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
2608 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
2609 ctx->ac.i32_1, "");
2610 outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
2611 } else {
2612 for (i = 0; i < outer_comps; i++) {
2613 outer[i] = out[i] =
2614 ac_lds_load(&ctx->ac, lds_outer);
2615 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
2616 ctx->ac.i32_1, "");
2617 }
2618 for (i = 0; i < inner_comps; i++) {
2619 inner[i] = out[outer_comps+i] =
2620 ac_lds_load(&ctx->ac, lds_inner);
2621 lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_inner,
2622 ctx->ac.i32_1, "");
2623 }
2624 }
2625
2626 /* Convert the outputs to vectors for stores. */
2627 vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
2628 vec1 = NULL;
2629
2630 if (stride > 4)
2631 vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);
2632
2633
2634 buffer = ctx->hs_ring_tess_factor;
2635 tf_base = ctx->tess_factor_offset;
2636 byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
2637 LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
2638 unsigned tf_offset = 0;
2639
2640 if (ctx->options->chip_class <= VI) {
2641 ac_nir_build_if(&inner_if_ctx, ctx,
2642 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2643 rel_patch_id, ctx->ac.i32_0, ""));
2644
2645 /* Store the dynamic HS control word. */
2646 ac_build_buffer_store_dword(&ctx->ac, buffer,
2647 LLVMConstInt(ctx->ac.i32, 0x80000000, false),
2648 1, ctx->ac.i32_0, tf_base,
2649 0, 1, 0, true, false);
2650 tf_offset += 4;
2651
2652 ac_nir_build_endif(&inner_if_ctx);
2653 }
2654
2655 /* Store the tessellation factors. */
2656 ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
2657 MIN2(stride, 4), byteoffset, tf_base,
2658 tf_offset, 1, 0, true, false);
2659 if (vec1)
2660 ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
2661 stride - 4, byteoffset, tf_base,
2662 16 + tf_offset, 1, 0, true, false);
2663
2664 	/* Store to offchip for the TES to read - only if the TES reads them. */
2665 if (ctx->options->key.tcs.tes_reads_tess_factors) {
2666 LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
2667 LLVMValueRef tf_inner_offset;
2668 unsigned param_outer, param_inner;
2669
2670 param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
2671 tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
2672 LLVMConstInt(ctx->ac.i32, param_outer, 0));
2673
2674 outer_vec = ac_build_gather_values(&ctx->ac, outer,
2675 util_next_power_of_two(outer_comps));
2676
2677 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
2678 outer_comps, tf_outer_offset,
2679 ctx->oc_lds, 0, 1, 0, true, false);
2680 if (inner_comps) {
2681 param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
2682 tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
2683 LLVMConstInt(ctx->ac.i32, param_inner, 0));
2684
2685 inner_vec = inner_comps == 1 ? inner[0] :
2686 ac_build_gather_values(&ctx->ac, inner, inner_comps);
2687 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
2688 inner_comps, tf_inner_offset,
2689 ctx->oc_lds, 0, 1, 0, true, false);
2690 }
2691 }
2692 ac_nir_build_endif(&if_ctx);
2693 }
2694
2695 static void
2696 handle_tcs_outputs_post(struct radv_shader_context *ctx)
2697 {
2698 write_tess_factors(ctx);
2699 }
2700
2701 static bool
2702 si_export_mrt_color(struct radv_shader_context *ctx,
2703 LLVMValueRef *color, unsigned index,
2704 struct ac_export_args *args)
2705 {
2706 /* Export */
2707 si_llvm_init_export_args(ctx, color, 0xf,
2708 V_008DFC_SQ_EXP_MRT + index, args);
2709 if (!args->enabled_channels)
2710 return false; /* unnecessary NULL export */
2711
2712 return true;
2713 }
2714
2715 static void
2716 radv_export_mrt_z(struct radv_shader_context *ctx,
2717 LLVMValueRef depth, LLVMValueRef stencil,
2718 LLVMValueRef samplemask)
2719 {
2720 struct ac_export_args args;
2721
2722 ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);
2723
2724 ac_build_export(&ctx->ac, &args);
2725 }
2726
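/* Emit the fragment shader exports: one color export per written MRT
 * (NULL exports are skipped), plus a depth/stencil/samplemask export when
 * any of those outputs is written. The DONE bit goes on the last color
 * export only if there is no Z export; with neither, a NULL export is
 * emitted so the hw still sees one.
 */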
2727 static void
2728 handle_fs_outputs_post(struct radv_shader_context *ctx)
2729 {
2730 unsigned index = 0;
2731 LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
2732 struct ac_export_args color_args[8];
2733
2734 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2735 LLVMValueRef values[4];
2736
2737 if (!(ctx->output_mask & (1ull << i)))
2738 continue;
2739
2740 if (i < FRAG_RESULT_DATA0)
2741 continue;
2742
2743 for (unsigned j = 0; j < 4; j++)
2744 values[j] = ac_to_float(&ctx->ac,
2745 radv_load_output(ctx, i, j));
2746
2747 bool ret = si_export_mrt_color(ctx, values,
2748 i - FRAG_RESULT_DATA0,
2749 &color_args[index]);
2750 if (ret)
2751 index++;
2752 }
2753
2754 /* Process depth, stencil, samplemask. */
2755 if (ctx->shader_info->info.ps.writes_z) {
2756 depth = ac_to_float(&ctx->ac,
2757 radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
2758 }
2759 if (ctx->shader_info->info.ps.writes_stencil) {
2760 stencil = ac_to_float(&ctx->ac,
2761 radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
2762 }
2763 if (ctx->shader_info->info.ps.writes_sample_mask) {
2764 samplemask = ac_to_float(&ctx->ac,
2765 radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
2766 }
2767
2768 /* Set the DONE bit on last non-null color export only if Z isn't
2769 * exported.
2770 */
2771 if (index > 0 &&
2772 !ctx->shader_info->info.ps.writes_z &&
2773 !ctx->shader_info->info.ps.writes_stencil &&
2774 !ctx->shader_info->info.ps.writes_sample_mask) {
2775 unsigned last = index - 1;
2776
2777 color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
2778 color_args[last].done = 1; /* DONE bit */
2779 }
2780
2781 /* Export PS outputs. */
2782 for (unsigned i = 0; i < index; i++)
2783 ac_build_export(&ctx->ac, &color_args[i]);
2784
2785 if (depth || stencil || samplemask)
2786 radv_export_mrt_z(ctx, depth, stencil, samplemask);
2787 else if (!index)
2788 ac_build_export_null(&ctx->ac);
2789 }
2790
2791 static void
2792 emit_gs_epilogue(struct radv_shader_context *ctx)
2793 {
2794 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
2795 }
2796
2797 static void
2798 handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
2799 LLVMValueRef *addrs)
2800 {
2801 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
2802
2803 switch (ctx->stage) {
2804 case MESA_SHADER_VERTEX:
2805 if (ctx->options->key.vs.as_ls)
2806 handle_ls_outputs_post(ctx);
2807 else if (ctx->options->key.vs.as_es)
2808 handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
2809 else
2810 handle_vs_outputs_post(ctx, ctx->options->key.vs.export_prim_id,
2811 &ctx->shader_info->vs.outinfo);
2812 break;
2813 case MESA_SHADER_FRAGMENT:
2814 handle_fs_outputs_post(ctx);
2815 break;
2816 case MESA_SHADER_GEOMETRY:
2817 emit_gs_epilogue(ctx);
2818 break;
2819 case MESA_SHADER_TESS_CTRL:
2820 handle_tcs_outputs_post(ctx);
2821 break;
2822 case MESA_SHADER_TESS_EVAL:
2823 if (ctx->options->key.tes.as_es)
2824 handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
2825 else
2826 handle_vs_outputs_post(ctx, ctx->options->key.tes.export_prim_id,
2827 &ctx->shader_info->tes.outinfo);
2828 break;
2829 default:
2830 break;
2831 }
2832 }
2833
2834 static void ac_llvm_finalize_module(struct radv_shader_context *ctx)
2835 {
2836 LLVMPassManagerRef passmgr;
2837 /* Create the pass manager */
2838 passmgr = LLVMCreateFunctionPassManagerForModule(
2839 ctx->ac.module);
2840
2841 /* This pass should eliminate all the load and store instructions */
2842 LLVMAddPromoteMemoryToRegisterPass(passmgr);
2843
2844 /* Add some optimization passes */
2845 LLVMAddScalarReplAggregatesPass(passmgr);
2846 LLVMAddLICMPass(passmgr);
2847 LLVMAddAggressiveDCEPass(passmgr);
2848 LLVMAddCFGSimplificationPass(passmgr);
2849 LLVMAddInstructionCombiningPass(passmgr);
2850
2851 	/* Run the passes. */
2852 LLVMInitializeFunctionPassManager(passmgr);
2853 LLVMRunFunctionPassManager(passmgr, ctx->main_function);
2854 LLVMFinalizeFunctionPassManager(passmgr);
2855
2856 LLVMDisposeBuilder(ctx->ac.builder);
2857 LLVMDisposePassManager(passmgr);
2858
2859 ac_llvm_context_dispose(&ctx->ac);
2860 }
2861
2862 static void
2863 ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
2864 {
2865 struct ac_vs_output_info *outinfo;
2866
2867 switch (ctx->stage) {
2868 case MESA_SHADER_FRAGMENT:
2869 case MESA_SHADER_COMPUTE:
2870 case MESA_SHADER_TESS_CTRL:
2871 case MESA_SHADER_GEOMETRY:
2872 return;
2873 case MESA_SHADER_VERTEX:
2874 if (ctx->options->key.vs.as_ls ||
2875 ctx->options->key.vs.as_es)
2876 return;
2877 outinfo = &ctx->shader_info->vs.outinfo;
2878 break;
2879 case MESA_SHADER_TESS_EVAL:
2880 		if (ctx->options->key.tes.as_es)
2881 return;
2882 outinfo = &ctx->shader_info->tes.outinfo;
2883 break;
2884 default:
2885 unreachable("Unhandled shader type");
2886 }
2887
2888 ac_optimize_vs_outputs(&ctx->ac,
2889 ctx->main_function,
2890 outinfo->vs_output_param_offset,
2891 VARYING_SLOT_MAX,
2892 &outinfo->param_exports);
2893 }
2894
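/* Load the ring descriptors this stage needs from the ring_offsets array.
 * For the GS, the GSVS descriptor is patched at runtime: the number of
 * entries is written into dword 2 and the stride is OR'ed into dword 1.
 */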
2895 static void
2896 ac_setup_rings(struct radv_shader_context *ctx)
2897 {
2898 if ((ctx->stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_es) ||
2899 (ctx->stage == MESA_SHADER_TESS_EVAL && ctx->options->key.tes.as_es)) {
2900 ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_ESGS_VS, false));
2901 }
2902
2903 if (ctx->is_gs_copy_shader) {
2904 ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_VS, false));
2905 }
2906 if (ctx->stage == MESA_SHADER_GEOMETRY) {
2907 LLVMValueRef tmp;
2908 ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_ESGS_GS, false));
2909 ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_GS, false));
2910
2911 ctx->gsvs_ring = LLVMBuildBitCast(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.v4i32, "");
2912
2913 ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, ctx->gsvs_num_entries, LLVMConstInt(ctx->ac.i32, 2, false), "");
2914 tmp = LLVMBuildExtractElement(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.i32_1, "");
2915 tmp = LLVMBuildOr(ctx->ac.builder, tmp, ctx->gsvs_ring_stride, "");
2916 ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, tmp, ctx->ac.i32_1, "");
2917 }
2918
2919 if (ctx->stage == MESA_SHADER_TESS_CTRL ||
2920 ctx->stage == MESA_SHADER_TESS_EVAL) {
2921 ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
2922 ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
2923 }
2924 }
2925
2926 static unsigned
2927 ac_nir_get_max_workgroup_size(enum chip_class chip_class,
2928 const struct nir_shader *nir)
2929 {
2930 switch (nir->info.stage) {
2931 case MESA_SHADER_TESS_CTRL:
2932 return chip_class >= CIK ? 128 : 64;
2933 case MESA_SHADER_GEOMETRY:
2934 return chip_class >= GFX9 ? 128 : 64;
2935 case MESA_SHADER_COMPUTE:
2936 break;
2937 default:
2938 return 0;
2939 }
2940
2941 unsigned max_workgroup_size = nir->info.cs.local_size[0] *
2942 nir->info.cs.local_size[1] *
2943 nir->info.cs.local_size[2];
2944 return max_workgroup_size;
2945 }
2946
2947 /* Fix up for the HW not emitting the TCS regs when there are no HS threads: the LS inputs then arrive in the VGPRs declared for the TCS regs, so select them from there. */
2948 static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
2949 {
2950 LLVMValueRef count = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
2951 LLVMConstInt(ctx->ac.i32, 8, false),
2952 LLVMConstInt(ctx->ac.i32, 8, false), false);
2953 LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
2954 ctx->ac.i32_0, "");
2955 ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
2956 ctx->vs_prim_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.vertex_id, ctx->vs_prim_id, "");
2957 ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_rel_ids, ctx->rel_auto_id, "");
2958 ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_patch_id, ctx->abi.vertex_id, "");
2959 }
2960
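/* On the merged ES/GS path the six per-vertex offsets arrive packed two
 * per VGPR as 16-bit halves, so unpack gs_vtx_offset[i] from bits
 * [(i & 1) * 16, +16) of gs_vtx_offset[i & ~1]. The GS wave id lives in
 * bits [16:24) of merged_wave_info.
 */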
2961 static void prepare_gs_input_vgprs(struct radv_shader_context *ctx)
2962 {
2963 	for (int i = 5; i >= 0; --i) {
2964 ctx->gs_vtx_offset[i] = ac_build_bfe(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
2965 LLVMConstInt(ctx->ac.i32, (i & 1) * 16, false),
2966 LLVMConstInt(ctx->ac.i32, 16, false), false);
2967 }
2968
2969 ctx->gs_wave_id = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
2970 LLVMConstInt(ctx->ac.i32, 16, false),
2971 LLVMConstInt(ctx->ac.i32, 8, false), false);
2972 }
2973
2974
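/* Translate one or two (merged) NIR shaders into a single LLVM module.
 * A shared main function is created first; each stage's body is then
 * wrapped in an if-block comparing ac_get_thread_id() against that stage's
 * thread count (bits [8 * i, 8 * i + 8) of merged_wave_info), with a
 * barrier between stages.
 */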
2975 static
2976 LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,
2977 struct nir_shader *const *shaders,
2978 int shader_count,
2979 struct ac_shader_variant_info *shader_info,
2980 const struct ac_nir_compiler_options *options,
2981 bool dump_shader)
2982 {
2983 struct radv_shader_context ctx = {0};
2984 unsigned i;
2985 ctx.options = options;
2986 ctx.shader_info = shader_info;
2987 ctx.context = LLVMContextCreate();
2988
2989 ac_llvm_context_init(&ctx.ac, ctx.context, options->chip_class,
2990 options->family);
2991 ctx.ac.module = LLVMModuleCreateWithNameInContext("shader", ctx.context);
2992 LLVMSetTarget(ctx.ac.module, options->supports_spill ? "amdgcn-mesa-mesa3d" : "amdgcn--");
2993
2994 LLVMTargetDataRef data_layout = LLVMCreateTargetDataLayout(tm);
2995 char *data_layout_str = LLVMCopyStringRepOfTargetData(data_layout);
2996 LLVMSetDataLayout(ctx.ac.module, data_layout_str);
2997 LLVMDisposeTargetData(data_layout);
2998 LLVMDisposeMessage(data_layout_str);
2999
3000 enum ac_float_mode float_mode =
3001 options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
3002 AC_FLOAT_MODE_DEFAULT;
3003
3004 ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
3005
3006 memset(shader_info, 0, sizeof(*shader_info));
3007
3008 	for (int i = 0; i < shader_count; ++i)
3009 ac_nir_shader_info_pass(shaders[i], options, &shader_info->info);
3010
3011 for (i = 0; i < AC_UD_MAX_SETS; i++)
3012 shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
3013 for (i = 0; i < AC_UD_MAX_UD; i++)
3014 shader_info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;
3015
3016 ctx.max_workgroup_size = 0;
3017 for (int i = 0; i < shader_count; ++i) {
3018 ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
3019 ac_nir_get_max_workgroup_size(ctx.options->chip_class,
3020 shaders[i]));
3021 }
3022
3023 create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
3024 shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);
3025
3026 ctx.abi.inputs = &ctx.inputs[0];
3027 ctx.abi.emit_outputs = handle_shader_outputs_post;
3028 ctx.abi.emit_vertex = visit_emit_vertex;
3029 ctx.abi.load_ubo = radv_load_ubo;
3030 ctx.abi.load_ssbo = radv_load_ssbo;
3031 ctx.abi.load_sampler_desc = radv_get_sampler_desc;
3032 ctx.abi.load_resource = radv_load_resource;
3033 ctx.abi.clamp_shadow_reference = false;
3034
3035 if (shader_count >= 2)
3036 ac_init_exec_full_mask(&ctx.ac);
3037
3038 if (ctx.ac.chip_class == GFX9 &&
3039 shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
3040 ac_nir_fixup_ls_hs_input_vgprs(&ctx);
3041
3042 	for (int i = 0; i < shader_count; ++i) {
3043 ctx.stage = shaders[i]->info.stage;
3044 ctx.output_mask = 0;
3045 ctx.tess_outputs_written = 0;
3046 ctx.num_output_clips = shaders[i]->info.clip_distance_array_size;
3047 ctx.num_output_culls = shaders[i]->info.cull_distance_array_size;
3048
3049 if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
3050 ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.ac.i32, "gs_next_vertex");
3051 ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
3052 ctx.abi.load_inputs = load_gs_input;
3053 ctx.abi.emit_primitive = visit_end_primitive;
3054 } else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
3055 ctx.tcs_outputs_read = shaders[i]->info.outputs_read;
3056 ctx.tcs_patch_outputs_read = shaders[i]->info.patch_outputs_read;
3057 ctx.abi.load_tess_varyings = load_tcs_varyings;
3058 ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
3059 ctx.abi.store_tcs_outputs = store_tcs_output;
3060 ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
3061 } else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
3062 ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
3063 ctx.abi.load_tess_varyings = load_tes_input;
3064 ctx.abi.load_tess_coord = load_tess_coord;
3065 ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
3066 ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
3067 } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
3068 if (shader_info->info.vs.needs_instance_id) {
3069 if (ctx.options->key.vs.as_ls) {
3070 ctx.shader_info->vs.vgpr_comp_cnt =
3071 MAX2(2, ctx.shader_info->vs.vgpr_comp_cnt);
3072 } else {
3073 ctx.shader_info->vs.vgpr_comp_cnt =
3074 MAX2(1, ctx.shader_info->vs.vgpr_comp_cnt);
3075 }
3076 }
3077 ctx.abi.load_base_vertex = radv_load_base_vertex;
3078 } else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
3079 shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
3080 ctx.abi.lookup_interp_param = lookup_interp_param;
3081 ctx.abi.load_sample_position = load_sample_position;
3082 ctx.abi.load_sample_mask_in = load_sample_mask_in;
3083 ctx.abi.emit_kill = radv_emit_kill;
3084 }
3085
3086 if (i)
3087 ac_emit_barrier(&ctx.ac, ctx.stage);
3088
3089 ac_setup_rings(&ctx);
3090
3091 LLVMBasicBlockRef merge_block;
3092 if (shader_count >= 2) {
3093 LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
3094 LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
3095 merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
3096
3097 LLVMValueRef count = ac_build_bfe(&ctx.ac, ctx.merged_wave_info,
3098 LLVMConstInt(ctx.ac.i32, 8 * i, false),
3099 LLVMConstInt(ctx.ac.i32, 8, false), false);
3100 LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
3101 LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
3102 thread_id, count, "");
3103 LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);
3104
3105 LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
3106 }
3107
3108 if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
3109 handle_fs_inputs(&ctx, shaders[i]);
3110 		else if (shaders[i]->info.stage == MESA_SHADER_VERTEX)
3111 			handle_vs_inputs(&ctx, shaders[i]);
3112 		else if (shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
3113 prepare_gs_input_vgprs(&ctx);
3114
3115 nir_foreach_variable(variable, &shaders[i]->outputs)
3116 scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);
3117
3118 ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i]);
3119
3120 if (shader_count >= 2) {
3121 LLVMBuildBr(ctx.ac.builder, merge_block);
3122 LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
3123 }
3124
3125 if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
3126 unsigned addclip = shaders[i]->info.clip_distance_array_size +
3127 shaders[i]->info.cull_distance_array_size > 4;
3128 shader_info->gs.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
3129 shader_info->gs.max_gsvs_emit_size = shader_info->gs.gsvs_vertex_size *
3130 shaders[i]->info.gs.vertices_out;
3131 } else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
3132 shader_info->tcs.outputs_written = ctx.tess_outputs_written;
3133 shader_info->tcs.patch_outputs_written = ctx.tess_patch_outputs_written;
3134 } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX && ctx.options->key.vs.as_ls) {
3135 shader_info->vs.outputs_written = ctx.tess_outputs_written;
3136 }
3137 }
3138
3139 LLVMBuildRetVoid(ctx.ac.builder);
3140
3141 if (options->dump_preoptir)
3142 ac_dump_module(ctx.ac.module);
3143
3144 ac_llvm_finalize_module(&ctx);
3145
3146 if (shader_count == 1)
3147 ac_nir_eliminate_const_vs_outputs(&ctx);
3148
3149 if (dump_shader) {
3150 ctx.shader_info->private_mem_vgprs =
3151 ac_count_scratch_private_memory(ctx.main_function);
3152 }
3153
3154 return ctx.ac.module;
3155 }
3156
3157 static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
3158 {
3159 unsigned *retval = (unsigned *)context;
3160 LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
3161 char *description = LLVMGetDiagInfoDescription(di);
3162
3163 if (severity == LLVMDSError) {
3164 *retval = 1;
3165 fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n",
3166 description);
3167 }
3168
3169 LLVMDisposeMessage(description);
3170 }
3171
3172 static unsigned ac_llvm_compile(LLVMModuleRef M,
3173 struct ac_shader_binary *binary,
3174 LLVMTargetMachineRef tm)
3175 {
3176 unsigned retval = 0;
3177 char *err;
3178 LLVMContextRef llvm_ctx;
3179 LLVMMemoryBufferRef out_buffer;
3180 unsigned buffer_size;
3181 const char *buffer_data;
3182 LLVMBool mem_err;
3183
3184 	/* Set up the diagnostic handler. */
3185 llvm_ctx = LLVMGetModuleContext(M);
3186
3187 LLVMContextSetDiagnosticHandler(llvm_ctx, ac_diagnostic_handler,
3188 &retval);
3189
3190 	/* Compile the IR. */
3191 mem_err = LLVMTargetMachineEmitToMemoryBuffer(tm, M, LLVMObjectFile,
3192 &err, &out_buffer);
3193
3194 	/* Process errors/warnings. */
3195 if (mem_err) {
3196 fprintf(stderr, "%s: %s", __FUNCTION__, err);
3197 free(err);
3198 retval = 1;
3199 goto out;
3200 }
3201
3202 	/* Extract the shader code. */
3203 buffer_size = LLVMGetBufferSize(out_buffer);
3204 buffer_data = LLVMGetBufferStart(out_buffer);
3205
3206 ac_elf_read(buffer_data, buffer_size, binary);
3207
3208 /* Clean up */
3209 LLVMDisposeMemoryBuffer(out_buffer);
3210
3211 out:
3212 return retval;
3213 }
3214
3215 static void ac_compile_llvm_module(LLVMTargetMachineRef tm,
3216 LLVMModuleRef llvm_module,
3217 struct ac_shader_binary *binary,
3218 struct ac_shader_config *config,
3219 struct ac_shader_variant_info *shader_info,
3220 gl_shader_stage stage,
3221 bool dump_shader, bool supports_spill)
3222 {
3223 if (dump_shader)
3224 ac_dump_module(llvm_module);
3225
3226 memset(binary, 0, sizeof(*binary));
3227 int v = ac_llvm_compile(llvm_module, binary, tm);
3228 if (v) {
3229 fprintf(stderr, "compile failed\n");
3230 }
3231
3232 if (dump_shader)
3233 fprintf(stderr, "disasm:\n%s\n", binary->disasm_string);
3234
3235 ac_shader_binary_read_config(binary, config, 0, supports_spill);
3236
3237 LLVMContextRef ctx = LLVMGetModuleContext(llvm_module);
3238 LLVMDisposeModule(llvm_module);
3239 LLVMContextDispose(ctx);
3240
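	/* Recount the PS input VGPRs from the SPI_PS_INPUT_ADDR enable bits
	 * (2 VGPRs per barycentric pair, 3 for the pull model, 1 per scalar
	 * input) so that num_vgprs below covers the inputs even when the
	 * shader never reads some of them.
	 */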
3241 if (stage == MESA_SHADER_FRAGMENT) {
3242 shader_info->num_input_vgprs = 0;
3243 if (G_0286CC_PERSP_SAMPLE_ENA(config->spi_ps_input_addr))
3244 shader_info->num_input_vgprs += 2;
3245 if (G_0286CC_PERSP_CENTER_ENA(config->spi_ps_input_addr))
3246 shader_info->num_input_vgprs += 2;
3247 if (G_0286CC_PERSP_CENTROID_ENA(config->spi_ps_input_addr))
3248 shader_info->num_input_vgprs += 2;
3249 if (G_0286CC_PERSP_PULL_MODEL_ENA(config->spi_ps_input_addr))
3250 shader_info->num_input_vgprs += 3;
3251 if (G_0286CC_LINEAR_SAMPLE_ENA(config->spi_ps_input_addr))
3252 shader_info->num_input_vgprs += 2;
3253 if (G_0286CC_LINEAR_CENTER_ENA(config->spi_ps_input_addr))
3254 shader_info->num_input_vgprs += 2;
3255 if (G_0286CC_LINEAR_CENTROID_ENA(config->spi_ps_input_addr))
3256 shader_info->num_input_vgprs += 2;
3257 if (G_0286CC_LINE_STIPPLE_TEX_ENA(config->spi_ps_input_addr))
3258 shader_info->num_input_vgprs += 1;
3259 if (G_0286CC_POS_X_FLOAT_ENA(config->spi_ps_input_addr))
3260 shader_info->num_input_vgprs += 1;
3261 if (G_0286CC_POS_Y_FLOAT_ENA(config->spi_ps_input_addr))
3262 shader_info->num_input_vgprs += 1;
3263 if (G_0286CC_POS_Z_FLOAT_ENA(config->spi_ps_input_addr))
3264 shader_info->num_input_vgprs += 1;
3265 if (G_0286CC_POS_W_FLOAT_ENA(config->spi_ps_input_addr))
3266 shader_info->num_input_vgprs += 1;
3267 if (G_0286CC_FRONT_FACE_ENA(config->spi_ps_input_addr))
3268 shader_info->num_input_vgprs += 1;
3269 if (G_0286CC_ANCILLARY_ENA(config->spi_ps_input_addr))
3270 shader_info->num_input_vgprs += 1;
3271 if (G_0286CC_SAMPLE_COVERAGE_ENA(config->spi_ps_input_addr))
3272 shader_info->num_input_vgprs += 1;
3273 if (G_0286CC_POS_FIXED_PT_ENA(config->spi_ps_input_addr))
3274 shader_info->num_input_vgprs += 1;
3275 }
3276 config->num_vgprs = MAX2(config->num_vgprs, shader_info->num_input_vgprs);
3277
3278 /* +3 for scratch wave offset and VCC */
3279 config->num_sgprs = MAX2(config->num_sgprs,
3280 shader_info->num_input_sgprs + 3);
3281
3282 /* Enable 64-bit and 16-bit denormals, because there is no performance
3283 * cost.
3284 *
3285 * If denormals are enabled, all floating-point output modifiers are
3286 * ignored.
3287 *
3288 * Don't enable denormals for 32-bit floats, because:
3289 * - Floating-point output modifiers would be ignored by the hw.
3290 * - Some opcodes don't support denormals, such as v_mad_f32. We would
3291 * have to stop using those.
3292 * - SI & CI would be very slow.
3293 */
3294 config->float_mode |= V_00B028_FP_64_DENORMS;
3295 }
3296
3297 static void
3298 ac_fill_shader_info(struct ac_shader_variant_info *shader_info, struct nir_shader *nir, const struct ac_nir_compiler_options *options)
3299 {
3300 switch (nir->info.stage) {
3301 case MESA_SHADER_COMPUTE:
3302 for (int i = 0; i < 3; ++i)
3303 shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
3304 break;
3305 case MESA_SHADER_FRAGMENT:
3306 shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
3307 break;
3308 case MESA_SHADER_GEOMETRY:
3309 shader_info->gs.vertices_in = nir->info.gs.vertices_in;
3310 shader_info->gs.vertices_out = nir->info.gs.vertices_out;
3311 shader_info->gs.output_prim = nir->info.gs.output_primitive;
3312 shader_info->gs.invocations = nir->info.gs.invocations;
3313 break;
3314 case MESA_SHADER_TESS_EVAL:
3315 shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
3316 shader_info->tes.spacing = nir->info.tess.spacing;
3317 shader_info->tes.ccw = nir->info.tess.ccw;
3318 shader_info->tes.point_mode = nir->info.tess.point_mode;
3319 shader_info->tes.as_es = options->key.tes.as_es;
3320 break;
3321 case MESA_SHADER_TESS_CTRL:
3322 shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
3323 break;
3324 case MESA_SHADER_VERTEX:
3325 shader_info->vs.as_es = options->key.vs.as_es;
3326 shader_info->vs.as_ls = options->key.vs.as_ls;
3327 /* in LS mode we need at least 1, invocation id needs 2, handled elsewhere */
3328 if (options->key.vs.as_ls)
3329 shader_info->vs.vgpr_comp_cnt = MAX2(1, shader_info->vs.vgpr_comp_cnt);
3330 break;
3331 default:
3332 break;
3333 }
3334 }
3335
3336 void
3337 radv_compile_nir_shader(LLVMTargetMachineRef tm,
3338 struct ac_shader_binary *binary,
3339 struct ac_shader_config *config,
3340 struct ac_shader_variant_info *shader_info,
3341 struct nir_shader *const *nir,
3342 int nir_count,
3343 const struct ac_nir_compiler_options *options,
3344 bool dump_shader)
3345 {
3346
3347 LLVMModuleRef llvm_module = ac_translate_nir_to_llvm(tm, nir, nir_count, shader_info,
3348 options, dump_shader);
3349
3350 ac_compile_llvm_module(tm, llvm_module, binary, config, shader_info, nir[0]->info.stage, dump_shader, options->supports_spill);
3351 for (int i = 0; i < nir_count; ++i)
3352 ac_fill_shader_info(shader_info, nir[i], options);
3353
3354 /* Determine the ES type (VS or TES) for the GS on GFX9. */
3355 if (options->chip_class == GFX9) {
3356 if (nir_count == 2 &&
3357 nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
3358 shader_info->gs.es_type = nir[0]->info.stage;
3359 }
3360 }
3361 }
3362
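/* Body of the GS copy shader, which runs as a VS. For each component of
 * each GS output, load the value the GS wrote to the GSVS ring (laid out
 * component-major, hence the (slot * 4 + j) * gs_max_out_vertices * 16 * 4
 * source offset plus vertex_id * 4) and store it into the VS output
 * variables, then reuse the regular VS export path.
 */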
3363 static void
3364 ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
3365 {
3366 LLVMValueRef vtx_offset =
3367 LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
3368 LLVMConstInt(ctx->ac.i32, 4, false), "");
3369 int idx = 0;
3370
3371 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
3372 int length = 4;
3373 int slot = idx;
3374 int slot_inc = 1;
3375 if (!(ctx->output_mask & (1ull << i)))
3376 continue;
3377
3378 if (i == VARYING_SLOT_CLIP_DIST0) {
3379 /* unpack clip and cull from a single set of slots */
3380 length = ctx->num_output_clips + ctx->num_output_culls;
3381 if (length > 4)
3382 slot_inc = 2;
3383 }
3384
3385 for (unsigned j = 0; j < length; j++) {
3386 LLVMValueRef value, soffset;
3387
3388 soffset = LLVMConstInt(ctx->ac.i32,
3389 (slot * 4 + j) *
3390 ctx->gs_max_out_vertices * 16 * 4, false);
3391
3392 value = ac_build_buffer_load(&ctx->ac, ctx->gsvs_ring,
3393 1, ctx->ac.i32_0,
3394 vtx_offset, soffset,
3395 0, 1, 1, true, false);
3396
3397 LLVMBuildStore(ctx->ac.builder,
3398 ac_to_float(&ctx->ac, value), ctx->abi.outputs[radeon_llvm_reg_index_soa(i, j)]);
3399 }
3400 idx += slot_inc;
3401 }
3402 handle_vs_outputs_post(ctx, false, &ctx->shader_info->vs.outinfo);
3403 }
3404
3405 void
3406 radv_compile_gs_copy_shader(LLVMTargetMachineRef tm,
3407 struct nir_shader *geom_shader,
3408 struct ac_shader_binary *binary,
3409 struct ac_shader_config *config,
3410 struct ac_shader_variant_info *shader_info,
3411 const struct ac_nir_compiler_options *options,
3412 bool dump_shader)
3413 {
3414 struct radv_shader_context ctx = {0};
3415 ctx.context = LLVMContextCreate();
3416 ctx.options = options;
3417 ctx.shader_info = shader_info;
3418
3419 ac_llvm_context_init(&ctx.ac, ctx.context, options->chip_class,
3420 options->family);
3421 ctx.ac.module = LLVMModuleCreateWithNameInContext("shader", ctx.context);
3422
3423 ctx.is_gs_copy_shader = true;
3424 LLVMSetTarget(ctx.ac.module, "amdgcn--");
3425
3426 enum ac_float_mode float_mode =
3427 options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
3428 AC_FLOAT_MODE_DEFAULT;
3429
3430 ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
3431 ctx.stage = MESA_SHADER_VERTEX;
3432
3433 create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);
3434
3435 ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
3436 ac_setup_rings(&ctx);
3437
3438 ctx.num_output_clips = geom_shader->info.clip_distance_array_size;
3439 ctx.num_output_culls = geom_shader->info.cull_distance_array_size;
3440
3441 nir_foreach_variable(variable, &geom_shader->outputs) {
3442 scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
3443 ac_handle_shader_output_decl(&ctx.ac, &ctx.abi, geom_shader,
3444 variable, MESA_SHADER_VERTEX);
3445 }
3446
3447 ac_gs_copy_shader_emit(&ctx);
3448
3449 LLVMBuildRetVoid(ctx.ac.builder);
3450
3451 ac_llvm_finalize_module(&ctx);
3452
3453 ac_compile_llvm_module(tm, ctx.ac.module, binary, config, shader_info,
3454 MESA_SHADER_VERTEX,
3455 dump_shader, options->supports_spill);
3456 }