src/amd/vulkan/radv_nir_to_llvm.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_private.h"
#include "radv_shader.h"
#include "nir/nir.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Transforms/Scalar.h>

#include "sid.h"
#include "gfx9d.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_llvm_build.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
#include "ac_exp_param.h"

#define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)

struct radv_shader_context {
	struct ac_llvm_context ac;
	const struct radv_nir_compiler_options *options;
	struct radv_shader_variant_info *shader_info;
	struct ac_shader_abi abi;

	unsigned max_workgroup_size;
	LLVMContextRef context;
	LLVMValueRef main_function;

	LLVMValueRef descriptor_sets[RADV_UD_MAX_SETS];
	LLVMValueRef ring_offsets;

	LLVMValueRef vertex_buffers;
	LLVMValueRef rel_auto_id;
	LLVMValueRef vs_prim_id;
	LLVMValueRef es2gs_offset;

	LLVMValueRef tcs_offchip_layout;
	LLVMValueRef tcs_out_offsets;
	LLVMValueRef oc_lds;
	LLVMValueRef merged_wave_info;
	LLVMValueRef tess_factor_offset;
	LLVMValueRef tes_rel_patch_id;
	LLVMValueRef tes_u;
	LLVMValueRef tes_v;

	LLVMValueRef gsvs_ring_stride;
	LLVMValueRef gsvs_num_entries;
	LLVMValueRef gs2vs_offset;
	LLVMValueRef gs_wave_id;
	LLVMValueRef gs_vtx_offset[6];

	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring;
	LLVMValueRef hs_ring_tess_offchip;
	LLVMValueRef hs_ring_tess_factor;

	LLVMValueRef sample_pos_offset;
	LLVMValueRef persp_sample, persp_center, persp_centroid;
	LLVMValueRef linear_sample, linear_center, linear_centroid;

	gl_shader_stage stage;

	LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];

	uint64_t input_mask;
	uint64_t output_mask;
	uint8_t num_output_clips;
	uint8_t num_output_culls;

	bool is_gs_copy_shader;
	LLVMValueRef gs_next_vertex;
	unsigned gs_max_out_vertices;

	unsigned tes_primitive_mode;
	uint64_t tess_outputs_written;
	uint64_t tess_patch_outputs_written;

	uint32_t tcs_patch_outputs_read;
	uint64_t tcs_outputs_read;
	uint32_t tcs_vertices_per_patch;
	uint32_t tcs_num_inputs;
};

enum radeon_llvm_calling_convention {
	RADEON_LLVM_AMDGPU_VS = 87,
	RADEON_LLVM_AMDGPU_GS = 88,
	RADEON_LLVM_AMDGPU_PS = 89,
	RADEON_LLVM_AMDGPU_CS = 90,
	RADEON_LLVM_AMDGPU_HS = 93,
};

static inline struct radv_shader_context *
radv_shader_context_from_abi(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = NULL;
	return container_of(abi, ctx, abi);
}

static LLVMValueRef get_rel_patch_id(struct radv_shader_context *ctx)
{
	switch (ctx->stage) {
	case MESA_SHADER_TESS_CTRL:
		return ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
	case MESA_SHADER_TESS_EVAL:
		return ctx->tes_rel_patch_id;
	default:
		unreachable("Illegal stage");
	}
}

/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
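/* Worked example of the sizing math below (illustrative numbers, not tied to
 * any particular pipeline): with 8 per-vertex TCS inputs and 3 input vertices
 * per patch, input_vertex_size = 8 * 16 = 128 bytes, so each patch occupies
 * 3 * 128 = 384 bytes of LDS, i.e. the stride of 96 dwords returned by
 * get_tcs_in_patch_stride().
 */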
static LLVMValueRef
get_tcs_in_patch_stride(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;

	input_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, input_patch_size, false);
}

static LLVMValueRef
get_tcs_out_patch_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	output_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
}

static LLVMValueRef
get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	output_vertex_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
}

static LLVMValueRef
get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
{
	return LLVMBuildMul(ctx->ac.builder,
			    ac_unpack_param(&ctx->ac, ctx->tcs_out_offsets, 0, 16),
			    LLVMConstInt(ctx->ac.i32, 4, false), "");
}

static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
{
	return LLVMBuildMul(ctx->ac.builder,
			    ac_unpack_param(&ctx->ac, ctx->tcs_out_offsets, 16, 16),
			    LLVMConstInt(ctx->ac.i32, 4, false), "");
}

static LLVMValueRef
get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}

static LLVMValueRef
get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildAdd(ctx->ac.builder, patch0_offset,
			    LLVMBuildMul(ctx->ac.builder, patch_stride,
					 rel_patch_id, ""),
			    "");
}

static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildAdd(ctx->ac.builder, patch0_patch_data_offset,
			    LLVMBuildMul(ctx->ac.builder, patch_stride,
					 rel_patch_id, ""),
			    "");
}

#define MAX_ARGS 23
struct arg_info {
	LLVMTypeRef types[MAX_ARGS];
	LLVMValueRef *assign[MAX_ARGS];
	unsigned array_params_mask;
	uint8_t count;
	uint8_t sgpr_count;
	uint8_t num_sgprs_used;
	uint8_t num_vgprs_used;
};

enum ac_arg_regfile {
	ARG_SGPR,
	ARG_VGPR,
};

static void
add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
	LLVMValueRef *param_ptr)
{
	assert(info->count < MAX_ARGS);

	info->assign[info->count] = param_ptr;
	info->types[info->count] = type;
	info->count++;

	if (regfile == ARG_SGPR) {
		info->num_sgprs_used += ac_get_type_size(type) / 4;
		info->sgpr_count++;
	} else {
		assert(regfile == ARG_VGPR);
		info->num_vgprs_used += ac_get_type_size(type) / 4;
	}
}
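
/* Note that one argument can cover several hardware registers: following
 * ac_get_type_size(), a v4i32 SGPR argument is 16 bytes and therefore
 * accounts for 4 SGPRs, while a scalar i32 VGPR argument accounts for a
 * single VGPR.
 */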

static inline void
add_array_arg(struct arg_info *info, LLVMTypeRef type, LLVMValueRef *param_ptr)
{
	info->array_params_mask |= (1 << info->count);
	add_arg(info, ARG_SGPR, type, param_ptr);
}

static void assign_arguments(LLVMValueRef main_function,
			     struct arg_info *info)
{
	unsigned i;
	for (i = 0; i < info->count; i++) {
		if (info->assign[i])
			*info->assign[i] = LLVMGetParam(main_function, i);
	}
}

static LLVMValueRef
create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
		     LLVMBuilderRef builder, LLVMTypeRef *return_types,
		     unsigned num_return_elems,
		     struct arg_info *args,
		     unsigned max_workgroup_size,
		     bool unsafe_math)
{
	LLVMTypeRef main_function_type, ret_type;
	LLVMBasicBlockRef main_function_body;

	if (num_return_elems)
		ret_type = LLVMStructTypeInContext(ctx, return_types,
						   num_return_elems, true);
	else
		ret_type = LLVMVoidTypeInContext(ctx);

	/* Setup the function */
	main_function_type =
		LLVMFunctionType(ret_type, args->types, args->count, 0);
	LLVMValueRef main_function =
		LLVMAddFunction(module, "main", main_function_type);
	main_function_body =
		LLVMAppendBasicBlockInContext(ctx, main_function, "main_body");
	LLVMPositionBuilderAtEnd(builder, main_function_body);

	LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
	for (unsigned i = 0; i < args->sgpr_count; ++i) {
		ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);

		if (args->array_params_mask & (1 << i)) {
			LLVMValueRef P = LLVMGetParam(main_function, i);
			ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
			ac_add_attr_dereferenceable(P, UINT64_MAX);
		}
	}

	if (max_workgroup_size) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-max-work-group-size",
						     max_workgroup_size);
	}
	if (unsafe_math) {
		/* These were copied from some LLVM test. */
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "less-precise-fpmad",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-infs-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-nans-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "unsafe-fp-math",
						   "true");
		LLVMAddTargetDependentFunctionAttr(main_function,
						   "no-signed-zeros-fp-math",
						   "true");
	}
	return main_function;
}


static void
set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx, uint8_t num_sgprs,
	uint32_t indirect_offset)
{
	ud_info->sgpr_idx = *sgpr_idx;
	ud_info->num_sgprs = num_sgprs;
	ud_info->indirect = indirect_offset > 0;
	ud_info->indirect_offset = indirect_offset;
	*sgpr_idx += num_sgprs;
}

static void
set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	       uint8_t num_sgprs)
{
	struct radv_userdata_info *ud_info =
		&ctx->shader_info->user_sgprs_locs.shader_data[idx];
	assert(ud_info);

	set_loc(ud_info, sgpr_idx, num_sgprs, 0);
}

static void
set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
	     uint32_t indirect_offset)
{
	struct radv_userdata_info *ud_info =
		&ctx->shader_info->user_sgprs_locs.descriptor_sets[idx];
	assert(ud_info);

	set_loc(ud_info, sgpr_idx, 2, indirect_offset);
}

struct user_sgpr_info {
	bool need_ring_offsets;
	uint8_t sgpr_count;
	bool indirect_all_descriptor_sets;
};

static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
				  gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (ctx->shader_info->info.needs_multiview_view_index ||
		    (!ctx->options->key.vs.as_es && !ctx->options->key.vs.as_ls && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_TESS_EVAL:
		if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.tes.as_es && ctx->options->key.has_multiview_view_index))
			return true;
		break;
	case MESA_SHADER_GEOMETRY:
	case MESA_SHADER_TESS_CTRL:
		if (ctx->shader_info->info.needs_multiview_view_index)
			return true;
		break;
	default:
		break;
	}
	return false;
}

static uint8_t
count_vs_user_sgprs(struct radv_shader_context *ctx)
{
	uint8_t count = 0;

	count += ctx->shader_info->info.vs.has_vertex_buffers ? 2 : 0;
	count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;

	return count;
}
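
/* For example, a VS that fetches from vertex buffers and uses gl_DrawID
 * needs 2 user SGPRs for the vertex-buffer descriptor pointer plus 3 for
 * base vertex, start instance and draw id, i.e. 5 in total; without
 * gl_DrawID the second group shrinks to 2.
 */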

static void allocate_user_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				bool needs_view_index,
				struct user_sgpr_info *user_sgpr_info)
{
	memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));

	/* until we sort out scratch/global buffers always assign ring offsets for gs/vs/es */
	if (stage == MESA_SHADER_GEOMETRY ||
	    stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_CTRL ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    ctx->is_gs_copy_shader)
		user_sgpr_info->need_ring_offsets = true;

	if (stage == MESA_SHADER_FRAGMENT &&
	    ctx->shader_info->info.ps.needs_sample_positions)
		user_sgpr_info->need_ring_offsets = true;

	/* 2 user sgprs will nearly always be allocated for scratch/rings */
	if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
		user_sgpr_info->sgpr_count += 2;
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size)
			user_sgpr_info->sgpr_count += 3;
		break;
	case MESA_SHADER_FRAGMENT:
		user_sgpr_info->sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
		break;
	case MESA_SHADER_VERTEX:
		if (!ctx->is_gs_copy_shader)
			user_sgpr_info->sgpr_count += count_vs_user_sgprs(ctx);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				user_sgpr_info->sgpr_count += count_vs_user_sgprs(ctx);
		}
		user_sgpr_info->sgpr_count += 2;
		break;
	case MESA_SHADER_TESS_EVAL:
		user_sgpr_info->sgpr_count += 1;
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX) {
				user_sgpr_info->sgpr_count += count_vs_user_sgprs(ctx);
			} else {
				user_sgpr_info->sgpr_count++;
			}
		}
		user_sgpr_info->sgpr_count += 2;
		break;
	default:
		break;
	}

	if (needs_view_index)
		user_sgpr_info->sgpr_count++;

	if (ctx->shader_info->info.loads_push_constants)
		user_sgpr_info->sgpr_count += 2;

	uint32_t available_sgprs = ctx->options->chip_class >= GFX9 ? 32 : 16;
	uint32_t remaining_sgprs = available_sgprs - user_sgpr_info->sgpr_count;

	if (remaining_sgprs / 2 < util_bitcount(ctx->shader_info->info.desc_set_used_mask)) {
		user_sgpr_info->sgpr_count += 2;
		user_sgpr_info->indirect_all_descriptor_sets = true;
	} else {
		user_sgpr_info->sgpr_count += util_bitcount(ctx->shader_info->info.desc_set_used_mask) * 2;
	}
}
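
/* Worked example of the spill decision above (illustrative): pre-GFX9 there
 * are 16 user SGPRs available. If 8 are already claimed (rings, push
 * constants, ...), the 8 remaining SGPRs hold at most 4 descriptor-set
 * pointers of 2 SGPRs each; a pipeline using 5 sets therefore switches to
 * one indirect pointer through which all sets are loaded.
 */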

static void
declare_global_input_sgprs(struct radv_shader_context *ctx,
			   gl_shader_stage stage,
			   bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   const struct user_sgpr_info *user_sgpr_info,
			   struct arg_info *args,
			   LLVMValueRef *desc_sets)
{
	LLVMTypeRef type = ac_array_in_const_addr_space(ctx->ac.i8);
	unsigned num_sets = ctx->options->layout ?
			    ctx->options->layout->num_sets : 0;
	unsigned stage_mask = 1 << stage;

	if (has_previous_stage)
		stage_mask |= 1 << previous_stage;

	/* 1 for each descriptor set */
	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				add_array_arg(args, type,
					      &ctx->descriptor_sets[i]);
			}
		}
	} else {
		add_array_arg(args, ac_array_in_const_addr_space(type), desc_sets);
	}

	if (ctx->shader_info->info.loads_push_constants) {
		/* 1 for push constants and dynamic descriptors */
		add_array_arg(args, type, &ctx->abi.push_constants);
	}
}

static void
declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
				gl_shader_stage stage,
				bool has_previous_stage,
				gl_shader_stage previous_stage,
				struct arg_info *args)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			add_arg(args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
				&ctx->vertex_buffers);
		}
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
		add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
		if (ctx->shader_info->info.vs.needs_draw_id) {
			add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
		}
	}
}

static void
declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
	if (!ctx->is_gs_copy_shader) {
		if (ctx->options->key.vs.as_ls) {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
		} else {
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
			add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
		}
		add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
	}
}

static void
declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_u);
	add_arg(args, ARG_VGPR, ctx->ac.f32, &ctx->tes_v);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->tes_rel_patch_id);
	add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.tes_patch_id);
}

static void
set_global_input_locs(struct radv_shader_context *ctx, gl_shader_stage stage,
		      bool has_previous_stage, gl_shader_stage previous_stage,
		      const struct user_sgpr_info *user_sgpr_info,
		      LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
{
	unsigned num_sets = ctx->options->layout ?
			    ctx->options->layout->num_sets : 0;
	unsigned stage_mask = 1 << stage;

	if (has_previous_stage)
		stage_mask |= 1 << previous_stage;

	if (!user_sgpr_info->indirect_all_descriptor_sets) {
		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				set_loc_desc(ctx, i, user_sgpr_idx, 0);
			} else
				ctx->descriptor_sets[i] = NULL;
		}
	} else {
		set_loc_shader(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
			       user_sgpr_idx, 2);

		for (unsigned i = 0; i < num_sets; ++i) {
			if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
			    ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
				set_loc_desc(ctx, i, user_sgpr_idx, i * 8);
				ctx->descriptor_sets[i] =
					ac_build_load_to_sgpr(&ctx->ac,
							      desc_sets,
							      LLVMConstInt(ctx->ac.i32, i, false));

			} else
				ctx->descriptor_sets[i] = NULL;
		}
		ctx->shader_info->need_indirect_descriptor_sets = true;
	}

	if (ctx->shader_info->info.loads_push_constants) {
		set_loc_shader(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx, 2);
	}
}

static void
set_vs_specific_input_locs(struct radv_shader_context *ctx,
			   gl_shader_stage stage, bool has_previous_stage,
			   gl_shader_stage previous_stage,
			   uint8_t *user_sgpr_idx)
{
	if (!ctx->is_gs_copy_shader &&
	    (stage == MESA_SHADER_VERTEX ||
	     (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
		if (ctx->shader_info->info.vs.has_vertex_buffers) {
			set_loc_shader(ctx, AC_UD_VS_VERTEX_BUFFERS,
				       user_sgpr_idx, 2);
		}

		unsigned vs_num = 2;
		if (ctx->shader_info->info.vs.needs_draw_id)
			vs_num++;

		set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
			       user_sgpr_idx, vs_num);
	}
}

static void set_llvm_calling_convention(LLVMValueRef func,
					gl_shader_stage stage)
{
	enum radeon_llvm_calling_convention calling_conv;

	switch (stage) {
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_TESS_EVAL:
		calling_conv = RADEON_LLVM_AMDGPU_VS;
		break;
	case MESA_SHADER_GEOMETRY:
		calling_conv = RADEON_LLVM_AMDGPU_GS;
		break;
	case MESA_SHADER_TESS_CTRL:
		calling_conv = HAVE_LLVM >= 0x0500 ? RADEON_LLVM_AMDGPU_HS : RADEON_LLVM_AMDGPU_VS;
		break;
	case MESA_SHADER_FRAGMENT:
		calling_conv = RADEON_LLVM_AMDGPU_PS;
		break;
	case MESA_SHADER_COMPUTE:
		calling_conv = RADEON_LLVM_AMDGPU_CS;
		break;
	default:
		unreachable("Unhandled shader type");
	}

	LLVMSetFunctionCallConv(func, calling_conv);
}

static void create_function(struct radv_shader_context *ctx,
			    gl_shader_stage stage,
			    bool has_previous_stage,
			    gl_shader_stage previous_stage)
{
	uint8_t user_sgpr_idx;
	struct user_sgpr_info user_sgpr_info;
	struct arg_info args = {};
	LLVMValueRef desc_sets;
	bool needs_view_index = needs_view_index_sgpr(ctx, stage);
	allocate_user_sgprs(ctx, stage, has_previous_stage,
			    previous_stage, needs_view_index, &user_sgpr_info);

	if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
		add_arg(&args, ARG_SGPR, ac_array_in_const_addr_space(ctx->ac.v4i32),
			&ctx->ring_offsets);
	}

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);

		if (ctx->shader_info->info.cs.uses_grid_size) {
			add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
				&ctx->abi.num_work_groups);
		}

		for (int i = 0; i < 3; i++) {
			ctx->abi.workgroup_ids[i] = NULL;
			if (ctx->shader_info->info.cs.uses_block_id[i]) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.workgroup_ids[i]);
			}
		}

		if (ctx->shader_info->info.cs.uses_local_invocation_idx)
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
			&ctx->abi.local_invocation_ids);
		break;
	case MESA_SHADER_VERTEX:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);
		declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
						previous_stage, &args);

		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);
		if (ctx->options->key.vs.as_es)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);

		declare_vs_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);
			declare_vs_specific_input_sgprs(ctx, stage,
							has_previous_stage,
							previous_stage, &args);

			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tcs_offchip_layout);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tcs_out_offsets);
			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);

			declare_vs_input_vgprs(ctx, &args);
		} else {
			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);

			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tcs_offchip_layout);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tcs_out_offsets);
			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->tess_factor_offset);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_patch_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.tcs_rel_ids);
		}
		break;
	case MESA_SHADER_TESS_EVAL:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);

		add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->tcs_offchip_layout);
		if (needs_view_index)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->abi.view_index);

		if (ctx->options->key.tes.as_es) {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->es2gs_offset);
		} else {
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
		}
		declare_tes_input_vgprs(ctx, &args);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			// First 6 system regs
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->merged_wave_info);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // scratch offset
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
			add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown

			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);

			if (previous_stage == MESA_SHADER_TESS_EVAL) {
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->tcs_offchip_layout);
			} else {
				declare_vs_specific_input_sgprs(ctx, stage,
								has_previous_stage,
								previous_stage,
								&args);
			}

			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->gsvs_ring_stride);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->gsvs_num_entries);
			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);

			if (previous_stage == MESA_SHADER_VERTEX) {
				declare_vs_input_vgprs(ctx, &args);
			} else {
				declare_tes_input_vgprs(ctx, &args);
			}
		} else {
			declare_global_input_sgprs(ctx, stage,
						   has_previous_stage,
						   previous_stage,
						   &user_sgpr_info, &args,
						   &desc_sets);

			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->gsvs_ring_stride);
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->gsvs_num_entries);
			if (needs_view_index)
				add_arg(&args, ARG_SGPR, ctx->ac.i32,
					&ctx->abi.view_index);

			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs2vs_offset);
			add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->gs_wave_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[0]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[1]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_prim_id);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[2]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[3]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[4]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->gs_vtx_offset[5]);
			add_arg(&args, ARG_VGPR, ctx->ac.i32,
				&ctx->abi.gs_invocation_id);
		}
		break;
	case MESA_SHADER_FRAGMENT:
		declare_global_input_sgprs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_info,
					   &args, &desc_sets);

		if (ctx->shader_info->info.ps.needs_sample_positions)
			add_arg(&args, ARG_SGPR, ctx->ac.i32,
				&ctx->sample_pos_offset);

		add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.v3i32, NULL); /* persp pull model */
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_sample);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_center);
		add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->linear_centroid);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, NULL); /* line stipple tex */
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[0]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[1]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[2]);
		add_arg(&args, ARG_VGPR, ctx->ac.f32, &ctx->abi.frag_pos[3]);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.front_face);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.ancillary);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, &ctx->abi.sample_coverage);
		add_arg(&args, ARG_VGPR, ctx->ac.i32, NULL); /* fixed pt */
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	ctx->main_function = create_llvm_function(
	    ctx->context, ctx->ac.module, ctx->ac.builder, NULL, 0, &args,
	    ctx->max_workgroup_size,
	    ctx->options->unsafe_math);
	set_llvm_calling_convention(ctx->main_function, stage);


	ctx->shader_info->num_input_vgprs = 0;
	ctx->shader_info->num_input_sgprs = ctx->options->supports_spill ? 2 : 0;

	ctx->shader_info->num_input_sgprs += args.num_sgprs_used;

	if (ctx->stage != MESA_SHADER_FRAGMENT)
		ctx->shader_info->num_input_vgprs = args.num_vgprs_used;

	assign_arguments(ctx->main_function, &args);

	user_sgpr_idx = 0;

	if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
		set_loc_shader(ctx, AC_UD_SCRATCH_RING_OFFSETS,
			       &user_sgpr_idx, 2);
		if (ctx->options->supports_spill) {
			ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
							       LLVMPointerType(ctx->ac.i8, AC_CONST_ADDR_SPACE),
							       NULL, 0, AC_FUNC_ATTR_READNONE);
			ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
							     ac_array_in_const_addr_space(ctx->ac.v4i32), "");
		}
	}

	/* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in
	 * front (including the rw_buffers at s0/s1). With user SGPR0 = s8,
	 * let's restart the count from 0. */
	if (has_previous_stage)
		user_sgpr_idx = 0;

	set_global_input_locs(ctx, stage, has_previous_stage, previous_stage,
			      &user_sgpr_info, desc_sets, &user_sgpr_idx);

	switch (stage) {
	case MESA_SHADER_COMPUTE:
		if (ctx->shader_info->info.cs.uses_grid_size) {
			set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
				       &user_sgpr_idx, 3);
		}
		break;
	case MESA_SHADER_VERTEX:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_CTRL:
		set_vs_specific_input_locs(ctx, stage, has_previous_stage,
					   previous_stage, &user_sgpr_idx);
		set_loc_shader(ctx, AC_UD_TCS_OFFCHIP_LAYOUT, &user_sgpr_idx, 2);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_TESS_EVAL:
		set_loc_shader(ctx, AC_UD_TES_OFFCHIP_LAYOUT, &user_sgpr_idx, 1);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_GEOMETRY:
		if (has_previous_stage) {
			if (previous_stage == MESA_SHADER_VERTEX)
				set_vs_specific_input_locs(ctx, stage,
							   has_previous_stage,
							   previous_stage,
							   &user_sgpr_idx);
			else
				set_loc_shader(ctx, AC_UD_TES_OFFCHIP_LAYOUT,
					       &user_sgpr_idx, 1);
		}
		set_loc_shader(ctx, AC_UD_GS_VS_RING_STRIDE_ENTRIES,
			       &user_sgpr_idx, 2);
		if (ctx->abi.view_index)
			set_loc_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
		break;
	case MESA_SHADER_FRAGMENT:
		if (ctx->shader_info->info.ps.needs_sample_positions) {
			set_loc_shader(ctx, AC_UD_PS_SAMPLE_POS_OFFSET,
				       &user_sgpr_idx, 1);
		}
		break;
	default:
		unreachable("Shader stage not implemented");
	}

	if (stage == MESA_SHADER_TESS_CTRL ||
	    (stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_ls) ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
		ac_declare_lds_as_pointer(&ctx->ac);
	}

	ctx->shader_info->num_user_sgprs = user_sgpr_idx;
}


static LLVMValueRef
radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
		   unsigned desc_set, unsigned binding)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
	struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
	struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
	unsigned base_offset = layout->binding[binding].offset;
	LLVMValueRef offset, stride;

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
	    layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
		unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
			layout->binding[binding].dynamic_offset_offset;
		desc_ptr = ctx->abi.push_constants;
		base_offset = pipeline_layout->push_constant_size + 16 * idx;
		stride = LLVMConstInt(ctx->ac.i32, 16, false);
	} else
		stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);

	offset = LLVMConstInt(ctx->ac.i32, base_offset, false);
	index = LLVMBuildMul(ctx->ac.builder, index, stride, "");
	offset = LLVMBuildAdd(ctx->ac.builder, offset, index, "");

	desc_ptr = ac_build_gep0(&ctx->ac, desc_ptr, offset);
	desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
	LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	return desc_ptr;
}
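
/* For a dynamic uniform/storage buffer, the descriptor above is read from
 * the push constant area rather than the set: e.g. with a push_constant_size
 * of 64 bytes, the descriptor for dynamic slot idx 2 starts at byte
 * 64 + 16 * 2 = 96, each dynamic descriptor being 16 bytes (one v4i32).
 */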


/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 * ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 * ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 * ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 * ...
 *
 * Note that every attribute has 4 components.
 */
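/* Concretely (illustrative numbers): with 3 vertices per patch and 16
 * patches, the address of attribute a for patch p, vertex v is
 * ((p * 3 + v) + a * 3 * 16) * 16 bytes, which is what
 * get_tcs_tes_buffer_address() computes below; per-patch attributes use
 * (p + a * 16) * 16 plus the patch-data offset unpacked from
 * tcs_offchip_layout.
 */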
static LLVMValueRef get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
					       LLVMValueRef vertex_index,
					       LLVMValueRef param_index)
{
	LLVMValueRef base_addr, vertices_per_patch, num_patches;
	LLVMValueRef param_stride, constant16;
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch, false);
	num_patches = ac_unpack_param(&ctx->ac, ctx->tcs_offchip_layout, 0, 9);

	constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
	if (vertex_index) {
		base_addr = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
					 vertices_per_patch, "");

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 vertex_index, "");

		param_stride = LLVMBuildMul(ctx->ac.builder, vertices_per_patch,
					    num_patches, "");
	} else {
		base_addr = rel_patch_id;
		param_stride = num_patches;
	}

	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
				 LLVMBuildMul(ctx->ac.builder, param_index,
					      param_stride, ""), "");

	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		LLVMValueRef patch_data_offset =
			ac_unpack_param(&ctx->ac, ctx->tcs_offchip_layout, 16, 16);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
					 patch_data_offset, "");
	}
	return base_addr;
}

static LLVMValueRef get_tcs_tes_buffer_address_params(struct radv_shader_context *ctx,
						      unsigned param,
						      unsigned const_index,
						      bool is_compact,
						      LLVMValueRef vertex_index,
						      LLVMValueRef indir_index)
{
	LLVMValueRef param_index;

	if (indir_index)
		param_index = LLVMBuildAdd(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, param, false),
					   indir_index, "");
	else {
		if (const_index && !is_compact)
			param += const_index;
		param_index = LLVMConstInt(ctx->ac.i32, param, false);
	}
	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}

static void
mark_tess_output(struct radv_shader_context *ctx,
		 bool is_patch, uint32_t param, int num_slots)
{
	uint64_t slot_mask = (1ull << num_slots) - 1;
	if (is_patch) {
		ctx->tess_patch_outputs_written |= (slot_mask << param);
	} else
		ctx->tess_outputs_written |= (slot_mask << param);
}

static LLVMValueRef
get_dw_address(struct radv_shader_context *ctx,
	       LLVMValueRef dw_addr,
	       unsigned param,
	       unsigned const_index,
	       bool compact_const_index,
	       LLVMValueRef vertex_index,
	       LLVMValueRef stride,
	       LLVMValueRef indir_index)
{
	if (vertex_index) {
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder,
						    vertex_index,
						    stride, ""), "");
	}

	if (indir_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder, indir_index,
						    LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
	else if (const_index && !compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index * 4, false), "");

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->ac.i32, param * 4, false), "");

	if (const_index && compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index, false), "");
	return dw_addr;
}
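
/* In dword terms, the address computed above is
 *   dw_addr + vertex_index * stride + param * 4
 *           + (compact ? const_index : const_index * 4, or indir_index * 4)
 * i.e. each attribute slot occupies 4 dwords, while compact slots
 * (clip/cull distances) are indexed per component.
 */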

static LLVMValueRef
load_tcs_varyings(struct ac_shader_abi *abi,
		  LLVMTypeRef type,
		  LLVMValueRef vertex_index,
		  LLVMValueRef indir_index,
		  unsigned const_index,
		  unsigned location,
		  unsigned driver_location,
		  unsigned component,
		  unsigned num_components,
		  bool is_patch,
		  bool is_compact,
		  bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef dw_addr, stride;
	LLVMValueRef value[4], result;
	unsigned param = shader_io_get_unique_index(location);

	if (load_input) {
		uint32_t input_vertex_size = (ctx->tcs_num_inputs * 16) / 4;
		stride = LLVMConstInt(ctx->ac.i32, input_vertex_size, false);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (!is_patch) {
			stride = get_tcs_out_vertex_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		} else {
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
			stride = NULL;
		}
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 indir_index);

	for (unsigned i = 0; i < num_components + component; i++) {
		value[i] = ac_lds_load(&ctx->ac, dw_addr);
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       ctx->ac.i32_1, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	return result;
}

static void
store_tcs_output(struct ac_shader_abi *abi,
		 const nir_variable *var,
		 LLVMValueRef vertex_index,
		 LLVMValueRef param_index,
		 unsigned const_index,
		 LLVMValueRef src,
		 unsigned writemask)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	const unsigned location = var->data.location;
	const unsigned component = var->data.location_frac;
	const bool is_patch = var->data.patch;
	const bool is_compact = var->data.compact;
	const unsigned count = glsl_count_attribute_slots(var->type, false);
	LLVMValueRef dw_addr;
	LLVMValueRef stride = NULL;
	LLVMValueRef buf_addr = NULL;
	unsigned param;
	bool store_lds = true;

	if (is_patch) {
		if (!(ctx->tcs_patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
			store_lds = false;
	} else {
		if (!(ctx->tcs_outputs_read & (1ULL << location)))
			store_lds = false;
	}

	param = shader_io_get_unique_index(location);
	if (location == VARYING_SLOT_CLIP_DIST0 &&
	    is_compact && const_index > 3) {
		const_index -= 3;
		param++;
	}

	if (!is_patch) {
		stride = get_tcs_out_vertex_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
	}

	if (param_index)
		mark_tess_output(ctx, is_patch, param, count);
	else
		mark_tess_output(ctx, is_patch, param, 1);

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 param_index);
	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
						     vertex_index, param_index);

	bool is_tess_factor = false;
	if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
	    location == VARYING_SLOT_TESS_LEVEL_OUTER)
		is_tess_factor = true;

	unsigned base = is_compact ? const_index : 0;
	for (unsigned chan = 0; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);

		if (store_lds || is_tess_factor) {
			LLVMValueRef dw_addr_chan =
				LLVMBuildAdd(ctx->ac.builder, dw_addr,
					     LLVMConstInt(ctx->ac.i32, chan, false), "");
			ac_lds_store(&ctx->ac, dw_addr_chan, value);
		}

		if (!is_tess_factor && writemask != 0xF)
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
						    buf_addr, ctx->oc_lds,
						    4 * (base + chan), 1, 0, true, false);
	}

	if (writemask == 0xF) {
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
					    buf_addr, ctx->oc_lds,
					    (base * 4), 1, 0, true, false);
	}
}

static LLVMValueRef
load_tes_input(struct ac_shader_abi *abi,
	       LLVMTypeRef type,
	       LLVMValueRef vertex_index,
	       LLVMValueRef param_index,
	       unsigned const_index,
	       unsigned location,
	       unsigned driver_location,
	       unsigned component,
	       unsigned num_components,
	       bool is_patch,
	       bool is_compact,
	       bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef buf_addr;
	LLVMValueRef result;
	unsigned param = shader_io_get_unique_index(location);

	if (location == VARYING_SLOT_CLIP_DIST0 && is_compact && const_index > 3) {
		const_index -= 3;
		param++;
	}

	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
						     is_compact, vertex_index, param_index);

	LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
	buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");

	result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
				      buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, 1, 0, true, false);
	result = ac_trim_vector(&ctx->ac, result, num_components);
	return result;
}

static LLVMValueRef
load_gs_input(struct ac_shader_abi *abi,
	      unsigned location,
	      unsigned driver_location,
	      unsigned component,
	      unsigned num_components,
	      unsigned vertex_index,
	      unsigned const_index,
	      LLVMTypeRef type)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef vtx_offset;
	unsigned param, vtx_offset_param;
	LLVMValueRef value[4], result;

	vtx_offset_param = vertex_index;
	assert(vtx_offset_param < 6);
	vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
				  LLVMConstInt(ctx->ac.i32, 4, false), "");

	param = shader_io_get_unique_index(location);

	for (unsigned i = component; i < num_components + component; i++) {
		if (ctx->ac.chip_class >= GFX9) {
			LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
					       LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
			value[i] = ac_lds_load(&ctx->ac, dw_addr);
		} else {
			LLVMValueRef soffset =
				LLVMConstInt(ctx->ac.i32,
					     (param * 4 + i + const_index) * 256,
					     false);

			value[i] = ac_build_buffer_load(&ctx->ac,
							ctx->esgs_ring, 1,
							ctx->ac.i32_0,
							vtx_offset, soffset,
							0, 1, 0, true, false);

			value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i],
						    type, "");
		}
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	result = ac_to_integer(&ctx->ac, result);
	return result;
}


static void radv_emit_kill(struct ac_shader_abi *abi, LLVMValueRef visible)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_kill_if_false(&ctx->ac, visible);
}

static LLVMValueRef lookup_interp_param(struct ac_shader_abi *abi,
					enum glsl_interp_mode interp, unsigned location)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	switch (interp) {
	case INTERP_MODE_FLAT:
	default:
		return NULL;
	case INTERP_MODE_SMOOTH:
	case INTERP_MODE_NONE:
		if (location == INTERP_CENTER)
			return ctx->persp_center;
		else if (location == INTERP_CENTROID)
			return ctx->persp_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->persp_sample;
		break;
	case INTERP_MODE_NOPERSPECTIVE:
		if (location == INTERP_CENTER)
			return ctx->linear_center;
		else if (location == INTERP_CENTROID)
			return ctx->linear_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->linear_sample;
		break;
	}
	return NULL;
}

static LLVMValueRef load_sample_position(struct ac_shader_abi *abi,
					 LLVMValueRef sample_id)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef result;
	LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false));

	ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
			       ac_array_in_const_addr_space(ctx->ac.v2f32), "");

	sample_id = LLVMBuildAdd(ctx->ac.builder, sample_id, ctx->sample_pos_offset, "");
	result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);

	return result;
}


static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	uint8_t log2_ps_iter_samples = ctx->shader_info->info.ps.force_persample ?
		ctx->options->key.fs.log2_num_samples :
		ctx->options->key.fs.log2_ps_iter_samples;

	/* The bit pattern matches that used by fixed function fragment
	 * processing. */
	static const uint16_t ps_iter_masks[] = {
		0xffff, /* not used */
		0x5555,
		0x1111,
		0x0101,
		0x0001,
	};
	assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));

	uint32_t ps_iter_mask = ps_iter_masks[log2_ps_iter_samples];

	LLVMValueRef result, sample_id;
	sample_id = ac_unpack_param(&ctx->ac, abi->ancillary, 8, 4);
	sample_id = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, ps_iter_mask, false), sample_id, "");
	result = LLVMBuildAnd(ctx->ac.builder, sample_id, abi->sample_coverage, "");
	return result;
}
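
/* Example of the masking above (illustrative): at 4x MSAA with
 * log2_ps_iter_samples == 2, the per-sample mask is 0x1111; for
 * sample_id == 1 it is shifted to 0x2222 and ANDed with the coverage,
 * so each per-sample invocation only reports the samples it owns.
 */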


static void
visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
{
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	int idx;
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	assert(stream == 0);

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex,
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, kill it: excessive vertex emissions are not supposed to
	 * have any effect, and GS threads have no externally observable
	 * effects other than emitting vertices.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
	ac_build_kill_if_false(&ctx->ac, can_emit);

	/* Loop over all outputs. */
	idx = 0;
	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = 4;
		int slot = idx;
		int slot_inc = 1;

		if (!(ctx->output_mask & (1ull << i)))
			continue;

		if (i == VARYING_SLOT_CLIP_DIST0) {
			/* pack clip and cull into a single set of slots */
			length = ctx->num_output_clips + ctx->num_output_culls;
			if (length > 4)
				slot_inc = 2;
		}
		for (unsigned j = 0; j < length; j++) {
			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
			LLVMValueRef voffset = LLVMConstInt(ctx->ac.i32, (slot * 4 + j) * ctx->gs_max_out_vertices, false);
			voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
			voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");

			out_val = LLVMBuildBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			ac_build_buffer_store_dword(&ctx->ac, ctx->gsvs_ring,
						    out_val, 1,
						    voffset, ctx->gs2vs_offset, 0,
						    1, 1, true, true);
		}
		idx += slot_inc;
	}

	gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
				      ctx->ac.i32_1, "");
	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex);

	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (0 << 8), ctx->gs_wave_id);
}
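
/* GSVS ring addressing used above (illustrative): each output component
 * gets a contiguous run of gs_max_out_vertices dwords, so the dword
 * offset of component j of slot s for the n-th emitted vertex is
 * (s * 4 + j) * gs_max_out_vertices + n, scaled by 4 to get bytes.
 */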

static void
visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
}

static LLVMValueRef
load_tess_coord(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef coord[4] = {
		ctx->tes_u,
		ctx->tes_v,
		ctx->ac.f32_0,
		ctx->ac.f32_0,
	};

	if (ctx->tes_primitive_mode == GL_TRIANGLES)
		coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
					 LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");

	return ac_build_gather_values(&ctx->ac, coord, 3);
}
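
/* For triangle domains the hardware only supplies u and v; the third
 * barycentric coordinate is reconstructed as w = 1 - u - v, so e.g.
 * (u, v) = (0.25, 0.25) yields gl_TessCoord = (0.25, 0.25, 0.5).
 */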

static LLVMValueRef
load_patch_vertices_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	return LLVMConstInt(ctx->ac.i32, ctx->options->key.tcs.input_vertices, false);
}


static LLVMValueRef radv_load_base_vertex(struct ac_shader_abi *abi)
{
	return abi->base_vertex;
}

static LLVMValueRef radv_load_ssbo(struct ac_shader_abi *abi,
				   LLVMValueRef buffer_ptr, bool write)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}

static LLVMValueRef radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer_ptr)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}

static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
					  unsigned descriptor_set,
					  unsigned base_index,
					  unsigned constant_index,
					  LLVMValueRef index,
					  enum ac_descriptor_type desc_type,
					  bool image, bool write)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
	struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
	struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
	unsigned offset = binding->offset;
	unsigned stride = binding->size;
	unsigned type_size;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef type;

	assert(base_index < layout->binding_count);

	switch (desc_type) {
	case AC_DESC_IMAGE:
		type = ctx->ac.v8i32;
		type_size = 32;
		break;
	case AC_DESC_FMASK:
		type = ctx->ac.v8i32;
		offset += 32;
		type_size = 32;
		break;
	case AC_DESC_SAMPLER:
		type = ctx->ac.v4i32;
		if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
			offset += 64;

		type_size = 16;
		break;
	case AC_DESC_BUFFER:
		type = ctx->ac.v4i32;
		type_size = 16;
		break;
	default:
		unreachable("invalid desc_type\n");
	}

	offset += constant_index * stride;

	if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
	    (!index || binding->immutable_samplers_equal)) {
		if (binding->immutable_samplers_equal)
			constant_index = 0;

		const uint32_t *samplers = radv_immutable_samplers(layout, binding);

		LLVMValueRef constants[] = {
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
		};
		return ac_build_gather_values(&ctx->ac, constants, 4);
	}

	assert(stride % type_size == 0);

	if (!index)
		index = ctx->ac.i32_0;

	index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");

	list = ac_build_gep0(&ctx->ac, list, LLVMConstInt(ctx->ac.i32, offset, 0));
	list = LLVMBuildPointerCast(builder, list, ac_array_in_const_addr_space(type), "");

	return ac_build_load_to_sgpr(&ctx->ac, list, index);
}
1716
1717
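/* Declare a VS input: load each attribute slot from its vertex buffer
 * descriptor, using the instance index for instanced attributes and the
 * vertex index otherwise, then expand the result to a vec4.
 */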
1718 static void
1719 handle_vs_input_decl(struct radv_shader_context *ctx,
1720 struct nir_variable *variable)
1721 {
1722 LLVMValueRef t_list_ptr = ctx->vertex_buffers;
1723 LLVMValueRef t_offset;
1724 LLVMValueRef t_list;
1725 LLVMValueRef input;
1726 LLVMValueRef buffer_index;
1727 int index = variable->data.location - VERT_ATTRIB_GENERIC0;
1728 int idx = variable->data.location;
1729 unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
1730 uint8_t input_usage_mask =
1731 ctx->shader_info->info.vs.input_usage_mask[variable->data.location];
1732 unsigned num_channels = util_last_bit(input_usage_mask);
1733
1734 variable->data.driver_location = idx * 4;
1735
1736 for (unsigned i = 0; i < attrib_count; ++i, ++idx) {
1737 if (ctx->options->key.vs.instance_rate_inputs & (1u << (index + i))) {
1738 buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.instance_id,
1739 ctx->abi.start_instance, "");
1740 if (ctx->options->key.vs.as_ls) {
1741 ctx->shader_info->vs.vgpr_comp_cnt =
1742 MAX2(2, ctx->shader_info->vs.vgpr_comp_cnt);
1743 } else {
1744 ctx->shader_info->vs.vgpr_comp_cnt =
1745 MAX2(1, ctx->shader_info->vs.vgpr_comp_cnt);
1746 }
1747 } else
1748 buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
1749 ctx->abi.base_vertex, "");
1750 t_offset = LLVMConstInt(ctx->ac.i32, index + i, false);
1751
1752 t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);
1753
1754 input = ac_build_buffer_load_format(&ctx->ac, t_list,
1755 buffer_index,
1756 ctx->ac.i32_0,
1757 num_channels, false, true);
1758
1759 input = ac_build_expand_to_vec4(&ctx->ac, input, num_channels);
1760
1761 for (unsigned chan = 0; chan < 4; chan++) {
1762 LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
1763 ctx->inputs[ac_llvm_reg_index_soa(idx, chan)] =
1764 ac_to_integer(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder,
1765 input, llvm_chan, ""));
1766 }
1767 }
1768 }
1769
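/* Interpolate a FS input over its four channels. A NULL interp_param
 * selects flat shading, where the value is fetched unmodified with
 * fs.constant instead (see the comment in the body).
 */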
1770 static void interp_fs_input(struct radv_shader_context *ctx,
1771 unsigned attr,
1772 LLVMValueRef interp_param,
1773 LLVMValueRef prim_mask,
1774 LLVMValueRef result[4])
1775 {
1776 LLVMValueRef attr_number;
1777 unsigned chan;
1778 LLVMValueRef i, j;
1779 bool interp = interp_param != NULL;
1780
1781 attr_number = LLVMConstInt(ctx->ac.i32, attr, false);
1782
1783 /* fs.constant returns the param from the middle vertex, so it's not
1784 * really useful for flat shading. It's meant to be used for custom
1785 * interpolation (but the intrinsic can't fetch from the other two
1786 * vertices).
1787 *
1788 * Luckily, it doesn't matter, because we rely on the FLAT_SHADE state
1789 * to do the right thing. The only reason we use fs.constant is that
1790 * fs.interp cannot be used on integers, because they can be equal
1791 * to NaN.
1792 */
1793 if (interp) {
1794 interp_param = LLVMBuildBitCast(ctx->ac.builder, interp_param,
1795 ctx->ac.v2f32, "");
1796
1797 i = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
1798 ctx->ac.i32_0, "");
1799 j = LLVMBuildExtractElement(ctx->ac.builder, interp_param,
1800 ctx->ac.i32_1, "");
1801 }
1802
1803 for (chan = 0; chan < 4; chan++) {
1804 LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
1805
1806 if (interp) {
1807 result[chan] = ac_build_fs_interp(&ctx->ac,
1808 llvm_chan,
1809 attr_number,
1810 prim_mask, i, j);
1811 } else {
1812 result[chan] = ac_build_fs_interp_mov(&ctx->ac,
1813 LLVMConstInt(ctx->ac.i32, 2, false),
1814 llvm_chan,
1815 attr_number,
1816 prim_mask);
1817 }
1818 }
1819 }
1820
1821 static void
1822 handle_fs_input_decl(struct radv_shader_context *ctx,
1823 struct nir_variable *variable)
1824 {
1825 int idx = variable->data.location;
1826 unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
1827 LLVMValueRef interp;
1828
1829 variable->data.driver_location = idx * 4;
1830 ctx->input_mask |= ((1ull << attrib_count) - 1) << variable->data.location;
1831
1832 if (glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_FLOAT) {
1833 unsigned interp_type;
1834 if (variable->data.sample)
1835 interp_type = INTERP_SAMPLE;
1836 else if (variable->data.centroid)
1837 interp_type = INTERP_CENTROID;
1838 else
1839 interp_type = INTERP_CENTER;
1840
1841 interp = lookup_interp_param(&ctx->abi, variable->data.interpolation, interp_type);
1842 } else
1843 interp = NULL;
1844
1845 for (unsigned i = 0; i < attrib_count; ++i)
1846 ctx->inputs[ac_llvm_reg_index_soa(idx + i, 0)] = interp;
1847
1848 }
1849
1850 static void
1851 handle_vs_inputs(struct radv_shader_context *ctx,
1852 struct nir_shader *nir) {
1853 nir_foreach_variable(variable, &nir->inputs)
1854 handle_vs_input_decl(ctx, variable);
1855 }
1856
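/* "bc_optimize": with MSAA, the HW sets bit 31 of PRIM_MASK when a wave
 * only contains fully covered quads and then skips the centroid
 * computation, so centroid barycentrics must fall back to the center
 * values in that case.
 */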
1857 static void
1858 prepare_interp_optimize(struct radv_shader_context *ctx,
1859 struct nir_shader *nir)
1860 {
1861 if (!ctx->options->key.fs.multisample)
1862 return;
1863
1864 bool uses_center = false;
1865 bool uses_centroid = false;
1866 nir_foreach_variable(variable, &nir->inputs) {
1867 if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
1868 variable->data.sample)
1869 continue;
1870
1871 if (variable->data.centroid)
1872 uses_centroid = true;
1873 else
1874 uses_center = true;
1875 }
1876
1877 if (uses_center && uses_centroid) {
1878 LLVMValueRef sel = LLVMBuildICmp(ctx->ac.builder, LLVMIntSLT, ctx->abi.prim_mask, ctx->ac.i32_0, "");
1879 ctx->persp_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->persp_center, ctx->persp_centroid, "");
1880 ctx->linear_centroid = LLVMBuildSelect(ctx->ac.builder, sel, ctx->linear_center, ctx->linear_centroid, "");
1881 }
1882 }
1883
1884 static void
1885 handle_fs_inputs(struct radv_shader_context *ctx,
1886 struct nir_shader *nir)
1887 {
1888 prepare_interp_optimize(ctx, nir);
1889
1890 nir_foreach_variable(variable, &nir->inputs)
1891 handle_fs_input_decl(ctx, variable);
1892
1893 unsigned index = 0;
1894
1895 if (ctx->shader_info->info.ps.uses_input_attachments ||
1896 ctx->shader_info->info.needs_multiview_view_index)
1897 ctx->input_mask |= 1ull << VARYING_SLOT_LAYER;
1898
1899 for (unsigned i = 0; i < RADEON_LLVM_MAX_INPUTS; ++i) {
1900 LLVMValueRef interp_param;
1901 LLVMValueRef *inputs = ctx->inputs + ac_llvm_reg_index_soa(i, 0);
1902
1903 if (!(ctx->input_mask & (1ull << i)))
1904 continue;
1905
1906 if (i >= VARYING_SLOT_VAR0 || i == VARYING_SLOT_PNTC ||
1907 i == VARYING_SLOT_PRIMITIVE_ID || i == VARYING_SLOT_LAYER) {
1908 interp_param = *inputs;
1909 interp_fs_input(ctx, index, interp_param, ctx->abi.prim_mask,
1910 inputs);
1911
1912 if (!interp_param)
1913 ctx->shader_info->fs.flat_shaded_mask |= 1u << index;
1914 ++index;
1915 } else if (i == VARYING_SLOT_POS) {
1916 for (int j = 0; j < 3; ++j)
1917 inputs[j] = ctx->abi.frag_pos[j];
1918
1919 inputs[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
1920 ctx->abi.frag_pos[3]);
1921 }
1922 }
1923 ctx->shader_info->fs.num_interp = index;
1924 ctx->shader_info->fs.input_mask = ctx->input_mask >> VARYING_SLOT_VAR0;
1925
1926 if (ctx->shader_info->info.needs_multiview_view_index)
1927 ctx->abi.view_index = ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
1928 }
1929
1930 static void
1931 scan_shader_output_decl(struct radv_shader_context *ctx,
1932 struct nir_variable *variable,
1933 struct nir_shader *shader,
1934 gl_shader_stage stage)
1935 {
1936 int idx = variable->data.location + variable->data.index;
1937 unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
1938 uint64_t mask_attribs;
1939
1940 variable->data.driver_location = idx * 4;
1941
1942 /* tess ctrl has its own load/store paths for outputs */
1943 if (stage == MESA_SHADER_TESS_CTRL)
1944 return;
1945
1946 mask_attribs = ((1ull << attrib_count) - 1) << idx;
1947 if (stage == MESA_SHADER_VERTEX ||
1948 stage == MESA_SHADER_TESS_EVAL ||
1949 stage == MESA_SHADER_GEOMETRY) {
1950 if (idx == VARYING_SLOT_CLIP_DIST0) {
1951 int length = shader->info.clip_distance_array_size +
1952 shader->info.cull_distance_array_size;
1953 if (stage == MESA_SHADER_VERTEX) {
1954 ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
1955 ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
1956 }
1957 if (stage == MESA_SHADER_TESS_EVAL) {
1958 ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
1959 ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
1960 }
1961
1962 if (length > 4)
1963 attrib_count = 2;
1964 else
1965 attrib_count = 1;
1966 mask_attribs = 1ull << idx;
1967 }
1968 }
1969
1970 ctx->output_mask |= mask_attribs;
1971 }
1972
1973
1974 /* Initialize arguments for the shader export intrinsic */
1975 static void
1976 si_llvm_init_export_args(struct radv_shader_context *ctx,
1977 LLVMValueRef *values,
1978 unsigned enabled_channels,
1979 unsigned target,
1980 struct ac_export_args *args)
1981 {
1982 /* Specify the channels that are enabled. */
1983 args->enabled_channels = enabled_channels;
1984
1985 /* Specify whether the EXEC mask represents the valid mask */
1986 args->valid_mask = 0;
1987
1988 /* Specify whether this is the last export */
1989 args->done = 0;
1990
1991 /* Specify the target we are exporting */
1992 args->target = target;
1993
1994 args->compr = false;
1995 args->out[0] = LLVMGetUndef(ctx->ac.f32);
1996 args->out[1] = LLVMGetUndef(ctx->ac.f32);
1997 args->out[2] = LLVMGetUndef(ctx->ac.f32);
1998 args->out[3] = LLVMGetUndef(ctx->ac.f32);
1999
2000 if (ctx->stage == MESA_SHADER_FRAGMENT && target >= V_008DFC_SQ_EXP_MRT) {
2001 unsigned index = target - V_008DFC_SQ_EXP_MRT;
2002 unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
2003 bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
2004 bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;
2005 unsigned chan;
2006
2007 LLVMValueRef (*packf)(struct ac_llvm_context *ctx, LLVMValueRef args[2]) = NULL;
2008 LLVMValueRef (*packi)(struct ac_llvm_context *ctx, LLVMValueRef args[2],
2009 unsigned bits, bool hi) = NULL;
2010
2011 switch(col_format) {
2012 case V_028714_SPI_SHADER_ZERO:
2013 args->enabled_channels = 0; /* writemask */
2014 args->target = V_008DFC_SQ_EXP_NULL;
2015 break;
2016
2017 case V_028714_SPI_SHADER_32_R:
2018 args->enabled_channels = 1;
2019 args->out[0] = values[0];
2020 break;
2021
2022 case V_028714_SPI_SHADER_32_GR:
2023 args->enabled_channels = 0x3;
2024 args->out[0] = values[0];
2025 args->out[1] = values[1];
2026 break;
2027
2028 case V_028714_SPI_SHADER_32_AR:
2029 args->enabled_channels = 0x9;
2030 args->out[0] = values[0];
2031 args->out[3] = values[3];
2032 break;
2033
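/* For the compressed formats below, enabled_channels = 0x5 selects the
 * two 32-bit words that each hold a pair of packed 16-bit channels.
 */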
2034 case V_028714_SPI_SHADER_FP16_ABGR:
2035 args->enabled_channels = 0x5;
2036 packf = ac_build_cvt_pkrtz_f16;
2037 break;
2038
2039 case V_028714_SPI_SHADER_UNORM16_ABGR:
2040 args->enabled_channels = 0x5;
2041 packf = ac_build_cvt_pknorm_u16;
2042 break;
2043
2044 case V_028714_SPI_SHADER_SNORM16_ABGR:
2045 args->enabled_channels = 0x5;
2046 packf = ac_build_cvt_pknorm_i16;
2047 break;
2048
2049 case V_028714_SPI_SHADER_UINT16_ABGR:
2050 args->enabled_channels = 0x5;
2051 packi = ac_build_cvt_pk_u16;
2052 break;
2053
2054 case V_028714_SPI_SHADER_SINT16_ABGR:
2055 args->enabled_channels = 0x5;
2056 packi = ac_build_cvt_pk_i16;
2057 break;
2058
2059 default:
2060 case V_028714_SPI_SHADER_32_ABGR:
2061 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
2062 break;
2063 }
2064
2065 /* Pack f16 or norm_i16/u16. */
2066 if (packf) {
2067 for (chan = 0; chan < 2; chan++) {
2068 LLVMValueRef pack_args[2] = {
2069 values[2 * chan],
2070 values[2 * chan + 1]
2071 };
2072 LLVMValueRef packed;
2073
2074 packed = packf(&ctx->ac, pack_args);
2075 args->out[chan] = ac_to_float(&ctx->ac, packed);
2076 }
2077 args->compr = 1; /* COMPR flag */
2078 }
2079
2080 /* Pack i16/u16. */
2081 if (packi) {
2082 for (chan = 0; chan < 2; chan++) {
2083 LLVMValueRef pack_args[2] = {
2084 ac_to_integer(&ctx->ac, values[2 * chan]),
2085 ac_to_integer(&ctx->ac, values[2 * chan + 1])
2086 };
2087 LLVMValueRef packed;
2088
2089 packed = packi(&ctx->ac, pack_args,
2090 is_int8 ? 8 : is_int10 ? 10 : 16,
2091 chan == 1);
2092 args->out[chan] = ac_to_float(&ctx->ac, packed);
2093 }
2094 args->compr = 1; /* COMPR flag */
2095 }
2096 return;
2097 }
2098
2099 memcpy(&args->out[0], values, sizeof(values[0]) * 4);
2100
2101 for (unsigned i = 0; i < 4; ++i) {
2102 if (!(args->enabled_channels & (1 << i)))
2103 continue;
2104
2105 args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
2106 }
2107 }
2108
2109 static void
2110 radv_export_param(struct radv_shader_context *ctx, unsigned index,
2111 LLVMValueRef *values, unsigned enabled_channels)
2112 {
2113 struct ac_export_args args;
2114
2115 si_llvm_init_export_args(ctx, values, enabled_channels,
2116 V_008DFC_SQ_EXP_PARAM + index, &args);
2117 ac_build_export(&ctx->ac, &args);
2118 }
2119
2120 static LLVMValueRef
2121 radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
2122 {
2123 LLVMValueRef output =
2124 ctx->abi.outputs[ac_llvm_reg_index_soa(index, chan)];
2125
2126 return LLVMBuildLoad(ctx->ac.builder, output, "");
2127 }
2128
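/* Emit the VS/TES epilogue: export position (with point size, layer and
 * viewport index packed into POS_1, and clip/cull distances into POS_2/3),
 * then export the remaining outputs as PARAM exports while recording their
 * offsets in the outinfo.
 */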
2129 static void
2130 handle_vs_outputs_post(struct radv_shader_context *ctx,
2131 bool export_prim_id,
2132 struct radv_vs_output_info *outinfo)
2133 {
2134 uint32_t param_count = 0;
2135 unsigned target;
2136 unsigned pos_idx, num_pos_exports = 0;
2137 struct ac_export_args args, pos_args[4] = {};
2138 LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_index_value = NULL;
2139 int i;
2140
2141 if (ctx->options->key.has_multiview_view_index) {
2142 LLVMValueRef* tmp_out = &ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
2143 if (!*tmp_out) {
2144 for (unsigned i = 0; i < 4; ++i)
2145 ctx->abi.outputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
2146 ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
2147 }
2148
2149 LLVMBuildStore(ctx->ac.builder, ac_to_float(&ctx->ac, ctx->abi.view_index), *tmp_out);
2150 ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
2151 }
2152
2153 memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
2154 sizeof(outinfo->vs_output_param_offset));
2155
2156 if (ctx->output_mask & (1ull << VARYING_SLOT_CLIP_DIST0)) {
2157 LLVMValueRef slots[8];
2158 unsigned j;
2159
2160 if (outinfo->cull_dist_mask)
2161 outinfo->cull_dist_mask <<= ctx->num_output_clips;
2162
2163 i = VARYING_SLOT_CLIP_DIST0;
2164 for (j = 0; j < ctx->num_output_clips + ctx->num_output_culls; j++)
2165 slots[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
2166
2167 for (i = ctx->num_output_clips + ctx->num_output_culls; i < 8; i++)
2168 slots[i] = LLVMGetUndef(ctx->ac.f32);
2169
2170 if (ctx->num_output_clips + ctx->num_output_culls > 4) {
2171 target = V_008DFC_SQ_EXP_POS + 3;
2172 si_llvm_init_export_args(ctx, &slots[4], 0xf, target, &args);
2173 memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
2174 &args, sizeof(args));
2175 }
2176
2177 target = V_008DFC_SQ_EXP_POS + 2;
2178 si_llvm_init_export_args(ctx, &slots[0], 0xf, target, &args);
2179 memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
2180 &args, sizeof(args));
2181
2182 }
2183
2184 LLVMValueRef pos_values[4] = {ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_1};
2185 if (ctx->output_mask & (1ull << VARYING_SLOT_POS)) {
2186 for (unsigned j = 0; j < 4; j++)
2187 pos_values[j] = radv_load_output(ctx, VARYING_SLOT_POS, j);
2188 }
2189 si_llvm_init_export_args(ctx, pos_values, 0xf, V_008DFC_SQ_EXP_POS, &pos_args[0]);
2190
2191 if (ctx->output_mask & (1ull << VARYING_SLOT_PSIZ)) {
2192 outinfo->writes_pointsize = true;
2193 psize_value = radv_load_output(ctx, VARYING_SLOT_PSIZ, 0);
2194 }
2195
2196 if (ctx->output_mask & (1ull << VARYING_SLOT_LAYER)) {
2197 outinfo->writes_layer = true;
2198 layer_value = radv_load_output(ctx, VARYING_SLOT_LAYER, 0);
2199 }
2200
2201 if (ctx->output_mask & (1ull << VARYING_SLOT_VIEWPORT)) {
2202 outinfo->writes_viewport_index = true;
2203 viewport_index_value = radv_load_output(ctx, VARYING_SLOT_VIEWPORT, 0);
2204 }
2205
2206 if (outinfo->writes_pointsize ||
2207 outinfo->writes_layer ||
2208 outinfo->writes_viewport_index) {
2209 pos_args[1].enabled_channels = ((outinfo->writes_pointsize ? 1 : 0) |
2210 (outinfo->writes_layer ? 4 : 0));
2211 pos_args[1].valid_mask = 0;
2212 pos_args[1].done = 0;
2213 pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
2214 pos_args[1].compr = 0;
2215 pos_args[1].out[0] = ctx->ac.f32_0; /* X */
2216 pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
2217 pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
2218 pos_args[1].out[3] = ctx->ac.f32_0; /* W */
2219
2220 if (outinfo->writes_pointsize)
2221 pos_args[1].out[0] = psize_value;
2222 if (outinfo->writes_layer)
2223 pos_args[1].out[2] = layer_value;
2224 if (outinfo->writes_viewport_index) {
2225 if (ctx->options->chip_class >= GFX9) {
2226 /* GFX9 has the layer in out.z[10:0] and the viewport
2227 * index in out.z[19:16].
2228 */
2229 LLVMValueRef v = viewport_index_value;
2230 v = ac_to_integer(&ctx->ac, v);
2231 v = LLVMBuildShl(ctx->ac.builder, v,
2232 LLVMConstInt(ctx->ac.i32, 16, false),
2233 "");
2234 v = LLVMBuildOr(ctx->ac.builder, v,
2235 ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
2236
2237 pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
2238 pos_args[1].enabled_channels |= 1 << 2;
2239 } else {
2240 pos_args[1].out[3] = viewport_index_value;
2241 pos_args[1].enabled_channels |= 1 << 3;
2242 }
2243 }
2244 }
2245 for (i = 0; i < 4; i++) {
2246 if (pos_args[i].out[0])
2247 num_pos_exports++;
2248 }
2249
2250 pos_idx = 0;
2251 for (i = 0; i < 4; i++) {
2252 if (!pos_args[i].out[0])
2253 continue;
2254
2255 /* Specify the target we are exporting */
2256 pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
2257 if (pos_idx == num_pos_exports)
2258 pos_args[i].done = 1;
2259 ac_build_export(&ctx->ac, &pos_args[i]);
2260 }
2261
2262 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2263 LLVMValueRef values[4];
2264 if (!(ctx->output_mask & (1ull << i)))
2265 continue;
2266
2267 if (i != VARYING_SLOT_LAYER &&
2268 i != VARYING_SLOT_PRIMITIVE_ID &&
2269 i < VARYING_SLOT_VAR0)
2270 continue;
2271
2272 for (unsigned j = 0; j < 4; j++)
2273 values[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
2274
2275 unsigned output_usage_mask;
2276
2277 if (ctx->stage == MESA_SHADER_VERTEX &&
2278 !ctx->is_gs_copy_shader) {
2279 output_usage_mask =
2280 ctx->shader_info->info.vs.output_usage_mask[i];
2281 } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
2282 output_usage_mask =
2283 ctx->shader_info->info.tes.output_usage_mask[i];
2284 } else {
2285 /* Enable all channels for the GS copy shader because
2286 * we don't know the output usage mask currently.
2287 */
2288 output_usage_mask = 0xf;
2289 }
2290
2291 radv_export_param(ctx, param_count, values, output_usage_mask);
2292
2293 outinfo->vs_output_param_offset[i] = param_count++;
2294 }
2295
2296 if (export_prim_id) {
2297 LLVMValueRef values[4];
2298
2299 values[0] = ctx->vs_prim_id;
2300 ctx->shader_info->vs.vgpr_comp_cnt = MAX2(2,
2301 ctx->shader_info->vs.vgpr_comp_cnt);
2302 for (unsigned j = 1; j < 4; j++)
2303 values[j] = ctx->ac.f32_0;
2304
2305 radv_export_param(ctx, param_count, values, 0xf);
2306
2307 outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
2308 outinfo->export_prim_id = true;
2309 }
2310
2311 outinfo->pos_exports = num_pos_exports;
2312 outinfo->param_exports = param_count;
2313 }
2314
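/* Emit the ES epilogue: on GFX9 (merged ES/GS) outputs are stored to LDS
 * at an address derived from the wave and thread id; on older chips they
 * are written to the ESGS ring buffer. Also computes esgs_itemsize from
 * the highest written param.
 */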
2315 static void
2316 handle_es_outputs_post(struct radv_shader_context *ctx,
2317 struct radv_es_output_info *outinfo)
2318 {
2319 int j;
2320 uint64_t max_output_written = 0;
2321 LLVMValueRef lds_base = NULL;
2322
2323 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2324 int param_index;
2325 int length = 4;
2326
2327 if (!(ctx->output_mask & (1ull << i)))
2328 continue;
2329
2330 if (i == VARYING_SLOT_CLIP_DIST0)
2331 length = ctx->num_output_clips + ctx->num_output_culls;
2332
2333 param_index = shader_io_get_unique_index(i);
2334
2335 max_output_written = MAX2(param_index + (length > 4), max_output_written);
2336 }
2337
2338 outinfo->esgs_itemsize = (max_output_written + 1) * 16;
2339
2340 if (ctx->ac.chip_class >= GFX9) {
2341 unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
2342 LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
2343 LLVMValueRef wave_idx = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
2344 LLVMConstInt(ctx->ac.i32, 24, false),
2345 LLVMConstInt(ctx->ac.i32, 4, false), false);
2346 vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
2347 LLVMBuildMul(ctx->ac.builder, wave_idx,
2348 LLVMConstInt(ctx->ac.i32, 64, false), ""), "");
2349 lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
2350 LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
2351 }
2352
2353 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2354 LLVMValueRef dw_addr = NULL;
2355 LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
2356 int param_index;
2357 int length = 4;
2358
2359 if (!(ctx->output_mask & (1ull << i)))
2360 continue;
2361
2362 if (i == VARYING_SLOT_CLIP_DIST0)
2363 length = ctx->num_output_clips + ctx->num_output_culls;
2364
2365 param_index = shader_io_get_unique_index(i);
2366
2367 if (lds_base) {
2368 dw_addr = LLVMBuildAdd(ctx->ac.builder, lds_base,
2369 LLVMConstInt(ctx->ac.i32, param_index * 4, false),
2370 "");
2371 }
2372 for (j = 0; j < length; j++) {
2373 LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
2374 out_val = LLVMBuildBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");
2375
2376 if (ctx->ac.chip_class >= GFX9) {
2377 ac_lds_store(&ctx->ac, dw_addr, out_val);
2379 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
2380 } else {
2381 ac_build_buffer_store_dword(&ctx->ac,
2382 ctx->esgs_ring,
2383 out_val, 1,
2384 NULL, ctx->es2gs_offset,
2385 (4 * param_index + j) * 4,
2386 1, 1, true, true);
2387 }
2388 }
2389 }
2390 }
2391
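/* Emit the LS epilogue: store all outputs to LDS, strided by the number of
 * TCS inputs per vertex, so the HS can read them.
 */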
2392 static void
2393 handle_ls_outputs_post(struct radv_shader_context *ctx)
2394 {
2395 LLVMValueRef vertex_id = ctx->rel_auto_id;
2396 uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->info.vs.ls_outputs_written);
2397 LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
2398 LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
2399 vertex_dw_stride, "");
2400
2401 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2402 LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
2403 int length = 4;
2404
2405 if (!(ctx->output_mask & (1ull << i)))
2406 continue;
2407
2408 if (i == VARYING_SLOT_CLIP_DIST0)
2409 length = ctx->num_output_clips + ctx->num_output_culls;
2410 int param = shader_io_get_unique_index(i);
2411 mark_tess_output(ctx, false, param, 1);
2412 if (length > 4)
2413 mark_tess_output(ctx, false, param + 1, 1);
2414 LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
2415 LLVMConstInt(ctx->ac.i32, param * 4, false),
2416 "");
2417 for (unsigned j = 0; j < length; j++) {
2418 ac_lds_store(&ctx->ac, dw_addr,
2419 LLVMBuildLoad(ctx->ac.builder, out_ptr[j], ""));
2420 dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr, ctx->ac.i32_1, "");
2421 }
2422 }
2423 }
2424
2425 struct ac_build_if_state
2426 {
2427 struct radv_shader_context *ctx;
2428 LLVMValueRef condition;
2429 LLVMBasicBlockRef entry_block;
2430 LLVMBasicBlockRef true_block;
2431 LLVMBasicBlockRef false_block;
2432 LLVMBasicBlockRef merge_block;
2433 };
2434
2435 static LLVMBasicBlockRef
2436 ac_build_insert_new_block(struct radv_shader_context *ctx, const char *name)
2437 {
2438 LLVMBasicBlockRef current_block;
2439 LLVMBasicBlockRef next_block;
2440 LLVMBasicBlockRef new_block;
2441
2442 /* get current basic block */
2443 current_block = LLVMGetInsertBlock(ctx->ac.builder);
2444
2445 /* check if there's another block after this one */
2446 next_block = LLVMGetNextBasicBlock(current_block);
2447 if (next_block) {
2448 /* insert the new block before the next block */
2449 new_block = LLVMInsertBasicBlockInContext(ctx->context, next_block, name);
2450 }
2451 else {
2452 /* append new block after current block */
2453 LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
2454 new_block = LLVMAppendBasicBlockInContext(ctx->context, function, name);
2455 }
2456 return new_block;
2457 }
2458
2459 static void
2460 ac_nir_build_if(struct ac_build_if_state *ifthen,
2461 struct radv_shader_context *ctx,
2462 LLVMValueRef condition)
2463 {
2464 LLVMBasicBlockRef block = LLVMGetInsertBlock(ctx->ac.builder);
2465
2466 memset(ifthen, 0, sizeof *ifthen);
2467 ifthen->ctx = ctx;
2468 ifthen->condition = condition;
2469 ifthen->entry_block = block;
2470
2471 /* create endif/merge basic block for the phi functions */
2472 ifthen->merge_block = ac_build_insert_new_block(ctx, "endif-block");
2473
2474 /* create/insert true_block before merge_block */
2475 ifthen->true_block =
2476 LLVMInsertBasicBlockInContext(ctx->context,
2477 ifthen->merge_block,
2478 "if-true-block");
2479
2480 /* subsequent code goes into the true block */
2481 LLVMPositionBuilderAtEnd(ctx->ac.builder, ifthen->true_block);
2482 }
2483
2484 /**
2485 * End a conditional.
2486 */
2487 static void
2488 ac_nir_build_endif(struct ac_build_if_state *ifthen)
2489 {
2490 LLVMBuilderRef builder = ifthen->ctx->ac.builder;
2491
2492 /* Insert branch to the merge block from current block */
2493 LLVMBuildBr(builder, ifthen->merge_block);
2494
2495 /*
2496 * Now patch in the various branch instructions.
2497 */
2498
2499 /* Insert the conditional branch instruction at the end of entry_block */
2500 LLVMPositionBuilderAtEnd(builder, ifthen->entry_block);
2501 if (ifthen->false_block) {
2502 /* we have an else clause */
2503 LLVMBuildCondBr(builder, ifthen->condition,
2504 ifthen->true_block, ifthen->false_block);
2505 }
2506 else {
2507 /* no else clause */
2508 LLVMBuildCondBr(builder, ifthen->condition,
2509 ifthen->true_block, ifthen->merge_block);
2510 }
2511
2512 /* Resume building code at end of the ifthen->merge_block */
2513 LLVMPositionBuilderAtEnd(builder, ifthen->merge_block);
2514 }
2515
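/* Write the tessellation factors: invocation 0 of each patch loads the
 * inner/outer factors from LDS and stores them to the TF ring. On SI-VI
 * the first patch also writes the dynamic HS control word, and the factors
 * are copied to the offchip buffer when the TES reads them.
 */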
2516 static void
2517 write_tess_factors(struct radv_shader_context *ctx)
2518 {
2519 unsigned stride, outer_comps, inner_comps;
2520 struct ac_build_if_state if_ctx, inner_if_ctx;
2521 LLVMValueRef invocation_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 8, 5);
2522 LLVMValueRef rel_patch_id = ac_unpack_param(&ctx->ac, ctx->abi.tcs_rel_ids, 0, 8);
2523 unsigned tess_inner_index = 0, tess_outer_index;
2524 LLVMValueRef lds_base, lds_inner = NULL, lds_outer, byteoffset, buffer;
2525 LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
2526 int i;
2527 ac_emit_barrier(&ctx->ac, ctx->stage);
2528
2529 switch (ctx->options->key.tcs.primitive_mode) {
2530 case GL_ISOLINES:
2531 stride = 2;
2532 outer_comps = 2;
2533 inner_comps = 0;
2534 break;
2535 case GL_TRIANGLES:
2536 stride = 4;
2537 outer_comps = 3;
2538 inner_comps = 1;
2539 break;
2540 case GL_QUADS:
2541 stride = 6;
2542 outer_comps = 4;
2543 inner_comps = 2;
2544 break;
2545 default:
2546 return;
2547 }
2548
2549 ac_nir_build_if(&if_ctx, ctx,
2550 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2551 invocation_id, ctx->ac.i32_0, ""));
2552
2553 lds_base = get_tcs_out_current_patch_data_offset(ctx);
2554
2555 if (inner_comps) {
2556 tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
2557 mark_tess_output(ctx, true, tess_inner_index, 1);
2558 lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_base,
2559 LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
2560 }
2561
2562 tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
2563 mark_tess_output(ctx, true, tess_outer_index, 1);
2564 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_base,
2565 LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");
2566
2567 for (i = 0; i < 4; i++) {
2568 inner[i] = LLVMGetUndef(ctx->ac.i32);
2569 outer[i] = LLVMGetUndef(ctx->ac.i32);
2570 }
2571
2572 /* LINES reversal: the two outer factors are written to out[] in swapped order. */
2573 if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
2574 outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
2575 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
2576 ctx->ac.i32_1, "");
2577 outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
2578 } else {
2579 for (i = 0; i < outer_comps; i++) {
2580 outer[i] = out[i] =
2581 ac_lds_load(&ctx->ac, lds_outer);
2582 lds_outer = LLVMBuildAdd(ctx->ac.builder, lds_outer,
2583 ctx->ac.i32_1, "");
2584 }
2585 for (i = 0; i < inner_comps; i++) {
2586 inner[i] = out[outer_comps+i] =
2587 ac_lds_load(&ctx->ac, lds_inner);
2588 lds_inner = LLVMBuildAdd(ctx->ac.builder, lds_inner,
2589 ctx->ac.i32_1, "");
2590 }
2591 }
2592
2593 /* Convert the outputs to vectors for stores. */
2594 vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
2595 vec1 = NULL;
2596
2597 if (stride > 4)
2598 vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);
2599
2600
2601 buffer = ctx->hs_ring_tess_factor;
2602 tf_base = ctx->tess_factor_offset;
2603 byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
2604 LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
2605 unsigned tf_offset = 0;
2606
2607 if (ctx->options->chip_class <= VI) {
2608 ac_nir_build_if(&inner_if_ctx, ctx,
2609 LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
2610 rel_patch_id, ctx->ac.i32_0, ""));
2611
2612 /* Store the dynamic HS control word. */
2613 ac_build_buffer_store_dword(&ctx->ac, buffer,
2614 LLVMConstInt(ctx->ac.i32, 0x80000000, false),
2615 1, ctx->ac.i32_0, tf_base,
2616 0, 1, 0, true, false);
2617 tf_offset += 4;
2618
2619 ac_nir_build_endif(&inner_if_ctx);
2620 }
2621
2622 /* Store the tessellation factors. */
2623 ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
2624 MIN2(stride, 4), byteoffset, tf_base,
2625 tf_offset, 1, 0, true, false);
2626 if (vec1)
2627 ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
2628 stride - 4, byteoffset, tf_base,
2629 16 + tf_offset, 1, 0, true, false);
2630
2631 /* Store to offchip for TES to read - only if TES reads them. */
2632 if (ctx->options->key.tcs.tes_reads_tess_factors) {
2633 LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
2634 LLVMValueRef tf_inner_offset;
2635 unsigned param_outer, param_inner;
2636
2637 param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
2638 tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
2639 LLVMConstInt(ctx->ac.i32, param_outer, 0));
2640
2641 outer_vec = ac_build_gather_values(&ctx->ac, outer,
2642 util_next_power_of_two(outer_comps));
2643
2644 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
2645 outer_comps, tf_outer_offset,
2646 ctx->oc_lds, 0, 1, 0, true, false);
2647 if (inner_comps) {
2648 param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
2649 tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
2650 LLVMConstInt(ctx->ac.i32, param_inner, 0));
2651
2652 inner_vec = inner_comps == 1 ? inner[0] :
2653 ac_build_gather_values(&ctx->ac, inner, inner_comps);
2654 ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
2655 inner_comps, tf_inner_offset,
2656 ctx->oc_lds, 0, 1, 0, true, false);
2657 }
2658 }
2659 ac_nir_build_endif(&if_ctx);
2660 }
2661
2662 static void
2663 handle_tcs_outputs_post(struct radv_shader_context *ctx)
2664 {
2665 write_tess_factors(ctx);
2666 }
2667
2668 static bool
2669 si_export_mrt_color(struct radv_shader_context *ctx,
2670 LLVMValueRef *color, unsigned index,
2671 struct ac_export_args *args)
2672 {
2673 /* Export */
2674 si_llvm_init_export_args(ctx, color, 0xf,
2675 V_008DFC_SQ_EXP_MRT + index, args);
2676 if (!args->enabled_channels)
2677 return false; /* unnecessary NULL export */
2678
2679 return true;
2680 }
2681
2682 static void
2683 radv_export_mrt_z(struct radv_shader_context *ctx,
2684 LLVMValueRef depth, LLVMValueRef stencil,
2685 LLVMValueRef samplemask)
2686 {
2687 struct ac_export_args args;
2688
2689 ac_export_mrt_z(&ctx->ac, depth, stencil, samplemask, &args);
2690
2691 ac_build_export(&ctx->ac, &args);
2692 }
2693
2694 static void
2695 handle_fs_outputs_post(struct radv_shader_context *ctx)
2696 {
2697 unsigned index = 0;
2698 LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
2699 struct ac_export_args color_args[8];
2700
2701 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
2702 LLVMValueRef values[4];
2703
2704 if (!(ctx->output_mask & (1ull << i)))
2705 continue;
2706
2707 if (i < FRAG_RESULT_DATA0)
2708 continue;
2709
2710 for (unsigned j = 0; j < 4; j++)
2711 values[j] = ac_to_float(&ctx->ac,
2712 radv_load_output(ctx, i, j));
2713
2714 bool ret = si_export_mrt_color(ctx, values,
2715 i - FRAG_RESULT_DATA0,
2716 &color_args[index]);
2717 if (ret)
2718 index++;
2719 }
2720
2721 /* Process depth, stencil, samplemask. */
2722 if (ctx->shader_info->info.ps.writes_z) {
2723 depth = ac_to_float(&ctx->ac,
2724 radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
2725 }
2726 if (ctx->shader_info->info.ps.writes_stencil) {
2727 stencil = ac_to_float(&ctx->ac,
2728 radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
2729 }
2730 if (ctx->shader_info->info.ps.writes_sample_mask) {
2731 samplemask = ac_to_float(&ctx->ac,
2732 radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
2733 }
2734
2735 /* Set the DONE bit on last non-null color export only if Z isn't
2736 * exported.
2737 */
2738 if (index > 0 &&
2739 !ctx->shader_info->info.ps.writes_z &&
2740 !ctx->shader_info->info.ps.writes_stencil &&
2741 !ctx->shader_info->info.ps.writes_sample_mask) {
2742 unsigned last = index - 1;
2743
2744 color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
2745 color_args[last].done = 1; /* DONE bit */
2746 }
2747
2748 /* Export PS outputs. */
2749 for (unsigned i = 0; i < index; i++)
2750 ac_build_export(&ctx->ac, &color_args[i]);
2751
2752 if (depth || stencil || samplemask)
2753 radv_export_mrt_z(ctx, depth, stencil, samplemask);
2754 else if (!index)
2755 ac_build_export_null(&ctx->ac);
2756 }
2757
2758 static void
2759 emit_gs_epilogue(struct radv_shader_context *ctx)
2760 {
2761 ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
2762 }
2763
2764 static void
2765 handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
2766 LLVMValueRef *addrs)
2767 {
2768 struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
2769
2770 switch (ctx->stage) {
2771 case MESA_SHADER_VERTEX:
2772 if (ctx->options->key.vs.as_ls)
2773 handle_ls_outputs_post(ctx);
2774 else if (ctx->options->key.vs.as_es)
2775 handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
2776 else
2777 handle_vs_outputs_post(ctx, ctx->options->key.vs.export_prim_id,
2778 &ctx->shader_info->vs.outinfo);
2779 break;
2780 case MESA_SHADER_FRAGMENT:
2781 handle_fs_outputs_post(ctx);
2782 break;
2783 case MESA_SHADER_GEOMETRY:
2784 emit_gs_epilogue(ctx);
2785 break;
2786 case MESA_SHADER_TESS_CTRL:
2787 handle_tcs_outputs_post(ctx);
2788 break;
2789 case MESA_SHADER_TESS_EVAL:
2790 if (ctx->options->key.tes.as_es)
2791 handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
2792 else
2793 handle_vs_outputs_post(ctx, ctx->options->key.tes.export_prim_id,
2794 &ctx->shader_info->tes.outinfo);
2795 break;
2796 default:
2797 break;
2798 }
2799 }
2800
2801 static void ac_llvm_finalize_module(struct radv_shader_context *ctx)
2802 {
2803 LLVMPassManagerRef passmgr;
2804 /* Create the pass manager */
2805 passmgr = LLVMCreateFunctionPassManagerForModule(
2806 ctx->ac.module);
2807
2808 /* This pass should eliminate all the load and store instructions */
2809 LLVMAddPromoteMemoryToRegisterPass(passmgr);
2810
2811 /* Add some optimization passes */
2812 LLVMAddScalarReplAggregatesPass(passmgr);
2813 LLVMAddLICMPass(passmgr);
2814 LLVMAddAggressiveDCEPass(passmgr);
2815 LLVMAddCFGSimplificationPass(passmgr);
2816 LLVMAddInstructionCombiningPass(passmgr);
2817
2818 /* Run the passes */
2819 LLVMInitializeFunctionPassManager(passmgr);
2820 LLVMRunFunctionPassManager(passmgr, ctx->main_function);
2821 LLVMFinalizeFunctionPassManager(passmgr);
2822
2823 LLVMDisposeBuilder(ctx->ac.builder);
2824 LLVMDisposePassManager(passmgr);
2825
2826 ac_llvm_context_dispose(&ctx->ac);
2827 }
2828
2829 static void
2830 ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
2831 {
2832 struct radv_vs_output_info *outinfo;
2833
2834 switch (ctx->stage) {
2835 case MESA_SHADER_FRAGMENT:
2836 case MESA_SHADER_COMPUTE:
2837 case MESA_SHADER_TESS_CTRL:
2838 case MESA_SHADER_GEOMETRY:
2839 return;
2840 case MESA_SHADER_VERTEX:
2841 if (ctx->options->key.vs.as_ls ||
2842 ctx->options->key.vs.as_es)
2843 return;
2844 outinfo = &ctx->shader_info->vs.outinfo;
2845 break;
2846 case MESA_SHADER_TESS_EVAL:
2847 if (ctx->options->key.tes.as_es)
2848 return;
2849 outinfo = &ctx->shader_info->tes.outinfo;
2850 break;
2851 default:
2852 unreachable("Unhandled shader type");
2853 }
2854
2855 ac_optimize_vs_outputs(&ctx->ac,
2856 ctx->main_function,
2857 outinfo->vs_output_param_offset,
2858 VARYING_SLOT_MAX,
2859 &outinfo->param_exports);
2860 }
2861
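/* Load the ring descriptors this stage needs from the ring_offsets buffer.
 * For the GS, the GSVS ring descriptor is patched: the stride is OR'ed into
 * dword 1 and the entry count is written to dword 2.
 */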
2862 static void
2863 ac_setup_rings(struct radv_shader_context *ctx)
2864 {
2865 if ((ctx->stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_es) ||
2866 (ctx->stage == MESA_SHADER_TESS_EVAL && ctx->options->key.tes.as_es)) {
2867 ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_ESGS_VS, false));
2868 }
2869
2870 if (ctx->is_gs_copy_shader) {
2871 ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_VS, false));
2872 }
2873 if (ctx->stage == MESA_SHADER_GEOMETRY) {
2874 LLVMValueRef tmp;
2875 ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_ESGS_GS, false));
2876 ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_GS, false));
2877
2878 ctx->gsvs_ring = LLVMBuildBitCast(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.v4i32, "");
2879
2880 ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, ctx->gsvs_num_entries, LLVMConstInt(ctx->ac.i32, 2, false), "");
2881 tmp = LLVMBuildExtractElement(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.i32_1, "");
2882 tmp = LLVMBuildOr(ctx->ac.builder, tmp, ctx->gsvs_ring_stride, "");
2883 ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, tmp, ctx->ac.i32_1, "");
2884 }
2885
2886 if (ctx->stage == MESA_SHADER_TESS_CTRL ||
2887 ctx->stage == MESA_SHADER_TESS_EVAL) {
2888 ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
2889 ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
2890 }
2891 }
2892
2893 static unsigned
2894 ac_nir_get_max_workgroup_size(enum chip_class chip_class,
2895 const struct nir_shader *nir)
2896 {
2897 switch (nir->info.stage) {
2898 case MESA_SHADER_TESS_CTRL:
2899 return chip_class >= CIK ? 128 : 64;
2900 case MESA_SHADER_GEOMETRY:
2901 return chip_class >= GFX9 ? 128 : 64;
2902 case MESA_SHADER_COMPUTE:
2903 break;
2904 default:
2905 return 0;
2906 }
2907
2908 unsigned max_workgroup_size = nir->info.cs.local_size[0] *
2909 nir->info.cs.local_size[1] *
2910 nir->info.cs.local_size[2];
2911 return max_workgroup_size;
2912 }
2913
2914 /* Fix up the LS input VGPRs: when there are no HS threads, the HW does not emit the TCS registers and the LS inputs shift down into the TCS VGPR slots. */
2915 static void ac_nir_fixup_ls_hs_input_vgprs(struct radv_shader_context *ctx)
2916 {
2917 LLVMValueRef count = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
2918 LLVMConstInt(ctx->ac.i32, 8, false),
2919 LLVMConstInt(ctx->ac.i32, 8, false), false);
2920 LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
2921 ctx->ac.i32_0, "");
2922 ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
2923 ctx->vs_prim_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.vertex_id, ctx->vs_prim_id, "");
2924 ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_rel_ids, ctx->rel_auto_id, "");
2925 ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.tcs_patch_id, ctx->abi.vertex_id, "");
2926 }
2927
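/* On GFX9 the six GS vertex offsets arrive packed as 16-bit pairs in three
 * VGPRs; unpack them, and extract the GS wave id from merged_wave_info bits
 * [23:16].
 */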
2928 static void prepare_gs_input_vgprs(struct radv_shader_context *ctx)
2929 {
2930 for (int i = 5; i >= 0; --i) {
2931 ctx->gs_vtx_offset[i] = ac_build_bfe(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
2932 LLVMConstInt(ctx->ac.i32, (i & 1) * 16, false),
2933 LLVMConstInt(ctx->ac.i32, 16, false), false);
2934 }
2935
2936 ctx->gs_wave_id = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
2937 LLVMConstInt(ctx->ac.i32, 16, false),
2938 LLVMConstInt(ctx->ac.i32, 8, false), false);
2939 }
2940
2941
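/* Translate one or more NIR shaders (more than one when stages are merged
 * on GFX9) into a single LLVM module.
 */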
2942 static
2943 LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,
2944 struct nir_shader *const *shaders,
2945 int shader_count,
2946 struct radv_shader_variant_info *shader_info,
2947 const struct radv_nir_compiler_options *options)
2948 {
2949 struct radv_shader_context ctx = {0};
2950 unsigned i;
2951 ctx.options = options;
2952 ctx.shader_info = shader_info;
2953 ctx.context = LLVMContextCreate();
2954
2955 ac_llvm_context_init(&ctx.ac, ctx.context, options->chip_class,
2956 options->family);
2957 ctx.ac.module = LLVMModuleCreateWithNameInContext("shader", ctx.context);
2958 LLVMSetTarget(ctx.ac.module, options->supports_spill ? "amdgcn-mesa-mesa3d" : "amdgcn--");
2959
2960 LLVMTargetDataRef data_layout = LLVMCreateTargetDataLayout(tm);
2961 char *data_layout_str = LLVMCopyStringRepOfTargetData(data_layout);
2962 LLVMSetDataLayout(ctx.ac.module, data_layout_str);
2963 LLVMDisposeTargetData(data_layout);
2964 LLVMDisposeMessage(data_layout_str);
2965
2966 enum ac_float_mode float_mode =
2967 options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
2968 AC_FLOAT_MODE_DEFAULT;
2969
2970 ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
2971
2972 memset(shader_info, 0, sizeof(*shader_info));
2973
2974 for (int i = 0; i < shader_count; ++i)
2975 radv_nir_shader_info_pass(shaders[i], options, &shader_info->info);
2976
2977 for (i = 0; i < RADV_UD_MAX_SETS; i++)
2978 shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
2979 for (i = 0; i < AC_UD_MAX_UD; i++)
2980 shader_info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;
2981
2982 ctx.max_workgroup_size = 0;
2983 for (int i = 0; i < shader_count; ++i) {
2984 ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
2985 ac_nir_get_max_workgroup_size(ctx.options->chip_class,
2986 shaders[i]));
2987 }
2988
2989 create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
2990 shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);
2991
2992 ctx.abi.inputs = &ctx.inputs[0];
2993 ctx.abi.emit_outputs = handle_shader_outputs_post;
2994 ctx.abi.emit_vertex = visit_emit_vertex;
2995 ctx.abi.load_ubo = radv_load_ubo;
2996 ctx.abi.load_ssbo = radv_load_ssbo;
2997 ctx.abi.load_sampler_desc = radv_get_sampler_desc;
2998 ctx.abi.load_resource = radv_load_resource;
2999 ctx.abi.clamp_shadow_reference = false;
3000
3001 if (shader_count >= 2)
3002 ac_init_exec_full_mask(&ctx.ac);
3003
3004 if (ctx.ac.chip_class == GFX9 &&
3005 shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
3006 ac_nir_fixup_ls_hs_input_vgprs(&ctx);
3007
3008 for (int i = 0; i < shader_count; ++i) {
3009 ctx.stage = shaders[i]->info.stage;
3010 ctx.output_mask = 0;
3011 ctx.tess_outputs_written = 0;
3012 ctx.num_output_clips = shaders[i]->info.clip_distance_array_size;
3013 ctx.num_output_culls = shaders[i]->info.cull_distance_array_size;
3014
3015 if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
3016 ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.ac.i32, "gs_next_vertex");
3017 ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
3018 ctx.abi.load_inputs = load_gs_input;
3019 ctx.abi.emit_primitive = visit_end_primitive;
3020 } else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
3021 ctx.tcs_outputs_read = shaders[i]->info.outputs_read;
3022 ctx.tcs_patch_outputs_read = shaders[i]->info.patch_outputs_read;
3023 ctx.abi.load_tess_varyings = load_tcs_varyings;
3024 ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
3025 ctx.abi.store_tcs_outputs = store_tcs_output;
3026 ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
3027 if (shader_count == 1)
3028 ctx.tcs_num_inputs = ctx.options->key.tcs.num_inputs;
3029 else
3030 ctx.tcs_num_inputs = util_last_bit64(shader_info->info.vs.ls_outputs_written);
3031 } else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
3032 ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
3033 ctx.abi.load_tess_varyings = load_tes_input;
3034 ctx.abi.load_tess_coord = load_tess_coord;
3035 ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
3036 ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
3037 } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
3038 if (shader_info->info.vs.needs_instance_id) {
3039 if (ctx.options->key.vs.as_ls) {
3040 ctx.shader_info->vs.vgpr_comp_cnt =
3041 MAX2(2, ctx.shader_info->vs.vgpr_comp_cnt);
3042 } else {
3043 ctx.shader_info->vs.vgpr_comp_cnt =
3044 MAX2(1, ctx.shader_info->vs.vgpr_comp_cnt);
3045 }
3046 }
3047 ctx.abi.load_base_vertex = radv_load_base_vertex;
3048 } else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
3049 shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
3050 ctx.abi.lookup_interp_param = lookup_interp_param;
3051 ctx.abi.load_sample_position = load_sample_position;
3052 ctx.abi.load_sample_mask_in = load_sample_mask_in;
3053 ctx.abi.emit_kill = radv_emit_kill;
3054 }
3055
3056 if (i)
3057 ac_emit_barrier(&ctx.ac, ctx.stage);
3058
3059 ac_setup_rings(&ctx);
3060
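/* For merged shaders, wrap each stage in a block that only runs on the
 * threads belonging to that stage, using the per-stage thread counts
 * packed in merged_wave_info.
 */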
3061 LLVMBasicBlockRef merge_block;
3062 if (shader_count >= 2) {
3063 LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
3064 LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
3065 merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
3066
3067 LLVMValueRef count = ac_build_bfe(&ctx.ac, ctx.merged_wave_info,
3068 LLVMConstInt(ctx.ac.i32, 8 * i, false),
3069 LLVMConstInt(ctx.ac.i32, 8, false), false);
3070 LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
3071 LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
3072 thread_id, count, "");
3073 LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);
3074
3075 LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
3076 }
3077
3078 if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
3079 handle_fs_inputs(&ctx, shaders[i]);
3080 else if (shaders[i]->info.stage == MESA_SHADER_VERTEX)
3081 handle_vs_inputs(&ctx, shaders[i]);
3082 else if (shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
3083 prepare_gs_input_vgprs(&ctx);
3084
3085 nir_foreach_variable(variable, &shaders[i]->outputs)
3086 scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);
3087
3088 ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i]);
3089
3090 if (shader_count >= 2) {
3091 LLVMBuildBr(ctx.ac.builder, merge_block);
3092 LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
3093 }
3094
3095 if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
3096 unsigned addclip = shaders[i]->info.clip_distance_array_size +
3097 shaders[i]->info.cull_distance_array_size > 4;
3098 shader_info->gs.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
3099 shader_info->gs.max_gsvs_emit_size = shader_info->gs.gsvs_vertex_size *
3100 shaders[i]->info.gs.vertices_out;
3101 } else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
3102 shader_info->tcs.outputs_written = ctx.tess_outputs_written;
3103 shader_info->tcs.patch_outputs_written = ctx.tess_patch_outputs_written;
3104 assert(ctx.tess_outputs_written == ctx.shader_info->info.tcs.outputs_written);
3105 assert(ctx.tess_patch_outputs_written == ctx.shader_info->info.tcs.patch_outputs_written);
3106 } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX && ctx.options->key.vs.as_ls) {
3107 shader_info->vs.outputs_written = ctx.tess_outputs_written;
3108 assert(ctx.tess_outputs_written == ctx.shader_info->info.vs.ls_outputs_written);
3109 }
3110 }
3111
3112 LLVMBuildRetVoid(ctx.ac.builder);
3113
3114 if (options->dump_preoptir)
3115 ac_dump_module(ctx.ac.module);
3116
3117 ac_llvm_finalize_module(&ctx);
3118
3119 if (shader_count == 1)
3120 ac_nir_eliminate_const_vs_outputs(&ctx);
3121
3122 if (options->dump_shader) {
3123 ctx.shader_info->private_mem_vgprs =
3124 ac_count_scratch_private_memory(ctx.main_function);
3125 }
3126
3127 return ctx.ac.module;
3128 }
3129
3130 static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
3131 {
3132 unsigned *retval = (unsigned *)context;
3133 LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
3134 char *description = LLVMGetDiagInfoDescription(di);
3135
3136 if (severity == LLVMDSError) {
3137 *retval = 1;
3138 fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n",
3139 description);
3140 }
3141
3142 LLVMDisposeMessage(description);
3143 }
3144
3145 static unsigned ac_llvm_compile(LLVMModuleRef M,
3146 struct ac_shader_binary *binary,
3147 LLVMTargetMachineRef tm)
3148 {
3149 unsigned retval = 0;
3150 char *err;
3151 LLVMContextRef llvm_ctx;
3152 LLVMMemoryBufferRef out_buffer;
3153 unsigned buffer_size;
3154 const char *buffer_data;
3155 LLVMBool mem_err;
3156
3157 /* Set up the diagnostic handler. */
3158 llvm_ctx = LLVMGetModuleContext(M);
3159
3160 LLVMContextSetDiagnosticHandler(llvm_ctx, ac_diagnostic_handler,
3161 &retval);
3162
3163 /* Compile the IR. */
3164 mem_err = LLVMTargetMachineEmitToMemoryBuffer(tm, M, LLVMObjectFile,
3165 &err, &out_buffer);
3166
3167 /* Process Errors/Warnings */
3168 if (mem_err) {
3169 fprintf(stderr, "%s: %s", __FUNCTION__, err);
3170 free(err);
3171 retval = 1;
3172 goto out;
3173 }
3174
3175 /* Extract the shader code. */
3176 buffer_size = LLVMGetBufferSize(out_buffer);
3177 buffer_data = LLVMGetBufferStart(out_buffer);
3178
3179 ac_elf_read(buffer_data, buffer_size, binary);
3180
3181 /* Clean up */
3182 LLVMDisposeMemoryBuffer(out_buffer);
3183
3184 out:
3185 return retval;
3186 }
3187
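/* Compile the LLVM module to an ELF binary, read back the shader config
 * and, for fragment shaders, recompute the number of input VGPRs from the
 * SPI_PS_INPUT_ADDR bits.
 */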
3188 static void ac_compile_llvm_module(LLVMTargetMachineRef tm,
3189 LLVMModuleRef llvm_module,
3190 struct ac_shader_binary *binary,
3191 struct ac_shader_config *config,
3192 struct radv_shader_variant_info *shader_info,
3193 gl_shader_stage stage,
3194 const struct radv_nir_compiler_options *options)
3195 {
3196 if (options->dump_shader)
3197 ac_dump_module(llvm_module);
3198
3199 memset(binary, 0, sizeof(*binary));
3200
3201 if (options->record_llvm_ir) {
3202 char *llvm_ir = LLVMPrintModuleToString(llvm_module);
3203 binary->llvm_ir_string = strdup(llvm_ir);
3204 LLVMDisposeMessage(llvm_ir);
3205 }
3206
3207 int v = ac_llvm_compile(llvm_module, binary, tm);
3208 if (v) {
3209 fprintf(stderr, "compile failed\n");
3210 }
3211
3212 if (options->dump_shader)
3213 fprintf(stderr, "disasm:\n%s\n", binary->disasm_string);
3214
3215 ac_shader_binary_read_config(binary, config, 0, options->supports_spill);
3216
3217 LLVMContextRef ctx = LLVMGetModuleContext(llvm_module);
3218 LLVMDisposeModule(llvm_module);
3219 LLVMContextDispose(ctx);
3220
3221 if (stage == MESA_SHADER_FRAGMENT) {
3222 shader_info->num_input_vgprs = 0;
3223 if (G_0286CC_PERSP_SAMPLE_ENA(config->spi_ps_input_addr))
3224 shader_info->num_input_vgprs += 2;
3225 if (G_0286CC_PERSP_CENTER_ENA(config->spi_ps_input_addr))
3226 shader_info->num_input_vgprs += 2;
3227 if (G_0286CC_PERSP_CENTROID_ENA(config->spi_ps_input_addr))
3228 shader_info->num_input_vgprs += 2;
3229 if (G_0286CC_PERSP_PULL_MODEL_ENA(config->spi_ps_input_addr))
3230 shader_info->num_input_vgprs += 3;
3231 if (G_0286CC_LINEAR_SAMPLE_ENA(config->spi_ps_input_addr))
3232 shader_info->num_input_vgprs += 2;
3233 if (G_0286CC_LINEAR_CENTER_ENA(config->spi_ps_input_addr))
3234 shader_info->num_input_vgprs += 2;
3235 if (G_0286CC_LINEAR_CENTROID_ENA(config->spi_ps_input_addr))
3236 shader_info->num_input_vgprs += 2;
3237 if (G_0286CC_LINE_STIPPLE_TEX_ENA(config->spi_ps_input_addr))
3238 shader_info->num_input_vgprs += 1;
3239 if (G_0286CC_POS_X_FLOAT_ENA(config->spi_ps_input_addr))
3240 shader_info->num_input_vgprs += 1;
3241 if (G_0286CC_POS_Y_FLOAT_ENA(config->spi_ps_input_addr))
3242 shader_info->num_input_vgprs += 1;
3243 if (G_0286CC_POS_Z_FLOAT_ENA(config->spi_ps_input_addr))
3244 shader_info->num_input_vgprs += 1;
3245 if (G_0286CC_POS_W_FLOAT_ENA(config->spi_ps_input_addr))
3246 shader_info->num_input_vgprs += 1;
3247 if (G_0286CC_FRONT_FACE_ENA(config->spi_ps_input_addr))
3248 shader_info->num_input_vgprs += 1;
3249 if (G_0286CC_ANCILLARY_ENA(config->spi_ps_input_addr))
3250 shader_info->num_input_vgprs += 1;
3251 if (G_0286CC_SAMPLE_COVERAGE_ENA(config->spi_ps_input_addr))
3252 shader_info->num_input_vgprs += 1;
3253 if (G_0286CC_POS_FIXED_PT_ENA(config->spi_ps_input_addr))
3254 shader_info->num_input_vgprs += 1;
3255 }
3256 config->num_vgprs = MAX2(config->num_vgprs, shader_info->num_input_vgprs);
3257
3258 /* +3 for scratch wave offset and VCC */
3259 config->num_sgprs = MAX2(config->num_sgprs,
3260 shader_info->num_input_sgprs + 3);
3261
3262 /* Enable 64-bit and 16-bit denormals, because there is no performance
3263 * cost.
3264 *
3265 * If denormals are enabled, all floating-point output modifiers are
3266 * ignored.
3267 *
3268 * Don't enable denormals for 32-bit floats, because:
3269 * - Floating-point output modifiers would be ignored by the hw.
3270 * - Some opcodes don't support denormals, such as v_mad_f32. We would
3271 * have to stop using those.
3272 * - SI & CI would be very slow.
3273 */
3274 config->float_mode |= V_00B028_FP_64_DENORMS;
3275 }
3276
3277 static void
3278 ac_fill_shader_info(struct radv_shader_variant_info *shader_info, struct nir_shader *nir, const struct radv_nir_compiler_options *options)
3279 {
3280 switch (nir->info.stage) {
3281 case MESA_SHADER_COMPUTE:
3282 for (int i = 0; i < 3; ++i)
3283 shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
3284 break;
3285 case MESA_SHADER_FRAGMENT:
3286 shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
3287 break;
3288 case MESA_SHADER_GEOMETRY:
3289 shader_info->gs.vertices_in = nir->info.gs.vertices_in;
3290 shader_info->gs.vertices_out = nir->info.gs.vertices_out;
3291 shader_info->gs.output_prim = nir->info.gs.output_primitive;
3292 shader_info->gs.invocations = nir->info.gs.invocations;
3293 break;
3294 case MESA_SHADER_TESS_EVAL:
3295 shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
3296 shader_info->tes.spacing = nir->info.tess.spacing;
3297 shader_info->tes.ccw = nir->info.tess.ccw;
3298 shader_info->tes.point_mode = nir->info.tess.point_mode;
3299 shader_info->tes.as_es = options->key.tes.as_es;
3300 break;
3301 case MESA_SHADER_TESS_CTRL:
3302 shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
3303 break;
3304 case MESA_SHADER_VERTEX:
3305 shader_info->vs.as_es = options->key.vs.as_es;
3306 shader_info->vs.as_ls = options->key.vs.as_ls;
3307 /* in LS mode we need at least 1, invocation id needs 2, handled elsewhere */
3308 if (options->key.vs.as_ls)
3309 shader_info->vs.vgpr_comp_cnt = MAX2(1, shader_info->vs.vgpr_comp_cnt);
3310 break;
3311 default:
3312 break;
3313 }
3314 }
3315
3316 void
3317 radv_compile_nir_shader(LLVMTargetMachineRef tm,
3318 struct ac_shader_binary *binary,
3319 struct ac_shader_config *config,
3320 struct radv_shader_variant_info *shader_info,
3321 struct nir_shader *const *nir,
3322 int nir_count,
3323 const struct radv_nir_compiler_options *options)
3324 {
3325
3326 LLVMModuleRef llvm_module;
3327
3328 llvm_module = ac_translate_nir_to_llvm(tm, nir, nir_count, shader_info,
3329 options);
3330
3331 ac_compile_llvm_module(tm, llvm_module, binary, config, shader_info,
3332 nir[0]->info.stage, options);
3333
3334 for (int i = 0; i < nir_count; ++i)
3335 ac_fill_shader_info(shader_info, nir[i], options);
3336
3337 /* Determine the ES type (VS or TES) for the GS on GFX9. */
3338 if (options->chip_class == GFX9) {
3339 if (nir_count == 2 &&
3340 nir[1]->info.stage == MESA_SHADER_GEOMETRY) {
3341 shader_info->gs.es_type = nir[0]->info.stage;
3342 }
3343 }
3344 }
3345
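/* Emit the body of the GS copy shader, which runs as a hardware VS: fetch
 * every output of the selected vertex from the GSVS ring and re-export it
 * through the usual VS epilogue.
 */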
3346 static void
3347 ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
3348 {
3349 LLVMValueRef vtx_offset =
3350 LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
3351 LLVMConstInt(ctx->ac.i32, 4, false), "");
3352 int idx = 0;
3353
3354 for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
3355 int length = 4;
3356 int slot = idx;
3357 int slot_inc = 1;
3358 if (!(ctx->output_mask & (1ull << i)))
3359 continue;
3360
3361 if (i == VARYING_SLOT_CLIP_DIST0) {
3362 /* unpack clip and cull from a single set of slots */
3363 length = ctx->num_output_clips + ctx->num_output_culls;
3364 if (length > 4)
3365 slot_inc = 2;
3366 }
3367
3368 for (unsigned j = 0; j < length; j++) {
3369 LLVMValueRef value, soffset;
3370
3371 soffset = LLVMConstInt(ctx->ac.i32,
3372 (slot * 4 + j) *
3373 ctx->gs_max_out_vertices * 16 * 4, false);
3374
3375 value = ac_build_buffer_load(&ctx->ac, ctx->gsvs_ring,
3376 1, ctx->ac.i32_0,
3377 vtx_offset, soffset,
3378 0, 1, 1, true, false);
3379
3380 LLVMBuildStore(ctx->ac.builder,
3381 ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
3382 }
3383 idx += slot_inc;
3384 }
3385 handle_vs_outputs_post(ctx, false, &ctx->shader_info->vs.outinfo);
3386 }
3387
3388 void
3389 radv_compile_gs_copy_shader(LLVMTargetMachineRef tm,
3390 struct nir_shader *geom_shader,
3391 struct ac_shader_binary *binary,
3392 struct ac_shader_config *config,
3393 struct radv_shader_variant_info *shader_info,
3394 const struct radv_nir_compiler_options *options)
3395 {
3396 struct radv_shader_context ctx = {0};
3397 ctx.context = LLVMContextCreate();
3398 ctx.options = options;
3399 ctx.shader_info = shader_info;
3400
3401 ac_llvm_context_init(&ctx.ac, ctx.context, options->chip_class,
3402 options->family);
3403 ctx.ac.module = LLVMModuleCreateWithNameInContext("shader", ctx.context);
3404
3405 ctx.is_gs_copy_shader = true;
3406 LLVMSetTarget(ctx.ac.module, "amdgcn--");
3407
3408 enum ac_float_mode float_mode =
3409 options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
3410 AC_FLOAT_MODE_DEFAULT;
3411
3412 ctx.ac.builder = ac_create_builder(ctx.context, float_mode);
3413 ctx.stage = MESA_SHADER_VERTEX;
3414
3415 create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);
3416
3417 ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
3418 ac_setup_rings(&ctx);
3419
3420 ctx.num_output_clips = geom_shader->info.clip_distance_array_size;
3421 ctx.num_output_culls = geom_shader->info.cull_distance_array_size;
3422
3423 nir_foreach_variable(variable, &geom_shader->outputs) {
3424 scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
3425 ac_handle_shader_output_decl(&ctx.ac, &ctx.abi, geom_shader,
3426 variable, MESA_SHADER_VERTEX);
3427 }
3428
3429 ac_gs_copy_shader_emit(&ctx);
3430
3431 LLVMBuildRetVoid(ctx.ac.builder);
3432
3433 ac_llvm_finalize_module(&ctx);
3434
3435 ac_compile_llvm_module(tm, ctx.ac.module, binary, config, shader_info,
3436 MESA_SHADER_VERTEX, options);
3437 }