/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <array>
#include <unordered_map>
#include "aco_ir.h"
#include "nir.h"
#include "nir_control_flow.h"
#include "vulkan/radv_shader.h"
#include "vulkan/radv_descriptor_set.h"
#include "vulkan/radv_shader_args.h"
#include "sid.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"

#include "util/u_math.h"

#define MAX_INLINE_PUSH_CONSTS 8

namespace aco {

struct shader_io_state {
   uint8_t mask[VARYING_SLOT_MAX];
   Temp temps[VARYING_SLOT_MAX * 4u];

   shader_io_state() {
      memset(mask, 0, sizeof(mask));
      std::fill_n(temps, VARYING_SLOT_MAX * 4u, Temp(0, RegClass::v1));
   }
};

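/* Per-resource access flags gathered by fill_desc_set_info(). Instruction
 * selection uses them to reason about which kinds of VMEM accesses may touch
 * a buffer, e.g. whether a load of it could safely be done through SMEM. */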
enum resource_flags {
   has_glc_vmem_load = 0x1,
   has_nonglc_vmem_load = 0x2,
   has_glc_vmem_store = 0x4,
   has_nonglc_vmem_store = 0x8,

   has_vmem_store = has_glc_vmem_store | has_nonglc_vmem_store,
   has_vmem_loadstore = has_vmem_store | has_glc_vmem_load | has_nonglc_vmem_load,
   has_nonglc_vmem_loadstore = has_nonglc_vmem_load | has_nonglc_vmem_store,

   buffer_is_restrict = 0x10,
};

struct isel_context {
   const struct radv_nir_compiler_options *options;
   struct radv_shader_args *args;
   Program *program;
   nir_shader *shader;
   uint32_t constant_data_offset;
   Block *block;
   std::unique_ptr<Temp[]> allocated;
   std::unordered_map<unsigned, std::array<Temp,NIR_MAX_VEC_COMPONENTS>> allocated_vec;
   Stage stage; /* combined software (sw_*) and hardware (hw_*) stage */
   bool has_gfx10_wave64_bpermute = false;
   struct {
      bool has_branch;
      uint16_t loop_nest_depth = 0;
      struct {
         unsigned header_idx;
         Block* exit;
         bool has_divergent_continue = false;
         bool has_divergent_branch = false;
      } parent_loop;
      struct {
         bool is_divergent = false;
      } parent_if;
      bool exec_potentially_empty_discard = false; /* set to false when loop_nest_depth==0 && parent_if.is_divergent==false */
      uint16_t exec_potentially_empty_break_depth = UINT16_MAX;
      /* Set to false when loop_nest_depth==exec_potentially_empty_break_depth
       * and parent_if.is_divergent==false. Called _break but it's also used for
       * loop continues. */
      bool exec_potentially_empty_break = false;
      std::unique_ptr<unsigned[]> nir_to_aco; /* NIR block index to ACO block index */
   } cf_info;

   uint32_t resource_flag_offsets[MAX_SETS];
   std::vector<uint8_t> buffer_resource_flags;

   Temp arg_temps[AC_MAX_ARGS];

   /* FS inputs */
   Temp persp_centroid, linear_centroid;

   /* GS inputs */
   Temp gs_wave_id;

   /* VS output information */
   bool export_clip_dists;
   unsigned num_clip_distances;
   unsigned num_cull_distances;

   /* tessellation information */
   unsigned tcs_tess_lvl_out_loc;
   unsigned tcs_tess_lvl_in_loc;
   uint64_t tcs_temp_only_inputs;
   uint32_t tcs_num_inputs;
   uint32_t tcs_num_outputs;
   uint32_t tcs_num_patch_outputs;
   uint32_t tcs_num_patches;
   bool tcs_in_out_eq = false;

   /* I/O information */
   shader_io_state inputs;
   shader_io_state outputs;
   uint8_t output_drv_loc_to_var_slot[MESA_SHADER_COMPUTE][VARYING_SLOT_MAX];
   uint8_t output_tcs_patch_drv_loc_to_var_slot[VARYING_SLOT_MAX];
};

Temp get_arg(isel_context *ctx, struct ac_arg arg)
{
   assert(arg.used);
   return ctx->arg_temps[arg.arg_index];
}

unsigned get_interp_input(nir_intrinsic_op intrin, enum glsl_interp_mode interp)
{
   switch (interp) {
   case INTERP_MODE_SMOOTH:
   case INTERP_MODE_NONE:
      if (intrin == nir_intrinsic_load_barycentric_pixel ||
          intrin == nir_intrinsic_load_barycentric_at_sample ||
          intrin == nir_intrinsic_load_barycentric_at_offset)
         return S_0286CC_PERSP_CENTER_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return S_0286CC_PERSP_CENTROID_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return S_0286CC_PERSP_SAMPLE_ENA(1);
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      if (intrin == nir_intrinsic_load_barycentric_pixel)
         return S_0286CC_LINEAR_CENTER_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return S_0286CC_LINEAR_CENTROID_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return S_0286CC_LINEAR_SAMPLE_ENA(1);
      break;
   default:
      break;
   }
   return 0;
}

/* If one side of a divergent IF ends in a branch and the other doesn't, we
 * might have to emit the contents of the side without the branch at the merge
 * block instead. This is so that we can use any SGPR live-out of the
 * branch-less side without creating a linear phi in the invert or merge block. */
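/* A small example (hypothetical NIR, then-side ends in a jump):
 *    loop {
 *       if (cond) { A; continue; } else { B; }
 *       C;
 *    }
 * is rewritten so that B is emitted after the if, together with C:
 *    loop {
 *       if (cond) { A; continue; }
 *       B; C;
 *    }
 */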
bool
sanitize_if(nir_function_impl *impl, nir_if *nif)
{
   //TODO: skip this if the condition is uniform and there are no divergent breaks/continues?

   nir_block *then_block = nir_if_last_then_block(nif);
   nir_block *else_block = nir_if_last_else_block(nif);
   bool then_jump = nir_block_ends_in_jump(then_block) || nir_block_is_unreachable(then_block);
   bool else_jump = nir_block_ends_in_jump(else_block) || nir_block_is_unreachable(else_block);
   if (then_jump == else_jump)
      return false;

   /* If the continue-from block is empty, then return as there is nothing to
    * move.
    */
   if (nir_cf_list_is_empty_block(else_jump ? &nif->then_list : &nif->else_list))
      return false;

   /* Even though this if statement has a jump on one side, we may still have
    * phis afterwards. Single-source phis can be produced by loop unrolling
    * or dead control-flow passes and are perfectly legal. Run a quick phi
    * removal on the block after the if to clean up any such phis.
    */
   nir_opt_remove_phis_block(nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node)));

   /* Finally, move the continue-from branch after the if-statement. */
   nir_block *last_continue_from_blk = else_jump ? then_block : else_block;
   nir_block *first_continue_from_blk = else_jump ?
      nir_if_first_then_block(nif) : nir_if_first_else_block(nif);

   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_before_block(first_continue_from_blk),
                        nir_after_block(last_continue_from_blk));
   nir_cf_reinsert(&tmp, nir_after_cf_node(&nif->cf_node));

   /* nir_cf_extract() invalidates dominance metadata, but it should still be
    * correct because of the specific type of transformation we did. Block
    * indices are not valid except for block_0's, which is all we care about for
    * nir_block_is_unreachable(). */
   impl->valid_metadata =
      (nir_metadata)(impl->valid_metadata | nir_metadata_dominance | nir_metadata_block_index);

   return true;
}

bool
sanitize_cf_list(nir_function_impl *impl, struct exec_list *cf_list)
{
   bool progress = false;
   foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
      switch (cf_node->type) {
      case nir_cf_node_block:
         break;
      case nir_cf_node_if: {
         nir_if *nif = nir_cf_node_as_if(cf_node);
         progress |= sanitize_cf_list(impl, &nif->then_list);
         progress |= sanitize_cf_list(impl, &nif->else_list);
         progress |= sanitize_if(impl, nif);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cf_node);
         progress |= sanitize_cf_list(impl, &loop->body);
         break;
      }
      case nir_cf_node_function:
         unreachable("Invalid cf type");
      }
   }

   return progress;
}

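/* Returns the span of buffer_resource_flags entries the given resource may
 * alias: its own entry if it is a restrict buffer with a known descriptor,
 * flags[0] (shared by all non-restrict buffers) if it is non-restrict, or
 * the whole vector if nothing is known about the resource. */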
void get_buffer_resource_flags(isel_context *ctx, nir_ssa_def *def, unsigned access,
                               uint8_t **flags, uint32_t *count)
{
   int desc_set = -1;
   unsigned binding = 0;

   if (!def) {
      /* global resources are considered to alias with all other buffers and
       * buffer images */
      // TODO: only merge flags of resources which can really alias.
   } else if (def->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(def->parent_instr);
      if (intrin->intrinsic == nir_intrinsic_vulkan_resource_index) {
         desc_set = nir_intrinsic_desc_set(intrin);
         binding = nir_intrinsic_binding(intrin);
      }
   } else if (def->parent_instr->type == nir_instr_type_deref) {
      nir_deref_instr *deref = nir_instr_as_deref(def->parent_instr);
      assert(deref->type->is_image());
      if (deref->type->sampler_dimensionality != GLSL_SAMPLER_DIM_BUF) {
         *flags = NULL;
         *count = 0;
         return;
      }

      nir_variable *var = nir_deref_instr_get_variable(deref);
      desc_set = var->data.descriptor_set;
      binding = var->data.binding;
   }

   if (desc_set < 0) {
      *flags = ctx->buffer_resource_flags.data();
      *count = ctx->buffer_resource_flags.size();
      return;
   }

   unsigned set_offset = ctx->resource_flag_offsets[desc_set];

   if (!(ctx->buffer_resource_flags[set_offset + binding] & buffer_is_restrict)) {
      /* Non-restrict buffers alias only with other non-restrict buffers.
       * We reserve flags[0] for these. */
      *flags = ctx->buffer_resource_flags.data();
      *count = 1;
      return;
   }

   *flags = ctx->buffer_resource_flags.data() + set_offset + binding;
   *count = 1;
}

uint8_t get_all_buffer_resource_flags(isel_context *ctx, nir_ssa_def *def, unsigned access)
{
   uint8_t *flags;
   uint32_t count;
   get_buffer_resource_flags(ctx, def, access, &flags, &count);

   uint8_t res = 0;
   for (unsigned i = 0; i < count; i++)
      res |= flags[i];
   return res;
}

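/* SMEM stores write whole dwords, so a sub-dword SSBO store can only use SMEM
 * if the write mask covers a contiguous, dword-aligned, whole-dword range.
 * For example, a 2-component 16-bit store with write mask 0b11 writes exactly
 * one dword and qualifies; write mask 0b01 or 0b10 does not. */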
bool can_subdword_ssbo_store_use_smem(nir_intrinsic_instr *intrin)
{
   unsigned wrmask = nir_intrinsic_write_mask(intrin);
   if (util_last_bit(wrmask) != util_bitcount(wrmask) ||
       util_bitcount(wrmask) * intrin->src[0].ssa->bit_size % 32 ||
       util_bitcount(wrmask) != intrin->src[0].ssa->num_components)
      return false;

   if (nir_intrinsic_align_mul(intrin) % 4 || nir_intrinsic_align_offset(intrin) % 4)
      return false;

   return true;
}

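/* Walks the shader once and records, for every buffer resource, which kinds
 * of VMEM loads/stores access it. Later checks can then tell, for example,
 * whether loading from a resource through SMEM might miss a VMEM store. */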
void fill_desc_set_info(isel_context *ctx, nir_function_impl *impl)
{
   radv_pipeline_layout *pipeline_layout = ctx->options->layout;

   unsigned resource_flag_count = 1; /* +1 to reserve flags[0] for aliased resources */
   for (unsigned i = 0; i < pipeline_layout->num_sets; i++) {
      radv_descriptor_set_layout *layout = pipeline_layout->set[i].layout;
      ctx->resource_flag_offsets[i] = resource_flag_count;
      resource_flag_count += layout->binding_count;
   }
   ctx->buffer_resource_flags = std::vector<uint8_t>(resource_flag_count);

   nir_foreach_variable_with_modes(var, impl->function->shader, nir_var_mem_ssbo) {
      if (var->data.access & ACCESS_RESTRICT) {
         uint32_t offset = ctx->resource_flag_offsets[var->data.descriptor_set];
         ctx->buffer_resource_flags[offset + var->data.binding] |= buffer_is_restrict;
      }
   }

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (!(nir_intrinsic_infos[intrin->intrinsic].index_map[NIR_INTRINSIC_ACCESS]))
            continue;

         nir_ssa_def *res = NULL;
         unsigned access = nir_intrinsic_access(intrin);
         unsigned flags = 0;
         bool glc = access & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
         switch (intrin->intrinsic) {
         case nir_intrinsic_load_ssbo: {
            if (nir_dest_is_divergent(intrin->dest) && (!glc || ctx->program->chip_class >= GFX8))
               flags |= glc ? has_glc_vmem_load : has_nonglc_vmem_load;
            res = intrin->src[0].ssa;
            break;
         }
         case nir_intrinsic_ssbo_atomic_add:
         case nir_intrinsic_ssbo_atomic_imin:
         case nir_intrinsic_ssbo_atomic_umin:
         case nir_intrinsic_ssbo_atomic_imax:
         case nir_intrinsic_ssbo_atomic_umax:
         case nir_intrinsic_ssbo_atomic_and:
         case nir_intrinsic_ssbo_atomic_or:
         case nir_intrinsic_ssbo_atomic_xor:
         case nir_intrinsic_ssbo_atomic_exchange:
         case nir_intrinsic_ssbo_atomic_comp_swap:
            flags |= has_glc_vmem_load | has_glc_vmem_store;
            res = intrin->src[0].ssa;
            break;
         case nir_intrinsic_store_ssbo:
            if (nir_src_is_divergent(intrin->src[2]) || ctx->program->chip_class < GFX8 ||
                (intrin->src[0].ssa->bit_size < 32 && !can_subdword_ssbo_store_use_smem(intrin)))
               flags |= glc ? has_glc_vmem_store : has_nonglc_vmem_store;
            res = intrin->src[1].ssa;
            break;
         case nir_intrinsic_load_global:
            if (!(access & ACCESS_NON_WRITEABLE))
               flags |= glc ? has_glc_vmem_load : has_nonglc_vmem_load;
            break;
         case nir_intrinsic_store_global:
            flags |= glc ? has_glc_vmem_store : has_nonglc_vmem_store;
            break;
         case nir_intrinsic_global_atomic_add:
         case nir_intrinsic_global_atomic_imin:
         case nir_intrinsic_global_atomic_umin:
         case nir_intrinsic_global_atomic_imax:
         case nir_intrinsic_global_atomic_umax:
         case nir_intrinsic_global_atomic_and:
         case nir_intrinsic_global_atomic_or:
         case nir_intrinsic_global_atomic_xor:
         case nir_intrinsic_global_atomic_exchange:
         case nir_intrinsic_global_atomic_comp_swap:
            flags |= has_glc_vmem_load | has_glc_vmem_store;
            break;
         case nir_intrinsic_image_deref_load:
            res = intrin->src[0].ssa;
            flags |= glc ? has_glc_vmem_load : has_nonglc_vmem_load;
            break;
         case nir_intrinsic_image_deref_store:
            res = intrin->src[0].ssa;
            flags |= (glc || ctx->program->chip_class == GFX6) ? has_glc_vmem_store : has_nonglc_vmem_store;
            break;
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_umin:
         case nir_intrinsic_image_deref_atomic_imin:
         case nir_intrinsic_image_deref_atomic_umax:
         case nir_intrinsic_image_deref_atomic_imax:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
            res = intrin->src[0].ssa;
            flags |= has_glc_vmem_load | has_glc_vmem_store;
            break;
         default:
            continue;
         }

         uint8_t *flags_ptr;
         uint32_t count;
         get_buffer_resource_flags(ctx, res, access, &flags_ptr, &count);

         for (unsigned i = 0; i < count; i++)
            flags_ptr[i] |= flags;
      }
   }
}

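/* If NIR's range analysis can prove that the addition cannot exceed
 * UINT32_MAX, mark it as no-unsigned-wrap. Later passes can then split such
 * an offset computation (e.g. fold a constant part into an instruction's
 * immediate offset) without changing the wrapped result. */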
void apply_nuw_to_ssa(nir_shader *shader, struct hash_table *range_ht, nir_ssa_def *ssa,
                      const nir_unsigned_upper_bound_config *config)
{
   nir_ssa_scalar scalar;
   scalar.def = ssa;
   scalar.comp = 0;

   if (!nir_ssa_scalar_is_alu(scalar) || nir_ssa_scalar_alu_op(scalar) != nir_op_iadd)
      return;

   nir_alu_instr *add = nir_instr_as_alu(ssa->parent_instr);

   if (add->no_unsigned_wrap)
      return;

   nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
   nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);

   if (nir_ssa_scalar_is_const(src0)) {
      nir_ssa_scalar tmp = src0;
      src0 = src1;
      src1 = tmp;
   }

   uint32_t src1_ub = nir_unsigned_upper_bound(shader, range_ht, src1, config);
   add->no_unsigned_wrap = !nir_addition_might_overflow(shader, range_ht, src0, src1_ub, config);
}

void apply_nuw_to_offsets(isel_context *ctx, nir_function_impl *impl)
{
   nir_unsigned_upper_bound_config config;
   config.min_subgroup_size = 64;
   config.max_subgroup_size = 64;
   if (ctx->shader->info.stage == MESA_SHADER_COMPUTE && ctx->options->key.cs.subgroup_size) {
      config.min_subgroup_size = ctx->options->key.cs.subgroup_size;
      config.max_subgroup_size = ctx->options->key.cs.subgroup_size;
   }
   config.max_work_group_invocations = 2048;
   config.max_work_group_count[0] = 65535;
   config.max_work_group_count[1] = 65535;
   config.max_work_group_count[2] = 65535;
   config.max_work_group_size[0] = 2048;
   config.max_work_group_size[1] = 2048;
   config.max_work_group_size[2] = 2048;
   for (unsigned i = 0; i < MAX_VERTEX_ATTRIBS; i++) {
      unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[i];
      unsigned dfmt = attrib_format & 0xf;
      unsigned nfmt = (attrib_format >> 4) & 0x7;

      uint32_t max = UINT32_MAX;
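      /* For formats converted to float (UNORM/USCALED), the bounds below are
       * IEEE-754 bit patterns of the largest representable result, e.g.
       * 0x3f800000 is 1.0f and 0x437f0000 is 255.0f; integer formats simply
       * use the integer maximum. */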
      if (nfmt == V_008F0C_BUF_NUM_FORMAT_UNORM) {
         max = 0x3f800000u;
      } else if (nfmt == V_008F0C_BUF_NUM_FORMAT_UINT ||
                 nfmt == V_008F0C_BUF_NUM_FORMAT_USCALED) {
         bool uscaled = nfmt == V_008F0C_BUF_NUM_FORMAT_USCALED;
         switch (dfmt) {
         case V_008F0C_BUF_DATA_FORMAT_8:
         case V_008F0C_BUF_DATA_FORMAT_8_8:
         case V_008F0C_BUF_DATA_FORMAT_8_8_8_8:
            max = uscaled ? 0x437f0000u : UINT8_MAX;
            break;
         case V_008F0C_BUF_DATA_FORMAT_10_10_10_2:
         case V_008F0C_BUF_DATA_FORMAT_2_10_10_10:
            max = uscaled ? 0x447fc000u : 1023;
            break;
         case V_008F0C_BUF_DATA_FORMAT_10_11_11:
         case V_008F0C_BUF_DATA_FORMAT_11_11_10:
            max = uscaled ? 0x44ffe000u : 2047;
            break;
         case V_008F0C_BUF_DATA_FORMAT_16:
         case V_008F0C_BUF_DATA_FORMAT_16_16:
         case V_008F0C_BUF_DATA_FORMAT_16_16_16_16:
            max = uscaled ? 0x477fff00u : UINT16_MAX;
            break;
         case V_008F0C_BUF_DATA_FORMAT_32:
         case V_008F0C_BUF_DATA_FORMAT_32_32:
         case V_008F0C_BUF_DATA_FORMAT_32_32_32:
         case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
            max = uscaled ? 0x4f800000u : UINT32_MAX;
            break;
         }
      }
      config.vertex_attrib_max[i] = max;
   }

   struct hash_table *range_ht = _mesa_pointer_hash_table_create(NULL);

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_constant:
         case nir_intrinsic_load_uniform:
         case nir_intrinsic_load_push_constant:
            if (!nir_src_is_divergent(intrin->src[0]))
               apply_nuw_to_ssa(ctx->shader, range_ht, intrin->src[0].ssa, &config);
            break;
         case nir_intrinsic_load_ubo:
         case nir_intrinsic_load_ssbo:
            if (!nir_src_is_divergent(intrin->src[1]))
               apply_nuw_to_ssa(ctx->shader, range_ht, intrin->src[1].ssa, &config);
            break;
         case nir_intrinsic_store_ssbo:
            if (!nir_src_is_divergent(intrin->src[2]))
               apply_nuw_to_ssa(ctx->shader, range_ht, intrin->src[2].ssa, &config);
            break;
         default:
            break;
         }
      }
   }

   _mesa_hash_table_destroy(range_ht, NULL);
}

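/* Booleans are stored in lane masks (one bit per lane: s1 on wave32, s2 on
 * wave64); everything else is sized in bytes, so e.g. a divergent 3-component
 * 32-bit value gets the VGPR class v3. */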
RegClass get_reg_class(isel_context *ctx, RegType type, unsigned components, unsigned bitsize)
{
   if (bitsize == 1)
      return RegClass(RegType::sgpr, ctx->program->lane_mask.size() * components);
   else
      return RegClass::get(type, components * bitsize / 8u);
}

void init_context(isel_context *ctx, nir_shader *shader)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);
   unsigned lane_mask_size = ctx->program->lane_mask.size();

   ctx->shader = shader;
   nir_divergence_analysis(shader, nir_divergence_view_index_uniform);

   fill_desc_set_info(ctx, impl);

   apply_nuw_to_offsets(ctx, impl);

   /* sanitize control flow */
   nir_metadata_require(impl, nir_metadata_dominance);
   sanitize_cf_list(impl, &impl->body);
   nir_metadata_preserve(impl, (nir_metadata)~nir_metadata_block_index);

   /* we'll need this for isel */
   nir_metadata_require(impl, nir_metadata_block_index);

   if (!(ctx->stage & sw_gs_copy) && ctx->options->dump_preoptir) {
      fprintf(stderr, "NIR shader before instruction selection:\n");
      nir_print_shader(shader, stderr);
   }

   std::unique_ptr<Temp[]> allocated{new Temp[impl->ssa_alloc]()};

   unsigned spi_ps_inputs = 0;

   std::unique_ptr<unsigned[]> nir_to_aco{new unsigned[impl->num_blocks]()};

   /* TODO: make this recursive to improve compile times and merge with fill_desc_set_info() */
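   /* Iterate to a fixed point: the register class of a phi depends on its
    * sources, which may include phis that haven't been visited yet, so keep
    * re-scanning until no class changes anymore. */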
   bool done = false;
   while (!done) {
      done = true;
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            switch(instr->type) {
            case nir_instr_type_alu: {
               nir_alu_instr *alu_instr = nir_instr_as_alu(instr);
               RegType type = RegType::sgpr;
               switch(alu_instr->op) {
               case nir_op_fmul:
               case nir_op_fadd:
               case nir_op_fsub:
               case nir_op_fmax:
               case nir_op_fmin:
               case nir_op_fmax3:
               case nir_op_fmin3:
               case nir_op_fmed3:
               case nir_op_fneg:
               case nir_op_fabs:
               case nir_op_fsat:
               case nir_op_fsign:
               case nir_op_frcp:
               case nir_op_frsq:
               case nir_op_fsqrt:
               case nir_op_fexp2:
               case nir_op_flog2:
               case nir_op_ffract:
               case nir_op_ffloor:
               case nir_op_fceil:
               case nir_op_ftrunc:
               case nir_op_fround_even:
               case nir_op_fsin:
               case nir_op_fcos:
               case nir_op_f2f16:
               case nir_op_f2f16_rtz:
               case nir_op_f2f16_rtne:
               case nir_op_f2f32:
               case nir_op_f2f64:
               case nir_op_u2f16:
               case nir_op_u2f32:
               case nir_op_u2f64:
               case nir_op_i2f16:
               case nir_op_i2f32:
               case nir_op_i2f64:
               case nir_op_pack_half_2x16:
               case nir_op_unpack_half_2x16_split_x:
               case nir_op_unpack_half_2x16_split_y:
               case nir_op_fddx:
               case nir_op_fddy:
               case nir_op_fddx_fine:
               case nir_op_fddy_fine:
               case nir_op_fddx_coarse:
               case nir_op_fddy_coarse:
               case nir_op_fquantize2f16:
               case nir_op_ldexp:
               case nir_op_frexp_sig:
               case nir_op_frexp_exp:
               case nir_op_cube_face_index:
               case nir_op_cube_face_coord:
                  type = RegType::vgpr;
                  break;
               case nir_op_f2i16:
               case nir_op_f2u16:
               case nir_op_f2i32:
               case nir_op_f2u32:
               case nir_op_f2i64:
               case nir_op_f2u64:
               case nir_op_b2i8:
               case nir_op_b2i16:
               case nir_op_b2i32:
               case nir_op_b2i64:
               case nir_op_b2b32:
               case nir_op_b2f16:
               case nir_op_b2f32:
               case nir_op_mov:
                  type = nir_dest_is_divergent(alu_instr->dest.dest) ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_op_bcsel:
                  type = nir_dest_is_divergent(alu_instr->dest.dest) ? RegType::vgpr : RegType::sgpr;
                  /* fallthrough */
               default:
                  for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
                     if (allocated[alu_instr->src[i].src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }

               RegClass rc = get_reg_class(ctx, type, alu_instr->dest.dest.ssa.num_components, alu_instr->dest.dest.ssa.bit_size);
               allocated[alu_instr->dest.dest.ssa.index] = Temp(0, rc);
               break;
            }
            case nir_instr_type_load_const: {
               unsigned num_components = nir_instr_as_load_const(instr)->def.num_components;
               unsigned bit_size = nir_instr_as_load_const(instr)->def.bit_size;
               RegClass rc = get_reg_class(ctx, RegType::sgpr, num_components, bit_size);
               allocated[nir_instr_as_load_const(instr)->def.index] = Temp(0, rc);
               break;
            }
            case nir_instr_type_intrinsic: {
               nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);
               if (!nir_intrinsic_infos[intrinsic->intrinsic].has_dest)
                  break;
               RegType type = RegType::sgpr;
               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_push_constant:
               case nir_intrinsic_load_work_group_id:
               case nir_intrinsic_load_num_work_groups:
               case nir_intrinsic_load_subgroup_id:
               case nir_intrinsic_load_num_subgroups:
               case nir_intrinsic_load_first_vertex:
               case nir_intrinsic_load_base_instance:
               case nir_intrinsic_get_buffer_size:
               case nir_intrinsic_vote_all:
               case nir_intrinsic_vote_any:
               case nir_intrinsic_read_first_invocation:
               case nir_intrinsic_read_invocation:
               case nir_intrinsic_first_invocation:
               case nir_intrinsic_ballot:
                  type = RegType::sgpr;
                  break;
               case nir_intrinsic_load_sample_id:
               case nir_intrinsic_load_sample_mask_in:
               case nir_intrinsic_load_input:
               case nir_intrinsic_load_output:
               case nir_intrinsic_load_input_vertex:
               case nir_intrinsic_load_per_vertex_input:
               case nir_intrinsic_load_per_vertex_output:
               case nir_intrinsic_load_vertex_id:
               case nir_intrinsic_load_vertex_id_zero_base:
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_model:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset:
               case nir_intrinsic_load_interpolated_input:
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos:
               case nir_intrinsic_load_layer_id:
               case nir_intrinsic_load_local_invocation_id:
               case nir_intrinsic_load_local_invocation_index:
               case nir_intrinsic_load_subgroup_invocation:
               case nir_intrinsic_load_tess_coord:
               case nir_intrinsic_write_invocation_amd:
               case nir_intrinsic_mbcnt_amd:
               case nir_intrinsic_load_instance_id:
               case nir_intrinsic_ssbo_atomic_add:
               case nir_intrinsic_ssbo_atomic_imin:
               case nir_intrinsic_ssbo_atomic_umin:
               case nir_intrinsic_ssbo_atomic_imax:
               case nir_intrinsic_ssbo_atomic_umax:
               case nir_intrinsic_ssbo_atomic_and:
               case nir_intrinsic_ssbo_atomic_or:
               case nir_intrinsic_ssbo_atomic_xor:
               case nir_intrinsic_ssbo_atomic_exchange:
               case nir_intrinsic_ssbo_atomic_comp_swap:
               case nir_intrinsic_global_atomic_add:
               case nir_intrinsic_global_atomic_imin:
               case nir_intrinsic_global_atomic_umin:
               case nir_intrinsic_global_atomic_imax:
               case nir_intrinsic_global_atomic_umax:
               case nir_intrinsic_global_atomic_and:
               case nir_intrinsic_global_atomic_or:
               case nir_intrinsic_global_atomic_xor:
               case nir_intrinsic_global_atomic_exchange:
               case nir_intrinsic_global_atomic_comp_swap:
               case nir_intrinsic_image_deref_atomic_add:
               case nir_intrinsic_image_deref_atomic_umin:
               case nir_intrinsic_image_deref_atomic_imin:
               case nir_intrinsic_image_deref_atomic_umax:
               case nir_intrinsic_image_deref_atomic_imax:
               case nir_intrinsic_image_deref_atomic_and:
               case nir_intrinsic_image_deref_atomic_or:
               case nir_intrinsic_image_deref_atomic_xor:
               case nir_intrinsic_image_deref_atomic_exchange:
               case nir_intrinsic_image_deref_atomic_comp_swap:
               case nir_intrinsic_image_deref_size:
               case nir_intrinsic_shared_atomic_add:
               case nir_intrinsic_shared_atomic_imin:
               case nir_intrinsic_shared_atomic_umin:
               case nir_intrinsic_shared_atomic_imax:
               case nir_intrinsic_shared_atomic_umax:
               case nir_intrinsic_shared_atomic_and:
               case nir_intrinsic_shared_atomic_or:
               case nir_intrinsic_shared_atomic_xor:
               case nir_intrinsic_shared_atomic_exchange:
               case nir_intrinsic_shared_atomic_comp_swap:
               case nir_intrinsic_shared_atomic_fadd:
               case nir_intrinsic_load_scratch:
               case nir_intrinsic_load_invocation_id:
               case nir_intrinsic_load_primitive_id:
                  type = RegType::vgpr;
                  break;
               case nir_intrinsic_shuffle:
               case nir_intrinsic_quad_broadcast:
               case nir_intrinsic_quad_swap_horizontal:
               case nir_intrinsic_quad_swap_vertical:
               case nir_intrinsic_quad_swap_diagonal:
               case nir_intrinsic_quad_swizzle_amd:
               case nir_intrinsic_masked_swizzle_amd:
               case nir_intrinsic_inclusive_scan:
               case nir_intrinsic_exclusive_scan:
               case nir_intrinsic_reduce:
               case nir_intrinsic_load_ubo:
               case nir_intrinsic_load_ssbo:
               case nir_intrinsic_load_global:
               case nir_intrinsic_vulkan_resource_index:
               case nir_intrinsic_load_shared:
                  type = nir_dest_is_divergent(intrinsic->dest) ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_intrinsic_load_view_index:
                  type = ctx->stage == fragment_fs ? RegType::vgpr : RegType::sgpr;
                  break;
               default:
                  for (unsigned i = 0; i < nir_intrinsic_infos[intrinsic->intrinsic].num_srcs; i++) {
                     if (allocated[intrinsic->src[i].ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }
               RegClass rc = get_reg_class(ctx, type, intrinsic->dest.ssa.num_components, intrinsic->dest.ssa.bit_size);
               allocated[intrinsic->dest.ssa.index] = Temp(0, rc);

               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset: {
                  glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(intrinsic);
                  spi_ps_inputs |= get_interp_input(intrinsic->intrinsic, mode);
                  break;
               }
               case nir_intrinsic_load_barycentric_model:
                  spi_ps_inputs |= S_0286CC_PERSP_PULL_MODEL_ENA(1);
                  break;
               case nir_intrinsic_load_front_face:
                  spi_ps_inputs |= S_0286CC_FRONT_FACE_ENA(1);
                  break;
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos: {
                  uint8_t mask = nir_ssa_def_components_read(&intrinsic->dest.ssa);
                  for (unsigned i = 0; i < 4; i++) {
                     if (mask & (1 << i))
                        spi_ps_inputs |= S_0286CC_POS_X_FLOAT_ENA(1) << i;
                  }
                  break;
               }
               case nir_intrinsic_load_sample_id:
                  spi_ps_inputs |= S_0286CC_ANCILLARY_ENA(1);
                  break;
               case nir_intrinsic_load_sample_mask_in:
                  spi_ps_inputs |= S_0286CC_ANCILLARY_ENA(1);
                  spi_ps_inputs |= S_0286CC_SAMPLE_COVERAGE_ENA(1);
                  break;
               default:
                  break;
               }
               break;
            }
            case nir_instr_type_tex: {
               nir_tex_instr* tex = nir_instr_as_tex(instr);
               unsigned size = tex->dest.ssa.num_components;

               if (tex->dest.ssa.bit_size == 64)
                  size *= 2;
               if (tex->op == nir_texop_texture_samples) {
                  assert(!tex->dest.ssa.divergent);
               }
               if (nir_dest_is_divergent(tex->dest))
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::vgpr, size));
               else
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_parallel_copy: {
               nir_foreach_parallel_copy_entry(entry, nir_instr_as_parallel_copy(instr)) {
                  allocated[entry->dest.ssa.index] = allocated[entry->src.ssa->index];
               }
               break;
            }
            case nir_instr_type_ssa_undef: {
               unsigned num_components = nir_instr_as_ssa_undef(instr)->def.num_components;
               unsigned bit_size = nir_instr_as_ssa_undef(instr)->def.bit_size;
               RegClass rc = get_reg_class(ctx, RegType::sgpr, num_components, bit_size);
               allocated[nir_instr_as_ssa_undef(instr)->def.index] = Temp(0, rc);
               break;
            }
            case nir_instr_type_phi: {
               nir_phi_instr* phi = nir_instr_as_phi(instr);
               RegType type;
               unsigned size = phi->dest.ssa.num_components;

               if (phi->dest.ssa.bit_size == 1) {
                  assert(size == 1 && "multiple components not yet supported on boolean phis.");
                  type = RegType::sgpr;
                  size *= lane_mask_size;
                  allocated[phi->dest.ssa.index] = Temp(0, RegClass(type, size));
                  break;
               }

               if (nir_dest_is_divergent(phi->dest)) {
                  type = RegType::vgpr;
               } else {
                  type = RegType::sgpr;
                  nir_foreach_phi_src (src, phi) {
                     if (allocated[src->src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                     if (allocated[src->src.ssa->index].type() == RegType::none)
                        done = false;
                  }
               }

               RegClass rc = get_reg_class(ctx, type, phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
               if (rc != allocated[phi->dest.ssa.index].regClass()) {
                  done = false;
               } else {
                  nir_foreach_phi_src(src, phi)
                     assert(allocated[src->src.ssa->index].size() == rc.size());
               }
               allocated[phi->dest.ssa.index] = Temp(0, rc);
               break;
            }
            default:
               break;
            }
         }
      }
   }

   if (G_0286CC_POS_W_FLOAT_ENA(spi_ps_inputs)) {
      /* If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be enabled too */
      spi_ps_inputs |= S_0286CC_PERSP_CENTER_ENA(1);
   }

   if (!(spi_ps_inputs & 0x7F)) {
      /* At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled */
      spi_ps_inputs |= S_0286CC_PERSP_CENTER_ENA(1);
   }

   ctx->program->config->spi_ps_input_ena = spi_ps_inputs;
   ctx->program->config->spi_ps_input_addr = spi_ps_inputs;

   for (unsigned i = 0; i < impl->ssa_alloc; i++)
      allocated[i] = Temp(ctx->program->allocateId(), allocated[i].regClass());

   ctx->allocated.reset(allocated.release());
   ctx->cf_info.nir_to_aco.reset(nir_to_aco.release());
}

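/* Emits p_startpgm, which defines one Temp per enabled input argument, fixed
 * to the physical register in which the hardware passes it. Note that in
 * ACO's PhysReg numbering, 0-255 are SGPRs and 256 onwards are VGPRs, hence
 * the "reg + 256" for VGPR arguments below. */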
Pseudo_instruction *add_startpgm(struct isel_context *ctx)
{
   unsigned arg_count = ctx->args->ac.arg_count;
   if (ctx->stage == fragment_fs) {
      /* LLVM optimizes away unused FS inputs and computes spi_ps_input_addr
       * itself and then communicates the results back via the ELF binary.
       * Mirror what LLVM does by re-mapping the VGPR arguments here.
       *
       * TODO: If we made the FS input scanning code into a separate pass that
       * could run before argument setup, then this wouldn't be necessary
       * anymore.
       */
      struct ac_shader_args *args = &ctx->args->ac;
      arg_count = 0;
      for (unsigned i = 0, vgpr_arg = 0, vgpr_reg = 0; i < args->arg_count; i++) {
         if (args->args[i].file != AC_ARG_VGPR) {
            arg_count++;
            continue;
         }

         if (!(ctx->program->config->spi_ps_input_addr & (1 << vgpr_arg))) {
            args->args[i].skip = true;
         } else {
            args->args[i].offset = vgpr_reg;
            vgpr_reg += args->args[i].size;
            arg_count++;
         }
         vgpr_arg++;
      }
   }

   aco_ptr<Pseudo_instruction> startpgm{create_instruction<Pseudo_instruction>(aco_opcode::p_startpgm, Format::PSEUDO, 0, arg_count + 1)};
   for (unsigned i = 0, arg = 0; i < ctx->args->ac.arg_count; i++) {
      if (ctx->args->ac.args[i].skip)
         continue;

      enum ac_arg_regfile file = ctx->args->ac.args[i].file;
      unsigned size = ctx->args->ac.args[i].size;
      unsigned reg = ctx->args->ac.args[i].offset;
      RegClass type = RegClass(file == AC_ARG_SGPR ? RegType::sgpr : RegType::vgpr, size);
      Temp dst = Temp{ctx->program->allocateId(), type};
      ctx->arg_temps[i] = dst;
      startpgm->definitions[arg] = Definition(dst);
      startpgm->definitions[arg].setFixed(PhysReg{file == AC_ARG_SGPR ? reg : reg + 256});
      arg++;
   }
   startpgm->definitions[arg_count] = Definition{ctx->program->allocateId(), exec, ctx->program->lane_mask};
   Pseudo_instruction *instr = startpgm.get();
   ctx->block->instructions.push_back(std::move(startpgm));

   /* Stash these in the program so that they can be accessed later when
    * handling spilling.
    */
   ctx->program->private_segment_buffer = get_arg(ctx, ctx->args->ring_offsets);
   ctx->program->scratch_offset = get_arg(ctx, ctx->args->scratch_offset);

   return instr;
}

int
type_size(const struct glsl_type *type, bool bindless)
{
   // TODO: don't we need type->std430_base_alignment() here?
   return glsl_count_attribute_slots(type, false);
}

void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type)
      ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size;
}

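/* Callback for nir_opt_load_store_vectorize(): decides whether two adjacent
 * accesses may be combined into one with the given bit size, component count
 * and alignment, based on what the memory paths in instruction selection
 * can handle. */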
static bool
mem_vectorize_callback(unsigned align, unsigned bit_size,
                       unsigned num_components, unsigned high_offset,
                       nir_intrinsic_instr *low, nir_intrinsic_instr *high)
{
   if (num_components > 4)
      return false;

   /* >128 bit loads are split except with SMEM */
   if (bit_size * num_components > 128)
      return false;

   switch (low->intrinsic) {
   case nir_intrinsic_load_global:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_push_constant:
      return align % (bit_size == 8 ? 2 : 4) == 0;
   case nir_intrinsic_load_deref:
   case nir_intrinsic_store_deref:
      assert(nir_src_as_deref(low->src[0])->mode == nir_var_mem_shared);
      /* fallthrough */
   case nir_intrinsic_load_shared:
   case nir_intrinsic_store_shared:
      if (bit_size * num_components > 64) /* 96 and 128 bit loads require 128 bit alignment and are split otherwise */
         return align % 16 == 0;
      else
         return align % (bit_size == 8 ? 2 : 4) == 0;
   default:
      return false;
   }
   return false;
}

void
setup_vs_output_info(isel_context *ctx, nir_shader *nir,
                     bool export_prim_id, bool export_clip_dists,
                     radv_vs_output_info *outinfo)
{
   memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
          sizeof(outinfo->vs_output_param_offset));

   outinfo->param_exports = 0;
   int pos_written = 0x1;
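   /* pos_written is a mask of position exports: bit 0 is the position itself,
    * bit 1 the PSIZE/LAYER/VIEWPORT export and bits 2-3 the two clip/cull
    * distance vec4s (set further down). */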
   if (outinfo->writes_pointsize || outinfo->writes_viewport_index || outinfo->writes_layer)
      pos_written |= 1 << 1;

   uint64_t mask = nir->info.outputs_written;
   while (mask) {
      int idx = u_bit_scan64(&mask);
      if (idx >= VARYING_SLOT_VAR0 || idx == VARYING_SLOT_LAYER ||
          idx == VARYING_SLOT_PRIMITIVE_ID || idx == VARYING_SLOT_VIEWPORT ||
          ((idx == VARYING_SLOT_CLIP_DIST0 || idx == VARYING_SLOT_CLIP_DIST1) && export_clip_dists)) {
         if (outinfo->vs_output_param_offset[idx] == AC_EXP_PARAM_UNDEFINED)
            outinfo->vs_output_param_offset[idx] = outinfo->param_exports++;
      }
   }
   if (outinfo->writes_layer &&
       outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] == AC_EXP_PARAM_UNDEFINED) {
      /* when ctx->options->key.has_multiview_view_index is true, the layer
       * variable isn't declared in NIR and it is isel's job to get the layer */
      outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = outinfo->param_exports++;
   }

   if (export_prim_id) {
      assert(outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] == AC_EXP_PARAM_UNDEFINED);
      outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = outinfo->param_exports++;
   }

   ctx->export_clip_dists = export_clip_dists;
   ctx->num_clip_distances = util_bitcount(outinfo->clip_dist_mask);
   ctx->num_cull_distances = util_bitcount(outinfo->cull_dist_mask);

   assert(ctx->num_clip_distances + ctx->num_cull_distances <= 8);

   if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
      pos_written |= 1 << 2;
   if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
      pos_written |= 1 << 3;

   outinfo->pos_exports = util_bitcount(pos_written);
}

void
setup_vs_variables(isel_context *ctx, nir_shader *nir)
{
   nir_foreach_shader_in_variable(variable, nir)
   {
      variable->data.driver_location = variable->data.location * 4;
   }
   nir_foreach_shader_out_variable(variable, nir)
   {
      if (ctx->stage == vertex_vs || ctx->stage == ngg_vertex_gs)
         variable->data.driver_location = variable->data.location * 4;

      assert(variable->data.location >= 0 && variable->data.location <= UINT8_MAX);
      ctx->output_drv_loc_to_var_slot[MESA_SHADER_VERTEX][variable->data.driver_location / 4] = variable->data.location;
   }

   if (ctx->stage == vertex_vs || ctx->stage == ngg_vertex_gs) {
      radv_vs_output_info *outinfo = &ctx->program->info->vs.outinfo;
      setup_vs_output_info(ctx, nir, outinfo->export_prim_id,
                           ctx->options->key.vs_common_out.export_clip_dists, outinfo);
   } else if (ctx->stage == vertex_ls) {
      ctx->tcs_num_inputs = ctx->program->info->vs.num_linked_outputs;
   }

   if (ctx->stage == ngg_vertex_gs && ctx->args->options->key.vs_common_out.export_prim_id) {
      /* We need to store the primitive IDs in LDS */
      unsigned lds_size = ctx->program->info->ngg_info.esgs_ring_size;
      ctx->program->config->lds_size = (lds_size + ctx->program->lds_alloc_granule - 1) /
                                       ctx->program->lds_alloc_granule;
   }
}

void setup_gs_variables(isel_context *ctx, nir_shader *nir)
{
   if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs)
      ctx->program->config->lds_size = ctx->program->info->gs_ring_info.lds_size; /* Already in units of the alloc granularity */

   nir_foreach_shader_out_variable(variable, nir) {
      variable->data.driver_location = variable->data.location * 4;
   }

   if (ctx->stage == vertex_geometry_gs)
      ctx->program->info->gs.es_type = MESA_SHADER_VERTEX;
   else if (ctx->stage == tess_eval_geometry_gs)
      ctx->program->info->gs.es_type = MESA_SHADER_TESS_EVAL;
}

void
setup_tcs_info(isel_context *ctx, nir_shader *nir, nir_shader *vs)
{
   /* When the number of TCS input and output vertices are the same (typically 3):
    * - There is an equal amount of LS and HS invocations
    * - In case of merged LSHS shaders, the LS and HS halves of the shader
    *   always process the exact same vertex. We can use this knowledge to optimize them.
    *
    * We don't set tcs_in_out_eq if the float controls differ because that might
    * involve different float modes for the same block and our optimizer
    * doesn't handle an instruction dominating another with a different mode.
    */
   ctx->tcs_in_out_eq =
      ctx->stage == vertex_tess_control_hs &&
      ctx->args->options->key.tcs.input_vertices == nir->info.tess.tcs_vertices_out &&
      vs->info.float_controls_execution_mode == nir->info.float_controls_execution_mode;

   if (ctx->tcs_in_out_eq) {
      ctx->tcs_temp_only_inputs = ~nir->info.tess.tcs_cross_invocation_inputs_read &
                                  ~nir->info.inputs_read_indirectly &
                                  nir->info.inputs_read;
   }

   ctx->tcs_num_inputs = ctx->program->info->tcs.num_linked_inputs;
   ctx->tcs_num_outputs = ctx->program->info->tcs.num_linked_outputs;
   ctx->tcs_num_patch_outputs = ctx->program->info->tcs.num_linked_patch_outputs;

   ctx->tcs_num_patches = get_tcs_num_patches(
                             ctx->args->options->key.tcs.input_vertices,
                             nir->info.tess.tcs_vertices_out,
                             ctx->tcs_num_inputs,
                             ctx->tcs_num_outputs,
                             ctx->tcs_num_patch_outputs,
                             ctx->args->options->tess_offchip_block_dw_size,
                             ctx->args->options->chip_class,
                             ctx->args->options->family);
   unsigned lds_size = calculate_tess_lds_size(
                          ctx->args->options->chip_class,
                          ctx->args->options->key.tcs.input_vertices,
                          nir->info.tess.tcs_vertices_out,
                          ctx->tcs_num_inputs,
                          ctx->tcs_num_patches,
                          ctx->tcs_num_outputs,
                          ctx->tcs_num_patch_outputs);

   ctx->args->shader_info->tcs.num_patches = ctx->tcs_num_patches;
   ctx->args->shader_info->tcs.num_lds_blocks = lds_size;
   ctx->program->config->lds_size = (lds_size + ctx->program->lds_alloc_granule - 1) /
                                    ctx->program->lds_alloc_granule;
}

void
setup_tcs_variables(isel_context *ctx, nir_shader *nir)
{
   nir_foreach_shader_out_variable(variable, nir) {
      assert(variable->data.location >= 0 && variable->data.location <= UINT8_MAX);

      if (variable->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)
         ctx->tcs_tess_lvl_out_loc = variable->data.driver_location * 4u;
      else if (variable->data.location == VARYING_SLOT_TESS_LEVEL_INNER)
         ctx->tcs_tess_lvl_in_loc = variable->data.driver_location * 4u;

      if (variable->data.patch)
         ctx->output_tcs_patch_drv_loc_to_var_slot[variable->data.driver_location / 4] = variable->data.location;
      else
         ctx->output_drv_loc_to_var_slot[MESA_SHADER_TESS_CTRL][variable->data.driver_location / 4] = variable->data.location;
   }
}

void
setup_tes_variables(isel_context *ctx, nir_shader *nir)
{
   ctx->tcs_num_patches = ctx->args->options->key.tes.num_patches;
   ctx->tcs_num_outputs = ctx->program->info->tes.num_linked_inputs;

   nir_foreach_shader_out_variable(variable, nir) {
      if (ctx->stage == tess_eval_vs || ctx->stage == ngg_tess_eval_gs)
         variable->data.driver_location = variable->data.location * 4;
   }

   if (ctx->stage == tess_eval_vs || ctx->stage == ngg_tess_eval_gs) {
      radv_vs_output_info *outinfo = &ctx->program->info->tes.outinfo;
      setup_vs_output_info(ctx, nir, outinfo->export_prim_id,
                           ctx->options->key.vs_common_out.export_clip_dists, outinfo);
   }
}

void
setup_variables(isel_context *ctx, nir_shader *nir)
{
   switch (nir->info.stage) {
   case MESA_SHADER_FRAGMENT: {
      nir_foreach_shader_out_variable(variable, nir)
      {
         int idx = variable->data.location + variable->data.index;
         variable->data.driver_location = idx * 4;
      }
      break;
   }
   case MESA_SHADER_COMPUTE: {
      ctx->program->config->lds_size = (nir->info.cs.shared_size + ctx->program->lds_alloc_granule - 1) /
                                       ctx->program->lds_alloc_granule;
      break;
   }
   case MESA_SHADER_VERTEX: {
      setup_vs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_GEOMETRY: {
      setup_gs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_TESS_CTRL: {
      setup_tcs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_TESS_EVAL: {
      setup_tes_variables(ctx, nir);
      break;
   }
   default:
      unreachable("Unhandled shader stage.");
   }
}

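/* Callback for nir_lower_bit_size(): returns the bit size an ALU instruction
 * should be lowered to, or 0 to keep it as-is. Sub-dword integer ALU is
 * widened to 32 bits here, since the scalar ALU only operates on 32-bit (and
 * 64-bit) values; conversions, bcsel, booleans and vecs are left alone. */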
unsigned
lower_bit_size_callback(const nir_alu_instr *alu, void *_)
{
   if (nir_op_is_vec(alu->op))
      return 0;

   unsigned bit_size = alu->dest.dest.ssa.bit_size;
   if (nir_alu_instr_is_comparison(alu))
      bit_size = nir_src_bit_size(alu->src[0].src);

   if (bit_size >= 32 || bit_size == 1)
      return 0;

   if (alu->op == nir_op_bcsel)
      return 0;

   const nir_op_info *info = &nir_op_infos[alu->op];

   if (info->is_conversion)
      return 0;

   bool is_integer = info->output_type & (nir_type_uint | nir_type_int);
   for (unsigned i = 0; is_integer && (i < info->num_inputs); i++)
      is_integer = info->input_types[i] & (nir_type_uint | nir_type_int);

   return is_integer ? 32 : 0;
}

void
setup_nir(isel_context *ctx, nir_shader *nir)
{
   Program *program = ctx->program;

   /* align and copy constant data */
   while (program->constant_data.size() % 4u)
      program->constant_data.push_back(0);
   ctx->constant_data_offset = program->constant_data.size();
   program->constant_data.insert(program->constant_data.end(),
                                 (uint8_t*)nir->constant_data,
                                 (uint8_t*)nir->constant_data + nir->constant_data_size);

   /* the variable setup has to be done before lower_io / CSE */
   setup_variables(ctx, nir);

   /* optimize and lower memory operations */
   if (nir_lower_explicit_io(nir, nir_var_mem_global, nir_address_format_64bit_global)) {
      nir_opt_constant_folding(nir);
      nir_opt_cse(nir);
   }

   bool lower_to_scalar = false;
   bool lower_pack = false;
   nir_variable_mode robust_modes = (nir_variable_mode)0;

   if (ctx->options->robust_buffer_access) {
      robust_modes = (nir_variable_mode)(nir_var_mem_ubo |
                                         nir_var_mem_ssbo |
                                         nir_var_mem_global |
                                         nir_var_mem_push_const);
   }

   if (nir_opt_load_store_vectorize(nir,
                                    (nir_variable_mode)(nir_var_mem_ssbo | nir_var_mem_ubo |
                                                        nir_var_mem_push_const | nir_var_mem_shared |
                                                        nir_var_mem_global),
                                    mem_vectorize_callback, robust_modes)) {
      lower_to_scalar = true;
      lower_pack = true;
   }
   if (nir->info.stage != MESA_SHADER_COMPUTE)
      nir_lower_io(nir, (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out), type_size, (nir_lower_io_options)0);

   lower_to_scalar |= nir_opt_shrink_vectors(nir);

   if (lower_to_scalar)
      nir_lower_alu_to_scalar(nir, NULL, NULL);
   if (lower_pack)
      nir_lower_pack(nir);

   /* lower ALU operations */
   nir_lower_int64(nir);

   if (nir_lower_bit_size(nir, lower_bit_size_callback, NULL))
      nir_copy_prop(nir); /* allow nir_opt_idiv_const() to optimize lowered divisions */

   nir_opt_idiv_const(nir, 32);
   nir_lower_idiv(nir, nir_lower_idiv_precise);

   /* optimize the lowered ALU operations */
   bool more_algebraic = true;
   while (more_algebraic) {
      more_algebraic = false;
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_dce);
      NIR_PASS_V(nir, nir_opt_constant_folding);
      NIR_PASS(more_algebraic, nir, nir_opt_algebraic);
   }

   /* Do late algebraic optimization to turn add(a, neg(b)) back into
    * subs, then the mandatory cleanup after algebraic. Note that it may
    * produce fnegs, and if so then we need to keep running to squash
    * fneg(fneg(a)).
    */
   bool more_late_algebraic = true;
   while (more_late_algebraic) {
      more_late_algebraic = false;
      NIR_PASS(more_late_algebraic, nir, nir_opt_algebraic_late);
      NIR_PASS_V(nir, nir_opt_constant_folding);
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_dce);
      NIR_PASS_V(nir, nir_opt_cse);
   }

   /* cleanup passes */
   nir_lower_load_const_to_scalar(nir);
   nir_move_options move_opts = (nir_move_options)(
      nir_move_const_undef | nir_move_load_ubo | nir_move_load_input |
      nir_move_comparisons | nir_move_copies);
   nir_opt_sink(nir, move_opts);
   nir_opt_move(nir, move_opts);
   nir_convert_to_lcssa(nir, true, false);
   nir_lower_phis_to_scalar(nir);

   nir_function_impl *func = nir_shader_get_entrypoint(nir);
   nir_index_ssa_defs(func);
}

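/* XNACK lets the hardware replay memory instructions after a page fault,
 * which is used for demand paging on APUs; it is only ever enabled on the
 * APU families listed below, and the compiler has to account for it in some
 * memory-related decisions. */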
void
setup_xnack(Program *program)
{
   switch (program->family) {
   /* GFX8 APUs */
   case CHIP_CARRIZO:
   case CHIP_STONEY:
   /* GFX9 APUs */
   case CHIP_RAVEN:
   case CHIP_RAVEN2:
   case CHIP_RENOIR:
      program->xnack_enabled = true;
      break;
   default:
      break;
   }
}

isel_context
setup_isel_context(Program* program,
                   unsigned shader_count,
                   struct nir_shader *const *shaders,
                   ac_shader_config* config,
                   struct radv_shader_args *args,
                   bool is_gs_copy_shader)
{
   Stage stage = 0;
   for (unsigned i = 0; i < shader_count; i++) {
      switch (shaders[i]->info.stage) {
      case MESA_SHADER_VERTEX:
         stage |= sw_vs;
         break;
      case MESA_SHADER_TESS_CTRL:
         stage |= sw_tcs;
         break;
      case MESA_SHADER_TESS_EVAL:
         stage |= sw_tes;
         break;
      case MESA_SHADER_GEOMETRY:
         stage |= is_gs_copy_shader ? sw_gs_copy : sw_gs;
         break;
      case MESA_SHADER_FRAGMENT:
         stage |= sw_fs;
         break;
      case MESA_SHADER_COMPUTE:
         stage |= sw_cs;
         break;
      default:
         unreachable("Shader stage not implemented");
      }
   }
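   /* Map the combination of software stages onto the hardware stage that will
    * execute them; the per-case comments below note the GFX generations for
    * which each mapping applies. */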
   bool gfx9_plus = args->options->chip_class >= GFX9;
   bool ngg = args->shader_info->is_ngg && args->options->chip_class >= GFX10;
   if (stage == sw_vs && args->shader_info->vs.as_es && !ngg)
      stage |= hw_es;
   else if (stage == sw_vs && !args->shader_info->vs.as_ls && !ngg)
      stage |= hw_vs;
   else if (stage == sw_vs && ngg)
      stage |= hw_ngg_gs; /* GFX10/NGG: VS without GS uses the HW GS stage */
   else if (stage == sw_gs)
      stage |= hw_gs;
   else if (stage == sw_fs)
      stage |= hw_fs;
   else if (stage == sw_cs)
      stage |= hw_cs;
   else if (stage == sw_gs_copy)
      stage |= hw_vs;
   else if (stage == (sw_vs | sw_gs) && gfx9_plus && !ngg)
      stage |= hw_gs;
   else if (stage == sw_vs && args->shader_info->vs.as_ls)
      stage |= hw_ls; /* GFX6-8: VS is a Local Shader, when tessellation is used */
   else if (stage == sw_tcs)
      stage |= hw_hs; /* GFX6-8: TCS is a Hull Shader */
   else if (stage == (sw_vs | sw_tcs))
      stage |= hw_hs; /* GFX9-10: VS+TCS merged into a Hull Shader */
   else if (stage == sw_tes && !args->shader_info->tes.as_es && !ngg)
      stage |= hw_vs; /* GFX6-9: TES without GS uses the HW VS stage (and GFX10/legacy) */
   else if (stage == sw_tes && !args->shader_info->tes.as_es && ngg)
      stage |= hw_ngg_gs; /* GFX10/NGG: TES without GS uses the HW GS stage */
   else if (stage == sw_tes && args->shader_info->tes.as_es && !ngg)
      stage |= hw_es; /* GFX6-8: TES is an Export Shader */
   else if (stage == (sw_tes | sw_gs) && gfx9_plus && !ngg)
      stage |= hw_gs; /* GFX9: TES+GS merged into a GS (and GFX10/legacy) */
   else
      unreachable("Shader stage not implemented");

   init_program(program, stage, args->shader_info,
                args->options->chip_class, args->options->family, config);

   isel_context ctx = {};
   ctx.program = program;
   ctx.args = args;
   ctx.options = args->options;
   ctx.stage = program->stage;

   /* TODO: Check if we need to adjust min_waves for unknown workgroup sizes. */
   if (program->stage & (hw_vs | hw_fs)) {
      /* PS and legacy VS have separate waves, no workgroups */
      program->workgroup_size = program->wave_size;
   } else if (program->stage == compute_cs) {
      /* CS sets the workgroup size explicitly */
      unsigned* bsize = program->info->cs.block_size;
      program->workgroup_size = bsize[0] * bsize[1] * bsize[2];
   } else if ((program->stage & hw_es) || program->stage == geometry_gs) {
      /* Unmerged ESGS operate in workgroups if on-chip GS (LDS rings) are enabled on GFX7-8 (not implemented in Mesa) */
      program->workgroup_size = program->wave_size;
   } else if (program->stage & hw_gs) {
      /* If on-chip GS (LDS rings) are enabled on GFX9 or later, merged GS operates in workgroups */
      assert(program->chip_class >= GFX9);
      uint32_t es_verts_per_subgrp = G_028A44_ES_VERTS_PER_SUBGRP(program->info->gs_ring_info.vgt_gs_onchip_cntl);
      uint32_t gs_instr_prims_in_subgrp = G_028A44_GS_INST_PRIMS_IN_SUBGRP(program->info->gs_ring_info.vgt_gs_onchip_cntl);
      uint32_t workgroup_size = MAX2(es_verts_per_subgrp, gs_instr_prims_in_subgrp);
      program->workgroup_size = MAX2(MIN2(workgroup_size, 256), 1);
   } else if (program->stage == vertex_ls) {
      /* Unmerged LS operates in workgroups */
      program->workgroup_size = UINT_MAX; /* TODO: probably tcs_num_patches * tcs_vertices_in, but those are not plumbed to ACO for LS */
   } else if (program->stage == tess_control_hs) {
      /* Unmerged HS operates in workgroups, size is determined by the output vertices */
      setup_tcs_info(&ctx, shaders[0], NULL);
      program->workgroup_size = ctx.tcs_num_patches * shaders[0]->info.tess.tcs_vertices_out;
   } else if (program->stage == vertex_tess_control_hs) {
      /* Merged LSHS operates in workgroups, but can still have a different number of LS and HS invocations */
      setup_tcs_info(&ctx, shaders[1], shaders[0]);
      program->workgroup_size = ctx.tcs_num_patches * MAX2(shaders[1]->info.tess.tcs_vertices_out, ctx.args->options->key.tcs.input_vertices);
   } else if (program->stage & hw_ngg_gs) {
      /* TODO: Calculate workgroup size of NGG shaders. */
      program->workgroup_size = UINT_MAX;
   } else {
      unreachable("Unsupported shader stage.");
   }

   calc_min_waves(program);
   program->vgpr_limit = get_addr_vgpr_from_waves(program, program->min_waves);
   program->sgpr_limit = get_addr_sgpr_from_waves(program, program->min_waves);

   unsigned scratch_size = 0;
   if (program->stage == gs_copy_vs) {
      assert(shader_count == 1);
      setup_vs_output_info(&ctx, shaders[0], false, true, &args->shader_info->vs.outinfo);
   } else {
      for (unsigned i = 0; i < shader_count; i++) {
         nir_shader *nir = shaders[i];
         setup_nir(&ctx, nir);
      }

      for (unsigned i = 0; i < shader_count; i++)
         scratch_size = std::max(scratch_size, shaders[i]->scratch_size);
   }

   ctx.program->config->scratch_bytes_per_wave = align(scratch_size * ctx.program->wave_size, 1024);

   ctx.block = ctx.program->create_and_insert_block();
   ctx.block->loop_nest_depth = 0;
   ctx.block->kind = block_kind_top_level;

   setup_xnack(program);
   program->sram_ecc_enabled = args->options->family == CHIP_ARCTURUS;
   /* apparently gfx702 also has fast v_fma_f32 but I can't find a family for that */
   program->has_fast_fma32 = program->chip_class >= GFX9;
   if (args->options->family == CHIP_TAHITI || args->options->family == CHIP_CARRIZO || args->options->family == CHIP_HAWAII)
      program->has_fast_fma32 = true;

   return ctx;
}

}