aco/wave32: Use lane mask regclass for exec/vcc.
mesa.git / src/amd/compiler/aco_instruction_selection_setup.cpp
/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
24
25 #include <array>
26 #include <unordered_map>
27 #include "aco_ir.h"
28 #include "nir.h"
29 #include "vulkan/radv_shader.h"
30 #include "vulkan/radv_descriptor_set.h"
31 #include "vulkan/radv_shader_args.h"
32 #include "sid.h"
33 #include "ac_exp_param.h"
34 #include "ac_shader_util.h"
35
36 #include "util/u_math.h"
37
38 #define MAX_INLINE_PUSH_CONSTS 8
39
40 namespace aco {
41
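/* Tracks the values written to VS output slots so they can be exported later:
 * a component write mask and one Temp per component for each varying slot. */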
struct vs_output_state {
   uint8_t mask[VARYING_SLOT_VAR31 + 1];
   Temp outputs[VARYING_SLOT_VAR31 + 1][4];
};

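/* State carried through instruction selection: the program being built, the
 * current NIR shader and block, per-SSA-def register classes and control flow
 * information. */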
struct isel_context {
   const struct radv_nir_compiler_options *options;
   struct radv_shader_args *args;
   Program *program;
   nir_shader *shader;
   uint32_t constant_data_offset;
   Block *block;
   bool *divergent_vals;
   std::unique_ptr<Temp[]> allocated;
   std::unordered_map<unsigned, std::array<Temp,4>> allocated_vec;
   Stage stage; /* combined software/hardware stage */
   bool has_gfx10_wave64_bpermute = false;
   struct {
      bool has_branch;
      uint16_t loop_nest_depth = 0;
      struct {
         unsigned header_idx;
         Block* exit;
         bool has_divergent_continue = false;
         bool has_divergent_branch = false;
      } parent_loop;
      struct {
         bool is_divergent = false;
      } parent_if;
      bool exec_potentially_empty = false;
      std::unique_ptr<unsigned[]> nir_to_aco; /* NIR block index to ACO block index */
   } cf_info;

   Temp arg_temps[AC_MAX_ARGS];

   /* inputs common for merged stages */
   Temp merged_wave_info = Temp(0, s1);

   /* FS inputs */
   Temp persp_centroid, linear_centroid;

   /* VS inputs */
   bool needs_instance_id;

   /* VS output information */
   unsigned num_clip_distances;
   unsigned num_cull_distances;
   vs_output_state vs_output;
};

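/* Returns the Temp that holds the given shader argument. Only valid for
 * arguments that were enabled during argument setup. */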
Temp get_arg(isel_context *ctx, struct ac_arg arg)
{
   assert(arg.used);
   return ctx->arg_temps[arg.arg_index];
}

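/* Maps a barycentric load intrinsic and its interpolation mode to the
 * SPI_PS_INPUT_ENA bit that makes the hardware provide that interpolant. */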
unsigned get_interp_input(nir_intrinsic_op intrin, enum glsl_interp_mode interp)
{
   switch (interp) {
   case INTERP_MODE_SMOOTH:
   case INTERP_MODE_NONE:
      if (intrin == nir_intrinsic_load_barycentric_pixel ||
          intrin == nir_intrinsic_load_barycentric_at_sample ||
          intrin == nir_intrinsic_load_barycentric_at_offset)
         return S_0286CC_PERSP_CENTER_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return S_0286CC_PERSP_CENTROID_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return S_0286CC_PERSP_SAMPLE_ENA(1);
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      if (intrin == nir_intrinsic_load_barycentric_pixel)
         return S_0286CC_LINEAR_CENTER_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_centroid)
         return S_0286CC_LINEAR_CENTROID_ENA(1);
      else if (intrin == nir_intrinsic_load_barycentric_sample)
         return S_0286CC_LINEAR_SAMPLE_ENA(1);
      break;
   default:
      break;
   }
   return 0;
}

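/* Assigns a register class to every NIR SSA definition: booleans become lane
 * masks (s1 on wave32, s2 on wave64), divergent values are placed in VGPRs
 * and uniform values in SGPRs. Since phis can reference definitions that
 * haven't been visited yet, the scan repeats until a fixed point is reached. */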
void init_context(isel_context *ctx, nir_shader *shader)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);
   unsigned lane_mask_size = ctx->program->lane_mask.size();

   ctx->shader = shader;
   ctx->divergent_vals = nir_divergence_analysis(shader, nir_divergence_view_index_uniform);

   std::unique_ptr<Temp[]> allocated{new Temp[impl->ssa_alloc]()};

   unsigned spi_ps_inputs = 0;

   std::unique_ptr<unsigned[]> nir_to_aco{new unsigned[impl->num_blocks]()};

   bool done = false;
   while (!done) {
      done = true;
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            switch(instr->type) {
            case nir_instr_type_alu: {
               nir_alu_instr *alu_instr = nir_instr_as_alu(instr);
               unsigned size = alu_instr->dest.dest.ssa.num_components;
               if (alu_instr->dest.dest.ssa.bit_size == 64)
                  size *= 2;
               RegType type = RegType::sgpr;
               switch(alu_instr->op) {
               case nir_op_fmul:
               case nir_op_fadd:
               case nir_op_fsub:
               case nir_op_fmax:
               case nir_op_fmin:
               case nir_op_fmax3:
               case nir_op_fmin3:
               case nir_op_fmed3:
               case nir_op_fneg:
               case nir_op_fabs:
               case nir_op_fsat:
               case nir_op_fsign:
               case nir_op_frcp:
               case nir_op_frsq:
               case nir_op_fsqrt:
               case nir_op_fexp2:
               case nir_op_flog2:
               case nir_op_ffract:
               case nir_op_ffloor:
               case nir_op_fceil:
               case nir_op_ftrunc:
               case nir_op_fround_even:
               case nir_op_fsin:
               case nir_op_fcos:
               case nir_op_f2f32:
               case nir_op_f2f64:
               case nir_op_u2f32:
               case nir_op_u2f64:
               case nir_op_i2f32:
               case nir_op_i2f64:
               case nir_op_pack_half_2x16:
               case nir_op_unpack_half_2x16_split_x:
               case nir_op_unpack_half_2x16_split_y:
               case nir_op_fddx:
               case nir_op_fddy:
               case nir_op_fddx_fine:
               case nir_op_fddy_fine:
               case nir_op_fddx_coarse:
               case nir_op_fddy_coarse:
               case nir_op_fquantize2f16:
               case nir_op_ldexp:
               case nir_op_frexp_sig:
               case nir_op_frexp_exp:
               case nir_op_cube_face_index:
               case nir_op_cube_face_coord:
                  type = RegType::vgpr;
                  break;
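               /* comparisons produce one bit per lane: allocate a lane mask
                * in SGPRs */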
               case nir_op_flt:
               case nir_op_fge:
               case nir_op_feq:
               case nir_op_fne:
               case nir_op_ilt:
               case nir_op_ige:
               case nir_op_ult:
               case nir_op_uge:
               case nir_op_ieq:
               case nir_op_ine:
               case nir_op_i2b1:
                  size = lane_mask_size;
                  break;
               case nir_op_f2i64:
               case nir_op_f2u64:
               case nir_op_b2i32:
               case nir_op_b2f32:
               case nir_op_f2i32:
               case nir_op_f2u32:
                  type = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_op_bcsel:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     size = lane_mask_size;
                  } else {
                     if (ctx->divergent_vals[alu_instr->dest.dest.ssa.index]) {
                        type = RegType::vgpr;
                     } else {
                        if (allocated[alu_instr->src[1].src.ssa->index].type() == RegType::vgpr ||
                            allocated[alu_instr->src[2].src.ssa->index].type() == RegType::vgpr) {
                           type = RegType::vgpr;
                        }
                     }
                     if (alu_instr->src[1].src.ssa->num_components == 1 && alu_instr->src[2].src.ssa->num_components == 1) {
                        assert(allocated[alu_instr->src[1].src.ssa->index].size() == allocated[alu_instr->src[2].src.ssa->index].size());
                        size = allocated[alu_instr->src[1].src.ssa->index].size();
                     }
                  }
                  break;
               case nir_op_mov:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     size = lane_mask_size;
                  } else {
                     type = ctx->divergent_vals[alu_instr->dest.dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  }
                  break;
               default:
                  if (alu_instr->dest.dest.ssa.bit_size == 1) {
                     size = lane_mask_size;
                  } else {
                     for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
                        if (allocated[alu_instr->src[i].src.ssa->index].type() == RegType::vgpr)
                           type = RegType::vgpr;
                     }
                  }
                  break;
               }
               allocated[alu_instr->dest.dest.ssa.index] = Temp(0, RegClass(type, size));
               break;
            }
            case nir_instr_type_load_const: {
               unsigned size = nir_instr_as_load_const(instr)->def.num_components;
               if (nir_instr_as_load_const(instr)->def.bit_size == 64)
                  size *= 2;
               else if (nir_instr_as_load_const(instr)->def.bit_size == 1)
                  size *= lane_mask_size;
               allocated[nir_instr_as_load_const(instr)->def.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_intrinsic: {
               nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);
               if (!nir_intrinsic_infos[intrinsic->intrinsic].has_dest)
                  break;
               unsigned size = intrinsic->dest.ssa.num_components;
               if (intrinsic->dest.ssa.bit_size == 64)
                  size *= 2;
               RegType type = RegType::sgpr;
               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_push_constant:
               case nir_intrinsic_load_work_group_id:
               case nir_intrinsic_load_num_work_groups:
               case nir_intrinsic_load_subgroup_id:
               case nir_intrinsic_load_num_subgroups:
               case nir_intrinsic_load_first_vertex:
               case nir_intrinsic_load_base_instance:
               case nir_intrinsic_get_buffer_size:
               case nir_intrinsic_vote_all:
               case nir_intrinsic_vote_any:
               case nir_intrinsic_read_first_invocation:
               case nir_intrinsic_read_invocation:
               case nir_intrinsic_first_invocation:
                  type = RegType::sgpr;
                  if (intrinsic->dest.ssa.bit_size == 1)
                     size = lane_mask_size;
                  break;
               case nir_intrinsic_ballot:
                  type = RegType::sgpr;
                  size = lane_mask_size;
                  break;
               case nir_intrinsic_load_sample_id:
               case nir_intrinsic_load_sample_mask_in:
               case nir_intrinsic_load_input:
               case nir_intrinsic_load_vertex_id:
               case nir_intrinsic_load_vertex_id_zero_base:
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset:
               case nir_intrinsic_load_interpolated_input:
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos:
               case nir_intrinsic_load_layer_id:
               case nir_intrinsic_load_local_invocation_id:
               case nir_intrinsic_load_local_invocation_index:
               case nir_intrinsic_load_subgroup_invocation:
               case nir_intrinsic_write_invocation_amd:
               case nir_intrinsic_mbcnt_amd:
               case nir_intrinsic_load_instance_id:
               case nir_intrinsic_ssbo_atomic_add:
               case nir_intrinsic_ssbo_atomic_imin:
               case nir_intrinsic_ssbo_atomic_umin:
               case nir_intrinsic_ssbo_atomic_imax:
               case nir_intrinsic_ssbo_atomic_umax:
               case nir_intrinsic_ssbo_atomic_and:
               case nir_intrinsic_ssbo_atomic_or:
               case nir_intrinsic_ssbo_atomic_xor:
               case nir_intrinsic_ssbo_atomic_exchange:
               case nir_intrinsic_ssbo_atomic_comp_swap:
               case nir_intrinsic_global_atomic_add:
               case nir_intrinsic_global_atomic_imin:
               case nir_intrinsic_global_atomic_umin:
               case nir_intrinsic_global_atomic_imax:
               case nir_intrinsic_global_atomic_umax:
               case nir_intrinsic_global_atomic_and:
               case nir_intrinsic_global_atomic_or:
               case nir_intrinsic_global_atomic_xor:
               case nir_intrinsic_global_atomic_exchange:
               case nir_intrinsic_global_atomic_comp_swap:
               case nir_intrinsic_image_deref_atomic_add:
               case nir_intrinsic_image_deref_atomic_umin:
               case nir_intrinsic_image_deref_atomic_imin:
               case nir_intrinsic_image_deref_atomic_umax:
               case nir_intrinsic_image_deref_atomic_imax:
               case nir_intrinsic_image_deref_atomic_and:
               case nir_intrinsic_image_deref_atomic_or:
               case nir_intrinsic_image_deref_atomic_xor:
               case nir_intrinsic_image_deref_atomic_exchange:
               case nir_intrinsic_image_deref_atomic_comp_swap:
               case nir_intrinsic_image_deref_size:
               case nir_intrinsic_shared_atomic_add:
               case nir_intrinsic_shared_atomic_imin:
               case nir_intrinsic_shared_atomic_umin:
               case nir_intrinsic_shared_atomic_imax:
               case nir_intrinsic_shared_atomic_umax:
               case nir_intrinsic_shared_atomic_and:
               case nir_intrinsic_shared_atomic_or:
               case nir_intrinsic_shared_atomic_xor:
               case nir_intrinsic_shared_atomic_exchange:
               case nir_intrinsic_shared_atomic_comp_swap:
               case nir_intrinsic_load_scratch:
                  type = RegType::vgpr;
                  break;
               case nir_intrinsic_shuffle:
               case nir_intrinsic_quad_broadcast:
               case nir_intrinsic_quad_swap_horizontal:
               case nir_intrinsic_quad_swap_vertical:
               case nir_intrinsic_quad_swap_diagonal:
               case nir_intrinsic_quad_swizzle_amd:
               case nir_intrinsic_masked_swizzle_amd:
               case nir_intrinsic_inclusive_scan:
               case nir_intrinsic_exclusive_scan:
                  if (intrinsic->dest.ssa.bit_size == 1) {
                     size = lane_mask_size;
                     type = RegType::sgpr;
                  } else if (!ctx->divergent_vals[intrinsic->dest.ssa.index]) {
                     type = RegType::sgpr;
                  } else {
                     type = RegType::vgpr;
                  }
                  break;
               case nir_intrinsic_load_view_index:
                  type = ctx->stage == fragment_fs ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_intrinsic_load_front_face:
               case nir_intrinsic_load_helper_invocation:
               case nir_intrinsic_is_helper_invocation:
                  type = RegType::sgpr;
                  size = lane_mask_size;
                  break;
               case nir_intrinsic_reduce:
                  if (intrinsic->dest.ssa.bit_size == 1) {
                     size = lane_mask_size;
                     type = RegType::sgpr;
                  } else if (nir_intrinsic_cluster_size(intrinsic) == 0 ||
                             !ctx->divergent_vals[intrinsic->dest.ssa.index]) {
                     type = RegType::sgpr;
                  } else {
                     type = RegType::vgpr;
                  }
                  break;
               case nir_intrinsic_load_ubo:
               case nir_intrinsic_load_ssbo:
               case nir_intrinsic_load_global:
               case nir_intrinsic_vulkan_resource_index:
                  type = ctx->divergent_vals[intrinsic->dest.ssa.index] ? RegType::vgpr : RegType::sgpr;
                  break;
               /* due to copy propagation, the swizzled imov is removed if num dest components == 1 */
               case nir_intrinsic_load_shared:
                  if (ctx->divergent_vals[intrinsic->dest.ssa.index])
                     type = RegType::vgpr;
                  else
                     type = RegType::sgpr;
                  break;
               default:
                  for (unsigned i = 0; i < nir_intrinsic_infos[intrinsic->intrinsic].num_srcs; i++) {
                     if (allocated[intrinsic->src[i].ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }
               allocated[intrinsic->dest.ssa.index] = Temp(0, RegClass(type, size));

               switch(intrinsic->intrinsic) {
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset: {
                  glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(intrinsic);
                  spi_ps_inputs |= get_interp_input(intrinsic->intrinsic, mode);
                  break;
               }
               case nir_intrinsic_load_front_face:
                  spi_ps_inputs |= S_0286CC_FRONT_FACE_ENA(1);
                  break;
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_sample_pos: {
                  uint8_t mask = nir_ssa_def_components_read(&intrinsic->dest.ssa);
                  for (unsigned i = 0; i < 4; i++) {
                     if (mask & (1 << i))
                        spi_ps_inputs |= S_0286CC_POS_X_FLOAT_ENA(1) << i;
                  }
                  break;
               }
               case nir_intrinsic_load_sample_id:
                  spi_ps_inputs |= S_0286CC_ANCILLARY_ENA(1);
                  break;
               case nir_intrinsic_load_sample_mask_in:
                  spi_ps_inputs |= S_0286CC_ANCILLARY_ENA(1);
                  spi_ps_inputs |= S_0286CC_SAMPLE_COVERAGE_ENA(1);
                  break;
               default:
                  break;
               }
               break;
            }
            case nir_instr_type_tex: {
               nir_tex_instr* tex = nir_instr_as_tex(instr);
               unsigned size = tex->dest.ssa.num_components;

               if (tex->dest.ssa.bit_size == 64)
                  size *= 2;
               if (tex->op == nir_texop_texture_samples)
                  assert(!ctx->divergent_vals[tex->dest.ssa.index]);
               if (ctx->divergent_vals[tex->dest.ssa.index])
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::vgpr, size));
               else
                  allocated[tex->dest.ssa.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
            case nir_instr_type_parallel_copy: {
               nir_foreach_parallel_copy_entry(entry, nir_instr_as_parallel_copy(instr)) {
                  allocated[entry->dest.ssa.index] = allocated[entry->src.ssa->index];
               }
               break;
            }
            case nir_instr_type_ssa_undef: {
               unsigned size = nir_instr_as_ssa_undef(instr)->def.num_components;
               if (nir_instr_as_ssa_undef(instr)->def.bit_size == 64)
                  size *= 2;
               allocated[nir_instr_as_ssa_undef(instr)->def.index] = Temp(0, RegClass(RegType::sgpr, size));
               break;
            }
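            /* Phis can be visited before their sources; if a source is still
             * unallocated or the phi's register class changed, request another
             * iteration of the outer loop. */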
            case nir_instr_type_phi: {
               nir_phi_instr* phi = nir_instr_as_phi(instr);
               RegType type;
               unsigned size = phi->dest.ssa.num_components;

               if (phi->dest.ssa.bit_size == 1) {
                  assert(size == 1 && "multiple components not yet supported on boolean phis.");
                  type = RegType::sgpr;
                  size *= lane_mask_size;
                  allocated[phi->dest.ssa.index] = Temp(0, RegClass(type, size));
                  break;
               }

               if (ctx->divergent_vals[phi->dest.ssa.index]) {
                  type = RegType::vgpr;
               } else {
                  type = RegType::sgpr;
                  nir_foreach_phi_src (src, phi) {
                     if (allocated[src->src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                     if (allocated[src->src.ssa->index].type() == RegType::none)
                        done = false;
                  }
               }

               size *= phi->dest.ssa.bit_size == 64 ? 2 : 1;
               RegClass rc = RegClass(type, size);
               if (rc != allocated[phi->dest.ssa.index].regClass()) {
                  done = false;
               } else {
                  nir_foreach_phi_src(src, phi)
                     assert(allocated[src->src.ssa->index].size() == rc.size());
               }
               allocated[phi->dest.ssa.index] = Temp(0, rc);
               break;
            }
            default:
               break;
            }
         }
      }
   }

   if (G_0286CC_POS_W_FLOAT_ENA(spi_ps_inputs)) {
      /* If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be enabled too */
      spi_ps_inputs |= S_0286CC_PERSP_CENTER_ENA(1);
   }

   if (!(spi_ps_inputs & 0x7F)) {
      /* At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled */
      spi_ps_inputs |= S_0286CC_PERSP_CENTER_ENA(1);
   }

   ctx->program->config->spi_ps_input_ena = spi_ps_inputs;
   ctx->program->config->spi_ps_input_addr = spi_ps_inputs;

   for (unsigned i = 0; i < impl->ssa_alloc; i++)
      allocated[i] = Temp(ctx->program->allocateId(), allocated[i].regClass());

   ctx->allocated.reset(allocated.release());
   ctx->cf_info.nir_to_aco.reset(nir_to_aco.release());
}

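/* Emits the p_startpgm pseudo instruction, which defines every enabled input
 * argument fixed to its physical register, plus the initial exec mask. */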
Pseudo_instruction *add_startpgm(struct isel_context *ctx)
{
   unsigned arg_count = ctx->args->ac.arg_count;
   if (ctx->stage == fragment_fs) {
      /* LLVM optimizes away unused FS inputs and computes spi_ps_input_addr
       * itself and then communicates the results back via the ELF binary.
       * Mirror what LLVM does by re-mapping the VGPR arguments here.
       *
       * TODO: If we made the FS input scanning code into a separate pass that
       * could run before argument setup, then this wouldn't be necessary
       * anymore.
       */
      struct ac_shader_args *args = &ctx->args->ac;
      arg_count = 0;
      for (unsigned i = 0, vgpr_arg = 0, vgpr_reg = 0; i < args->arg_count; i++) {
         if (args->args[i].file != AC_ARG_VGPR) {
            arg_count++;
            continue;
         }

         if (!(ctx->program->config->spi_ps_input_addr & (1 << vgpr_arg))) {
            args->args[i].skip = true;
         } else {
            args->args[i].offset = vgpr_reg;
            vgpr_reg += args->args[i].size;
            arg_count++;
         }
         vgpr_arg++;
      }
   }

   aco_ptr<Pseudo_instruction> startpgm{create_instruction<Pseudo_instruction>(aco_opcode::p_startpgm, Format::PSEUDO, 0, arg_count + 1)};
   for (unsigned i = 0, arg = 0; i < ctx->args->ac.arg_count; i++) {
      if (ctx->args->ac.args[i].skip)
         continue;

      enum ac_arg_regfile file = ctx->args->ac.args[i].file;
      unsigned size = ctx->args->ac.args[i].size;
      unsigned reg = ctx->args->ac.args[i].offset;
      RegClass type = RegClass(file == AC_ARG_SGPR ? RegType::sgpr : RegType::vgpr, size);
      Temp dst = Temp{ctx->program->allocateId(), type};
      ctx->arg_temps[i] = dst;
      startpgm->definitions[arg] = Definition(dst);
      startpgm->definitions[arg].setFixed(PhysReg{file == AC_ARG_SGPR ? reg : reg + 256});
      arg++;
   }
   startpgm->definitions[arg_count] = Definition{ctx->program->allocateId(), exec, ctx->program->lane_mask};
   Pseudo_instruction *instr = startpgm.get();
   ctx->block->instructions.push_back(std::move(startpgm));

   /* Stash these in the program so that they can be accessed later when
    * handling spilling.
    */
   ctx->program->private_segment_buffer = get_arg(ctx, ctx->args->ring_offsets);
   ctx->program->scratch_offset = get_arg(ctx, ctx->args->scratch_offset);

   return instr;
}

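/* I/O size callback for nir_lower_io: returns the number of attribute slots
 * occupied by a variable of the given type. */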
int
type_size(const struct glsl_type *type, bool bindless)
{
   // TODO: don't we need type->std430_base_alignment() here?
   return glsl_count_attribute_slots(type, false);
}

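/* Size/alignment callback for shared variables: booleans take a full dword,
 * everything else its natural byte size. */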
void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type)
      ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size;
}

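/* Callback for nir_opt_load_store_vectorize: decides whether two adjacent
 * memory accesses may be combined, based on the resulting size and the
 * alignment requirements of the target instructions. */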
static bool
mem_vectorize_callback(unsigned align, unsigned bit_size,
                       unsigned num_components, unsigned high_offset,
                       nir_intrinsic_instr *low, nir_intrinsic_instr *high)
{
   if ((bit_size != 32 && bit_size != 64) || num_components > 4)
      return false;

   /* >128 bit loads are split except with SMEM */
   if (bit_size * num_components > 128)
      return false;

   switch (low->intrinsic) {
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_load_push_constant:
      return align % 4 == 0;
   case nir_intrinsic_load_deref:
   case nir_intrinsic_store_deref:
      assert(nir_src_as_deref(low->src[0])->mode == nir_var_mem_shared);
      /* fallthrough */
   case nir_intrinsic_load_shared:
   case nir_intrinsic_store_shared:
      if (bit_size * num_components > 64) /* 96 and 128 bit loads require 128 bit alignment and are split otherwise */
         return align % 16 == 0;
      else
         return align % 4 == 0;
   default:
      return false;
   }
   return false;
}

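/* Assigns driver locations to VS inputs/outputs and computes which outputs
 * become position and parameter exports. */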
void
setup_vs_variables(isel_context *ctx, nir_shader *nir)
{
   nir_foreach_variable(variable, &nir->inputs)
   {
      variable->data.driver_location = variable->data.location * 4;
   }
   nir_foreach_variable(variable, &nir->outputs)
   {
      variable->data.driver_location = variable->data.location * 4;
   }

   radv_vs_output_info *outinfo = &ctx->program->info->vs.outinfo;

   memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
          sizeof(outinfo->vs_output_param_offset));

   ctx->needs_instance_id = ctx->program->info->vs.needs_instance_id;

   bool export_clip_dists = ctx->options->key.vs_common_out.export_clip_dists;

   outinfo->param_exports = 0;
   int pos_written = 0x1;
   if (outinfo->writes_pointsize || outinfo->writes_viewport_index || outinfo->writes_layer)
      pos_written |= 1 << 1;

   nir_foreach_variable(variable, &nir->outputs)
   {
      int idx = variable->data.location;
      unsigned slots = variable->type->count_attribute_slots(false);
      if (variable->data.compact) {
         unsigned component_count = variable->data.location_frac + variable->type->length;
         slots = (component_count + 3) / 4;
      }

      if (idx >= VARYING_SLOT_VAR0 || idx == VARYING_SLOT_LAYER || idx == VARYING_SLOT_PRIMITIVE_ID ||
          ((idx == VARYING_SLOT_CLIP_DIST0 || idx == VARYING_SLOT_CLIP_DIST1) && export_clip_dists)) {
         for (unsigned i = 0; i < slots; i++) {
            if (outinfo->vs_output_param_offset[idx + i] == AC_EXP_PARAM_UNDEFINED)
               outinfo->vs_output_param_offset[idx + i] = outinfo->param_exports++;
         }
      }
   }
   if (outinfo->writes_layer &&
       outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] == AC_EXP_PARAM_UNDEFINED) {
      /* when ctx->options->key.has_multiview_view_index = true, the layer
       * variable isn't declared in NIR and it's isel's job to get the layer */
      outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = outinfo->param_exports++;
   }

   if (outinfo->export_prim_id) {
      assert(outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] == AC_EXP_PARAM_UNDEFINED);
      outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = outinfo->param_exports++;
   }

   ctx->num_clip_distances = util_bitcount(outinfo->clip_dist_mask);
   ctx->num_cull_distances = util_bitcount(outinfo->cull_dist_mask);

   assert(ctx->num_clip_distances + ctx->num_cull_distances <= 8);

   if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
      pos_written |= 1 << 2;
   if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
      pos_written |= 1 << 3;

   outinfo->pos_exports = util_bitcount(pos_written);
}

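/* Per-stage variable setup; has to run before nir_lower_io. */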
void
setup_variables(isel_context *ctx, nir_shader *nir)
{
   switch (nir->info.stage) {
   case MESA_SHADER_FRAGMENT: {
      nir_foreach_variable(variable, &nir->outputs)
      {
         int idx = variable->data.location + variable->data.index;
         variable->data.driver_location = idx * 4;
      }
      break;
   }
   case MESA_SHADER_COMPUTE: {
      ctx->program->config->lds_size = (nir->info.cs.shared_size + ctx->program->lds_alloc_granule - 1) /
                                       ctx->program->lds_alloc_granule;
      break;
   }
   case MESA_SHADER_VERTEX: {
      setup_vs_variables(ctx, nir);
      break;
   }
   default:
      unreachable("Unhandled shader stage.");
   }
}

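/* Determines the combined software/hardware stage, fills in the hardware
 * limits for the target chip, runs the NIR lowering and optimization passes
 * and returns the initialized context. */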
isel_context
setup_isel_context(Program* program,
                   unsigned shader_count,
                   struct nir_shader *const *shaders,
                   ac_shader_config* config,
                   struct radv_shader_args *args)
{
   program->stage = 0;
   for (unsigned i = 0; i < shader_count; i++) {
      switch (shaders[i]->info.stage) {
      case MESA_SHADER_VERTEX:
         program->stage |= sw_vs;
         break;
      case MESA_SHADER_TESS_CTRL:
         program->stage |= sw_tcs;
         break;
      case MESA_SHADER_TESS_EVAL:
         program->stage |= sw_tes;
         break;
      case MESA_SHADER_GEOMETRY:
         program->stage |= sw_gs;
         break;
      case MESA_SHADER_FRAGMENT:
         program->stage |= sw_fs;
         break;
      case MESA_SHADER_COMPUTE:
         program->stage |= sw_cs;
         break;
      default:
         unreachable("Shader stage not implemented");
      }
   }
   if (program->stage == sw_vs)
      program->stage |= hw_vs;
   else if (program->stage == sw_fs)
      program->stage |= hw_fs;
   else if (program->stage == sw_cs)
      program->stage |= hw_cs;
   else
      unreachable("Shader stage not implemented");

   program->config = config;
   program->info = args->shader_info;
   program->chip_class = args->options->chip_class;
   program->family = args->options->family;
   program->wave_size = args->shader_info->wave_size;
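   /* The lane mask is a single SGPR on wave32 and an SGPR pair on wave64; it
    * is used as the register class for exec, vcc and boolean values. */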
   program->lane_mask = program->wave_size == 32 ? s1 : s2;

   program->lds_alloc_granule = args->options->chip_class >= GFX7 ? 512 : 256;
   program->lds_limit = args->options->chip_class >= GFX7 ? 65536 : 32768;
   program->vgpr_limit = 256;

   if (args->options->chip_class >= GFX10) {
      program->physical_sgprs = 2560; /* doesn't matter as long as it's at least 128 * 20 */
      program->sgpr_alloc_granule = 127;
      program->sgpr_limit = 106;
   } else if (program->chip_class >= GFX8) {
      program->physical_sgprs = 800;
      program->sgpr_alloc_granule = 15;
      if (args->options->family == CHIP_TONGA || args->options->family == CHIP_ICELAND)
         program->sgpr_limit = 94; /* workaround hardware bug */
      else
         program->sgpr_limit = 102;
   } else {
      program->physical_sgprs = 512;
      program->sgpr_alloc_granule = 7;
      program->sgpr_limit = 104;
   }
   /* TODO: we don't have to allocate VCC if we don't need it */
   program->needs_vcc = true;

   isel_context ctx = {};
   ctx.program = program;
   ctx.args = args;
   ctx.options = args->options;
   ctx.stage = program->stage;

   for (unsigned i = 0; i < shader_count; i++) {
      nir_shader *nir = shaders[i];

      /* align and copy constant data */
      while (program->constant_data.size() % 4u)
         program->constant_data.push_back(0);
      ctx.constant_data_offset = program->constant_data.size();
      program->constant_data.insert(program->constant_data.end(),
                                    (uint8_t*)nir->constant_data,
                                    (uint8_t*)nir->constant_data + nir->constant_data_size);

      /* the variable setup has to be done before lower_io / CSE */
      setup_variables(&ctx, nir);

      /* optimize and lower memory operations */
      bool lower_to_scalar = false;
      bool lower_pack = false;
      if (nir_opt_load_store_vectorize(nir,
                                       (nir_variable_mode)(nir_var_mem_ssbo | nir_var_mem_ubo |
                                                           nir_var_mem_push_const | nir_var_mem_shared),
                                       mem_vectorize_callback)) {
         lower_to_scalar = true;
         lower_pack = true;
      }
      if (nir->info.stage != MESA_SHADER_COMPUTE)
         nir_lower_io(nir, (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out), type_size, (nir_lower_io_options)0);
      nir_lower_explicit_io(nir, nir_var_mem_global, nir_address_format_64bit_global);

      if (lower_to_scalar)
         nir_lower_alu_to_scalar(nir, NULL, NULL);
      if (lower_pack)
         nir_lower_pack(nir);

      /* lower ALU operations */
      // TODO: implement logic64 in aco, it's more effective for sgprs
      nir_lower_int64(nir, nir->options->lower_int64_options);

      nir_opt_idiv_const(nir, 32);
      nir_lower_idiv(nir, nir_lower_idiv_precise);

      /* optimize the lowered ALU operations */
      bool more_algebraic = true;
      while (more_algebraic) {
         more_algebraic = false;
         NIR_PASS_V(nir, nir_copy_prop);
         NIR_PASS_V(nir, nir_opt_dce);
         NIR_PASS_V(nir, nir_opt_constant_folding);
         NIR_PASS(more_algebraic, nir, nir_opt_algebraic);
      }

      /* Do late algebraic optimization to turn add(a, neg(b)) back into
       * subs, then the mandatory cleanup after algebraic. Note that it may
       * produce fnegs, and if so then we need to keep running to squash
       * fneg(fneg(a)).
       */
      bool more_late_algebraic = true;
      while (more_late_algebraic) {
         more_late_algebraic = false;
         NIR_PASS(more_late_algebraic, nir, nir_opt_algebraic_late);
         NIR_PASS_V(nir, nir_opt_constant_folding);
         NIR_PASS_V(nir, nir_copy_prop);
         NIR_PASS_V(nir, nir_opt_dce);
         NIR_PASS_V(nir, nir_opt_cse);
      }

      /* cleanup passes */
      nir_lower_load_const_to_scalar(nir);
      nir_opt_shrink_load(nir);
      nir_move_options move_opts = (nir_move_options)(
         nir_move_const_undef | nir_move_load_ubo | nir_move_load_input | nir_move_comparisons);
      nir_opt_sink(nir, move_opts);
      nir_opt_move(nir, move_opts);
      nir_convert_to_lcssa(nir, true, false);
      nir_lower_phis_to_scalar(nir);

      nir_function_impl *func = nir_shader_get_entrypoint(nir);
      nir_index_ssa_defs(func);
      nir_metadata_require(func, nir_metadata_block_index);

      if (args->options->dump_preoptir) {
         fprintf(stderr, "NIR shader before instruction selection:\n");
         nir_print_shader(nir, stderr);
      }
   }

   unsigned scratch_size = 0;
   for (unsigned i = 0; i < shader_count; i++)
      scratch_size = std::max(scratch_size, shaders[i]->scratch_size);
   ctx.program->config->scratch_bytes_per_wave = align(scratch_size * ctx.program->wave_size, 1024);

   ctx.block = ctx.program->create_and_insert_block();
   ctx.block->loop_nest_depth = 0;
   ctx.block->kind = block_kind_top_level;

   return ctx;
}

}