radv/aco: implement logic64 instead of lowering
[mesa.git] / src / amd / compiler / aco_instruction_selection.cpp
/*
 * Copyright © 2018 Valve Corporation
 * Copyright © 2018 Google
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <algorithm>
#include <array>
#include <stack>
#include <map>

#include "ac_shader_util.h"
#include "aco_ir.h"
#include "aco_builder.h"
#include "aco_interface.h"
#include "aco_instruction_selection_setup.cpp"
#include "util/fast_idiv_by_const.h"

namespace aco {
namespace {

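/* RAII helper: saves the loop-related control-flow state of the isel_context
 * on construction and restores it on scope exit, so nested loops can be
 * visited recursively without manual bookkeeping. */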
class loop_info_RAII {
   isel_context* ctx;
   unsigned header_idx_old;
   Block* exit_old;
   bool divergent_cont_old;
   bool divergent_branch_old;
   bool divergent_if_old;

public:
   loop_info_RAII(isel_context* ctx, unsigned loop_header_idx, Block* loop_exit)
      : ctx(ctx),
        header_idx_old(ctx->cf_info.parent_loop.header_idx), exit_old(ctx->cf_info.parent_loop.exit),
        divergent_cont_old(ctx->cf_info.parent_loop.has_divergent_continue),
        divergent_branch_old(ctx->cf_info.parent_loop.has_divergent_branch),
        divergent_if_old(ctx->cf_info.parent_if.is_divergent)
   {
      ctx->cf_info.parent_loop.header_idx = loop_header_idx;
      ctx->cf_info.parent_loop.exit = loop_exit;
      ctx->cf_info.parent_loop.has_divergent_continue = false;
      ctx->cf_info.parent_loop.has_divergent_branch = false;
      ctx->cf_info.parent_if.is_divergent = false;
      ctx->cf_info.loop_nest_depth = ctx->cf_info.loop_nest_depth + 1;
   }

   ~loop_info_RAII()
   {
      ctx->cf_info.parent_loop.header_idx = header_idx_old;
      ctx->cf_info.parent_loop.exit = exit_old;
      ctx->cf_info.parent_loop.has_divergent_continue = divergent_cont_old;
      ctx->cf_info.parent_loop.has_divergent_branch = divergent_branch_old;
      ctx->cf_info.parent_if.is_divergent = divergent_if_old;
      ctx->cf_info.loop_nest_depth = ctx->cf_info.loop_nest_depth - 1;
      if (!ctx->cf_info.loop_nest_depth && !ctx->cf_info.parent_if.is_divergent)
         ctx->cf_info.exec_potentially_empty_discard = false;
   }
};

struct if_context {
   Temp cond;

   bool divergent_old;
   bool exec_potentially_empty_discard_old;
   bool exec_potentially_empty_break_old;
   uint16_t exec_potentially_empty_break_depth_old;

   unsigned BB_if_idx;
   unsigned invert_idx;
   bool uniform_has_then_branch;
   bool then_branch_divergent;
   Block BB_invert;
   Block BB_endif;
};

static bool visit_cf_list(struct isel_context *ctx,
                          struct exec_list *list);

static void add_logical_edge(unsigned pred_idx, Block *succ)
{
   succ->logical_preds.emplace_back(pred_idx);
}


static void add_linear_edge(unsigned pred_idx, Block *succ)
{
   succ->linear_preds.emplace_back(pred_idx);
}

static void add_edge(unsigned pred_idx, Block *succ)
{
   add_logical_edge(pred_idx, succ);
   add_linear_edge(pred_idx, succ);
}

static void append_logical_start(Block *b)
{
   Builder(NULL, b).pseudo(aco_opcode::p_logical_start);
}

static void append_logical_end(Block *b)
{
   Builder(NULL, b).pseudo(aco_opcode::p_logical_end);
}

Temp get_ssa_temp(struct isel_context *ctx, nir_ssa_def *def)
{
   assert(ctx->allocated[def->index].id());
   return ctx->allocated[def->index];
}

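/* Computes, for each lane, the number of enabled lanes below it in the given
 * mask (v_mbcnt_lo + v_mbcnt_hi, with only the low half needed in wave32).
 * With the default all-ones masks this yields the lane's ID within the wave;
 * with a ballot as the mask it yields a per-lane prefix count. */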
Temp emit_mbcnt(isel_context *ctx, Definition dst,
                Operand mask_lo = Operand((uint32_t) -1), Operand mask_hi = Operand((uint32_t) -1))
{
   Builder bld(ctx->program, ctx->block);
   Definition lo_def = ctx->program->wave_size == 32 ? dst : bld.def(v1);
   Temp thread_id_lo = bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, lo_def, mask_lo, Operand(0u));

   if (ctx->program->wave_size == 32) {
      return thread_id_lo;
   } else {
      Temp thread_id_hi = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, dst, mask_hi, thread_id_lo);
      return thread_id_hi;
   }
}

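/* Wraps the source in a p_wqm pseudo so its value is computed in whole-quad
 * mode. Outside of fragment shaders this degenerates to a plain copy, since
 * WQM only matters where helper invocations exist. */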
Temp emit_wqm(isel_context *ctx, Temp src, Temp dst=Temp(0, s1), bool program_needs_wqm = false)
{
   Builder bld(ctx->program, ctx->block);

   if (!dst.id())
      dst = bld.tmp(src.regClass());

   assert(src.size() == dst.size());

   if (ctx->stage != fragment_fs) {
      if (!dst.id())
         return src;

      bld.copy(Definition(dst), src);
      return dst;
   }

   bld.pseudo(aco_opcode::p_wqm, Definition(dst), src);
   ctx->program->needs_wqm |= program_needs_wqm;
   return dst;
}

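/* Cross-lane data exchange: reads 'data' from the lane selected by 'index'.
 * Uses readlane for uniform indices, a pseudo-instruction on GFX6-7 (which
 * have no bpermute), a shared-VGPR based emulation on GFX10 wave64 (where
 * ds_bpermute only works within half-waves), and ds_bpermute_b32 elsewhere. */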
static Temp emit_bpermute(isel_context *ctx, Builder &bld, Temp index, Temp data)
{
   if (index.regClass() == s1)
      return bld.readlane(bld.def(s1), data, index);

   if (ctx->options->chip_class <= GFX7) {
      /* GFX6-7: there is no bpermute instruction */
      Operand index_op(index);
      Operand input_data(data);
      index_op.setLateKill(true);
      input_data.setLateKill(true);

      return bld.pseudo(aco_opcode::p_bpermute, bld.def(v1), bld.def(bld.lm), bld.def(bld.lm, vcc), index_op, input_data);
   } else if (ctx->options->chip_class >= GFX10 && ctx->program->wave_size == 64) {
      /* GFX10 wave64 mode: emulate full-wave bpermute */
      if (!ctx->has_gfx10_wave64_bpermute) {
         ctx->has_gfx10_wave64_bpermute = true;
         ctx->program->config->num_shared_vgprs = 8; /* Shared VGPRs are allocated in groups of 8 */
         ctx->program->vgpr_limit -= 4; /* We allocate 8 shared VGPRs, so we'll have 4 fewer normal VGPRs */
      }

      Temp index_is_lo = bld.vopc(aco_opcode::v_cmp_ge_u32, bld.def(bld.lm), Operand(31u), index);
      Builder::Result index_is_lo_split = bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), bld.def(s1), index_is_lo);
      Temp index_is_lo_n1 = bld.sop1(aco_opcode::s_not_b32, bld.def(s1), bld.def(s1, scc), index_is_lo_split.def(1).getTemp());
      Operand same_half = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), index_is_lo_split.def(0).getTemp(), index_is_lo_n1);
      Operand index_x4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), index);
      Operand input_data(data);

      index_x4.setLateKill(true);
      input_data.setLateKill(true);
      same_half.setLateKill(true);

      return bld.pseudo(aco_opcode::p_bpermute, bld.def(v1), bld.def(s2), bld.def(s1, scc), index_x4, input_data, same_half);
   } else {
      /* GFX8-9 or GFX10 wave32: bpermute works normally */
      Temp index_x4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), index);
      return bld.ds(aco_opcode::ds_bpermute_b32, bld.def(v1), index_x4, data);
   }
}

Temp as_vgpr(isel_context *ctx, Temp val)
{
   if (val.type() == RegType::sgpr) {
      Builder bld(ctx->program, ctx->block);
      return bld.copy(bld.def(RegType::vgpr, val.size()), val);
   }
   assert(val.type() == RegType::vgpr);
   return val;
}
//assumes a != 0xffffffff
void emit_v_div_u32(isel_context *ctx, Temp dst, Temp a, uint32_t b)
{
   assert(b != 0);
   Builder bld(ctx->program, ctx->block);

   if (util_is_power_of_two_or_zero(b)) {
      bld.vop2(aco_opcode::v_lshrrev_b32, Definition(dst), Operand((uint32_t)util_logbase2(b)), a);
      return;
   }

   util_fast_udiv_info info = util_compute_fast_udiv_info(b, 32, 32);

   assert(info.multiplier <= 0xffffffff);

   bool pre_shift = info.pre_shift != 0;
   bool increment = info.increment != 0;
   bool multiply = true;
   bool post_shift = info.post_shift != 0;

   if (!pre_shift && !increment && !multiply && !post_shift) {
      bld.vop1(aco_opcode::v_mov_b32, Definition(dst), a);
      return;
   }

   Temp pre_shift_dst = a;
   if (pre_shift) {
      pre_shift_dst = (increment || multiply || post_shift) ? bld.tmp(v1) : dst;
      bld.vop2(aco_opcode::v_lshrrev_b32, Definition(pre_shift_dst), Operand((uint32_t)info.pre_shift), a);
   }

   Temp increment_dst = pre_shift_dst;
   if (increment) {
      increment_dst = (post_shift || multiply) ? bld.tmp(v1) : dst;
      bld.vadd32(Definition(increment_dst), Operand((uint32_t) info.increment), pre_shift_dst);
   }

   Temp multiply_dst = increment_dst;
   if (multiply) {
      multiply_dst = post_shift ? bld.tmp(v1) : dst;
      bld.vop3(aco_opcode::v_mul_hi_u32, Definition(multiply_dst), increment_dst,
               bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand((uint32_t)info.multiplier)));
   }

   if (post_shift) {
      bld.vop2(aco_opcode::v_lshrrev_b32, Definition(dst), Operand((uint32_t)info.post_shift), multiply_dst);
   }
}

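/* Extracts component 'idx' from a vector temporary. The Temp-returning
 * overload reuses previously split components cached in ctx->allocated_vec
 * when possible instead of emitting a new p_extract_vector. */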
void emit_extract_vector(isel_context* ctx, Temp src, uint32_t idx, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), src, Operand(idx));
}


Temp emit_extract_vector(isel_context* ctx, Temp src, uint32_t idx, RegClass dst_rc)
{
   /* no need to extract the whole vector */
   if (src.regClass() == dst_rc) {
      assert(idx == 0);
      return src;
   }

   assert(src.bytes() > (idx * dst_rc.bytes()));
   Builder bld(ctx->program, ctx->block);
   auto it = ctx->allocated_vec.find(src.id());
   if (it != ctx->allocated_vec.end() && dst_rc.bytes() == it->second[idx].regClass().bytes()) {
      if (it->second[idx].regClass() == dst_rc) {
         return it->second[idx];
      } else {
         assert(!dst_rc.is_subdword());
         assert(dst_rc.type() == RegType::vgpr && it->second[idx].type() == RegType::sgpr);
         return bld.copy(bld.def(dst_rc), it->second[idx]);
      }
   }

   if (dst_rc.is_subdword())
      src = as_vgpr(ctx, src);

   if (src.bytes() == dst_rc.bytes()) {
      assert(idx == 0);
      return bld.copy(bld.def(dst_rc), src);
   } else {
      Temp dst = bld.tmp(dst_rc);
      emit_extract_vector(ctx, src, idx, dst);
      return dst;
   }
}

void emit_split_vector(isel_context* ctx, Temp vec_src, unsigned num_components)
{
   if (num_components == 1)
      return;
   if (ctx->allocated_vec.find(vec_src.id()) != ctx->allocated_vec.end())
      return;
   RegClass rc;
   if (num_components > vec_src.size()) {
      if (vec_src.type() == RegType::sgpr) {
         /* should still help get_alu_src() */
         emit_split_vector(ctx, vec_src, vec_src.size());
         return;
      }
      /* sub-dword split */
      rc = RegClass(RegType::vgpr, vec_src.bytes() / num_components).as_subdword();
   } else {
      rc = RegClass(vec_src.type(), vec_src.size() / num_components);
   }
   aco_ptr<Pseudo_instruction> split{create_instruction<Pseudo_instruction>(aco_opcode::p_split_vector, Format::PSEUDO, 1, num_components)};
   split->operands[0] = Operand(vec_src);
   std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
   for (unsigned i = 0; i < num_components; i++) {
      elems[i] = {ctx->program->allocateId(), rc};
      split->definitions[i] = Definition(elems[i]);
   }
   ctx->block->instructions.emplace_back(std::move(split));
   ctx->allocated_vec.emplace(vec_src.id(), elems);
}

/* This vector expansion uses a mask to determine which elements in the new vector
 * come from the original vector. The other elements are undefined. */
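/* For example, expanding a 2-component source into a 4-component dst with
 * mask 0b0101 places the source components at positions 0 and 2; positions
 * 1 and 3 are filled with zero operands. */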
void expand_vector(isel_context* ctx, Temp vec_src, Temp dst, unsigned num_components, unsigned mask)
{
   emit_split_vector(ctx, vec_src, util_bitcount(mask));

   if (vec_src == dst)
      return;

   Builder bld(ctx->program, ctx->block);
   if (num_components == 1) {
      if (dst.type() == RegType::sgpr)
         bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec_src);
      else
         bld.copy(Definition(dst), vec_src);
      return;
   }

   unsigned component_size = dst.size() / num_components;
   std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;

   aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
   vec->definitions[0] = Definition(dst);
   unsigned k = 0;
   for (unsigned i = 0; i < num_components; i++) {
      if (mask & (1 << i)) {
         Temp src = emit_extract_vector(ctx, vec_src, k++, RegClass(vec_src.type(), component_size));
         if (dst.type() == RegType::sgpr)
            src = bld.as_uniform(src);
         vec->operands[i] = Operand(src);
      } else {
         vec->operands[i] = Operand(0u);
      }
      elems[i] = vec->operands[i].getTemp();
   }
   ctx->block->instructions.emplace_back(std::move(vec));
   ctx->allocated_vec.emplace(dst.id(), elems);
}

/* adjust misaligned small bit size loads */
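/* The loaded dwords are shifted right by 8 * (offset & 0x3) bits so that the
 * addressed bytes land at bit 0 of dst; handles 1, 2 and 4 dword sources. */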
void byte_align_scalar(isel_context *ctx, Temp vec, Operand offset, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   Operand shift;
   Temp select = Temp();
   if (offset.isConstant()) {
      assert(offset.constantValue() && offset.constantValue() < 4);
      shift = Operand(offset.constantValue() * 8);
   } else {
      /* bit_offset = 8 * (offset & 0x3) */
      Temp tmp = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), offset, Operand(3u));
      select = bld.tmp(s1);
      shift = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.scc(Definition(select)), tmp, Operand(3u));
   }

   if (vec.size() == 1) {
      bld.sop2(aco_opcode::s_lshr_b32, Definition(dst), bld.def(s1, scc), vec, shift);
   } else if (vec.size() == 2) {
      Temp tmp = dst.size() == 2 ? dst : bld.tmp(s2);
      bld.sop2(aco_opcode::s_lshr_b64, Definition(tmp), bld.def(s1, scc), vec, shift);
      if (tmp == dst)
         emit_split_vector(ctx, dst, 2);
      else
         emit_extract_vector(ctx, tmp, 0, dst);
   } else if (vec.size() == 4) {
      Temp lo = bld.tmp(s2), hi = bld.tmp(s2);
      bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), vec);
      hi = bld.pseudo(aco_opcode::p_extract_vector, bld.def(s1), hi, Operand(0u));
      if (select != Temp())
         hi = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), hi, Operand(0u), select);
      lo = bld.sop2(aco_opcode::s_lshr_b64, bld.def(s2), bld.def(s1, scc), lo, shift);
      Temp mid = bld.tmp(s1);
      lo = bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), Definition(mid), lo);
      hi = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), hi, shift);
      mid = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), hi, mid);
      bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, mid);
      emit_split_vector(ctx, dst, 2);
   }
}

void byte_align_vector(isel_context *ctx, Temp vec, Operand offset, Temp dst, unsigned component_size)
{
   Builder bld(ctx->program, ctx->block);
   if (offset.isTemp()) {
      Temp tmp[4] = {vec, vec, vec, vec};

      if (vec.size() == 4) {
         tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1), tmp[3] = bld.tmp(v1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), Definition(tmp[2]), Definition(tmp[3]), vec);
      } else if (vec.size() == 3) {
         tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), Definition(tmp[2]), vec);
      } else if (vec.size() == 2) {
         tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = tmp[1];
         bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), vec);
      }
      for (unsigned i = 0; i < dst.size(); i++)
         tmp[i] = bld.vop3(aco_opcode::v_alignbyte_b32, bld.def(v1), tmp[i + 1], tmp[i], offset);

      vec = tmp[0];
      if (dst.size() == 2)
         vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), tmp[0], tmp[1]);

      offset = Operand(0u);
   }

   unsigned num_components = dst.bytes() / component_size;
   if (vec.regClass() == dst.regClass()) {
      assert(offset.constantValue() == 0);
      bld.copy(Definition(dst), vec);
      emit_split_vector(ctx, dst, num_components);
      return;
   }

   emit_split_vector(ctx, vec, vec.bytes() / component_size);
   std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
   RegClass rc = RegClass(RegType::vgpr, component_size).as_subdword();

   assert(offset.constantValue() % component_size == 0);
   unsigned skip = offset.constantValue() / component_size;
   for (unsigned i = 0; i < num_components; i++)
      elems[i] = emit_extract_vector(ctx, vec, i + skip, rc);

   /* if dst is vgpr - split the src and create a shrunk version according to the mask. */
   if (dst.type() == RegType::vgpr) {
      aco_ptr<Pseudo_instruction> create_vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
      for (unsigned i = 0; i < num_components; i++)
         create_vec->operands[i] = Operand(elems[i]);
      create_vec->definitions[0] = Definition(dst);
      bld.insert(std::move(create_vec));

   /* if dst is sgpr - split the src, but move the original to sgpr. */
   } else if (skip) {
      vec = bld.pseudo(aco_opcode::p_as_uniform, bld.def(RegClass(RegType::sgpr, vec.size())), vec);
      byte_align_scalar(ctx, vec, offset, dst);
   } else {
      assert(dst.size() == vec.size());
      bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec);
   }

   ctx->allocated_vec.emplace(dst.id(), elems);
}

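/* In ACO, uniform booleans are an s1 value carried through SCC while
 * divergent booleans are per-lane masks (bld.lm: s2 in wave64, s1 in
 * wave32). These two helpers convert between the representations. */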
Temp bool_to_vector_condition(isel_context *ctx, Temp val, Temp dst = Temp(0, s2))
{
   Builder bld(ctx->program, ctx->block);
   if (!dst.id())
      dst = bld.tmp(bld.lm);

   assert(val.regClass() == s1);
   assert(dst.regClass() == bld.lm);

   return bld.sop2(Builder::s_cselect, Definition(dst), Operand((uint32_t) -1), Operand(0u), bld.scc(val));
}

Temp bool_to_scalar_condition(isel_context *ctx, Temp val, Temp dst = Temp(0, s1))
{
   Builder bld(ctx->program, ctx->block);
   if (!dst.id())
      dst = bld.tmp(s1);

   assert(val.regClass() == bld.lm);
   assert(dst.regClass() == s1);

   /* if we're currently in WQM mode, ensure that the source is also computed in WQM */
   Temp tmp = bld.tmp(s1);
   bld.sop2(Builder::s_and, bld.def(bld.lm), bld.scc(Definition(tmp)), val, Operand(exec, bld.lm));
   return emit_wqm(ctx, tmp, dst);
}

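/* Fetches a NIR ALU source as an ACO temporary, applying the source swizzle.
 * Identity swizzles return the SSA temp directly; otherwise components are
 * extracted (with s_bfe for sub-dword SGPR sources) and, for size > 1,
 * re-packed into a new vector. */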
Temp get_alu_src(struct isel_context *ctx, nir_alu_src src, unsigned size=1)
{
   if (src.src.ssa->num_components == 1 && src.swizzle[0] == 0 && size == 1)
      return get_ssa_temp(ctx, src.src.ssa);

   if (src.src.ssa->num_components == size) {
      bool identity_swizzle = true;
      for (unsigned i = 0; identity_swizzle && i < size; i++) {
         if (src.swizzle[i] != i)
            identity_swizzle = false;
      }
      if (identity_swizzle)
         return get_ssa_temp(ctx, src.src.ssa);
   }

   Temp vec = get_ssa_temp(ctx, src.src.ssa);
   unsigned elem_size = vec.bytes() / src.src.ssa->num_components;
   assert(elem_size > 0);
   assert(vec.bytes() % elem_size == 0);

   if (elem_size < 4 && vec.type() == RegType::sgpr) {
      assert(src.src.ssa->bit_size == 8 || src.src.ssa->bit_size == 16);
      assert(size == 1);
      unsigned swizzle = src.swizzle[0];
      if (vec.size() > 1) {
         assert(src.src.ssa->bit_size == 16);
         vec = emit_extract_vector(ctx, vec, swizzle / 2, s1);
         swizzle = swizzle & 1;
      }
      if (swizzle == 0)
         return vec;

      Temp dst{ctx->program->allocateId(), s1};
      aco_ptr<SOP2_instruction> bfe{create_instruction<SOP2_instruction>(aco_opcode::s_bfe_u32, Format::SOP2, 2, 2)};
      bfe->operands[0] = Operand(vec);
      bfe->operands[1] = Operand(uint32_t((src.src.ssa->bit_size << 16) | (src.src.ssa->bit_size * swizzle)));
      bfe->definitions[0] = Definition(dst);
      bfe->definitions[1] = Definition(ctx->program->allocateId(), scc, s1);
      ctx->block->instructions.emplace_back(std::move(bfe));
      return dst;
   }

   RegClass elem_rc = elem_size < 4 ? RegClass(vec.type(), elem_size).as_subdword() : RegClass(vec.type(), elem_size / 4);
   if (size == 1) {
      return emit_extract_vector(ctx, vec, src.swizzle[0], elem_rc);
   } else {
      assert(size <= 4);
      std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
      aco_ptr<Pseudo_instruction> vec_instr{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, size, 1)};
      for (unsigned i = 0; i < size; ++i) {
         elems[i] = emit_extract_vector(ctx, vec, src.swizzle[i], elem_rc);
         vec_instr->operands[i] = Operand{elems[i]};
      }
      Temp dst{ctx->program->allocateId(), RegClass(vec.type(), elem_size * size / 4)};
      vec_instr->definitions[0] = Definition(dst);
      ctx->block->instructions.emplace_back(std::move(vec_instr));
      ctx->allocated_vec.emplace(dst.id(), elems);
      return dst;
   }
}

Temp convert_pointer_to_64_bit(isel_context *ctx, Temp ptr)
{
   if (ptr.size() == 2)
      return ptr;
   Builder bld(ctx->program, ctx->block);
   if (ptr.type() == RegType::vgpr)
      ptr = bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), ptr);
   return bld.pseudo(aco_opcode::p_create_vector, bld.def(s2),
                     ptr, Operand((unsigned)ctx->options->address32_hi));
}

void emit_sop2_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst, bool writes_scc)
{
   aco_ptr<SOP2_instruction> sop2{create_instruction<SOP2_instruction>(op, Format::SOP2, 2, writes_scc ? 2 : 1)};
   sop2->operands[0] = Operand(get_alu_src(ctx, instr->src[0]));
   sop2->operands[1] = Operand(get_alu_src(ctx, instr->src[1]));
   sop2->definitions[0] = Definition(dst);
   if (writes_scc)
      sop2->definitions[1] = Definition(ctx->program->allocateId(), scc, s1);
   ctx->block->instructions.emplace_back(std::move(sop2));
}

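/* VOP2 instructions can read at most one SGPR, and only as the first source.
 * If the second source is an SGPR, either swap the operands (when the opcode
 * is commutative) or copy it to a VGPR first. */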
void emit_vop2_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst,
                           bool commutative, bool swap_srcs=false, bool flush_denorms = false)
{
   Builder bld(ctx->program, ctx->block);
   bld.is_precise = instr->exact;

   Temp src0 = get_alu_src(ctx, instr->src[swap_srcs ? 1 : 0]);
   Temp src1 = get_alu_src(ctx, instr->src[swap_srcs ? 0 : 1]);
   if (src1.type() == RegType::sgpr) {
      if (commutative && src0.type() == RegType::vgpr) {
         Temp t = src0;
         src0 = src1;
         src1 = t;
      } else {
         src1 = as_vgpr(ctx, src1);
      }
   }

   if (flush_denorms && ctx->program->chip_class < GFX9) {
      assert(dst.size() == 1);
      Temp tmp = bld.vop2(op, bld.def(v1), src0, src1);
      bld.vop2(aco_opcode::v_mul_f32, Definition(dst), Operand(0x3f800000u), tmp);
   } else {
      bld.vop2(op, Definition(dst), src0, src1);
   }
}

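/* 64-bit bitwise logic ops (iand/ior/ixor) have no native 64-bit VALU
 * encoding, but they operate independently on each half, so they are
 * selected directly as two 32-bit VOP2 instructions on the split halves
 * instead of being lowered in NIR. */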
void emit_vop2_instruction_logic64(isel_context *ctx, nir_alu_instr *instr,
                                   aco_opcode op, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   bld.is_precise = instr->exact;

   Temp src0 = get_alu_src(ctx, instr->src[0]);
   Temp src1 = get_alu_src(ctx, instr->src[1]);

   if (src1.type() == RegType::sgpr) {
      assert(src0.type() == RegType::vgpr);
      std::swap(src0, src1);
   }

   Temp src00 = bld.tmp(src0.type(), 1);
   Temp src01 = bld.tmp(src0.type(), 1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
   Temp src10 = bld.tmp(v1);
   Temp src11 = bld.tmp(v1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
   Temp lo = bld.vop2(op, bld.def(v1), src00, src10);
   Temp hi = bld.vop2(op, bld.def(v1), src01, src11);
   bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
}

void emit_vop3a_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst,
                            bool flush_denorms = false)
{
   Temp src0 = get_alu_src(ctx, instr->src[0]);
   Temp src1 = get_alu_src(ctx, instr->src[1]);
   Temp src2 = get_alu_src(ctx, instr->src[2]);

   /* ensure that the instruction has at most 1 sgpr operand
    * The optimizer will inline constants for us */
   if (src0.type() == RegType::sgpr && src1.type() == RegType::sgpr)
      src0 = as_vgpr(ctx, src0);
   if (src1.type() == RegType::sgpr && src2.type() == RegType::sgpr)
      src1 = as_vgpr(ctx, src1);
   if (src2.type() == RegType::sgpr && src0.type() == RegType::sgpr)
      src2 = as_vgpr(ctx, src2);

   Builder bld(ctx->program, ctx->block);
   bld.is_precise = instr->exact;
   if (flush_denorms && ctx->program->chip_class < GFX9) {
      assert(dst.size() == 1);
      /* use a temporary so dst is only defined by the final multiply */
      Temp tmp = bld.vop3(op, bld.def(v1), src0, src1, src2);
      bld.vop2(aco_opcode::v_mul_f32, Definition(dst), Operand(0x3f800000u), tmp);
   } else {
      bld.vop3(op, Definition(dst), src0, src1, src2);
   }
}

void emit_vop1_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   bld.is_precise = instr->exact;
   if (dst.type() == RegType::sgpr)
      bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
                 bld.vop1(op, bld.def(RegType::vgpr, dst.size()), get_alu_src(ctx, instr->src[0])));
   else
      bld.vop1(op, Definition(dst), get_alu_src(ctx, instr->src[0]));
}

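/* VOPC comparisons have the same SGPR restriction as VOP2: if the second
 * source is an SGPR, the operands are swapped, which requires mirroring the
 * comparison opcode (lt <-> gt, ge <-> le); eq and ne stay unchanged. */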
void emit_vopc_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst)
{
   Temp src0 = get_alu_src(ctx, instr->src[0]);
   Temp src1 = get_alu_src(ctx, instr->src[1]);
   assert(src0.size() == src1.size());

   aco_ptr<Instruction> vopc;
   if (src1.type() == RegType::sgpr) {
      if (src0.type() == RegType::vgpr) {
         /* to swap the operands, we might also have to change the opcode */
         switch (op) {
         case aco_opcode::v_cmp_lt_f16:
            op = aco_opcode::v_cmp_gt_f16;
            break;
         case aco_opcode::v_cmp_ge_f16:
            op = aco_opcode::v_cmp_le_f16;
            break;
         case aco_opcode::v_cmp_lt_i16:
            op = aco_opcode::v_cmp_gt_i16;
            break;
         case aco_opcode::v_cmp_ge_i16:
            op = aco_opcode::v_cmp_le_i16;
            break;
         case aco_opcode::v_cmp_lt_u16:
            op = aco_opcode::v_cmp_gt_u16;
            break;
         case aco_opcode::v_cmp_ge_u16:
            op = aco_opcode::v_cmp_le_u16;
            break;
         case aco_opcode::v_cmp_lt_f32:
            op = aco_opcode::v_cmp_gt_f32;
            break;
         case aco_opcode::v_cmp_ge_f32:
            op = aco_opcode::v_cmp_le_f32;
            break;
         case aco_opcode::v_cmp_lt_i32:
            op = aco_opcode::v_cmp_gt_i32;
            break;
         case aco_opcode::v_cmp_ge_i32:
            op = aco_opcode::v_cmp_le_i32;
            break;
         case aco_opcode::v_cmp_lt_u32:
            op = aco_opcode::v_cmp_gt_u32;
            break;
         case aco_opcode::v_cmp_ge_u32:
            op = aco_opcode::v_cmp_le_u32;
            break;
         case aco_opcode::v_cmp_lt_f64:
            op = aco_opcode::v_cmp_gt_f64;
            break;
         case aco_opcode::v_cmp_ge_f64:
            op = aco_opcode::v_cmp_le_f64;
            break;
         case aco_opcode::v_cmp_lt_i64:
            op = aco_opcode::v_cmp_gt_i64;
            break;
         case aco_opcode::v_cmp_ge_i64:
            op = aco_opcode::v_cmp_le_i64;
            break;
         case aco_opcode::v_cmp_lt_u64:
            op = aco_opcode::v_cmp_gt_u64;
            break;
         case aco_opcode::v_cmp_ge_u64:
            op = aco_opcode::v_cmp_le_u64;
            break;
         default: /* eq and ne are commutative */
            break;
         }
         Temp t = src0;
         src0 = src1;
         src1 = t;
      } else {
         src1 = as_vgpr(ctx, src1);
      }
   }

   Builder bld(ctx->program, ctx->block);
   bld.vopc(op, bld.hint_vcc(Definition(dst)), src0, src1);
}

void emit_sopc_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst)
{
   Temp src0 = get_alu_src(ctx, instr->src[0]);
   Temp src1 = get_alu_src(ctx, instr->src[1]);
   Builder bld(ctx->program, ctx->block);

   assert(dst.regClass() == bld.lm);
   assert(src0.type() == RegType::sgpr);
   assert(src1.type() == RegType::sgpr);
   assert(src0.regClass() == src1.regClass());

   /* Emit the SALU comparison instruction */
   Temp cmp = bld.sopc(op, bld.scc(bld.def(s1)), src0, src1);
   /* Turn the result into a per-lane bool */
   bool_to_vector_condition(ctx, cmp, dst);
}

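/* Selects between the SALU and VALU variant of a comparison: the VALU form
 * is required when the result is divergent, when either source already lives
 * in a VGPR, or when no SALU opcode exists for this bit size. */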
void emit_comparison(isel_context *ctx, nir_alu_instr *instr, Temp dst,
                     aco_opcode v16_op, aco_opcode v32_op, aco_opcode v64_op, aco_opcode s32_op = aco_opcode::num_opcodes, aco_opcode s64_op = aco_opcode::num_opcodes)
{
   aco_opcode s_op = instr->src[0].src.ssa->bit_size == 64 ? s64_op : instr->src[0].src.ssa->bit_size == 32 ? s32_op : aco_opcode::num_opcodes;
   aco_opcode v_op = instr->src[0].src.ssa->bit_size == 64 ? v64_op : instr->src[0].src.ssa->bit_size == 32 ? v32_op : v16_op;
   bool use_valu = s_op == aco_opcode::num_opcodes ||
                   nir_dest_is_divergent(instr->dest.dest) ||
                   ctx->allocated[instr->src[0].src.ssa->index].type() == RegType::vgpr ||
                   ctx->allocated[instr->src[1].src.ssa->index].type() == RegType::vgpr;
   aco_opcode op = use_valu ? v_op : s_op;
   assert(op != aco_opcode::num_opcodes);
   assert(dst.regClass() == ctx->program->lane_mask);

   if (use_valu)
      emit_vopc_instruction(ctx, instr, op, dst);
   else
      emit_sopc_instruction(ctx, instr, op, dst);
}

void emit_boolean_logic(isel_context *ctx, nir_alu_instr *instr, Builder::WaveSpecificOpcode op, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   Temp src0 = get_alu_src(ctx, instr->src[0]);
   Temp src1 = get_alu_src(ctx, instr->src[1]);

   assert(dst.regClass() == bld.lm);
   assert(src0.regClass() == bld.lm);
   assert(src1.regClass() == bld.lm);

   bld.sop2(op, Definition(dst), bld.def(s1, scc), src0, src1);
}

void emit_bcsel(isel_context *ctx, nir_alu_instr *instr, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   Temp cond = get_alu_src(ctx, instr->src[0]);
   Temp then = get_alu_src(ctx, instr->src[1]);
   Temp els = get_alu_src(ctx, instr->src[2]);

   assert(cond.regClass() == bld.lm);

   if (dst.type() == RegType::vgpr) {
      aco_ptr<Instruction> bcsel;
      if (dst.size() == 1) {
         then = as_vgpr(ctx, then);
         els = as_vgpr(ctx, els);

         bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), els, then, cond);
      } else if (dst.size() == 2) {
         Temp then_lo = bld.tmp(v1), then_hi = bld.tmp(v1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(then_lo), Definition(then_hi), then);
         Temp else_lo = bld.tmp(v1), else_hi = bld.tmp(v1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(else_lo), Definition(else_hi), els);

         Temp dst0 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_lo, then_lo, cond);
         Temp dst1 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_hi, then_hi, cond);

         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      return;
   }

   if (instr->dest.dest.ssa.bit_size == 1) {
      assert(dst.regClass() == bld.lm);
      assert(then.regClass() == bld.lm);
      assert(els.regClass() == bld.lm);
   }

   if (!nir_src_is_divergent(instr->src[0].src)) { /* uniform condition and values in sgpr */
      if (dst.regClass() == s1 || dst.regClass() == s2) {
         assert((then.regClass() == s1 || then.regClass() == s2) && els.regClass() == then.regClass());
         assert(dst.size() == then.size());
         aco_opcode op = dst.regClass() == s1 ? aco_opcode::s_cselect_b32 : aco_opcode::s_cselect_b64;
         bld.sop2(op, Definition(dst), then, els, bld.scc(bool_to_scalar_condition(ctx, cond)));
      } else {
         fprintf(stderr, "Unimplemented uniform bcsel bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      return;
   }

   /* divergent boolean bcsel
    * this implements bcsel on bools: dst = s0 ? s1 : s2
    * which is equivalent to: dst = (s0 & s1) | (~s0 & s2) */
   assert(instr->dest.dest.ssa.bit_size == 1);

   if (cond.id() != then.id())
      then = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), cond, then);

   if (cond.id() == els.id())
      bld.sop1(Builder::s_mov, Definition(dst), then);
   else
      bld.sop2(Builder::s_or, Definition(dst), bld.def(s1, scc), then,
               bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), els, cond));
}

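/* Workaround for pre-GFX9 parts where v_rcp/v_rsq/v_sqrt/v_log flush input
 * denormals: the value is pre-multiplied by 2^24 (0x4b800000) so a denormal
 * becomes a normal number, the op is applied, and the 'undo' constant scales
 * the result back (e.g. for rcp, rcp(x * 2^24) = rcp(x) / 2^24, so undo is
 * 2^24 again). v_cmp_class selects the scaled path only for denormal inputs. */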
void emit_scaled_op(isel_context *ctx, Builder& bld, Definition dst, Temp val,
                    aco_opcode op, uint32_t undo)
{
   /* multiply by 16777216 to handle denormals */
   Temp is_denormal = bld.vopc(aco_opcode::v_cmp_class_f32, bld.hint_vcc(bld.def(bld.lm)),
                               as_vgpr(ctx, val), bld.copy(bld.def(v1), Operand((1u << 7) | (1u << 4))));
   Temp scaled = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x4b800000u), val);
   scaled = bld.vop1(op, bld.def(v1), scaled);
   scaled = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(undo), scaled);

   Temp not_scaled = bld.vop1(op, bld.def(v1), val);

   bld.vop2(aco_opcode::v_cndmask_b32, dst, not_scaled, scaled, is_denormal);
}

void emit_rcp(isel_context *ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->block->fp_mode.denorm32 == 0) {
      bld.vop1(aco_opcode::v_rcp_f32, dst, val);
      return;
   }

   emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_rcp_f32, 0x4b800000u);
}

void emit_rsq(isel_context *ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->block->fp_mode.denorm32 == 0) {
      bld.vop1(aco_opcode::v_rsq_f32, dst, val);
      return;
   }

   emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_rsq_f32, 0x45800000u);
}

void emit_sqrt(isel_context *ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->block->fp_mode.denorm32 == 0) {
      bld.vop1(aco_opcode::v_sqrt_f32, dst, val);
      return;
   }

   emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_sqrt_f32, 0x39800000u);
}

void emit_log2(isel_context *ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->block->fp_mode.denorm32 == 0) {
      bld.vop1(aco_opcode::v_log_f32, dst, val);
      return;
   }

   emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_log_f32, 0xc1c00000u);
}

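/* GFX6 lacks V_TRUNC_F64, so truncation is done manually: extract the
 * unbiased exponent, mask away the fraction bits that lie below the last
 * integer bit, and handle |x| < 1 (result is a signed zero) and exponents
 * greater than 51 (already integral) as special cases. */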
Temp emit_trunc_f64(isel_context *ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->options->chip_class >= GFX7)
      return bld.vop1(aco_opcode::v_trunc_f64, Definition(dst), val);

   /* GFX6 doesn't support V_TRUNC_F64, lower it. */
   /* TODO: create more efficient code! */
   if (val.type() == RegType::sgpr)
      val = as_vgpr(ctx, val);

   /* Split the input value. */
   Temp val_lo = bld.tmp(v1), val_hi = bld.tmp(v1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(val_lo), Definition(val_hi), val);

   /* Extract the exponent and compute the unbiased value. */
   Temp exponent = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), val_hi, Operand(20u), Operand(11u));
   exponent = bld.vsub32(bld.def(v1), exponent, Operand(1023u));

   /* Extract the fractional part. */
   Temp fract_mask = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(-1u), Operand(0x000fffffu));
   fract_mask = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), fract_mask, exponent);

   Temp fract_mask_lo = bld.tmp(v1), fract_mask_hi = bld.tmp(v1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(fract_mask_lo), Definition(fract_mask_hi), fract_mask);

   Temp fract_lo = bld.tmp(v1), fract_hi = bld.tmp(v1);
   Temp tmp = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), fract_mask_lo);
   fract_lo = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), val_lo, tmp);
   tmp = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), fract_mask_hi);
   fract_hi = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), val_hi, tmp);

   /* Get the sign bit. */
   Temp sign = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x80000000u), val_hi);

   /* Decide the operation to apply depending on the unbiased exponent. */
   Temp exp_lt0 = bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.hint_vcc(bld.def(bld.lm)), exponent, Operand(0u));
   Temp dst_lo = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), fract_lo, bld.copy(bld.def(v1), Operand(0u)), exp_lt0);
   Temp dst_hi = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), fract_hi, sign, exp_lt0);
   Temp exp_gt51 = bld.vopc_e64(aco_opcode::v_cmp_gt_i32, bld.def(s2), exponent, Operand(51u));
   dst_lo = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), dst_lo, val_lo, exp_gt51);
   dst_hi = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), dst_hi, val_hi, exp_gt51);

   return bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst_lo, dst_hi);
}

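/* GFX6 lacks V_FLOOR_F64 as well; floor(x) is computed as x - fract(x),
 * where fract(x) is clamped to the largest double below 1.0 and replaced by
 * the input itself for NaNs, then subtracted via v_add_f64 with the second
 * source negated. */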
Temp emit_floor_f64(isel_context *ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->options->chip_class >= GFX7)
      return bld.vop1(aco_opcode::v_floor_f64, Definition(dst), val);

   /* GFX6 doesn't support V_FLOOR_F64, lower it. */
   Temp src0 = as_vgpr(ctx, val);

   Temp mask = bld.copy(bld.def(s1), Operand(3u)); /* isnan */
   Temp min_val = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(-1u), Operand(0x3fefffffu));

   Temp isnan = bld.vopc_e64(aco_opcode::v_cmp_class_f64, bld.hint_vcc(bld.def(bld.lm)), src0, mask);
   Temp fract = bld.vop1(aco_opcode::v_fract_f64, bld.def(v2), src0);
   Temp min = bld.vop3(aco_opcode::v_min_f64, bld.def(v2), fract, min_val);

   Temp then_lo = bld.tmp(v1), then_hi = bld.tmp(v1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(then_lo), Definition(then_hi), src0);
   Temp else_lo = bld.tmp(v1), else_hi = bld.tmp(v1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(else_lo), Definition(else_hi), min);

   Temp dst0 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_lo, then_lo, isnan);
   Temp dst1 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_hi, then_hi, isnan);

   Temp v = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), dst0, dst1);

   Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), src0, v);
   static_cast<VOP3A_instruction*>(add)->neg[1] = true;

   return add->definitions[0].getTemp();
}

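/* Integer width conversion. Narrowing takes the low bits via
 * p_extract_vector; widening sign- or zero-extends with s_sext/s_and on
 * SALU, SDWA moves on GFX8+, or v_bfe on GFX6-7, and 64-bit results get
 * their high half from an arithmetic shift (signed) or zero. */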
Temp convert_int(isel_context *ctx, Builder& bld, Temp src, unsigned src_bits, unsigned dst_bits, bool is_signed, Temp dst=Temp()) {
   if (!dst.id()) {
      if (dst_bits % 32 == 0 || src.type() == RegType::sgpr)
         dst = bld.tmp(src.type(), DIV_ROUND_UP(dst_bits, 32u));
      else
         dst = bld.tmp(RegClass(RegType::vgpr, dst_bits / 8u).as_subdword());
   }

   if (dst.bytes() == src.bytes() && dst_bits < src_bits)
      return bld.copy(Definition(dst), src);
   else if (dst.bytes() < src.bytes())
      return bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), src, Operand(0u));

   Temp tmp = dst;
   if (dst_bits == 64)
      tmp = src_bits == 32 ? src : bld.tmp(src.type(), 1);

   if (tmp == src) {
   } else if (src.regClass() == s1) {
      if (is_signed)
         bld.sop1(src_bits == 8 ? aco_opcode::s_sext_i32_i8 : aco_opcode::s_sext_i32_i16, Definition(tmp), src);
      else
         bld.sop2(aco_opcode::s_and_b32, Definition(tmp), bld.def(s1, scc), Operand(src_bits == 8 ? 0xFFu : 0xFFFFu), src);
   } else if (ctx->options->chip_class >= GFX8) {
      assert(src_bits != 8 || src.regClass() == v1b);
      assert(src_bits != 16 || src.regClass() == v2b);
      aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
      sdwa->operands[0] = Operand(src);
      sdwa->definitions[0] = Definition(tmp);
      if (is_signed)
         sdwa->sel[0] = src_bits == 8 ? sdwa_sbyte : sdwa_sword;
      else
         sdwa->sel[0] = src_bits == 8 ? sdwa_ubyte : sdwa_uword;
      sdwa->dst_sel = tmp.bytes() == 2 ? sdwa_uword : sdwa_udword;
      bld.insert(std::move(sdwa));
   } else {
      assert(ctx->options->chip_class == GFX6 || ctx->options->chip_class == GFX7);
      aco_opcode opcode = is_signed ? aco_opcode::v_bfe_i32 : aco_opcode::v_bfe_u32;
      bld.vop3(opcode, Definition(tmp), src, Operand(0u), Operand(src_bits == 8 ? 8u : 16u));
   }

   if (dst_bits == 64) {
      if (is_signed && dst.regClass() == s2) {
         Temp high = bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), tmp, Operand(31u));
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, high);
      } else if (is_signed && dst.regClass() == v2) {
         Temp high = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), tmp);
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, high);
      } else {
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, Operand(0u));
      }
   }

   return dst;
}

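/* Translates one NIR ALU instruction into ACO IR. The general pattern per
 * opcode: pick the SALU form when the destination is uniform (sgpr) and the
 * VALU form when it is divergent (vgpr), and split 64-bit operations into
 * 32-bit halves where no native 64-bit instruction exists. */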
1064 void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr)
1065 {
1066 if (!instr->dest.dest.is_ssa) {
1067 fprintf(stderr, "nir alu dst not in ssa: ");
1068 nir_print_instr(&instr->instr, stderr);
1069 fprintf(stderr, "\n");
1070 abort();
1071 }
1072 Builder bld(ctx->program, ctx->block);
1073 bld.is_precise = instr->exact;
1074 Temp dst = get_ssa_temp(ctx, &instr->dest.dest.ssa);
1075 switch(instr->op) {
1076 case nir_op_vec2:
1077 case nir_op_vec3:
1078 case nir_op_vec4: {
1079 std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
1080 unsigned num = instr->dest.dest.ssa.num_components;
1081 for (unsigned i = 0; i < num; ++i)
1082 elems[i] = get_alu_src(ctx, instr->src[i]);
1083
1084 if (instr->dest.dest.ssa.bit_size >= 32 || dst.type() == RegType::vgpr) {
1085 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, instr->dest.dest.ssa.num_components, 1)};
1086 RegClass elem_rc = RegClass::get(RegType::vgpr, instr->dest.dest.ssa.bit_size / 8u);
1087 for (unsigned i = 0; i < num; ++i) {
1088 if (elems[i].type() == RegType::sgpr && elem_rc.is_subdword())
1089 vec->operands[i] = Operand(emit_extract_vector(ctx, elems[i], 0, elem_rc));
1090 else
1091 vec->operands[i] = Operand{elems[i]};
1092 }
1093 vec->definitions[0] = Definition(dst);
1094 ctx->block->instructions.emplace_back(std::move(vec));
1095 ctx->allocated_vec.emplace(dst.id(), elems);
1096 } else {
1097 // TODO: that is a bit suboptimal..
1098 Temp mask = bld.copy(bld.def(s1), Operand((1u << instr->dest.dest.ssa.bit_size) - 1));
1099 for (unsigned i = 0; i < num - 1; ++i)
1100 if (((i+1) * instr->dest.dest.ssa.bit_size) % 32)
1101 elems[i] = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), elems[i], mask);
1102 for (unsigned i = 0; i < num; ++i) {
1103 unsigned bit = i * instr->dest.dest.ssa.bit_size;
1104 if (bit % 32 == 0) {
1105 elems[bit / 32] = elems[i];
1106 } else {
1107 elems[i] = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc),
1108 elems[i], Operand((i * instr->dest.dest.ssa.bit_size) % 32));
1109 elems[bit / 32] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), elems[bit / 32], elems[i]);
1110 }
1111 }
1112 if (dst.size() == 1)
1113 bld.copy(Definition(dst), elems[0]);
1114 else
1115 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), elems[0], elems[1]);
1116 }
1117 break;
1118 }
1119 case nir_op_mov: {
1120 Temp src = get_alu_src(ctx, instr->src[0]);
1121 aco_ptr<Instruction> mov;
1122 if (dst.type() == RegType::sgpr) {
1123 if (src.type() == RegType::vgpr)
1124 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), src);
1125 else if (src.regClass() == s1)
1126 bld.sop1(aco_opcode::s_mov_b32, Definition(dst), src);
1127 else if (src.regClass() == s2)
1128 bld.sop1(aco_opcode::s_mov_b64, Definition(dst), src);
1129 else
1130 unreachable("wrong src register class for nir_op_imov");
1131 } else {
1132 if (dst.regClass() == v1)
1133 bld.vop1(aco_opcode::v_mov_b32, Definition(dst), src);
1134 else if (dst.regClass() == v1b ||
1135 dst.regClass() == v2b ||
1136 dst.regClass() == v2)
1137 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src);
1138 else
1139 unreachable("wrong src register class for nir_op_imov");
1140 }
1141 break;
1142 }
1143 case nir_op_inot: {
1144 Temp src = get_alu_src(ctx, instr->src[0]);
1145 if (instr->dest.dest.ssa.bit_size == 1) {
1146 assert(src.regClass() == bld.lm);
1147 assert(dst.regClass() == bld.lm);
1148 /* Don't use s_andn2 here, this allows the optimizer to make a better decision */
1149 Temp tmp = bld.sop1(Builder::s_not, bld.def(bld.lm), bld.def(s1, scc), src);
1150 bld.sop2(Builder::s_and, Definition(dst), bld.def(s1, scc), tmp, Operand(exec, bld.lm));
1151 } else if (dst.regClass() == v1) {
1152 emit_vop1_instruction(ctx, instr, aco_opcode::v_not_b32, dst);
1153 } else if (dst.regClass() == v2) {
1154 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
1155 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
1156 lo = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), lo);
1157 hi = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), hi);
1158 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
1159 } else if (dst.type() == RegType::sgpr) {
1160 aco_opcode opcode = dst.size() == 1 ? aco_opcode::s_not_b32 : aco_opcode::s_not_b64;
1161 bld.sop1(opcode, Definition(dst), bld.def(s1, scc), src);
1162 } else {
1163 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1164 nir_print_instr(&instr->instr, stderr);
1165 fprintf(stderr, "\n");
1166 }
1167 break;
1168 }
1169 case nir_op_ineg: {
1170 Temp src = get_alu_src(ctx, instr->src[0]);
1171 if (dst.regClass() == v1) {
1172 bld.vsub32(Definition(dst), Operand(0u), Operand(src));
1173 } else if (dst.regClass() == s1) {
1174 bld.sop2(aco_opcode::s_mul_i32, Definition(dst), Operand((uint32_t) -1), src);
1175 } else if (dst.size() == 2) {
1176 Temp src0 = bld.tmp(dst.type(), 1);
1177 Temp src1 = bld.tmp(dst.type(), 1);
1178 bld.pseudo(aco_opcode::p_split_vector, Definition(src0), Definition(src1), src);
1179
1180 if (dst.regClass() == s2) {
1181 Temp carry = bld.tmp(s1);
1182 Temp dst0 = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(carry)), Operand(0u), src0);
1183 Temp dst1 = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.def(s1, scc), Operand(0u), src1, carry);
1184 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
1185 } else {
1186 Temp lower = bld.tmp(v1);
1187 Temp borrow = bld.vsub32(Definition(lower), Operand(0u), src0, true).def(1).getTemp();
1188 Temp upper = bld.vsub32(bld.def(v1), Operand(0u), src1, false, borrow);
1189 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
1190 }
1191 } else {
1192 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1193 nir_print_instr(&instr->instr, stderr);
1194 fprintf(stderr, "\n");
1195 }
1196 break;
1197 }
1198 case nir_op_iabs: {
1199 if (dst.regClass() == s1) {
1200 bld.sop1(aco_opcode::s_abs_i32, Definition(dst), bld.def(s1, scc), get_alu_src(ctx, instr->src[0]));
1201 } else if (dst.regClass() == v1) {
1202 Temp src = get_alu_src(ctx, instr->src[0]);
1203 bld.vop2(aco_opcode::v_max_i32, Definition(dst), src, bld.vsub32(bld.def(v1), Operand(0u), src));
1204 } else {
1205 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1206 nir_print_instr(&instr->instr, stderr);
1207 fprintf(stderr, "\n");
1208 }
1209 break;
1210 }
1211 case nir_op_isign: {
1212 Temp src = get_alu_src(ctx, instr->src[0]);
1213 if (dst.regClass() == s1) {
1214 Temp tmp = bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, scc), src, Operand((uint32_t)-1));
1215 bld.sop2(aco_opcode::s_min_i32, Definition(dst), bld.def(s1, scc), tmp, Operand(1u));
1216 } else if (dst.regClass() == s2) {
1217 Temp neg = bld.sop2(aco_opcode::s_ashr_i64, bld.def(s2), bld.def(s1, scc), src, Operand(63u));
1218 Temp neqz;
1219 if (ctx->program->chip_class >= GFX8)
1220 neqz = bld.sopc(aco_opcode::s_cmp_lg_u64, bld.def(s1, scc), src, Operand(0u));
1221 else
1222 neqz = bld.sop2(aco_opcode::s_or_b64, bld.def(s2), bld.def(s1, scc), src, Operand(0u)).def(1).getTemp();
1223 /* SCC gets zero-extended to 64 bit */
1224 bld.sop2(aco_opcode::s_or_b64, Definition(dst), bld.def(s1, scc), neg, bld.scc(neqz));
1225 } else if (dst.regClass() == v1) {
1226 bld.vop3(aco_opcode::v_med3_i32, Definition(dst), Operand((uint32_t)-1), src, Operand(1u));
1227 } else if (dst.regClass() == v2) {
1228 Temp upper = emit_extract_vector(ctx, src, 1, v1);
1229 Temp neg = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), upper);
1230 Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
1231 Temp lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(1u), neg, gtz);
1232 upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), neg, gtz);
1233 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
1234 } else {
1235 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1236 nir_print_instr(&instr->instr, stderr);
1237 fprintf(stderr, "\n");
1238 }
1239 break;
1240 }
1241 case nir_op_imax: {
1242 if (dst.regClass() == v1) {
1243 emit_vop2_instruction(ctx, instr, aco_opcode::v_max_i32, dst, true);
1244 } else if (dst.regClass() == s1) {
1245 emit_sop2_instruction(ctx, instr, aco_opcode::s_max_i32, dst, true);
1246 } else {
1247 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1248 nir_print_instr(&instr->instr, stderr);
1249 fprintf(stderr, "\n");
1250 }
1251 break;
1252 }
1253 case nir_op_umax: {
1254 if (dst.regClass() == v1) {
1255 emit_vop2_instruction(ctx, instr, aco_opcode::v_max_u32, dst, true);
1256 } else if (dst.regClass() == s1) {
1257 emit_sop2_instruction(ctx, instr, aco_opcode::s_max_u32, dst, true);
1258 } else {
1259 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1260 nir_print_instr(&instr->instr, stderr);
1261 fprintf(stderr, "\n");
1262 }
1263 break;
1264 }
1265 case nir_op_imin: {
1266 if (dst.regClass() == v1) {
1267 emit_vop2_instruction(ctx, instr, aco_opcode::v_min_i32, dst, true);
1268 } else if (dst.regClass() == s1) {
1269 emit_sop2_instruction(ctx, instr, aco_opcode::s_min_i32, dst, true);
1270 } else {
1271 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1272 nir_print_instr(&instr->instr, stderr);
1273 fprintf(stderr, "\n");
1274 }
1275 break;
1276 }
1277 case nir_op_umin: {
1278 if (dst.regClass() == v1) {
1279 emit_vop2_instruction(ctx, instr, aco_opcode::v_min_u32, dst, true);
1280 } else if (dst.regClass() == s1) {
1281 emit_sop2_instruction(ctx, instr, aco_opcode::s_min_u32, dst, true);
1282 } else {
1283 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1284 nir_print_instr(&instr->instr, stderr);
1285 fprintf(stderr, "\n");
1286 }
1287 break;
1288 }
1289 case nir_op_ior: {
1290 if (instr->dest.dest.ssa.bit_size == 1) {
1291 emit_boolean_logic(ctx, instr, Builder::s_or, dst);
1292 } else if (dst.regClass() == v1) {
1293 emit_vop2_instruction(ctx, instr, aco_opcode::v_or_b32, dst, true);
1294 } else if (dst.regClass() == v2) {
1295 emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_or_b32, dst);
1296 } else if (dst.regClass() == s1) {
1297 emit_sop2_instruction(ctx, instr, aco_opcode::s_or_b32, dst, true);
1298 } else if (dst.regClass() == s2) {
1299 emit_sop2_instruction(ctx, instr, aco_opcode::s_or_b64, dst, true);
1300 } else {
1301 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1302 nir_print_instr(&instr->instr, stderr);
1303 fprintf(stderr, "\n");
1304 }
1305 break;
1306 }
1307 case nir_op_iand: {
1308 if (instr->dest.dest.ssa.bit_size == 1) {
1309 emit_boolean_logic(ctx, instr, Builder::s_and, dst);
1310 } else if (dst.regClass() == v1) {
1311 emit_vop2_instruction(ctx, instr, aco_opcode::v_and_b32, dst, true);
1312 } else if (dst.regClass() == v2) {
1313 emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_and_b32, dst);
1314 } else if (dst.regClass() == s1) {
1315 emit_sop2_instruction(ctx, instr, aco_opcode::s_and_b32, dst, true);
1316 } else if (dst.regClass() == s2) {
1317 emit_sop2_instruction(ctx, instr, aco_opcode::s_and_b64, dst, true);
1318 } else {
1319 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1320 nir_print_instr(&instr->instr, stderr);
1321 fprintf(stderr, "\n");
1322 }
1323 break;
1324 }
1325 case nir_op_ixor: {
1326 if (instr->dest.dest.ssa.bit_size == 1) {
1327 emit_boolean_logic(ctx, instr, Builder::s_xor, dst);
1328 } else if (dst.regClass() == v1) {
1329 emit_vop2_instruction(ctx, instr, aco_opcode::v_xor_b32, dst, true);
1330 } else if (dst.regClass() == v2) {
1331 emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_xor_b32, dst);
1332 } else if (dst.regClass() == s1) {
1333 emit_sop2_instruction(ctx, instr, aco_opcode::s_xor_b32, dst, true);
1334 } else if (dst.regClass() == s2) {
1335 emit_sop2_instruction(ctx, instr, aco_opcode::s_xor_b64, dst, true);
1336 } else {
1337 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1338 nir_print_instr(&instr->instr, stderr);
1339 fprintf(stderr, "\n");
1340 }
1341 break;
1342 }
1343 case nir_op_ushr: {
1344 if (dst.regClass() == v1) {
1345 emit_vop2_instruction(ctx, instr, aco_opcode::v_lshrrev_b32, dst, false, true);
1346 } else if (dst.regClass() == v2 && ctx->program->chip_class >= GFX8) {
1347 bld.vop3(aco_opcode::v_lshrrev_b64, Definition(dst),
1348 get_alu_src(ctx, instr->src[1]), get_alu_src(ctx, instr->src[0]));
1349 } else if (dst.regClass() == v2) {
1350 bld.vop3(aco_opcode::v_lshr_b64, Definition(dst),
1351 get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1352 } else if (dst.regClass() == s2) {
1353 emit_sop2_instruction(ctx, instr, aco_opcode::s_lshr_b64, dst, true);
1354 } else if (dst.regClass() == s1) {
1355 emit_sop2_instruction(ctx, instr, aco_opcode::s_lshr_b32, dst, true);
1356 } else {
1357 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1358 nir_print_instr(&instr->instr, stderr);
1359 fprintf(stderr, "\n");
1360 }
1361 break;
1362 }
1363 case nir_op_ishl: {
1364 if (dst.regClass() == v1) {
1365 emit_vop2_instruction(ctx, instr, aco_opcode::v_lshlrev_b32, dst, false, true);
1366 } else if (dst.regClass() == v2 && ctx->program->chip_class >= GFX8) {
1367 bld.vop3(aco_opcode::v_lshlrev_b64, Definition(dst),
1368 get_alu_src(ctx, instr->src[1]), get_alu_src(ctx, instr->src[0]));
1369 } else if (dst.regClass() == v2) {
1370 bld.vop3(aco_opcode::v_lshl_b64, Definition(dst),
1371 get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1372 } else if (dst.regClass() == s1) {
1373 emit_sop2_instruction(ctx, instr, aco_opcode::s_lshl_b32, dst, true);
1374 } else if (dst.regClass() == s2) {
1375 emit_sop2_instruction(ctx, instr, aco_opcode::s_lshl_b64, dst, true);
1376 } else {
1377 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1378 nir_print_instr(&instr->instr, stderr);
1379 fprintf(stderr, "\n");
1380 }
1381 break;
1382 }
1383 case nir_op_ishr: {
1384 if (dst.regClass() == v1) {
1385 emit_vop2_instruction(ctx, instr, aco_opcode::v_ashrrev_i32, dst, false, true);
1386 } else if (dst.regClass() == v2 && ctx->program->chip_class >= GFX8) {
1387 bld.vop3(aco_opcode::v_ashrrev_i64, Definition(dst),
1388 get_alu_src(ctx, instr->src[1]), get_alu_src(ctx, instr->src[0]));
1389 } else if (dst.regClass() == v2) {
1390 bld.vop3(aco_opcode::v_ashr_i64, Definition(dst),
1391 get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1392 } else if (dst.regClass() == s1) {
1393 emit_sop2_instruction(ctx, instr, aco_opcode::s_ashr_i32, dst, true);
1394 } else if (dst.regClass() == s2) {
1395 emit_sop2_instruction(ctx, instr, aco_opcode::s_ashr_i64, dst, true);
1396 } else {
1397 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1398 nir_print_instr(&instr->instr, stderr);
1399 fprintf(stderr, "\n");
1400 }
1401 break;
1402 }
1403 case nir_op_find_lsb: {
1404 Temp src = get_alu_src(ctx, instr->src[0]);
1405 if (src.regClass() == s1) {
1406 bld.sop1(aco_opcode::s_ff1_i32_b32, Definition(dst), src);
1407 } else if (src.regClass() == v1) {
1408 emit_vop1_instruction(ctx, instr, aco_opcode::v_ffbl_b32, dst);
1409 } else if (src.regClass() == s2) {
1410 bld.sop1(aco_opcode::s_ff1_i32_b64, Definition(dst), src);
1411 } else {
1412 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1413 nir_print_instr(&instr->instr, stderr);
1414 fprintf(stderr, "\n");
1415 }
1416 break;
1417 }
1418 case nir_op_ufind_msb:
1419 case nir_op_ifind_msb: {
1420 Temp src = get_alu_src(ctx, instr->src[0]);
1421 if (src.regClass() == s1 || src.regClass() == s2) {
1422 aco_opcode op = src.regClass() == s2 ?
1423 (instr->op == nir_op_ufind_msb ? aco_opcode::s_flbit_i32_b64 : aco_opcode::s_flbit_i32_i64) :
1424 (instr->op == nir_op_ufind_msb ? aco_opcode::s_flbit_i32_b32 : aco_opcode::s_flbit_i32);
1425 Temp msb_rev = bld.sop1(op, bld.def(s1), src);
1426
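			/* s_flbit counts from the MSB and returns -1 when no bit is set, while
			 * NIR wants an LSB-based index: msb = (bits - 1) - msb_rev. When msb_rev
			 * is -1, the unsigned subtraction borrows, and that borrow selects the
			 * -1 "not found" result below. */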
1427 Builder::Result sub = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc),
1428 Operand(src.size() * 32u - 1u), msb_rev);
1429 Temp msb = sub.def(0).getTemp();
1430 Temp carry = sub.def(1).getTemp();
1431
1432 bld.sop2(aco_opcode::s_cselect_b32, Definition(dst), Operand((uint32_t)-1), msb, bld.scc(carry));
1433 } else if (src.regClass() == v1) {
1434 aco_opcode op = instr->op == nir_op_ufind_msb ? aco_opcode::v_ffbh_u32 : aco_opcode::v_ffbh_i32;
1435 Temp msb_rev = bld.tmp(v1);
1436 emit_vop1_instruction(ctx, instr, op, msb_rev);
1437 Temp msb = bld.tmp(v1);
1438 Temp carry = bld.vsub32(Definition(msb), Operand(31u), Operand(msb_rev), true).def(1).getTemp();
1439 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), msb, Operand((uint32_t)-1), carry);
1440 } else {
1441 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1442 nir_print_instr(&instr->instr, stderr);
1443 fprintf(stderr, "\n");
1444 }
1445 break;
1446 }
1447 case nir_op_bitfield_reverse: {
1448 if (dst.regClass() == s1) {
1449 bld.sop1(aco_opcode::s_brev_b32, Definition(dst), get_alu_src(ctx, instr->src[0]));
1450 } else if (dst.regClass() == v1) {
1451 bld.vop1(aco_opcode::v_bfrev_b32, Definition(dst), get_alu_src(ctx, instr->src[0]));
1452 } else {
1453 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1454 nir_print_instr(&instr->instr, stderr);
1455 fprintf(stderr, "\n");
1456 }
1457 break;
1458 }
1459 case nir_op_iadd: {
1460 if (dst.regClass() == s1) {
1461 emit_sop2_instruction(ctx, instr, aco_opcode::s_add_u32, dst, true);
1462 break;
1463 }
1464
1465 Temp src0 = get_alu_src(ctx, instr->src[0]);
1466 Temp src1 = get_alu_src(ctx, instr->src[1]);
1467 if (dst.regClass() == v1) {
1468 bld.vadd32(Definition(dst), Operand(src0), Operand(src1));
1469 break;
1470 }
1471
1472 assert(src0.size() == 2 && src1.size() == 2);
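		/* 64-bit addition: add the low halves first, then feed the carry into an
		 * add-with-carry on the high halves. */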
1473 Temp src00 = bld.tmp(src0.type(), 1);
1474 Temp src01 = bld.tmp(dst.type(), 1);
1475 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
1476 Temp src10 = bld.tmp(src1.type(), 1);
1477 Temp src11 = bld.tmp(dst.type(), 1);
1478 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
1479
1480 if (dst.regClass() == s2) {
1481 Temp carry = bld.tmp(s1);
1482 Temp dst0 = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), src00, src10);
1483 Temp dst1 = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.def(s1, scc), src01, src11, bld.scc(carry));
1484 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
1485 } else if (dst.regClass() == v2) {
1486 Temp dst0 = bld.tmp(v1);
1487 Temp carry = bld.vadd32(Definition(dst0), src00, src10, true).def(1).getTemp();
1488 Temp dst1 = bld.vadd32(bld.def(v1), src01, src11, false, carry);
1489 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
1490 } else {
1491 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1492 nir_print_instr(&instr->instr, stderr);
1493 fprintf(stderr, "\n");
1494 }
1495 break;
1496 }
1497 case nir_op_uadd_sat: {
1498 Temp src0 = get_alu_src(ctx, instr->src[0]);
1499 Temp src1 = get_alu_src(ctx, instr->src[1]);
1500 if (dst.regClass() == s1) {
1501 Temp tmp = bld.tmp(s1), carry = bld.tmp(s1);
1502 bld.sop2(aco_opcode::s_add_u32, Definition(tmp), bld.scc(Definition(carry)),
1503 src0, src1);
1504 bld.sop2(aco_opcode::s_cselect_b32, Definition(dst), Operand((uint32_t) -1), tmp, bld.scc(carry));
1505 } else if (dst.regClass() == v1) {
1506 if (ctx->options->chip_class >= GFX9) {
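			/* On GFX9+, v_add_u32 is the carry-less VOP2 add; promoting it to VOP3
			 * and setting the clamp bit makes it saturate to UINT32_MAX on overflow. */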
1507 aco_ptr<VOP3A_instruction> add{create_instruction<VOP3A_instruction>(aco_opcode::v_add_u32, asVOP3(Format::VOP2), 2, 1)};
1508 add->operands[0] = Operand(src0);
1509 add->operands[1] = Operand(src1);
1510 add->definitions[0] = Definition(dst);
1511 add->clamp = 1;
1512 ctx->block->instructions.emplace_back(std::move(add));
1513 } else {
1514 if (src1.regClass() != v1)
1515 std::swap(src0, src1);
1516 assert(src1.regClass() == v1);
1517 Temp tmp = bld.tmp(v1);
1518 Temp carry = bld.vadd32(Definition(tmp), src0, src1, true).def(1).getTemp();
1519 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), tmp, Operand((uint32_t) -1), carry);
1520 }
1521 } else {
1522 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1523 nir_print_instr(&instr->instr, stderr);
1524 fprintf(stderr, "\n");
1525 }
1526 break;
1527 }
1528 case nir_op_uadd_carry: {
1529 Temp src0 = get_alu_src(ctx, instr->src[0]);
1530 Temp src1 = get_alu_src(ctx, instr->src[1]);
1531 if (dst.regClass() == s1) {
1532 bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(dst)), src0, src1);
1533 break;
1534 }
1535 if (dst.regClass() == v1) {
1536 Temp carry = bld.vadd32(bld.def(v1), src0, src1, true).def(1).getTemp();
1537 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand(1u), carry);
1538 break;
1539 }
1540
1541 Temp src00 = bld.tmp(src0.type(), 1);
1542 Temp src01 = bld.tmp(dst.type(), 1);
1543 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
1544 Temp src10 = bld.tmp(src1.type(), 1);
1545 Temp src11 = bld.tmp(dst.type(), 1);
1546 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
1547 if (dst.regClass() == s2) {
1548 Temp carry = bld.tmp(s1);
1549 bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), src00, src10);
1550 carry = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.scc(bld.def(s1)), src01, src11, bld.scc(carry)).def(1).getTemp();
1551 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), carry, Operand(0u));
1552 } else if (dst.regClass() == v2) {
1553 Temp carry = bld.vadd32(bld.def(v1), src00, src10, true).def(1).getTemp();
1554 carry = bld.vadd32(bld.def(v1), src01, src11, true, carry).def(1).getTemp();
1555 carry = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), Operand(1u), carry);
1556 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), carry, Operand(0u));
1557 } else {
1558 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1559 nir_print_instr(&instr->instr, stderr);
1560 fprintf(stderr, "\n");
1561 }
1562 break;
1563 }
1564 case nir_op_isub: {
1565 if (dst.regClass() == s1) {
1566 emit_sop2_instruction(ctx, instr, aco_opcode::s_sub_i32, dst, true);
1567 break;
1568 }
1569
1570 Temp src0 = get_alu_src(ctx, instr->src[0]);
1571 Temp src1 = get_alu_src(ctx, instr->src[1]);
1572 if (dst.regClass() == v1) {
1573 bld.vsub32(Definition(dst), src0, src1);
1574 break;
1575 }
1576
1577 Temp src00 = bld.tmp(src0.type(), 1);
1578 Temp src01 = bld.tmp(dst.type(), 1);
1579 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
1580 Temp src10 = bld.tmp(src1.type(), 1);
1581 Temp src11 = bld.tmp(dst.type(), 1);
1582 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
1583 if (dst.regClass() == s2) {
1584 Temp carry = bld.tmp(s1);
1585 Temp dst0 = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(carry)), src00, src10);
1586 			Temp dst1 = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.def(s1, scc), src01, src11, bld.scc(carry));
1587 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
1588 } else if (dst.regClass() == v2) {
1589 Temp lower = bld.tmp(v1);
1590 Temp borrow = bld.vsub32(Definition(lower), src00, src10, true).def(1).getTemp();
1591 Temp upper = bld.vsub32(bld.def(v1), src01, src11, false, borrow);
1592 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
1593 } else {
1594 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1595 nir_print_instr(&instr->instr, stderr);
1596 fprintf(stderr, "\n");
1597 }
1598 break;
1599 }
1600 case nir_op_usub_borrow: {
1601 Temp src0 = get_alu_src(ctx, instr->src[0]);
1602 Temp src1 = get_alu_src(ctx, instr->src[1]);
1603 if (dst.regClass() == s1) {
1604 bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(dst)), src0, src1);
1605 break;
1606 } else if (dst.regClass() == v1) {
1607 Temp borrow = bld.vsub32(bld.def(v1), src0, src1, true).def(1).getTemp();
1608 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand(1u), borrow);
1609 break;
1610 }
1611
1612 Temp src00 = bld.tmp(src0.type(), 1);
1613 Temp src01 = bld.tmp(dst.type(), 1);
1614 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
1615 Temp src10 = bld.tmp(src1.type(), 1);
1616 Temp src11 = bld.tmp(dst.type(), 1);
1617 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
1618 if (dst.regClass() == s2) {
1619 Temp borrow = bld.tmp(s1);
1620 bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(borrow)), src00, src10);
1621 borrow = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.scc(bld.def(s1)), src01, src11, bld.scc(borrow)).def(1).getTemp();
1622 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), borrow, Operand(0u));
1623 } else if (dst.regClass() == v2) {
1624 Temp borrow = bld.vsub32(bld.def(v1), src00, src10, true).def(1).getTemp();
1625 borrow = bld.vsub32(bld.def(v1), src01, src11, true, Operand(borrow)).def(1).getTemp();
1626 borrow = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), Operand(1u), borrow);
1627 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), borrow, Operand(0u));
1628 } else {
1629 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1630 nir_print_instr(&instr->instr, stderr);
1631 fprintf(stderr, "\n");
1632 }
1633 break;
1634 }
1635 case nir_op_imul: {
1636 if (dst.regClass() == v1) {
1637 bld.vop3(aco_opcode::v_mul_lo_u32, Definition(dst),
1638 get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1639 } else if (dst.regClass() == s1) {
1640 emit_sop2_instruction(ctx, instr, aco_opcode::s_mul_i32, dst, false);
1641 } else {
1642 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1643 nir_print_instr(&instr->instr, stderr);
1644 fprintf(stderr, "\n");
1645 }
1646 break;
1647 }
1648 case nir_op_umul_high: {
1649 if (dst.regClass() == v1) {
1650 bld.vop3(aco_opcode::v_mul_hi_u32, Definition(dst), get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1651 } else if (dst.regClass() == s1 && ctx->options->chip_class >= GFX9) {
1652 bld.sop2(aco_opcode::s_mul_hi_u32, Definition(dst), get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1653 } else if (dst.regClass() == s1) {
1654 Temp tmp = bld.vop3(aco_opcode::v_mul_hi_u32, bld.def(v1), get_alu_src(ctx, instr->src[0]),
1655 as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
1656 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
1657 } else {
1658 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1659 nir_print_instr(&instr->instr, stderr);
1660 fprintf(stderr, "\n");
1661 }
1662 break;
1663 }
1664 case nir_op_imul_high: {
1665 if (dst.regClass() == v1) {
1666 bld.vop3(aco_opcode::v_mul_hi_i32, Definition(dst), get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1667 } else if (dst.regClass() == s1 && ctx->options->chip_class >= GFX9) {
1668 bld.sop2(aco_opcode::s_mul_hi_i32, Definition(dst), get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1669 } else if (dst.regClass() == s1) {
1670 Temp tmp = bld.vop3(aco_opcode::v_mul_hi_i32, bld.def(v1), get_alu_src(ctx, instr->src[0]),
1671 as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
1672 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
1673 } else {
1674 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1675 nir_print_instr(&instr->instr, stderr);
1676 fprintf(stderr, "\n");
1677 }
1678 break;
1679 }
1680 case nir_op_fmul: {
1681 Temp src0 = get_alu_src(ctx, instr->src[0]);
1682 Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1]));
1683 if (dst.regClass() == v2b) {
1684 emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_f16, dst, true);
1685 } else if (dst.regClass() == v1) {
1686 emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_f32, dst, true);
1687 } else if (dst.regClass() == v2) {
1688 bld.vop3(aco_opcode::v_mul_f64, Definition(dst), src0, src1);
1689 } else {
1690 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1691 nir_print_instr(&instr->instr, stderr);
1692 fprintf(stderr, "\n");
1693 }
1694 break;
1695 }
1696 case nir_op_fadd: {
1697 Temp src0 = get_alu_src(ctx, instr->src[0]);
1698 Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1]));
1699 if (dst.regClass() == v2b) {
1700 emit_vop2_instruction(ctx, instr, aco_opcode::v_add_f16, dst, true);
1701 } else if (dst.regClass() == v1) {
1702 emit_vop2_instruction(ctx, instr, aco_opcode::v_add_f32, dst, true);
1703 } else if (dst.regClass() == v2) {
1704 bld.vop3(aco_opcode::v_add_f64, Definition(dst), src0, src1);
1705 } else {
1706 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1707 nir_print_instr(&instr->instr, stderr);
1708 fprintf(stderr, "\n");
1709 }
1710 break;
1711 }
1712 case nir_op_fsub: {
1713 Temp src0 = get_alu_src(ctx, instr->src[0]);
1714 Temp src1 = get_alu_src(ctx, instr->src[1]);
1715 if (dst.regClass() == v2b) {
1716 if (src1.type() == RegType::vgpr || src0.type() != RegType::vgpr)
1717 emit_vop2_instruction(ctx, instr, aco_opcode::v_sub_f16, dst, false);
1718 else
1719 emit_vop2_instruction(ctx, instr, aco_opcode::v_subrev_f16, dst, true);
1720 } else if (dst.regClass() == v1) {
1721 if (src1.type() == RegType::vgpr || src0.type() != RegType::vgpr)
1722 emit_vop2_instruction(ctx, instr, aco_opcode::v_sub_f32, dst, false);
1723 else
1724 emit_vop2_instruction(ctx, instr, aco_opcode::v_subrev_f32, dst, true);
1725 } else if (dst.regClass() == v2) {
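			/* There is no v_sub_f64: emit v_add_f64 and negate the second source via
			 * the VOP3 neg modifier. */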
1726 Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst),
1727 as_vgpr(ctx, src0), as_vgpr(ctx, src1));
1728 VOP3A_instruction* sub = static_cast<VOP3A_instruction*>(add);
1729 sub->neg[1] = true;
1730 } else {
1731 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1732 nir_print_instr(&instr->instr, stderr);
1733 fprintf(stderr, "\n");
1734 }
1735 break;
1736 }
1737 case nir_op_fmax: {
1738 Temp src0 = get_alu_src(ctx, instr->src[0]);
1739 Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1]));
1740 if (dst.regClass() == v2b) {
1741 // TODO: check fp_mode.must_flush_denorms16_64
1742 emit_vop2_instruction(ctx, instr, aco_opcode::v_max_f16, dst, true);
1743 } else if (dst.regClass() == v1) {
1744 emit_vop2_instruction(ctx, instr, aco_opcode::v_max_f32, dst, true, false, ctx->block->fp_mode.must_flush_denorms32);
1745 } else if (dst.regClass() == v2) {
1746 if (ctx->block->fp_mode.must_flush_denorms16_64 && ctx->program->chip_class < GFX9) {
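				/* Presumably v_max_f64 does not flush denormals on its own before
				 * GFX9, so multiply the result by 1.0, which does, when the FP mode
				 * requires flushing. */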
1747 Temp tmp = bld.vop3(aco_opcode::v_max_f64, bld.def(v2), src0, src1);
1748 bld.vop3(aco_opcode::v_mul_f64, Definition(dst), Operand(0x3FF0000000000000lu), tmp);
1749 } else {
1750 bld.vop3(aco_opcode::v_max_f64, Definition(dst), src0, src1);
1751 }
1752 } else {
1753 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1754 nir_print_instr(&instr->instr, stderr);
1755 fprintf(stderr, "\n");
1756 }
1757 break;
1758 }
1759 case nir_op_fmin: {
1760 Temp src0 = get_alu_src(ctx, instr->src[0]);
1761 Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1]));
1762 if (dst.regClass() == v2b) {
1763 // TODO: check fp_mode.must_flush_denorms16_64
1764 emit_vop2_instruction(ctx, instr, aco_opcode::v_min_f16, dst, true);
1765 } else if (dst.regClass() == v1) {
1766 emit_vop2_instruction(ctx, instr, aco_opcode::v_min_f32, dst, true, false, ctx->block->fp_mode.must_flush_denorms32);
1767 } else if (dst.regClass() == v2) {
1768 if (ctx->block->fp_mode.must_flush_denorms16_64 && ctx->program->chip_class < GFX9) {
1769 Temp tmp = bld.vop3(aco_opcode::v_min_f64, bld.def(v2), src0, src1);
1770 bld.vop3(aco_opcode::v_mul_f64, Definition(dst), Operand(0x3FF0000000000000lu), tmp);
1771 } else {
1772 bld.vop3(aco_opcode::v_min_f64, Definition(dst), src0, src1);
1773 }
1774 } else {
1775 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1776 nir_print_instr(&instr->instr, stderr);
1777 fprintf(stderr, "\n");
1778 }
1779 break;
1780 }
1781 case nir_op_fmax3: {
1782 if (dst.regClass() == v2b) {
1783 emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_f16, dst, false);
1784 } else if (dst.regClass() == v1) {
1785 emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_f32, dst, ctx->block->fp_mode.must_flush_denorms32);
1786 } else {
1787 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1788 nir_print_instr(&instr->instr, stderr);
1789 fprintf(stderr, "\n");
1790 }
1791 break;
1792 }
1793 case nir_op_fmin3: {
1794 if (dst.regClass() == v2b) {
1795 emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_f16, dst, false);
1796 } else if (dst.regClass() == v1) {
1797 emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_f32, dst, ctx->block->fp_mode.must_flush_denorms32);
1798 } else {
1799 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1800 nir_print_instr(&instr->instr, stderr);
1801 fprintf(stderr, "\n");
1802 }
1803 break;
1804 }
1805 case nir_op_fmed3: {
1806 if (dst.regClass() == v2b) {
1807 emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_f16, dst, false);
1808 } else if (dst.regClass() == v1) {
1809 emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_f32, dst, ctx->block->fp_mode.must_flush_denorms32);
1810 } else {
1811 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1812 nir_print_instr(&instr->instr, stderr);
1813 fprintf(stderr, "\n");
1814 }
1815 break;
1816 }
1817 case nir_op_umax3: {
1818 if (dst.size() == 1) {
1819 emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_u32, dst);
1820 } else {
1821 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1822 nir_print_instr(&instr->instr, stderr);
1823 fprintf(stderr, "\n");
1824 }
1825 break;
1826 }
1827 case nir_op_umin3: {
1828 if (dst.size() == 1) {
1829 emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_u32, dst);
1830 } else {
1831 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1832 nir_print_instr(&instr->instr, stderr);
1833 fprintf(stderr, "\n");
1834 }
1835 break;
1836 }
1837 case nir_op_umed3: {
1838 if (dst.size() == 1) {
1839 emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_u32, dst);
1840 } else {
1841 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1842 nir_print_instr(&instr->instr, stderr);
1843 fprintf(stderr, "\n");
1844 }
1845 break;
1846 }
1847 case nir_op_imax3: {
1848 if (dst.size() == 1) {
1849 emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_i32, dst);
1850 } else {
1851 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1852 nir_print_instr(&instr->instr, stderr);
1853 fprintf(stderr, "\n");
1854 }
1855 break;
1856 }
1857 case nir_op_imin3: {
1858 if (dst.size() == 1) {
1859 emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_i32, dst);
1860 } else {
1861 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1862 nir_print_instr(&instr->instr, stderr);
1863 fprintf(stderr, "\n");
1864 }
1865 break;
1866 }
1867 case nir_op_imed3: {
1868 if (dst.size() == 1) {
1869 emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_i32, dst);
1870 } else {
1871 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1872 nir_print_instr(&instr->instr, stderr);
1873 fprintf(stderr, "\n");
1874 }
1875 break;
1876 }
1877 case nir_op_cube_face_coord: {
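		/* v_cubema_f32 returns 2.0 * the major axis, so sc * rcp(ma) lands in
		 * [-0.5, 0.5]; v_madak folds the literal 0.5 into the MAD to shift the
		 * face coordinates into [0, 1]. */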
1878 Temp in = get_alu_src(ctx, instr->src[0], 3);
1879 Temp src[3] = { emit_extract_vector(ctx, in, 0, v1),
1880 emit_extract_vector(ctx, in, 1, v1),
1881 emit_extract_vector(ctx, in, 2, v1) };
1882 Temp ma = bld.vop3(aco_opcode::v_cubema_f32, bld.def(v1), src[0], src[1], src[2]);
1883 ma = bld.vop1(aco_opcode::v_rcp_f32, bld.def(v1), ma);
1884 Temp sc = bld.vop3(aco_opcode::v_cubesc_f32, bld.def(v1), src[0], src[1], src[2]);
1885 Temp tc = bld.vop3(aco_opcode::v_cubetc_f32, bld.def(v1), src[0], src[1], src[2]);
1886 sc = bld.vop2(aco_opcode::v_madak_f32, bld.def(v1), sc, ma, Operand(0x3f000000u/*0.5*/));
1887 tc = bld.vop2(aco_opcode::v_madak_f32, bld.def(v1), tc, ma, Operand(0x3f000000u/*0.5*/));
1888 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), sc, tc);
1889 break;
1890 }
1891 case nir_op_cube_face_index: {
1892 Temp in = get_alu_src(ctx, instr->src[0], 3);
1893 Temp src[3] = { emit_extract_vector(ctx, in, 0, v1),
1894 emit_extract_vector(ctx, in, 1, v1),
1895 emit_extract_vector(ctx, in, 2, v1) };
1896 bld.vop3(aco_opcode::v_cubeid_f32, Definition(dst), src[0], src[1], src[2]);
1897 break;
1898 }
1899 case nir_op_bcsel: {
1900 emit_bcsel(ctx, instr, dst);
1901 break;
1902 }
1903 case nir_op_frsq: {
1904 Temp src = get_alu_src(ctx, instr->src[0]);
1905 if (dst.regClass() == v2b) {
1906 emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f16, dst);
1907 } else if (dst.regClass() == v1) {
1908 emit_rsq(ctx, bld, Definition(dst), src);
1909 } else if (dst.regClass() == v2) {
1910 emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f64, dst);
1911 } else {
1912 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1913 nir_print_instr(&instr->instr, stderr);
1914 fprintf(stderr, "\n");
1915 }
1916 break;
1917 }
1918 case nir_op_fneg: {
1919 Temp src = get_alu_src(ctx, instr->src[0]);
1920 if (dst.regClass() == v2b) {
1921 bld.vop2(aco_opcode::v_xor_b32, Definition(dst), Operand(0x8000u), as_vgpr(ctx, src));
1922 } else if (dst.regClass() == v1) {
1923 if (ctx->block->fp_mode.must_flush_denorms32)
1924 src = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x3f800000u), as_vgpr(ctx, src));
1925 bld.vop2(aco_opcode::v_xor_b32, Definition(dst), Operand(0x80000000u), as_vgpr(ctx, src));
1926 } else if (dst.regClass() == v2) {
1927 if (ctx->block->fp_mode.must_flush_denorms16_64)
1928 src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand(0x3FF0000000000000lu), as_vgpr(ctx, src));
1929 Temp upper = bld.tmp(v1), lower = bld.tmp(v1);
1930 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
1931 upper = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), Operand(0x80000000u), upper);
1932 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
1933 } else {
1934 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1935 nir_print_instr(&instr->instr, stderr);
1936 fprintf(stderr, "\n");
1937 }
1938 break;
1939 }
1940 case nir_op_fabs: {
1941 Temp src = get_alu_src(ctx, instr->src[0]);
1942 if (dst.regClass() == v2b) {
1943 bld.vop2(aco_opcode::v_and_b32, Definition(dst), Operand(0x7FFFu), as_vgpr(ctx, src));
1944 } else if (dst.regClass() == v1) {
1945 if (ctx->block->fp_mode.must_flush_denorms32)
1946 src = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x3f800000u), as_vgpr(ctx, src));
1947 bld.vop2(aco_opcode::v_and_b32, Definition(dst), Operand(0x7FFFFFFFu), as_vgpr(ctx, src));
1948 } else if (dst.regClass() == v2) {
1949 if (ctx->block->fp_mode.must_flush_denorms16_64)
1950 src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand(0x3FF0000000000000lu), as_vgpr(ctx, src));
1951 Temp upper = bld.tmp(v1), lower = bld.tmp(v1);
1952 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
1953 upper = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7FFFFFFFu), upper);
1954 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
1955 } else {
1956 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1957 nir_print_instr(&instr->instr, stderr);
1958 fprintf(stderr, "\n");
1959 }
1960 break;
1961 }
1962 case nir_op_fsat: {
1963 Temp src = get_alu_src(ctx, instr->src[0]);
1964 if (dst.regClass() == v2b) {
1965 bld.vop3(aco_opcode::v_med3_f16, Definition(dst), Operand((uint16_t)0u), Operand((uint16_t)0x3c00), src);
1966 } else if (dst.regClass() == v1) {
1967 bld.vop3(aco_opcode::v_med3_f32, Definition(dst), Operand(0u), Operand(0x3f800000u), src);
1968 /* apparently, it is not necessary to flush denorms if this instruction is used with these operands */
1969 // TODO: confirm that this holds under any circumstances
1970 } else if (dst.regClass() == v2) {
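			/* No v_med3_f64: add +0.0 with the VOP3 clamp bit set, which clamps the
			 * result to [0.0, 1.0]. */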
1971 Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), src, Operand(0u));
1972 VOP3A_instruction* vop3 = static_cast<VOP3A_instruction*>(add);
1973 vop3->clamp = true;
1974 } else {
1975 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1976 nir_print_instr(&instr->instr, stderr);
1977 fprintf(stderr, "\n");
1978 }
1979 break;
1980 }
1981 case nir_op_flog2: {
1982 Temp src = get_alu_src(ctx, instr->src[0]);
1983 if (dst.regClass() == v2b) {
1984 emit_vop1_instruction(ctx, instr, aco_opcode::v_log_f16, dst);
1985 } else if (dst.regClass() == v1) {
1986 emit_log2(ctx, bld, Definition(dst), src);
1987 } else {
1988 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1989 nir_print_instr(&instr->instr, stderr);
1990 fprintf(stderr, "\n");
1991 }
1992 break;
1993 }
1994 case nir_op_frcp: {
1995 Temp src = get_alu_src(ctx, instr->src[0]);
1996 if (dst.regClass() == v2b) {
1997 emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f16, dst);
1998 } else if (dst.regClass() == v1) {
1999 emit_rcp(ctx, bld, Definition(dst), src);
2000 } else if (dst.regClass() == v2) {
2001 emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f64, dst);
2002 } else {
2003 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2004 nir_print_instr(&instr->instr, stderr);
2005 fprintf(stderr, "\n");
2006 }
2007 break;
2008 }
2009 case nir_op_fexp2: {
2010 if (dst.regClass() == v2b) {
2011 emit_vop1_instruction(ctx, instr, aco_opcode::v_exp_f16, dst);
2012 } else if (dst.regClass() == v1) {
2013 emit_vop1_instruction(ctx, instr, aco_opcode::v_exp_f32, dst);
2014 } else {
2015 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2016 nir_print_instr(&instr->instr, stderr);
2017 fprintf(stderr, "\n");
2018 }
2019 break;
2020 }
2021 case nir_op_fsqrt: {
2022 Temp src = get_alu_src(ctx, instr->src[0]);
2023 if (dst.regClass() == v2b) {
2024 emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f16, dst);
2025 } else if (dst.regClass() == v1) {
2026 emit_sqrt(ctx, bld, Definition(dst), src);
2027 } else if (dst.regClass() == v2) {
2028 emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f64, dst);
2029 } else {
2030 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2031 nir_print_instr(&instr->instr, stderr);
2032 fprintf(stderr, "\n");
2033 }
2034 break;
2035 }
2036 case nir_op_ffract: {
2037 if (dst.regClass() == v2b) {
2038 emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f16, dst);
2039 } else if (dst.regClass() == v1) {
2040 emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f32, dst);
2041 } else if (dst.regClass() == v2) {
2042 emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f64, dst);
2043 } else {
2044 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2045 nir_print_instr(&instr->instr, stderr);
2046 fprintf(stderr, "\n");
2047 }
2048 break;
2049 }
2050 case nir_op_ffloor: {
2051 Temp src = get_alu_src(ctx, instr->src[0]);
2052 if (dst.regClass() == v2b) {
2053 emit_vop1_instruction(ctx, instr, aco_opcode::v_floor_f16, dst);
2054 } else if (dst.regClass() == v1) {
2055 emit_vop1_instruction(ctx, instr, aco_opcode::v_floor_f32, dst);
2056 } else if (dst.regClass() == v2) {
2057 emit_floor_f64(ctx, bld, Definition(dst), src);
2058 } else {
2059 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2060 nir_print_instr(&instr->instr, stderr);
2061 fprintf(stderr, "\n");
2062 }
2063 break;
2064 }
2065 case nir_op_fceil: {
2066 Temp src0 = get_alu_src(ctx, instr->src[0]);
2067 if (dst.regClass() == v2b) {
2068 emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f16, dst);
2069 } else if (dst.regClass() == v1) {
2070 emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f32, dst);
2071 } else if (dst.regClass() == v2) {
2072 if (ctx->options->chip_class >= GFX7) {
2073 emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f64, dst);
2074 } else {
2075 /* GFX6 doesn't support V_CEIL_F64, lower it. */
2076 /* trunc = trunc(src0)
2077 * if (src0 > 0.0 && src0 != trunc)
2078 * trunc += 1.0
2079 */
2080 Temp trunc = emit_trunc_f64(ctx, bld, bld.def(v2), src0);
2081 Temp tmp0 = bld.vopc_e64(aco_opcode::v_cmp_gt_f64, bld.def(bld.lm), src0, Operand(0u));
2082 Temp tmp1 = bld.vopc(aco_opcode::v_cmp_lg_f64, bld.hint_vcc(bld.def(bld.lm)), src0, trunc);
2083 Temp cond = bld.sop2(aco_opcode::s_and_b64, bld.hint_vcc(bld.def(s2)), bld.def(s1, scc), tmp0, tmp1);
2084 Temp add = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), bld.copy(bld.def(v1), Operand(0u)), bld.copy(bld.def(v1), Operand(0x3ff00000u)), cond);
2085 add = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), bld.copy(bld.def(v1), Operand(0u)), add);
2086 bld.vop3(aco_opcode::v_add_f64, Definition(dst), trunc, add);
2087 }
2088 } else {
2089 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2090 nir_print_instr(&instr->instr, stderr);
2091 fprintf(stderr, "\n");
2092 }
2093 break;
2094 }
2095 case nir_op_ftrunc: {
2096 Temp src = get_alu_src(ctx, instr->src[0]);
2097 if (dst.regClass() == v2b) {
2098 emit_vop1_instruction(ctx, instr, aco_opcode::v_trunc_f16, dst);
2099 } else if (dst.regClass() == v1) {
2100 emit_vop1_instruction(ctx, instr, aco_opcode::v_trunc_f32, dst);
2101 } else if (dst.regClass() == v2) {
2102 emit_trunc_f64(ctx, bld, Definition(dst), src);
2103 } else {
2104 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2105 nir_print_instr(&instr->instr, stderr);
2106 fprintf(stderr, "\n");
2107 }
2108 break;
2109 }
2110 case nir_op_fround_even: {
2111 Temp src0 = get_alu_src(ctx, instr->src[0]);
2112 if (dst.regClass() == v2b) {
2113 emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f16, dst);
2114 } else if (dst.regClass() == v1) {
2115 emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f32, dst);
2116 } else if (dst.regClass() == v2) {
2117 if (ctx->options->chip_class >= GFX7) {
2118 emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f64, dst);
2119 } else {
2120 /* GFX6 doesn't support V_RNDNE_F64, lower it. */
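				/* Add and then subtract 2^52 (0x43300000 in the high dword), with the
				 * sign of the source copied in via v_bfi, so the FPU itself rounds to
				 * nearest-even at integer granularity. Sources with |src| above
				 * 0x432fffffffffffff (just below 2^52) have no fractional bits, so the
				 * original value is selected for them at the end. */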
2121 Temp src0_lo = bld.tmp(v1), src0_hi = bld.tmp(v1);
2122 bld.pseudo(aco_opcode::p_split_vector, Definition(src0_lo), Definition(src0_hi), src0);
2123
2124 Temp bitmask = bld.sop1(aco_opcode::s_brev_b32, bld.def(s1), bld.copy(bld.def(s1), Operand(-2u)));
2125 Temp bfi = bld.vop3(aco_opcode::v_bfi_b32, bld.def(v1), bitmask, bld.copy(bld.def(v1), Operand(0x43300000u)), as_vgpr(ctx, src0_hi));
2126 Temp tmp = bld.vop3(aco_opcode::v_add_f64, bld.def(v2), src0, bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(0u), bfi));
2127 Instruction *sub = bld.vop3(aco_opcode::v_add_f64, bld.def(v2), tmp, bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(0u), bfi));
2128 static_cast<VOP3A_instruction*>(sub)->neg[1] = true;
2129 tmp = sub->definitions[0].getTemp();
2130
2131 Temp v = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(-1u), Operand(0x432fffffu));
2132 Instruction* vop3 = bld.vopc_e64(aco_opcode::v_cmp_gt_f64, bld.hint_vcc(bld.def(bld.lm)), src0, v);
2133 static_cast<VOP3A_instruction*>(vop3)->abs[0] = true;
2134 Temp cond = vop3->definitions[0].getTemp();
2135
2136 Temp tmp_lo = bld.tmp(v1), tmp_hi = bld.tmp(v1);
2137 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp_lo), Definition(tmp_hi), tmp);
2138 Temp dst0 = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp_lo, as_vgpr(ctx, src0_lo), cond);
2139 Temp dst1 = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp_hi, as_vgpr(ctx, src0_hi), cond);
2140
2141 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
2142 }
2143 } else {
2144 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2145 nir_print_instr(&instr->instr, stderr);
2146 fprintf(stderr, "\n");
2147 }
2148 break;
2149 }
2150 case nir_op_fsin:
2151 case nir_op_fcos: {
2152 Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0]));
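		/* Despite the name, half_pi holds 1/(2*PI) (0x3118 in fp16, 0x3e22f983 in
		 * fp32): the hardware sin/cos expect the angle pre-scaled so that 1.0 is a
		 * full revolution. */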
2154 if (dst.regClass() == v2b) {
2155 Temp half_pi = bld.copy(bld.def(s1), Operand(0x3118u));
2156 Temp tmp = bld.vop2(aco_opcode::v_mul_f16, bld.def(v1), half_pi, src);
2157 aco_opcode opcode = instr->op == nir_op_fsin ? aco_opcode::v_sin_f16 : aco_opcode::v_cos_f16;
2158 bld.vop1(opcode, Definition(dst), tmp);
2159 } else if (dst.regClass() == v1) {
2160 Temp half_pi = bld.copy(bld.def(s1), Operand(0x3e22f983u));
2161 Temp tmp = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), half_pi, src);
2162
2163 /* before GFX9, v_sin_f32 and v_cos_f32 had a valid input domain of [-256, +256] */
2164 if (ctx->options->chip_class < GFX9)
2165 tmp = bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), tmp);
2166
2167 aco_opcode opcode = instr->op == nir_op_fsin ? aco_opcode::v_sin_f32 : aco_opcode::v_cos_f32;
2168 bld.vop1(opcode, Definition(dst), tmp);
2169 } else {
2170 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2171 nir_print_instr(&instr->instr, stderr);
2172 fprintf(stderr, "\n");
2173 }
2174 break;
2175 }
2176 case nir_op_ldexp: {
2177 Temp src0 = get_alu_src(ctx, instr->src[0]);
2178 Temp src1 = get_alu_src(ctx, instr->src[1]);
2179 if (dst.regClass() == v2b) {
2180 emit_vop2_instruction(ctx, instr, aco_opcode::v_ldexp_f16, dst, false);
2181 } else if (dst.regClass() == v1) {
2182 bld.vop3(aco_opcode::v_ldexp_f32, Definition(dst), as_vgpr(ctx, src0), src1);
2183 } else if (dst.regClass() == v2) {
2184 bld.vop3(aco_opcode::v_ldexp_f64, Definition(dst), as_vgpr(ctx, src0), src1);
2185 } else {
2186 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2187 nir_print_instr(&instr->instr, stderr);
2188 fprintf(stderr, "\n");
2189 }
2190 break;
2191 }
2192 case nir_op_frexp_sig: {
2193 Temp src = get_alu_src(ctx, instr->src[0]);
2194 if (dst.regClass() == v2b) {
2195 bld.vop1(aco_opcode::v_frexp_mant_f16, Definition(dst), src);
2196 } else if (dst.regClass() == v1) {
2197 bld.vop1(aco_opcode::v_frexp_mant_f32, Definition(dst), src);
2198 } else if (dst.regClass() == v2) {
2199 bld.vop1(aco_opcode::v_frexp_mant_f64, Definition(dst), src);
2200 } else {
2201 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2202 nir_print_instr(&instr->instr, stderr);
2203 fprintf(stderr, "\n");
2204 }
2205 break;
2206 }
2207 case nir_op_frexp_exp: {
2208 Temp src = get_alu_src(ctx, instr->src[0]);
2209 if (instr->src[0].src.ssa->bit_size == 16) {
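			/* v_frexp_exp_i16_f16 writes a 16-bit result, and the f16 exponent fits
			 * in a signed byte, so extract the low byte and sign-extend it to 32 bits. */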
2210 Temp tmp = bld.vop1(aco_opcode::v_frexp_exp_i16_f16, bld.def(v1), src);
2211 tmp = bld.pseudo(aco_opcode::p_extract_vector, bld.def(v1b), tmp, Operand(0u));
2212 convert_int(ctx, bld, tmp, 8, 32, true, dst);
2213 } else if (instr->src[0].src.ssa->bit_size == 32) {
2214 bld.vop1(aco_opcode::v_frexp_exp_i32_f32, Definition(dst), src);
2215 } else if (instr->src[0].src.ssa->bit_size == 64) {
2216 bld.vop1(aco_opcode::v_frexp_exp_i32_f64, Definition(dst), src);
2217 } else {
2218 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2219 nir_print_instr(&instr->instr, stderr);
2220 fprintf(stderr, "\n");
2221 }
2222 break;
2223 }
2224 case nir_op_fsign: {
2225 Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0]));
2226 if (dst.regClass() == v2b) {
2227 Temp one = bld.copy(bld.def(v1), Operand(0x3c00u));
2228 Temp minus_one = bld.copy(bld.def(v1), Operand(0xbc00u));
2229 Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f16, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2230 src = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), one, src, cond);
2231 cond = bld.vopc(aco_opcode::v_cmp_le_f16, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2232 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), minus_one, src, cond);
2233 } else if (dst.regClass() == v1) {
2234 Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2235 src = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0x3f800000u), src, cond);
2236 cond = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2237 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0xbf800000u), src, cond);
2238 } else if (dst.regClass() == v2) {
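			/* All possible results (+-1.0, +-0.0) have an all-zero low dword, so the
			 * selection only has to compute the high dword. */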
2239 Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2240 Temp tmp = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0x3FF00000u));
2241 Temp upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, emit_extract_vector(ctx, src, 1, v1), cond);
2242
2243 cond = bld.vopc(aco_opcode::v_cmp_le_f64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2244 tmp = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0xBFF00000u));
2245 upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, upper, cond);
2246
2247 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), Operand(0u), upper);
2248 } else {
2249 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2250 nir_print_instr(&instr->instr, stderr);
2251 fprintf(stderr, "\n");
2252 }
2253 break;
2254 }
2255 case nir_op_f2f16:
2256 case nir_op_f2f16_rtne: {
2257 Temp src = get_alu_src(ctx, instr->src[0]);
2258 if (instr->src[0].src.ssa->bit_size == 64)
2259 src = bld.vop1(aco_opcode::v_cvt_f32_f64, bld.def(v1), src);
2260 bld.vop1(aco_opcode::v_cvt_f16_f32, Definition(dst), src);
2261 break;
2262 }
2263 case nir_op_f2f16_rtz: {
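		/* v_cvt_pkrtz_f16_f32 converts two f32 values to f16 with round-toward-zero
		 * and packs them; the second lane is a don't-care zero here. */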
2264 Temp src = get_alu_src(ctx, instr->src[0]);
2265 if (instr->src[0].src.ssa->bit_size == 64)
2266 src = bld.vop1(aco_opcode::v_cvt_f32_f64, bld.def(v1), src);
2267 bld.vop3(aco_opcode::v_cvt_pkrtz_f16_f32, Definition(dst), src, Operand(0u));
2268 break;
2269 }
2270 case nir_op_f2f32: {
2271 if (instr->src[0].src.ssa->bit_size == 16) {
2272 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f16, dst);
2273 } else if (instr->src[0].src.ssa->bit_size == 64) {
2274 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f64, dst);
2275 } else {
2276 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2277 nir_print_instr(&instr->instr, stderr);
2278 fprintf(stderr, "\n");
2279 }
2280 break;
2281 }
2282 case nir_op_f2f64: {
2283 Temp src = get_alu_src(ctx, instr->src[0]);
2284 if (instr->src[0].src.ssa->bit_size == 16)
2285 src = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
2286 bld.vop1(aco_opcode::v_cvt_f64_f32, Definition(dst), src);
2287 break;
2288 }
2289 case nir_op_i2f16: {
2290 assert(dst.regClass() == v2b);
2291 Temp src = get_alu_src(ctx, instr->src[0]);
2292 if (instr->src[0].src.ssa->bit_size == 8)
2293 src = convert_int(ctx, bld, src, 8, 16, true);
2294 else if (instr->src[0].src.ssa->bit_size == 64)
2295 src = convert_int(ctx, bld, src, 64, 32, false);
2296 bld.vop1(aco_opcode::v_cvt_f16_i16, Definition(dst), src);
2297 break;
2298 }
2299 case nir_op_i2f32: {
2300 assert(dst.size() == 1);
2301 Temp src = get_alu_src(ctx, instr->src[0]);
2302 if (instr->src[0].src.ssa->bit_size <= 16)
2303 src = convert_int(ctx, bld, src, instr->src[0].src.ssa->bit_size, 32, true);
2304 bld.vop1(aco_opcode::v_cvt_f32_i32, Definition(dst), src);
2305 break;
2306 }
2307 case nir_op_i2f64: {
2308 if (instr->src[0].src.ssa->bit_size <= 32) {
2309 Temp src = get_alu_src(ctx, instr->src[0]);
2310 if (instr->src[0].src.ssa->bit_size <= 16)
2311 src = convert_int(ctx, bld, src, instr->src[0].src.ssa->bit_size, 32, true);
2312 bld.vop1(aco_opcode::v_cvt_f64_i32, Definition(dst), src);
2313 } else if (instr->src[0].src.ssa->bit_size == 64) {
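			/* (double)x == (double)lo_u32 + (double)hi_i32 * 2^32: convert the halves
			 * separately (low unsigned, high signed), scale the high half by 2^32
			 * with ldexp, and add. */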
2314 Temp src = get_alu_src(ctx, instr->src[0]);
2315 RegClass rc = RegClass(src.type(), 1);
2316 Temp lower = bld.tmp(rc), upper = bld.tmp(rc);
2317 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
2318 lower = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), lower);
2319 upper = bld.vop1(aco_opcode::v_cvt_f64_i32, bld.def(v2), upper);
2320 upper = bld.vop3(aco_opcode::v_ldexp_f64, bld.def(v2), upper, Operand(32u));
2321 bld.vop3(aco_opcode::v_add_f64, Definition(dst), lower, upper);
2322
2323 } else {
2324 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2325 nir_print_instr(&instr->instr, stderr);
2326 fprintf(stderr, "\n");
2327 }
2328 break;
2329 }
2330 case nir_op_u2f16: {
2331 assert(dst.regClass() == v2b);
2332 Temp src = get_alu_src(ctx, instr->src[0]);
2333 if (instr->src[0].src.ssa->bit_size == 8)
2334 src = convert_int(ctx, bld, src, 8, 16, false);
2335 else if (instr->src[0].src.ssa->bit_size == 64)
2336 src = convert_int(ctx, bld, src, 64, 32, false);
2337 bld.vop1(aco_opcode::v_cvt_f16_u16, Definition(dst), src);
2338 break;
2339 }
2340 case nir_op_u2f32: {
2341 assert(dst.size() == 1);
2342 Temp src = get_alu_src(ctx, instr->src[0]);
2343 if (instr->src[0].src.ssa->bit_size == 8) {
2344 bld.vop1(aco_opcode::v_cvt_f32_ubyte0, Definition(dst), src);
2345 } else {
2346 if (instr->src[0].src.ssa->bit_size == 16)
2347 src = convert_int(ctx, bld, src, instr->src[0].src.ssa->bit_size, 32, true);
2348 bld.vop1(aco_opcode::v_cvt_f32_u32, Definition(dst), src);
2349 }
2350 break;
2351 }
2352 case nir_op_u2f64: {
2353 if (instr->src[0].src.ssa->bit_size <= 32) {
2354 Temp src = get_alu_src(ctx, instr->src[0]);
2355 if (instr->src[0].src.ssa->bit_size <= 16)
2356 src = convert_int(ctx, bld, src, instr->src[0].src.ssa->bit_size, 32, false);
2357 bld.vop1(aco_opcode::v_cvt_f64_u32, Definition(dst), src);
2358 } else if (instr->src[0].src.ssa->bit_size == 64) {
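			/* Same decomposition as i2f64, but the high half is unsigned as well. */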
2359 Temp src = get_alu_src(ctx, instr->src[0]);
2360 RegClass rc = RegClass(src.type(), 1);
2361 Temp lower = bld.tmp(rc), upper = bld.tmp(rc);
2362 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
2363 lower = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), lower);
2364 upper = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), upper);
2365 upper = bld.vop3(aco_opcode::v_ldexp_f64, bld.def(v2), upper, Operand(32u));
2366 bld.vop3(aco_opcode::v_add_f64, Definition(dst), lower, upper);
2367 } else {
2368 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2369 nir_print_instr(&instr->instr, stderr);
2370 fprintf(stderr, "\n");
2371 }
2372 break;
2373 }
2374 case nir_op_f2i8:
2375 case nir_op_f2i16: {
2376 if (instr->src[0].src.ssa->bit_size == 16)
2377 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i16_f16, dst);
2378 else if (instr->src[0].src.ssa->bit_size == 32)
2379 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f32, dst);
2380 else
2381 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f64, dst);
2382 break;
2383 }
2384 case nir_op_f2u8:
2385 case nir_op_f2u16: {
2386 if (instr->src[0].src.ssa->bit_size == 16)
2387 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u16_f16, dst);
2388 else if (instr->src[0].src.ssa->bit_size == 32)
2389 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f32, dst);
2390 else
2391 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f64, dst);
2392 break;
2393 }
2394 case nir_op_f2i32: {
2395 Temp src = get_alu_src(ctx, instr->src[0]);
2396 if (instr->src[0].src.ssa->bit_size == 16) {
2397 Temp tmp = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
2398 if (dst.type() == RegType::vgpr) {
2399 bld.vop1(aco_opcode::v_cvt_i32_f32, Definition(dst), tmp);
2400 } else {
2401 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
2402 bld.vop1(aco_opcode::v_cvt_i32_f32, bld.def(v1), tmp));
2403 }
2404 } else if (instr->src[0].src.ssa->bit_size == 32) {
2405 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f32, dst);
2406 } else if (instr->src[0].src.ssa->bit_size == 64) {
2407 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f64, dst);
2408 } else {
2409 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2410 nir_print_instr(&instr->instr, stderr);
2411 fprintf(stderr, "\n");
2412 }
2413 break;
2414 }
2415 case nir_op_f2u32: {
2416 Temp src = get_alu_src(ctx, instr->src[0]);
2417 if (instr->src[0].src.ssa->bit_size == 16) {
2418 Temp tmp = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
2419 if (dst.type() == RegType::vgpr) {
2420 bld.vop1(aco_opcode::v_cvt_u32_f32, Definition(dst), tmp);
2421 } else {
2422 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
2423 bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), tmp));
2424 }
2425 } else if (instr->src[0].src.ssa->bit_size == 32) {
2426 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f32, dst);
2427 } else if (instr->src[0].src.ssa->bit_size == 64) {
2428 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f64, dst);
2429 } else {
2430 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2431 nir_print_instr(&instr->instr, stderr);
2432 fprintf(stderr, "\n");
2433 }
2434 break;
2435 }
2436 case nir_op_f2i64: {
2437 Temp src = get_alu_src(ctx, instr->src[0]);
2438 if (instr->src[0].src.ssa->bit_size == 16)
2439 src = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
2440
2441 if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::vgpr) {
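			/* Manual f32->i64 conversion: clamp the frexp-style exponent to [0, 64],
			 * place the 24-bit mantissa (implicit leading one included) in the high
			 * bits of a 64-bit value, shift it right by 63 - exponent, and negate via
			 * xor/subtract with the sign mask (two's complement). If 63 - exponent
			 * borrows, the exponent was out of range and the result saturates to
			 * INT64_MAX (bfrev(0xfffffffe) = 0x7fffffff in the high dword). */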
2442 Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f32, bld.def(v1), src);
2443 exponent = bld.vop3(aco_opcode::v_med3_i32, bld.def(v1), Operand(0x0u), exponent, Operand(64u));
2444 Temp mantissa = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7fffffu), src);
2445 Temp sign = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), src);
2446 mantissa = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand(0x800000u), mantissa);
2447 mantissa = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(7u), mantissa);
2448 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(0u), mantissa);
2449 Temp new_exponent = bld.tmp(v1);
2450 Temp borrow = bld.vsub32(Definition(new_exponent), Operand(63u), exponent, true).def(1).getTemp();
2451 if (ctx->program->chip_class >= GFX8)
2452 mantissa = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), new_exponent, mantissa);
2453 else
2454 mantissa = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), mantissa, new_exponent);
2455 Temp saturate = bld.vop1(aco_opcode::v_bfrev_b32, bld.def(v1), Operand(0xfffffffeu));
2456 Temp lower = bld.tmp(v1), upper = bld.tmp(v1);
2457 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
2458 lower = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), lower, Operand(0xffffffffu), borrow);
2459 upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), upper, saturate, borrow);
2460 lower = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), sign, lower);
2461 upper = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), sign, upper);
2462 Temp new_lower = bld.tmp(v1);
2463 borrow = bld.vsub32(Definition(new_lower), lower, sign, true).def(1).getTemp();
2464 Temp new_upper = bld.vsub32(bld.def(v1), upper, sign, false, borrow);
2465 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), new_lower, new_upper);
2466
2467 } else if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::sgpr) {
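			/* Scalar version of the same algorithm. s_bfe_u32 with 0x80017 extracts
			 * (width 8 << 16 | offset 23), i.e. the biased exponent; subtracting 126
			 * yields the same frexp-style exponent as the VGPR path. */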
2468 if (src.type() == RegType::vgpr)
2469 src = bld.as_uniform(src);
2470 Temp exponent = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), src, Operand(0x80017u));
2471 exponent = bld.sop2(aco_opcode::s_sub_i32, bld.def(s1), bld.def(s1, scc), exponent, Operand(126u));
2472 exponent = bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, scc), Operand(0u), exponent);
2473 exponent = bld.sop2(aco_opcode::s_min_i32, bld.def(s1), bld.def(s1, scc), Operand(64u), exponent);
2474 Temp mantissa = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0x7fffffu), src);
2475 Temp sign = bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), src, Operand(31u));
2476 mantissa = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), Operand(0x800000u), mantissa);
2477 mantissa = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), mantissa, Operand(7u));
2478 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(0u), mantissa);
2479 exponent = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc), Operand(63u), exponent);
2480 mantissa = bld.sop2(aco_opcode::s_lshr_b64, bld.def(s2), bld.def(s1, scc), mantissa, exponent);
2481 Temp cond = bld.sopc(aco_opcode::s_cmp_eq_u32, bld.def(s1, scc), exponent, Operand(0xffffffffu)); // exp >= 64
2482 Temp saturate = bld.sop1(aco_opcode::s_brev_b64, bld.def(s2), Operand(0xfffffffeu));
2483 mantissa = bld.sop2(aco_opcode::s_cselect_b64, bld.def(s2), saturate, mantissa, cond);
2484 Temp lower = bld.tmp(s1), upper = bld.tmp(s1);
2485 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
2486 lower = bld.sop2(aco_opcode::s_xor_b32, bld.def(s1), bld.def(s1, scc), sign, lower);
2487 upper = bld.sop2(aco_opcode::s_xor_b32, bld.def(s1), bld.def(s1, scc), sign, upper);
2488 Temp borrow = bld.tmp(s1);
2489 lower = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(borrow)), lower, sign);
2490 			upper = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.def(s1, scc), upper, sign, bld.scc(borrow));
2491 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
2492
2493 } else if (instr->src[0].src.ssa->bit_size == 64) {
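			/* Split the truncated double into 32-bit halves: hi = floor(trunc * 2^-32)
			 * (0x3df00000 is the high dword of 2^-32), then lo = fma(hi, -2^32, trunc)
			 * (0xc1f00000 is the high dword of -2^32). The high half converts signed,
			 * the low half unsigned. */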
2494 Temp vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(0u), Operand(0x3df00000u));
2495 Temp trunc = emit_trunc_f64(ctx, bld, bld.def(v2), src);
2496 Temp mul = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), trunc, vec);
2497 vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(0u), Operand(0xc1f00000u));
2498 Temp floor = emit_floor_f64(ctx, bld, bld.def(v2), mul);
2499 Temp fma = bld.vop3(aco_opcode::v_fma_f64, bld.def(v2), floor, vec, trunc);
2500 Temp lower = bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), fma);
2501 Temp upper = bld.vop1(aco_opcode::v_cvt_i32_f64, bld.def(v1), floor);
2502 if (dst.type() == RegType::sgpr) {
2503 lower = bld.as_uniform(lower);
2504 upper = bld.as_uniform(upper);
2505 }
2506 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
2507
2508 } else {
2509 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2510 nir_print_instr(&instr->instr, stderr);
2511 fprintf(stderr, "\n");
2512 }
2513 break;
2514 }
2515 case nir_op_f2u64: {
2516 Temp src = get_alu_src(ctx, instr->src[0]);
2517 if (instr->src[0].src.ssa->bit_size == 16)
2518 src = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
2519
2520 if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::vgpr) {
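			/* Like f2i64, but unsigned: exponents below 24 shift the mantissa right
			 * within 32 bits (the "small" path), larger ones shift the 64-bit
			 * mantissa left by exponent - 24; out-of-range exponents saturate the
			 * result to UINT64_MAX. */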
2521 Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f32, bld.def(v1), src);
2522 Temp exponent_in_range = bld.vopc(aco_opcode::v_cmp_ge_i32, bld.hint_vcc(bld.def(bld.lm)), Operand(64u), exponent);
2523 exponent = bld.vop2(aco_opcode::v_max_i32, bld.def(v1), Operand(0x0u), exponent);
2524 Temp mantissa = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7fffffu), src);
2525 mantissa = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand(0x800000u), mantissa);
2526 Temp exponent_small = bld.vsub32(bld.def(v1), Operand(24u), exponent);
2527 Temp small = bld.vop2(aco_opcode::v_lshrrev_b32, bld.def(v1), exponent_small, mantissa);
2528 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(0u), mantissa);
2529 Temp new_exponent = bld.tmp(v1);
2530 Temp cond_small = bld.vsub32(Definition(new_exponent), exponent, Operand(24u), true).def(1).getTemp();
2531 if (ctx->program->chip_class >= GFX8)
2532 mantissa = bld.vop3(aco_opcode::v_lshlrev_b64, bld.def(v2), new_exponent, mantissa);
2533 else
2534 mantissa = bld.vop3(aco_opcode::v_lshl_b64, bld.def(v2), mantissa, new_exponent);
2535 Temp lower = bld.tmp(v1), upper = bld.tmp(v1);
2536 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
2537 lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), lower, small, cond_small);
2538 upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), upper, Operand(0u), cond_small);
2539 lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0xffffffffu), lower, exponent_in_range);
2540 upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0xffffffffu), upper, exponent_in_range);
2541 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
2542
2543 } else if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::sgpr) {
2544 if (src.type() == RegType::vgpr)
2545 src = bld.as_uniform(src);
2546 Temp exponent = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), src, Operand(0x80017u));
2547 exponent = bld.sop2(aco_opcode::s_sub_i32, bld.def(s1), bld.def(s1, scc), exponent, Operand(126u));
2548 exponent = bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, scc), Operand(0u), exponent);
2549 Temp mantissa = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0x7fffffu), src);
2550 mantissa = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), Operand(0x800000u), mantissa);
2551 Temp exponent_small = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc), Operand(24u), exponent);
2552 Temp small = bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc), mantissa, exponent_small);
2553 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(0u), mantissa);
2554 Temp exponent_large = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc), exponent, Operand(24u));
2555 mantissa = bld.sop2(aco_opcode::s_lshl_b64, bld.def(s2), bld.def(s1, scc), mantissa, exponent_large);
2556 Temp cond = bld.sopc(aco_opcode::s_cmp_ge_i32, bld.def(s1, scc), Operand(64u), exponent);
2557 mantissa = bld.sop2(aco_opcode::s_cselect_b64, bld.def(s2), mantissa, Operand(0xffffffffu), cond);
2558 Temp lower = bld.tmp(s1), upper = bld.tmp(s1);
2559 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
2560 Temp cond_small = bld.sopc(aco_opcode::s_cmp_le_i32, bld.def(s1, scc), exponent, Operand(24u));
2561 lower = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), small, lower, cond_small);
2562 upper = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), Operand(0u), upper, cond_small);
2563 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
2564
2565 } else if (instr->src[0].src.ssa->bit_size == 64) {
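			/* Same 2^32 decomposition as in f2i64, with both halves converted
			 * unsigned. */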
2566 Temp vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(0u), Operand(0x3df00000u));
2567 Temp trunc = emit_trunc_f64(ctx, bld, bld.def(v2), src);
2568 Temp mul = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), trunc, vec);
2569 vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(0u), Operand(0xc1f00000u));
2570 Temp floor = emit_floor_f64(ctx, bld, bld.def(v2), mul);
2571 Temp fma = bld.vop3(aco_opcode::v_fma_f64, bld.def(v2), floor, vec, trunc);
2572 Temp lower = bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), fma);
2573 Temp upper = bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), floor);
2574 if (dst.type() == RegType::sgpr) {
2575 lower = bld.as_uniform(lower);
2576 upper = bld.as_uniform(upper);
2577 }
2578 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
2579
2580 } else {
2581 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2582 nir_print_instr(&instr->instr, stderr);
2583 fprintf(stderr, "\n");
2584 }
2585 break;
2586 }
2587 case nir_op_b2f16: {
2588 Temp src = get_alu_src(ctx, instr->src[0]);
2589 assert(src.regClass() == bld.lm);
2590
2591 if (dst.regClass() == s1) {
2592 src = bool_to_scalar_condition(ctx, src);
2593 bld.sop2(aco_opcode::s_mul_i32, Definition(dst), Operand(0x3c00u), src);
2594 } else if (dst.regClass() == v2b) {
2595 Temp one = bld.copy(bld.def(v1), Operand(0x3c00u));
2596 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), one, src);
2597 } else {
2598 unreachable("Wrong destination register class for nir_op_b2f16.");
2599 }
2600 break;
2601 }
2602 case nir_op_b2f32: {
2603 Temp src = get_alu_src(ctx, instr->src[0]);
2604 assert(src.regClass() == bld.lm);
2605
2606 if (dst.regClass() == s1) {
2607 src = bool_to_scalar_condition(ctx, src);
2608 bld.sop2(aco_opcode::s_mul_i32, Definition(dst), Operand(0x3f800000u), src);
2609 } else if (dst.regClass() == v1) {
2610 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand(0x3f800000u), src);
2611 } else {
2612 unreachable("Wrong destination register class for nir_op_b2f32.");
2613 }
2614 break;
2615 }
2616 case nir_op_b2f64: {
2617 Temp src = get_alu_src(ctx, instr->src[0]);
2618 assert(src.regClass() == bld.lm);
2619
2620 if (dst.regClass() == s2) {
2621 src = bool_to_scalar_condition(ctx, src);
2622 			bld.sop2(aco_opcode::s_cselect_b64, Definition(dst), Operand(0x3FF0000000000000lu), Operand(0u), bld.scc(src));
2623 } else if (dst.regClass() == v2) {
2624 			Temp one = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0x3FF00000u));
2625 Temp upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), one, src);
2626 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), Operand(0u), upper);
2627 } else {
2628 unreachable("Wrong destination register class for nir_op_b2f64.");
2629 }
2630 break;
2631 }
2632 case nir_op_i2i8:
2633 case nir_op_i2i16:
2634 case nir_op_i2i32:
2635 case nir_op_i2i64: {
2636 convert_int(ctx, bld, get_alu_src(ctx, instr->src[0]),
2637 instr->src[0].src.ssa->bit_size, instr->dest.dest.ssa.bit_size, true, dst);
2638 break;
2639 }
2640 case nir_op_u2u8:
2641 case nir_op_u2u16:
2642 case nir_op_u2u32:
2643 case nir_op_u2u64: {
2644 convert_int(ctx, bld, get_alu_src(ctx, instr->src[0]),
2645 instr->src[0].src.ssa->bit_size, instr->dest.dest.ssa.bit_size, false, dst);
2646 break;
2647 }
2648 case nir_op_b2b32:
2649 case nir_op_b2i32: {
2650 Temp src = get_alu_src(ctx, instr->src[0]);
2651 assert(src.regClass() == bld.lm);
2652
2653 if (dst.regClass() == s1) {
2654 // TODO: in a post-RA optimization, we can check if src is in VCC, and directly use VCCNZ
2655 bool_to_scalar_condition(ctx, src, dst);
2656 } else if (dst.regClass() == v1) {
2657 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand(1u), src);
2658 } else {
2659 unreachable("Invalid register class for b2i32");
2660 }
2661 break;
2662 }
2663 case nir_op_b2b1:
2664 case nir_op_i2b1: {
2665 Temp src = get_alu_src(ctx, instr->src[0]);
2666 assert(dst.regClass() == bld.lm);
2667
2668 if (src.type() == RegType::vgpr) {
2669 assert(src.regClass() == v1 || src.regClass() == v2);
2670 assert(dst.regClass() == bld.lm);
2671 bld.vopc(src.size() == 2 ? aco_opcode::v_cmp_lg_u64 : aco_opcode::v_cmp_lg_u32,
2672 Definition(dst), Operand(0u), src).def(0).setHint(vcc);
2673 } else {
2674 assert(src.regClass() == s1 || src.regClass() == s2);
2675 Temp tmp;
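/* GFX6/GFX7 lack s_cmp_lg_u64; OR the two halves instead and use the SCC
 * result, which is set iff the 64-bit value is non-zero. */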
2676 if (src.regClass() == s2 && ctx->program->chip_class <= GFX7) {
2677 tmp = bld.sop2(aco_opcode::s_or_b64, bld.def(s2), bld.def(s1, scc), Operand(0u), src).def(1).getTemp();
2678 } else {
2679 tmp = bld.sopc(src.size() == 2 ? aco_opcode::s_cmp_lg_u64 : aco_opcode::s_cmp_lg_u32,
2680 bld.scc(bld.def(s1)), Operand(0u), src);
2681 }
2682 bool_to_vector_condition(ctx, tmp, dst);
2683 }
2684 break;
2685 }
2686 case nir_op_pack_64_2x32_split: {
2687 Temp src0 = get_alu_src(ctx, instr->src[0]);
2688 Temp src1 = get_alu_src(ctx, instr->src[1]);
2689
2690 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src0, src1);
2691 break;
2692 }
2693 case nir_op_unpack_64_2x32_split_x:
2694 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(dst.regClass()), get_alu_src(ctx, instr->src[0]));
2695 break;
2696 case nir_op_unpack_64_2x32_split_y:
2697 bld.pseudo(aco_opcode::p_split_vector, bld.def(dst.regClass()), Definition(dst), get_alu_src(ctx, instr->src[0]));
2698 break;
2699 case nir_op_unpack_32_2x16_split_x:
2700 if (dst.type() == RegType::vgpr) {
2701 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(dst.regClass()), get_alu_src(ctx, instr->src[0]));
2702 } else {
2703 bld.copy(Definition(dst), get_alu_src(ctx, instr->src[0]));
2704 }
2705 break;
2706 case nir_op_unpack_32_2x16_split_y:
2707 if (dst.type() == RegType::vgpr) {
2708 bld.pseudo(aco_opcode::p_split_vector, bld.def(dst.regClass()), Definition(dst), get_alu_src(ctx, instr->src[0]));
2709 } else {
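/* s_bfe_u32 encodes the field in src1 as (width << 16) | offset:
 * extract 16 bits starting at bit 16. */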
2710 bld.sop2(aco_opcode::s_bfe_u32, Definition(dst), bld.def(s1, scc), get_alu_src(ctx, instr->src[0]), Operand(uint32_t(16 << 16 | 16)));
2711 }
2712 break;
2713 case nir_op_pack_32_2x16_split: {
2714 Temp src0 = get_alu_src(ctx, instr->src[0]);
2715 Temp src1 = get_alu_src(ctx, instr->src[1]);
2716 if (dst.regClass() == v1) {
2717 src0 = emit_extract_vector(ctx, src0, 0, v2b);
2718 src1 = emit_extract_vector(ctx, src1, 0, v2b);
2719 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src0, src1);
2720 } else {
2721 src0 = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), src0, Operand(0xFFFFu));
2722 src1 = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), src1, Operand(16u));
2723 bld.sop2(aco_opcode::s_or_b32, Definition(dst), bld.def(s1, scc), src0, src1);
2724 }
2725 break;
2726 }
2727 case nir_op_pack_half_2x16: {
2728 Temp src = get_alu_src(ctx, instr->src[0], 2);
2729
2730 if (dst.regClass() == v1) {
2731 Temp src0 = bld.tmp(v1);
2732 Temp src1 = bld.tmp(v1);
2733 bld.pseudo(aco_opcode::p_split_vector, Definition(src0), Definition(src1), src);
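/* v_cvt_pkrtz_f16_f32 always rounds towards zero, so it can only be used when
 * that matches the current rounding mode; otherwise convert each half
 * separately and pack the raw 16-bit patterns with v_cvt_pk_u16_u32. */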
2734 if (!ctx->block->fp_mode.care_about_round32 || ctx->block->fp_mode.round32 == fp_round_tz)
2735 bld.vop3(aco_opcode::v_cvt_pkrtz_f16_f32, Definition(dst), src0, src1);
2736 else
2737 bld.vop3(aco_opcode::v_cvt_pk_u16_u32, Definition(dst),
2738 bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v1), src0),
2739 bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v1), src1));
2740 } else {
2741 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2742 nir_print_instr(&instr->instr, stderr);
2743 fprintf(stderr, "\n");
2744 }
2745 break;
2746 }
2747 case nir_op_unpack_half_2x16_split_x: {
2748 if (dst.regClass() == v1) {
2749 bld.vop1(aco_opcode::v_cvt_f32_f16, Definition(dst), get_alu_src(ctx, instr->src[0]));
2750 } else {
2751 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2752 nir_print_instr(&instr->instr, stderr);
2753 fprintf(stderr, "\n");
2754 }
2755 break;
2756 }
2757 case nir_op_unpack_half_2x16_split_y: {
2758 if (dst.regClass() == v1) {
2759 /* TODO: use SDWA here */
2760 bld.vop1(aco_opcode::v_cvt_f32_f16, Definition(dst),
2761 bld.vop2(aco_opcode::v_lshrrev_b32, bld.def(v1), Operand(16u), as_vgpr(ctx, get_alu_src(ctx, instr->src[0]))));
2762 } else {
2763 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2764 nir_print_instr(&instr->instr, stderr);
2765 fprintf(stderr, "\n");
2766 }
2767 break;
2768 }
2769 case nir_op_fquantize2f16: {
2770 Temp src = get_alu_src(ctx, instr->src[0]);
2771 Temp f16 = bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v1), src);
2772 Temp f32, cmp_res;
2773
2774 if (ctx->program->chip_class >= GFX8) {
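/* Class mask 0x36F covers every float class except +/- denormal, so cmp_res
 * is false exactly for denormals, which then get flushed below. */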
2775 Temp mask = bld.copy(bld.def(s1), Operand(0x36Fu)); /* value is NOT negative/positive denormal value */
2776 cmp_res = bld.vopc_e64(aco_opcode::v_cmp_class_f16, bld.hint_vcc(bld.def(bld.lm)), f16, mask);
2777 f32 = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), f16);
2778 } else {
2779 /* 0x38800000 is the smallest normal half-float value (2^-14) as a 32-bit float,
2780 * so compare the (absolute) result against it and flush to 0 if it's smaller.
2781 */
2782 f32 = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), f16);
2783 Temp smallest = bld.copy(bld.def(s1), Operand(0x38800000u));
2784 Instruction* vop3 = bld.vopc_e64(aco_opcode::v_cmp_nlt_f32, bld.hint_vcc(bld.def(bld.lm)), f32, smallest);
2785 static_cast<VOP3A_instruction*>(vop3)->abs[0] = true;
2786 cmp_res = vop3->definitions[0].getTemp();
2787 }
2788
2789 if (ctx->block->fp_mode.preserve_signed_zero_inf_nan32 || ctx->program->chip_class < GFX8) {
2790 Temp copysign_0 = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0u), as_vgpr(ctx, src));
2791 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), copysign_0, f32, cmp_res);
2792 } else {
2793 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), f32, cmp_res);
2794 }
2795 break;
2796 }
2797 case nir_op_bfm: {
2798 Temp bits = get_alu_src(ctx, instr->src[0]);
2799 Temp offset = get_alu_src(ctx, instr->src[1]);
2800
2801 if (dst.regClass() == s1) {
2802 bld.sop2(aco_opcode::s_bfm_b32, Definition(dst), bits, offset);
2803 } else if (dst.regClass() == v1) {
2804 bld.vop3(aco_opcode::v_bfm_b32, Definition(dst), bits, offset);
2805 } else {
2806 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2807 nir_print_instr(&instr->instr, stderr);
2808 fprintf(stderr, "\n");
2809 }
2810 break;
2811 }
2812 case nir_op_bitfield_select: {
2813 /* (mask & insert) | (~mask & base) */
2814 Temp bitmask = get_alu_src(ctx, instr->src[0]);
2815 Temp insert = get_alu_src(ctx, instr->src[1]);
2816 Temp base = get_alu_src(ctx, instr->src[2]);
2817
2818 /* dst = (insert & bitmask) | (base & ~bitmask) */
2819 if (dst.regClass() == s1) {
2820 aco_ptr<Instruction> sop2;
2821 nir_const_value* const_bitmask = nir_src_as_const_value(instr->src[0].src);
2822 nir_const_value* const_insert = nir_src_as_const_value(instr->src[1].src);
2823 Operand lhs;
2824 if (const_insert && const_bitmask) {
2825 lhs = Operand(const_insert->u32 & const_bitmask->u32);
2826 } else {
2827 insert = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), insert, bitmask);
2828 lhs = Operand(insert);
2829 }
2830
2831 Operand rhs;
2832 nir_const_value* const_base = nir_src_as_const_value(instr->src[2].src);
2833 if (const_base && const_bitmask) {
2834 rhs = Operand(const_base->u32 & ~const_bitmask->u32);
2835 } else {
2836 base = bld.sop2(aco_opcode::s_andn2_b32, bld.def(s1), bld.def(s1, scc), base, bitmask);
2837 rhs = Operand(base);
2838 }
2839
2840 bld.sop2(aco_opcode::s_or_b32, Definition(dst), bld.def(s1, scc), rhs, lhs);
2841
2842 } else if (dst.regClass() == v1) {
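/* v_bfi_b32 computes (bitmask & insert) | (~bitmask & base); the VALU
 * constant bus is limited, so keep at most one operand in an SGPR. */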
2843 if (base.type() == RegType::sgpr && (bitmask.type() == RegType::sgpr || (insert.type() == RegType::sgpr)))
2844 base = as_vgpr(ctx, base);
2845 if (insert.type() == RegType::sgpr && bitmask.type() == RegType::sgpr)
2846 insert = as_vgpr(ctx, insert);
2847
2848 bld.vop3(aco_opcode::v_bfi_b32, Definition(dst), bitmask, insert, base);
2849
2850 } else {
2851 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2852 nir_print_instr(&instr->instr, stderr);
2853 fprintf(stderr, "\n");
2854 }
2855 break;
2856 }
2857 case nir_op_ubfe:
2858 case nir_op_ibfe: {
2859 Temp base = get_alu_src(ctx, instr->src[0]);
2860 Temp offset = get_alu_src(ctx, instr->src[1]);
2861 Temp bits = get_alu_src(ctx, instr->src[2]);
2862
2863 if (dst.type() == RegType::sgpr) {
2864 Operand extract;
2865 nir_const_value* const_offset = nir_src_as_const_value(instr->src[1].src);
2866 nir_const_value* const_bits = nir_src_as_const_value(instr->src[2].src);
2867 if (const_offset && const_bits) {
2868 uint32_t const_extract = (const_bits->u32 << 16) | const_offset->u32;
2869 extract = Operand(const_extract);
2870 } else {
2871 Operand width;
2872 if (const_bits) {
2873 width = Operand(const_bits->u32 << 16);
2874 } else {
2875 width = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), bits, Operand(16u));
2876 }
2877 extract = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), offset, width);
2878 }
2879
2880 aco_opcode opcode;
2881 if (dst.regClass() == s1) {
2882 if (instr->op == nir_op_ubfe)
2883 opcode = aco_opcode::s_bfe_u32;
2884 else
2885 opcode = aco_opcode::s_bfe_i32;
2886 } else if (dst.regClass() == s2) {
2887 if (instr->op == nir_op_ubfe)
2888 opcode = aco_opcode::s_bfe_u64;
2889 else
2890 opcode = aco_opcode::s_bfe_i64;
2891 } else {
2892 unreachable("Unsupported BFE bit size");
2893 }
2894
2895 bld.sop2(opcode, Definition(dst), bld.def(s1, scc), base, extract);
2896
2897 } else {
2898 aco_opcode opcode;
2899 if (dst.regClass() == v1) {
2900 if (instr->op == nir_op_ubfe)
2901 opcode = aco_opcode::v_bfe_u32;
2902 else
2903 opcode = aco_opcode::v_bfe_i32;
2904 } else {
2905 unreachable("Unsupported BFE bit size");
2906 }
2907
2908 emit_vop3a_instruction(ctx, instr, opcode, dst);
2909 }
2910 break;
2911 }
2912 case nir_op_bit_count: {
2913 Temp src = get_alu_src(ctx, instr->src[0]);
2914 if (src.regClass() == s1) {
2915 bld.sop1(aco_opcode::s_bcnt1_i32_b32, Definition(dst), bld.def(s1, scc), src);
2916 } else if (src.regClass() == v1) {
2917 bld.vop3(aco_opcode::v_bcnt_u32_b32, Definition(dst), src, Operand(0u));
2918 } else if (src.regClass() == v2) {
2919 bld.vop3(aco_opcode::v_bcnt_u32_b32, Definition(dst),
2920 emit_extract_vector(ctx, src, 1, v1),
2921 bld.vop3(aco_opcode::v_bcnt_u32_b32, bld.def(v1),
2922 emit_extract_vector(ctx, src, 0, v1), Operand(0u)));
2923 } else if (src.regClass() == s2) {
2924 bld.sop1(aco_opcode::s_bcnt1_i32_b64, Definition(dst), bld.def(s1, scc), src);
2925 } else {
2926 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2927 nir_print_instr(&instr->instr, stderr);
2928 fprintf(stderr, "\n");
2929 }
2930 break;
2931 }
2932 case nir_op_flt: {
2933 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_f16, aco_opcode::v_cmp_lt_f32, aco_opcode::v_cmp_lt_f64);
2934 break;
2935 }
2936 case nir_op_fge: {
2937 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_f16, aco_opcode::v_cmp_ge_f32, aco_opcode::v_cmp_ge_f64);
2938 break;
2939 }
2940 case nir_op_feq: {
2941 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_f16, aco_opcode::v_cmp_eq_f32, aco_opcode::v_cmp_eq_f64);
2942 break;
2943 }
2944 case nir_op_fne: {
2945 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_neq_f16, aco_opcode::v_cmp_neq_f32, aco_opcode::v_cmp_neq_f64);
2946 break;
2947 }
2948 case nir_op_ilt: {
2949 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_i16, aco_opcode::v_cmp_lt_i32, aco_opcode::v_cmp_lt_i64, aco_opcode::s_cmp_lt_i32);
2950 break;
2951 }
2952 case nir_op_ige: {
2953 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_i16, aco_opcode::v_cmp_ge_i32, aco_opcode::v_cmp_ge_i64, aco_opcode::s_cmp_ge_i32);
2954 break;
2955 }
2956 case nir_op_ieq: {
2957 if (instr->src[0].src.ssa->bit_size == 1)
2958 emit_boolean_logic(ctx, instr, Builder::s_xnor, dst);
2959 else
2960 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_i16, aco_opcode::v_cmp_eq_i32, aco_opcode::v_cmp_eq_i64, aco_opcode::s_cmp_eq_i32,
2961 ctx->program->chip_class >= GFX8 ? aco_opcode::s_cmp_eq_u64 : aco_opcode::num_opcodes);
2962 break;
2963 }
2964 case nir_op_ine: {
2965 if (instr->src[0].src.ssa->bit_size == 1)
2966 emit_boolean_logic(ctx, instr, Builder::s_xor, dst);
2967 else
2968 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lg_i16, aco_opcode::v_cmp_lg_i32, aco_opcode::v_cmp_lg_i64, aco_opcode::s_cmp_lg_i32,
2969 ctx->program->chip_class >= GFX8 ? aco_opcode::s_cmp_lg_u64 : aco_opcode::num_opcodes);
2970 break;
2971 }
2972 case nir_op_ult: {
2973 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_u16, aco_opcode::v_cmp_lt_u32, aco_opcode::v_cmp_lt_u64, aco_opcode::s_cmp_lt_u32);
2974 break;
2975 }
2976 case nir_op_uge: {
2977 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_u16, aco_opcode::v_cmp_ge_u32, aco_opcode::v_cmp_ge_u64, aco_opcode::s_cmp_ge_u32);
2978 break;
2979 }
2980 case nir_op_fddx:
2981 case nir_op_fddy:
2982 case nir_op_fddx_fine:
2983 case nir_op_fddy_fine:
2984 case nir_op_fddx_coarse:
2985 case nir_op_fddy_coarse: {
2986 Temp src = get_alu_src(ctx, instr->src[0]);
2987 uint16_t dpp_ctrl1, dpp_ctrl2;
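/* Derivatives work on 2x2 quads: dpp_ctrl1 selects the lane(s) to subtract from,
 * dpp_ctrl2 their horizontal (fddx) or vertical (fddy) neighbours. The fine
 * variants keep per-lane pairs, the coarse ones use lane 0 for the whole quad. */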
2988 if (instr->op == nir_op_fddx_fine) {
2989 dpp_ctrl1 = dpp_quad_perm(0, 0, 2, 2);
2990 dpp_ctrl2 = dpp_quad_perm(1, 1, 3, 3);
2991 } else if (instr->op == nir_op_fddy_fine) {
2992 dpp_ctrl1 = dpp_quad_perm(0, 1, 0, 1);
2993 dpp_ctrl2 = dpp_quad_perm(2, 3, 2, 3);
2994 } else {
2995 dpp_ctrl1 = dpp_quad_perm(0, 0, 0, 0);
2996 if (instr->op == nir_op_fddx || instr->op == nir_op_fddx_coarse)
2997 dpp_ctrl2 = dpp_quad_perm(1, 1, 1, 1);
2998 else
2999 dpp_ctrl2 = dpp_quad_perm(2, 2, 2, 2);
3000 }
3001
3002 Temp tmp;
3003 if (ctx->program->chip_class >= GFX8) {
3004 Temp tl = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl1);
3005 tmp = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), src, tl, dpp_ctrl2);
3006 } else {
3007 Temp tl = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl1);
3008 Temp tr = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl2);
3009 tmp = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), tr, tl);
3010 }
3011 emit_wqm(ctx, tmp, dst, true);
3012 break;
3013 }
3014 default:
3015 fprintf(stderr, "Unknown NIR ALU instr: ");
3016 nir_print_instr(&instr->instr, stderr);
3017 fprintf(stderr, "\n");
3018 }
3019 }
3020
3021 void visit_load_const(isel_context *ctx, nir_load_const_instr *instr)
3022 {
3023 Temp dst = get_ssa_temp(ctx, &instr->def);
3024
3025 // TODO: we really want to have the resulting type as this would allow for 64bit literals,
3026 // which otherwise get truncated: the LSBs are dropped for doubles and the MSBs for ints.
3027 // for now, we only use s_mov_b64 with 64bit inline constants
3028 assert(instr->def.num_components == 1 && "Vector load_const should be lowered to scalar.");
3029 assert(dst.type() == RegType::sgpr);
3030
3031 Builder bld(ctx->program, ctx->block);
3032
3033 if (instr->def.bit_size == 1) {
3034 assert(dst.regClass() == bld.lm);
3035 int val = instr->value[0].b ? -1 : 0;
3036 Operand op = bld.lm.size() == 1 ? Operand((uint32_t) val) : Operand((uint64_t) val);
3037 bld.sop1(Builder::s_mov, Definition(dst), op);
3038 } else if (instr->def.bit_size == 8) {
3039 /* ensure that the value is correctly represented in the low byte of the register */
3040 bld.sopk(aco_opcode::s_movk_i32, Definition(dst), instr->value[0].u8);
3041 } else if (instr->def.bit_size == 16) {
3042 /* ensure that the value is correctly represented in the low half of the register */
3043 bld.sopk(aco_opcode::s_movk_i32, Definition(dst), instr->value[0].u16);
3044 } else if (dst.size() == 1) {
3045 bld.copy(Definition(dst), Operand(instr->value[0].u32));
3046 } else {
3047 assert(dst.size() != 1);
3048 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
3049 if (instr->def.bit_size == 64)
3050 for (unsigned i = 0; i < dst.size(); i++)
3051 vec->operands[i] = Operand{(uint32_t)(instr->value[0].u64 >> i * 32)};
3052 else {
3053 for (unsigned i = 0; i < dst.size(); i++)
3054 vec->operands[i] = Operand{instr->value[i].u32};
3055 }
3056 vec->definitions[0] = Definition(dst);
3057 ctx->block->instructions.emplace_back(std::move(vec));
3058 }
3059 }
3060
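/* Expand each bit of the mask to 'multiplier' consecutive bits, e.g.
 * widen_mask(0b101, 2) == 0b110011; used to turn per-component write masks
 * into per-byte or per-dword ones. */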
3061 uint32_t widen_mask(uint32_t mask, unsigned multiplier)
3062 {
3063 uint32_t new_mask = 0;
3064 for(unsigned i = 0; i < 32 && (1u << i) <= mask; ++i)
3065 if (mask & (1u << i))
3066 new_mask |= ((1u << multiplier) - 1u) << (i * multiplier);
3067 return new_mask;
3068 }
3069
3070 struct LoadEmitInfo {
3071 Operand offset;
3072 Temp dst;
3073 unsigned num_components;
3074 unsigned component_size;
3075 Temp resource = Temp(0, s1);
3076 unsigned component_stride = 0;
3077 unsigned const_offset = 0;
3078 unsigned align_mul = 0;
3079 unsigned align_offset = 0;
3080
3081 bool glc = false;
3082 unsigned swizzle_component_size = 0;
3083 barrier_interaction barrier = barrier_none;
3084 bool can_reorder = true;
3085 Temp soffset = Temp(0, s1);
3086 };
3087
3088 using LoadCallback = Temp(*)(
3089 Builder& bld, const LoadEmitInfo* info, Temp offset, unsigned bytes_needed,
3090 unsigned align, unsigned const_offset, Temp dst_hint);
3091
3092 template <LoadCallback callback, bool byte_align_loads, bool supports_8bit_16bit_loads, unsigned max_const_offset_plus_one>
3093 void emit_load(isel_context *ctx, Builder& bld, const LoadEmitInfo *info)
3094 {
3095 unsigned load_size = info->num_components * info->component_size;
3096 unsigned component_size = info->component_size;
3097
3098 unsigned num_vals = 0;
3099 Temp vals[info->dst.bytes()];
3100
3101 unsigned const_offset = info->const_offset;
3102
3103 unsigned align_mul = info->align_mul ? info->align_mul : component_size;
3104 unsigned align_offset = (info->align_offset + const_offset) % align_mul;
3105
3106 unsigned bytes_read = 0;
3107 while (bytes_read < load_size) {
3108 unsigned bytes_needed = load_size - bytes_read;
3109
3110 /* add buffer for unaligned loads */
3111 int byte_align = align_mul % 4 == 0 ? align_offset % 4 : -1;
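/* byte_align is the load's offset within a dword, or -1 if it isn't known at
 * compile time; such loads are widened to dword granularity here and the
 * result is shifted into place further down. */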
3112
3113 if (byte_align) {
3114 if ((bytes_needed > 2 || !supports_8bit_16bit_loads) && byte_align_loads) {
3115 if (info->component_stride) {
3116 assert(supports_8bit_16bit_loads && "unimplemented");
3117 bytes_needed = 2;
3118 byte_align = 0;
3119 } else {
3120 bytes_needed += byte_align == -1 ? 4 - info->align_mul : byte_align;
3121 bytes_needed = align(bytes_needed, 4);
3122 }
3123 } else {
3124 byte_align = 0;
3125 }
3126 }
3127
3128 if (info->swizzle_component_size)
3129 bytes_needed = MIN2(bytes_needed, info->swizzle_component_size);
3130 if (info->component_stride)
3131 bytes_needed = MIN2(bytes_needed, info->component_size);
3132
3133 bool need_to_align_offset = byte_align && (align_mul % 4 || align_offset % 4);
3134
3135 /* reduce constant offset */
3136 Operand offset = info->offset;
3137 unsigned reduced_const_offset = const_offset;
3138 bool remove_const_offset_completely = need_to_align_offset;
3139 if (const_offset && (remove_const_offset_completely || const_offset >= max_const_offset_plus_one)) {
3140 unsigned to_add = const_offset;
3141 if (remove_const_offset_completely) {
3142 reduced_const_offset = 0;
3143 } else {
3144 to_add = const_offset / max_const_offset_plus_one * max_const_offset_plus_one;
3145 reduced_const_offset %= max_const_offset_plus_one;
3146 }
3147 Temp offset_tmp = offset.isTemp() ? offset.getTemp() : Temp();
3148 if (offset.isConstant()) {
3149 offset = Operand(offset.constantValue() + to_add);
3150 } else if (offset_tmp.regClass() == s1) {
3151 offset = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
3152 offset_tmp, Operand(to_add));
3153 } else if (offset_tmp.regClass() == v1) {
3154 offset = bld.vadd32(bld.def(v1), offset_tmp, Operand(to_add));
3155 } else {
3156 Temp lo = bld.tmp(offset_tmp.type(), 1);
3157 Temp hi = bld.tmp(offset_tmp.type(), 1);
3158 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), offset_tmp);
3159
3160 if (offset_tmp.regClass() == s2) {
3161 Temp carry = bld.tmp(s1);
3162 lo = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), lo, Operand(to_add));
3163 hi = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), hi, carry);
3164 offset = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), lo, hi);
3165 } else {
3166 Temp new_lo = bld.tmp(v1);
3167 Temp carry = bld.vadd32(Definition(new_lo), lo, Operand(to_add), true).def(1).getTemp();
3168 hi = bld.vadd32(bld.def(v1), hi, Operand(0u), false, carry);
3169 offset = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), new_lo, hi);
3170 }
3171 }
3172 }
3173
3174 /* align offset down if needed */
3175 Operand aligned_offset = offset;
3176 if (need_to_align_offset) {
3177 Temp offset_tmp = offset.isTemp() ? offset.getTemp() : Temp();
3178 if (offset.isConstant()) {
3179 aligned_offset = Operand(offset.constantValue() & 0xfffffffcu);
3180 } else if (offset_tmp.regClass() == s1) {
3181 aligned_offset = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0xfffffffcu), offset_tmp);
3182 } else if (offset_tmp.regClass() == s2) {
3183 aligned_offset = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), Operand((uint64_t)0xfffffffffffffffcllu), offset_tmp);
3184 } else if (offset_tmp.regClass() == v1) {
3185 aligned_offset = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xfffffffcu), offset_tmp);
3186 } else if (offset_tmp.regClass() == v2) {
3187 Temp hi = bld.tmp(v1), lo = bld.tmp(v1);
3188 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), offset_tmp);
3189 lo = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xfffffffcu), lo);
3190 aligned_offset = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), lo, hi);
3191 }
3192 }
3193 Temp aligned_offset_tmp = aligned_offset.isTemp() ? aligned_offset.getTemp() :
3194 bld.copy(bld.def(s1), aligned_offset);
3195
3196 unsigned align = align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
3197 Temp val = callback(bld, info, aligned_offset_tmp, bytes_needed, align,
3198 reduced_const_offset, byte_align ? Temp() : info->dst);
3199
3200 /* the callback wrote directly to dst */
3201 if (val == info->dst) {
3202 assert(num_vals == 0);
3203 emit_split_vector(ctx, info->dst, info->num_components);
3204 return;
3205 }
3206
3207 /* shift result right if needed */
3208 if (info->component_size < 4 && byte_align_loads) {
3209 Operand align((uint32_t)byte_align);
3210 if (byte_align == -1) {
3211 if (offset.isConstant())
3212 align = Operand(offset.constantValue() % 4u);
3213 else if (offset.size() == 2)
3214 align = Operand(emit_extract_vector(ctx, offset.getTemp(), 0, RegClass(offset.getTemp().type(), 1)));
3215 else
3216 align = offset;
3217 }
3218
3219 assert(val.bytes() >= load_size && "unimplemented");
3220 if (val.type() == RegType::sgpr)
3221 byte_align_scalar(ctx, val, align, info->dst);
3222 else
3223 byte_align_vector(ctx, val, align, info->dst, component_size);
3224 return;
3225 }
3226
3227 /* add result to list and advance */
3228 if (info->component_stride) {
3229 assert(val.bytes() == info->component_size && "unimplemented");
3230 const_offset += info->component_stride;
3231 align_offset = (align_offset + info->component_stride) % align_mul;
3232 } else {
3233 const_offset += val.bytes();
3234 align_offset = (align_offset + val.bytes()) % align_mul;
3235 }
3236 bytes_read += val.bytes();
3237 vals[num_vals++] = val;
3238 }
3239
3240 /* create array of components */
3241 unsigned components_split = 0;
3242 std::array<Temp, NIR_MAX_VEC_COMPONENTS> allocated_vec;
3243 bool has_vgprs = false;
3244 for (unsigned i = 0; i < num_vals;) {
3245 Temp tmp[num_vals];
3246 unsigned num_tmps = 0;
3247 unsigned tmp_size = 0;
3248 RegType reg_type = RegType::sgpr;
3249 while ((!tmp_size || (tmp_size % component_size)) && i < num_vals) {
3250 if (vals[i].type() == RegType::vgpr)
3251 reg_type = RegType::vgpr;
3252 tmp_size += vals[i].bytes();
3253 tmp[num_tmps++] = vals[i++];
3254 }
3255 if (num_tmps > 1) {
3256 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
3257 aco_opcode::p_create_vector, Format::PSEUDO, num_tmps, 1)};
3258 for (unsigned i = 0; i < num_tmps; i++)
3259 vec->operands[i] = Operand(tmp[i]);
3260 tmp[0] = bld.tmp(RegClass::get(reg_type, tmp_size));
3261 vec->definitions[0] = Definition(tmp[0]);
3262 bld.insert(std::move(vec));
3263 }
3264
3265 if (tmp[0].bytes() % component_size) {
3266 /* trim tmp[0] */
3267 assert(i == num_vals);
3268 RegClass new_rc = RegClass::get(reg_type, tmp[0].bytes() / component_size * component_size);
3269 tmp[0] = bld.pseudo(aco_opcode::p_extract_vector, bld.def(new_rc), tmp[0], Operand(0u));
3270 }
3271
3272 RegClass elem_rc = RegClass::get(reg_type, component_size);
3273
3274 unsigned start = components_split;
3275
3276 if (tmp_size == elem_rc.bytes()) {
3277 allocated_vec[components_split++] = tmp[0];
3278 } else {
3279 assert(tmp_size % elem_rc.bytes() == 0);
3280 aco_ptr<Pseudo_instruction> split{create_instruction<Pseudo_instruction>(
3281 aco_opcode::p_split_vector, Format::PSEUDO, 1, tmp_size / elem_rc.bytes())};
3282 for (unsigned i = 0; i < split->definitions.size(); i++) {
3283 Temp component = bld.tmp(elem_rc);
3284 allocated_vec[components_split++] = component;
3285 split->definitions[i] = Definition(component);
3286 }
3287 split->operands[0] = Operand(tmp[0]);
3288 bld.insert(std::move(split));
3289 }
3290
3291 /* try to p_as_uniform early so we can create more optimizable code and
3292 * also update allocated_vec */
3293 for (unsigned j = start; j < components_split; j++) {
3294 if (allocated_vec[j].bytes() % 4 == 0 && info->dst.type() == RegType::sgpr)
3295 allocated_vec[j] = bld.as_uniform(allocated_vec[j]);
3296 has_vgprs |= allocated_vec[j].type() == RegType::vgpr;
3297 }
3298 }
3299
3300 /* concatenate components and p_as_uniform() result if needed */
3301 if (info->dst.type() == RegType::vgpr || !has_vgprs)
3302 ctx->allocated_vec.emplace(info->dst.id(), allocated_vec);
3303
3304 int padding_bytes = MAX2((int)info->dst.bytes() - int(allocated_vec[0].bytes() * info->num_components), 0);
3305
3306 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
3307 aco_opcode::p_create_vector, Format::PSEUDO, info->num_components + !!padding_bytes, 1)};
3308 for (unsigned i = 0; i < info->num_components; i++)
3309 vec->operands[i] = Operand(allocated_vec[i]);
3310 if (padding_bytes)
3311 vec->operands[info->num_components] = Operand(RegClass::get(RegType::vgpr, padding_bytes));
3312 if (info->dst.type() == RegType::sgpr && has_vgprs) {
3313 Temp tmp = bld.tmp(RegType::vgpr, info->dst.size());
3314 vec->definitions[0] = Definition(tmp);
3315 bld.insert(std::move(vec));
3316 bld.pseudo(aco_opcode::p_as_uniform, Definition(info->dst), tmp);
3317 } else {
3318 vec->definitions[0] = Definition(info->dst);
3319 bld.insert(std::move(vec));
3320 }
3321 }
3322
3323 Operand load_lds_size_m0(Builder& bld)
3324 {
3325 /* TODO: m0 does not need to be initialized on GFX9+ */
3326 return bld.m0((Temp)bld.sopk(aco_opcode::s_movk_i32, bld.def(s1, m0), 0xffff));
3327 }
3328
3329 Temp lds_load_callback(Builder& bld, const LoadEmitInfo *info,
3330 Temp offset, unsigned bytes_needed,
3331 unsigned align, unsigned const_offset,
3332 Temp dst_hint)
3333 {
3334 offset = offset.regClass() == s1 ? bld.copy(bld.def(v1), offset) : offset;
3335
3336 Operand m = load_lds_size_m0(bld);
3337
3338 bool large_ds_read = bld.program->chip_class >= GFX7;
3339 bool usable_read2 = bld.program->chip_class >= GFX7;
3340
3341 bool read2 = false;
3342 unsigned size = 0;
3343 aco_opcode op;
3344 //TODO: use ds_read_u8_d16_hi/ds_read_u16_d16_hi if beneficial
3345 if (bytes_needed >= 16 && align % 16 == 0 && large_ds_read) {
3346 size = 16;
3347 op = aco_opcode::ds_read_b128;
3348 } else if (bytes_needed >= 16 && align % 8 == 0 && const_offset % 8 == 0 && usable_read2) {
3349 size = 16;
3350 read2 = true;
3351 op = aco_opcode::ds_read2_b64;
3352 } else if (bytes_needed >= 12 && align % 16 == 0 && large_ds_read) {
3353 size = 12;
3354 op = aco_opcode::ds_read_b96;
3355 } else if (bytes_needed >= 8 && align % 8 == 0) {
3356 size = 8;
3357 op = aco_opcode::ds_read_b64;
3358 } else if (bytes_needed >= 8 && align % 4 == 0 && const_offset % 4 == 0) {
3359 size = 8;
3360 read2 = true;
3361 op = aco_opcode::ds_read2_b32;
3362 } else if (bytes_needed >= 4 && align % 4 == 0) {
3363 size = 4;
3364 op = aco_opcode::ds_read_b32;
3365 } else if (bytes_needed >= 2 && align % 2 == 0) {
3366 size = 2;
3367 op = aco_opcode::ds_read_u16;
3368 } else {
3369 size = 1;
3370 op = aco_opcode::ds_read_u8;
3371 }
3372
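/* read2 encodes two 8-bit offsets in units of the element size (the second one
 * is const_offset + 1, so const_offset may be at most 254 here); plain DS
 * reads take a 16-bit byte offset. */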
3373 unsigned max_offset_plus_one = read2 ? 254 * (size / 2u) + 1 : 65536;
3374 if (const_offset >= max_offset_plus_one) {
3375 offset = bld.vadd32(bld.def(v1), offset, Operand(const_offset / max_offset_plus_one * max_offset_plus_one));
3376 const_offset %= max_offset_plus_one;
3377 }
3378
3379 if (read2)
3380 const_offset /= (size / 2u);
3381
3382 RegClass rc = RegClass(RegType::vgpr, DIV_ROUND_UP(size, 4));
3383 Temp val = rc == info->dst.regClass() && dst_hint.id() ? dst_hint : bld.tmp(rc);
3384 if (read2)
3385 bld.ds(op, Definition(val), offset, m, const_offset, const_offset + 1);
3386 else
3387 bld.ds(op, Definition(val), offset, m, const_offset);
3388
3389 if (size < 4)
3390 val = bld.pseudo(aco_opcode::p_extract_vector, bld.def(RegClass::get(RegType::vgpr, size)), val, Operand(0u));
3391
3392 return val;
3393 }
3394
3395 static auto emit_lds_load = emit_load<lds_load_callback, false, true, UINT32_MAX>;
3396
3397 Temp smem_load_callback(Builder& bld, const LoadEmitInfo *info,
3398 Temp offset, unsigned bytes_needed,
3399 unsigned align, unsigned const_offset,
3400 Temp dst_hint)
3401 {
3402 unsigned size = 0;
3403 aco_opcode op;
3404 if (bytes_needed <= 4) {
3405 size = 1;
3406 op = info->resource.id() ? aco_opcode::s_buffer_load_dword : aco_opcode::s_load_dword;
3407 } else if (bytes_needed <= 8) {
3408 size = 2;
3409 op = info->resource.id() ? aco_opcode::s_buffer_load_dwordx2 : aco_opcode::s_load_dwordx2;
3410 } else if (bytes_needed <= 16) {
3411 size = 4;
3412 op = info->resource.id() ? aco_opcode::s_buffer_load_dwordx4 : aco_opcode::s_load_dwordx4;
3413 } else if (bytes_needed <= 32) {
3414 size = 8;
3415 op = info->resource.id() ? aco_opcode::s_buffer_load_dwordx8 : aco_opcode::s_load_dwordx8;
3416 } else {
3417 size = 16;
3418 op = info->resource.id() ? aco_opcode::s_buffer_load_dwordx16 : aco_opcode::s_load_dwordx16;
3419 }
3420 aco_ptr<SMEM_instruction> load{create_instruction<SMEM_instruction>(op, Format::SMEM, 2, 1)};
3421 if (info->resource.id()) {
3422 load->operands[0] = Operand(info->resource);
3423 load->operands[1] = Operand(offset);
3424 } else {
3425 load->operands[0] = Operand(offset);
3426 load->operands[1] = Operand(0u);
3427 }
3428 RegClass rc(RegType::sgpr, size);
3429 Temp val = dst_hint.id() && dst_hint.regClass() == rc ? dst_hint : bld.tmp(rc);
3430 load->definitions[0] = Definition(val);
3431 load->glc = info->glc;
3432 load->dlc = info->glc && bld.program->chip_class >= GFX10;
3433 load->barrier = info->barrier;
3434 load->can_reorder = false; // FIXME: currently, it doesn't seem beneficial due to how our scheduler works
3435 bld.insert(std::move(load));
3436 return val;
3437 }
3438
3439 static auto emit_smem_load = emit_load<smem_load_callback, true, false, 1024>;
3440
3441 Temp mubuf_load_callback(Builder& bld, const LoadEmitInfo *info,
3442 Temp offset, unsigned bytes_needed,
3443 unsigned align_, unsigned const_offset,
3444 Temp dst_hint)
3445 {
3446 Operand vaddr = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
3447 Operand soffset = offset.type() == RegType::sgpr ? Operand(offset) : Operand((uint32_t) 0);
3448
3449 if (info->soffset.id()) {
3450 if (soffset.isTemp())
3451 vaddr = bld.copy(bld.def(v1), soffset);
3452 soffset = Operand(info->soffset);
3453 }
3454
3455 unsigned bytes_size = 0;
3456 aco_opcode op;
3457 if (bytes_needed == 1) {
3458 bytes_size = 1;
3459 op = aco_opcode::buffer_load_ubyte;
3460 } else if (bytes_needed == 2) {
3461 bytes_size = 2;
3462 op = aco_opcode::buffer_load_ushort;
3463 } else if (bytes_needed <= 4) {
3464 bytes_size = 4;
3465 op = aco_opcode::buffer_load_dword;
3466 } else if (bytes_needed <= 8) {
3467 bytes_size = 8;
3468 op = aco_opcode::buffer_load_dwordx2;
3469 } else if (bytes_needed <= 12 && bld.program->chip_class > GFX6) {
3470 bytes_size = 12;
3471 op = aco_opcode::buffer_load_dwordx3;
3472 } else {
3473 bytes_size = 16;
3474 op = aco_opcode::buffer_load_dwordx4;
3475 }
3476 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
3477 mubuf->operands[0] = Operand(info->resource);
3478 mubuf->operands[1] = vaddr;
3479 mubuf->operands[2] = soffset;
3480 mubuf->offen = (offset.type() == RegType::vgpr);
3481 mubuf->glc = info->glc;
3482 mubuf->dlc = info->glc && bld.program->chip_class >= GFX10;
3483 mubuf->barrier = info->barrier;
3484 mubuf->can_reorder = info->can_reorder;
3485 mubuf->offset = const_offset;
3486 RegClass rc = RegClass::get(RegType::vgpr, align(bytes_size, 4));
3487 Temp val = dst_hint.id() && rc == dst_hint.regClass() ? dst_hint : bld.tmp(rc);
3488 mubuf->definitions[0] = Definition(val);
3489 bld.insert(std::move(mubuf));
3490
3491 return val;
3492 }
3493
3494 static auto emit_mubuf_load = emit_load<mubuf_load_callback, true, true, 4096>;
3495
3496 Temp get_gfx6_global_rsrc(Builder& bld, Temp addr)
3497 {
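/* GFX6 has no FLAT/GLOBAL instructions, so global access goes through MUBUF
 * with a descriptor spanning the whole address space: base = addr (or 0 when
 * addr64 is used with a VGPR address), num_records = -1. */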
3498 uint32_t rsrc_conf = S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
3499 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
3500
3501 if (addr.type() == RegType::vgpr)
3502 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), Operand(0u), Operand(0u), Operand(-1u), Operand(rsrc_conf));
3503 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), addr, Operand(-1u), Operand(rsrc_conf));
3504 }
3505
3506 Temp global_load_callback(Builder& bld, const LoadEmitInfo *info,
3507 Temp offset, unsigned bytes_needed,
3508 unsigned align_, unsigned const_offset,
3509 Temp dst_hint)
3510 {
3511 unsigned bytes_size = 0;
3512 bool mubuf = bld.program->chip_class == GFX6;
3513 bool global = bld.program->chip_class >= GFX9;
3514 aco_opcode op;
3515 if (bytes_needed == 1) {
3516 bytes_size = 1;
3517 op = mubuf ? aco_opcode::buffer_load_ubyte : global ? aco_opcode::global_load_ubyte : aco_opcode::flat_load_ubyte;
3518 } else if (bytes_needed == 2) {
3519 bytes_size = 2;
3520 op = mubuf ? aco_opcode::buffer_load_ushort : global ? aco_opcode::global_load_ushort : aco_opcode::flat_load_ushort;
3521 } else if (bytes_needed <= 4) {
3522 bytes_size = 4;
3523 op = mubuf ? aco_opcode::buffer_load_dword : global ? aco_opcode::global_load_dword : aco_opcode::flat_load_dword;
3524 } else if (bytes_needed <= 8) {
3525 bytes_size = 8;
3526 op = mubuf ? aco_opcode::buffer_load_dwordx2 : global ? aco_opcode::global_load_dwordx2 : aco_opcode::flat_load_dwordx2;
3527 } else if (bytes_needed <= 12 && !mubuf) {
3528 bytes_size = 12;
3529 op = global ? aco_opcode::global_load_dwordx3 : aco_opcode::flat_load_dwordx3;
3530 } else {
3531 bytes_size = 16;
3532 op = mubuf ? aco_opcode::buffer_load_dwordx4 : global ? aco_opcode::global_load_dwordx4 : aco_opcode::flat_load_dwordx4;
3533 }
3534 RegClass rc = RegClass::get(RegType::vgpr, align(bytes_size, 4));
3535 Temp val = dst_hint.id() && rc == dst_hint.regClass() ? dst_hint : bld.tmp(rc);
3536 if (mubuf) {
3537 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
3538 mubuf->operands[0] = Operand(get_gfx6_global_rsrc(bld, offset));
3539 mubuf->operands[1] = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
3540 mubuf->operands[2] = Operand(0u);
3541 mubuf->glc = info->glc;
3542 mubuf->dlc = false;
3543 mubuf->offset = 0;
3544 mubuf->addr64 = offset.type() == RegType::vgpr;
3545 mubuf->disable_wqm = false;
3546 mubuf->barrier = info->barrier;
3547 mubuf->definitions[0] = Definition(val);
3548 bld.insert(std::move(mubuf));
3549 } else {
3550 offset = offset.regClass() == s2 ? bld.copy(bld.def(v2), offset) : offset;
3551
3552 aco_ptr<FLAT_instruction> flat{create_instruction<FLAT_instruction>(op, global ? Format::GLOBAL : Format::FLAT, 2, 1)};
3553 flat->operands[0] = Operand(offset);
3554 flat->operands[1] = Operand(s1);
3555 flat->glc = info->glc;
3556 flat->dlc = info->glc && bld.program->chip_class >= GFX10;
3557 flat->barrier = info->barrier;
3558 flat->offset = 0u;
3559 flat->definitions[0] = Definition(val);
3560 bld.insert(std::move(flat));
3561 }
3562
3563 return val;
3564 }
3565
3566 static auto emit_global_load = emit_load<global_load_callback, true, true, 1>;
3567
3568 Temp load_lds(isel_context *ctx, unsigned elem_size_bytes, Temp dst,
3569 Temp address, unsigned base_offset, unsigned align)
3570 {
3571 assert(util_is_power_of_two_nonzero(align));
3572
3573 Builder bld(ctx->program, ctx->block);
3574
3575 unsigned num_components = dst.bytes() / elem_size_bytes;
3576 LoadEmitInfo info = {Operand(as_vgpr(ctx, address)), dst, num_components, elem_size_bytes};
3577 info.align_mul = align;
3578 info.align_offset = 0;
3579 info.barrier = barrier_shared;
3580 info.can_reorder = false;
3581 info.const_offset = base_offset;
3582 emit_lds_load(ctx, bld, &info);
3583
3584 return dst;
3585 }
3586
3587 void split_store_data(isel_context *ctx, RegType dst_type, unsigned count, Temp *dst, unsigned *offsets, Temp src)
3588 {
3589 if (!count)
3590 return;
3591
3592 Builder bld(ctx->program, ctx->block);
3593
3594 ASSERTED bool is_subdword = false;
3595 for (unsigned i = 0; i < count; i++)
3596 is_subdword |= offsets[i] % 4;
3597 is_subdword |= (src.bytes() - offsets[count - 1]) % 4;
3598 assert(!is_subdword || dst_type == RegType::vgpr);
3599
3600 /* count == 1 fast path */
3601 if (count == 1) {
3602 if (dst_type == RegType::sgpr)
3603 dst[0] = bld.as_uniform(src);
3604 else
3605 dst[0] = as_vgpr(ctx, src);
3606 return;
3607 }
3608
3609 for (unsigned i = 0; i < count - 1; i++)
3610 dst[i] = bld.tmp(RegClass::get(dst_type, offsets[i + 1] - offsets[i]));
3611 dst[count - 1] = bld.tmp(RegClass::get(dst_type, src.bytes() - offsets[count - 1]));
3612
3613 if (is_subdword && src.type() == RegType::sgpr) {
3614 src = as_vgpr(ctx, src);
3615 } else {
3616 /* use allocated_vec if possible */
3617 auto it = ctx->allocated_vec.find(src.id());
3618 if (it != ctx->allocated_vec.end()) {
3619 unsigned total_size = 0;
3620 for (unsigned i = 0; it->second[i].bytes() && (i < NIR_MAX_VEC_COMPONENTS); i++)
3621 total_size += it->second[i].bytes();
3622 if (total_size != src.bytes())
3623 goto split;
3624
3625 unsigned elem_size = it->second[0].bytes();
3626
3627 for (unsigned i = 0; i < count; i++) {
3628 if (offsets[i] % elem_size || dst[i].bytes() % elem_size)
3629 goto split;
3630 }
3631
3632 for (unsigned i = 0; i < count; i++) {
3633 unsigned start_idx = offsets[i] / elem_size;
3634 unsigned op_count = dst[i].bytes() / elem_size;
3635 if (op_count == 1) {
3636 if (dst_type == RegType::sgpr)
3637 dst[i] = bld.as_uniform(it->second[start_idx]);
3638 else
3639 dst[i] = as_vgpr(ctx, it->second[start_idx]);
3640 continue;
3641 }
3642
3643 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, op_count, 1)};
3644 for (unsigned j = 0; j < op_count; j++) {
3645 Temp tmp = it->second[start_idx + j];
3646 if (dst_type == RegType::sgpr)
3647 tmp = bld.as_uniform(tmp);
3648 vec->operands[j] = Operand(tmp);
3649 }
3650 vec->definitions[0] = Definition(dst[i]);
3651 bld.insert(std::move(vec));
3652 }
3653 return;
3654 }
3655 }
3656
3657 if (dst_type == RegType::sgpr)
3658 src = bld.as_uniform(src);
3659
3660 split:
3661 /* just split it */
3662 aco_ptr<Instruction> split{create_instruction<Pseudo_instruction>(aco_opcode::p_split_vector, Format::PSEUDO, 1, count)};
3663 split->operands[0] = Operand(src);
3664 for (unsigned i = 0; i < count; i++)
3665 split->definitions[i] = Definition(dst[i]);
3666 bld.insert(std::move(split));
3667 }
3668
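/* Scan the lowest pending run of todo_mask: returns true with a run of bytes
 * that are all written according to mask, or false with a run that is
 * entirely skipped. */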
3669 bool scan_write_mask(uint32_t mask, uint32_t todo_mask,
3670 int *start, int *count)
3671 {
3672 unsigned start_elem = ffs(todo_mask) - 1;
3673 bool skip = !(mask & (1 << start_elem));
3674 if (skip)
3675 mask = ~mask & todo_mask;
3676
3677 mask &= todo_mask;
3678
3679 u_bit_scan_consecutive_range(&mask, start, count);
3680
3681 return !skip;
3682 }
3683
3684 void advance_write_mask(uint32_t *todo_mask, int start, int count)
3685 {
3686 *todo_mask &= ~u_bit_consecutive(0, count) << start;
3687 }
3688
3689 void store_lds(isel_context *ctx, unsigned elem_size_bytes, Temp data, uint32_t wrmask,
3690 Temp address, unsigned base_offset, unsigned align)
3691 {
3692 assert(util_is_power_of_two_nonzero(align));
3693 assert(util_is_power_of_two_nonzero(elem_size_bytes) && elem_size_bytes <= 8);
3694
3695 Builder bld(ctx->program, ctx->block);
3696 bool large_ds_write = ctx->options->chip_class >= GFX7;
3697 bool usable_write2 = ctx->options->chip_class >= GFX7;
3698
3699 unsigned write_count = 0;
3700 Temp write_datas[32];
3701 unsigned offsets[32];
3702 aco_opcode opcodes[32];
3703
3704 wrmask = widen_mask(wrmask, elem_size_bytes);
3705
3706 uint32_t todo = u_bit_consecutive(0, data.bytes());
3707 while (todo) {
3708 int offset, bytes;
3709 if (!scan_write_mask(wrmask, todo, &offset, &bytes)) {
3710 offsets[write_count] = offset;
3711 opcodes[write_count] = aco_opcode::num_opcodes;
3712 write_count++;
3713 advance_write_mask(&todo, offset, bytes);
3714 continue;
3715 }
3716
3717 bool aligned2 = offset % 2 == 0 && align % 2 == 0;
3718 bool aligned4 = offset % 4 == 0 && align % 4 == 0;
3719 bool aligned8 = offset % 8 == 0 && align % 8 == 0;
3720 bool aligned16 = offset % 16 == 0 && align % 16 == 0;
3721
3722 //TODO: use ds_write_b8_d16_hi/ds_write_b16_d16_hi if beneficial
3723 aco_opcode op = aco_opcode::num_opcodes;
3724 if (bytes >= 16 && aligned16 && large_ds_write) {
3725 op = aco_opcode::ds_write_b128;
3726 bytes = 16;
3727 } else if (bytes >= 12 && aligned16 && large_ds_write) {
3728 op = aco_opcode::ds_write_b96;
3729 bytes = 12;
3730 } else if (bytes >= 8 && aligned8) {
3731 op = aco_opcode::ds_write_b64;
3732 bytes = 8;
3733 } else if (bytes >= 4 && aligned4) {
3734 op = aco_opcode::ds_write_b32;
3735 bytes = 4;
3736 } else if (bytes >= 2 && aligned2) {
3737 op = aco_opcode::ds_write_b16;
3738 bytes = 2;
3739 } else if (bytes >= 1) {
3740 op = aco_opcode::ds_write_b8;
3741 bytes = 1;
3742 } else {
3743 assert(false);
3744 }
3745
3746 offsets[write_count] = offset;
3747 opcodes[write_count] = op;
3748 write_count++;
3749 advance_write_mask(&todo, offset, bytes);
3750 }
3751
3752 Operand m = load_lds_size_m0(bld);
3753
3754 split_store_data(ctx, RegType::vgpr, write_count, write_datas, offsets, data);
3755
3756 for (unsigned i = 0; i < write_count; i++) {
3757 aco_opcode op = opcodes[i];
3758 if (op == aco_opcode::num_opcodes)
3759 continue;
3760
3761 Temp data = write_datas[i];
3762
3763 unsigned second = write_count;
3764 if (usable_write2 && (op == aco_opcode::ds_write_b32 || op == aco_opcode::ds_write_b64)) {
3765 for (second = i + 1; second < write_count; second++) {
3766 if (opcodes[second] == op && (offsets[second] - offsets[i]) % data.bytes() == 0) {
3767 op = data.bytes() == 4 ? aco_opcode::ds_write2_b32 : aco_opcode::ds_write2_b64;
3768 opcodes[second] = aco_opcode::num_opcodes;
3769 break;
3770 }
3771 }
3772 }
3773
3774 bool write2 = op == aco_opcode::ds_write2_b32 || op == aco_opcode::ds_write2_b64;
3775 unsigned write2_off = (offsets[second] - offsets[i]) / data.bytes();
3776
3777 unsigned inline_offset = base_offset + offsets[i];
3778 unsigned max_offset = write2 ? (255 - write2_off) * data.bytes() : 65535;
3779 Temp address_offset = address;
3780 if (inline_offset > max_offset) {
3781 address_offset = bld.vadd32(bld.def(v1), Operand(base_offset), address_offset);
3782 inline_offset = offsets[i];
3783 }
3784 assert(inline_offset <= max_offset); /* offsets[i] shouldn't be large enough for this to happen */
3785
3786 if (write2) {
3787 Temp second_data = write_datas[second];
3788 inline_offset /= data.bytes();
3789 bld.ds(op, address_offset, data, second_data, m, inline_offset, inline_offset + write2_off);
3790 } else {
3791 bld.ds(op, address_offset, data, m, inline_offset);
3792 }
3793 }
3794 }
3795
3796 unsigned calculate_lds_alignment(isel_context *ctx, unsigned const_offset)
3797 {
3798 unsigned align = 16;
3799 if (const_offset)
3800 align = std::min(align, 1u << (ffs(const_offset) - 1));
3801
3802 return align;
3803 }
3804
3805
3806 aco_opcode get_buffer_store_op(bool smem, unsigned bytes)
3807 {
3808 switch (bytes) {
3809 case 1:
3810 assert(!smem);
3811 return aco_opcode::buffer_store_byte;
3812 case 2:
3813 assert(!smem);
3814 return aco_opcode::buffer_store_short;
3815 case 4:
3816 return smem ? aco_opcode::s_buffer_store_dword : aco_opcode::buffer_store_dword;
3817 case 8:
3818 return smem ? aco_opcode::s_buffer_store_dwordx2 : aco_opcode::buffer_store_dwordx2;
3819 case 12:
3820 assert(!smem);
3821 return aco_opcode::buffer_store_dwordx3;
3822 case 16:
3823 return smem ? aco_opcode::s_buffer_store_dwordx4 : aco_opcode::buffer_store_dwordx4;
3824 }
3825 unreachable("Unexpected store size");
3826 return aco_opcode::num_opcodes;
3827 }
3828
3829 void split_buffer_store(isel_context *ctx, nir_intrinsic_instr *instr, bool smem, RegType dst_type,
3830 Temp data, unsigned writemask, int swizzle_element_size,
3831 unsigned *write_count, Temp *write_datas, unsigned *offsets)
3832 {
3833 unsigned write_count_with_skips = 0;
3834 bool skips[16];
3835
3836 /* determine how to split the data */
3837 unsigned todo = u_bit_consecutive(0, data.bytes());
3838 while (todo) {
3839 int offset, bytes;
3840 skips[write_count_with_skips] = !scan_write_mask(writemask, todo, &offset, &bytes);
3841 offsets[write_count_with_skips] = offset;
3842 if (skips[write_count_with_skips]) {
3843 advance_write_mask(&todo, offset, bytes);
3844 write_count_with_skips++;
3845 continue;
3846 }
3847
3848 /* only supported sizes are 1, 2, 4, 8, 12 and 16 bytes and can't be
3849 * larger than swizzle_element_size */
3850 bytes = MIN2(bytes, swizzle_element_size);
3851 if (bytes % 4)
3852 bytes = bytes > 4 ? bytes & ~0x3 : MIN2(bytes, 2);
3853
3854 /* SMEM and GFX6 VMEM can't emit 12-byte stores */
3855 if ((ctx->program->chip_class == GFX6 || smem) && bytes == 12)
3856 bytes = 8;
3857
3858 /* dword or larger stores have to be dword-aligned */
3859 unsigned align_mul = instr ? nir_intrinsic_align_mul(instr) : 4;
3860 unsigned align_offset = instr ? nir_intrinsic_align_offset(instr) : 0;
3861 bool dword_aligned = (align_offset + offset) % 4 == 0 && align_mul % 4 == 0;
3862 if (bytes >= 4 && !dword_aligned)
3863 bytes = MIN2(bytes, 2);
3864
3865 advance_write_mask(&todo, offset, bytes);
3866 write_count_with_skips++;
3867 }
3868
3869 /* actually split data */
3870 split_store_data(ctx, dst_type, write_count_with_skips, write_datas, offsets, data);
3871
3872 /* remove skips */
3873 for (unsigned i = 0; i < write_count_with_skips; i++) {
3874 if (skips[i])
3875 continue;
3876 write_datas[*write_count] = write_datas[i];
3877 offsets[*write_count] = offsets[i];
3878 (*write_count)++;
3879 }
3880 }
3881
3882 Temp create_vec_from_array(isel_context *ctx, Temp arr[], unsigned cnt, RegType reg_type, unsigned elem_size_bytes,
3883 unsigned split_cnt = 0u, Temp dst = Temp())
3884 {
3885 Builder bld(ctx->program, ctx->block);
3886 unsigned dword_size = elem_size_bytes / 4;
3887
3888 if (!dst.id())
3889 dst = bld.tmp(RegClass(reg_type, cnt * dword_size));
3890
3891 std::array<Temp, NIR_MAX_VEC_COMPONENTS> allocated_vec;
3892 aco_ptr<Pseudo_instruction> instr {create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, cnt, 1)};
3893 instr->definitions[0] = Definition(dst);
3894
3895 for (unsigned i = 0; i < cnt; ++i) {
3896 if (arr[i].id()) {
3897 assert(arr[i].size() == dword_size);
3898 allocated_vec[i] = arr[i];
3899 instr->operands[i] = Operand(arr[i]);
3900 } else {
3901 Temp zero = bld.copy(bld.def(RegClass(reg_type, dword_size)), Operand(0u, dword_size == 2));
3902 allocated_vec[i] = zero;
3903 instr->operands[i] = Operand(zero);
3904 }
3905 }
3906
3907 bld.insert(std::move(instr));
3908
3909 if (split_cnt)
3910 emit_split_vector(ctx, dst, split_cnt);
3911 else
3912 ctx->allocated_vec.emplace(dst.id(), allocated_vec); /* emit_split_vector already does this */
3913
3914 return dst;
3915 }
3916
3917 inline unsigned resolve_excess_vmem_const_offset(Builder &bld, Temp &voffset, unsigned const_offset)
3918 {
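/* MUBUF immediate offsets are limited to 12 bits (0..4095), so fold the excess
 * into the voffset register and keep only const_offset % 4096 as the immediate. */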
3919 if (const_offset >= 4096) {
3920 unsigned excess_const_offset = const_offset / 4096u * 4096u;
3921 const_offset %= 4096u;
3922
3923 if (!voffset.id())
3924 voffset = bld.copy(bld.def(v1), Operand(excess_const_offset));
3925 else if (unlikely(voffset.regClass() == s1))
3926 voffset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), Operand(excess_const_offset), Operand(voffset));
3927 else if (likely(voffset.regClass() == v1))
3928 voffset = bld.vadd32(bld.def(v1), Operand(voffset), Operand(excess_const_offset));
3929 else
3930 unreachable("Unsupported register class of voffset");
3931 }
3932
3933 return const_offset;
3934 }
3935
3936 void emit_single_mubuf_store(isel_context *ctx, Temp descriptor, Temp voffset, Temp soffset, Temp vdata,
3937 unsigned const_offset = 0u, bool allow_reorder = true, bool slc = false)
3938 {
3939 assert(vdata.id());
3940 assert(vdata.size() != 3 || ctx->program->chip_class != GFX6);
3941 assert(vdata.size() >= 1 && vdata.size() <= 4);
3942
3943 Builder bld(ctx->program, ctx->block);
3944 aco_opcode op = get_buffer_store_op(false, vdata.bytes());
3945 const_offset = resolve_excess_vmem_const_offset(bld, voffset, const_offset);
3946
3947 Operand voffset_op = voffset.id() ? Operand(as_vgpr(ctx, voffset)) : Operand(v1);
3948 Operand soffset_op = soffset.id() ? Operand(soffset) : Operand(0u);
3949 Builder::Result r = bld.mubuf(op, Operand(descriptor), voffset_op, soffset_op, Operand(vdata), const_offset,
3950 /* offen */ !voffset_op.isUndefined(), /* idxen*/ false, /* addr64 */ false,
3951 /* disable_wqm */ false, /* glc */ true, /* dlc*/ false, /* slc */ slc);
3952
3953 static_cast<MUBUF_instruction *>(r.instr)->can_reorder = allow_reorder;
3954 }
3955
3956 void store_vmem_mubuf(isel_context *ctx, Temp src, Temp descriptor, Temp voffset, Temp soffset,
3957 unsigned base_const_offset, unsigned elem_size_bytes, unsigned write_mask,
3958 bool allow_combining = true, bool reorder = true, bool slc = false)
3959 {
3960 Builder bld(ctx->program, ctx->block);
3961 assert(elem_size_bytes == 2 || elem_size_bytes == 4 || elem_size_bytes == 8);
3962 assert(write_mask);
3963 write_mask = widen_mask(write_mask, elem_size_bytes);
3964
3965 unsigned write_count = 0;
3966 Temp write_datas[32];
3967 unsigned offsets[32];
3968 split_buffer_store(ctx, NULL, false, RegType::vgpr, src, write_mask,
3969 allow_combining ? 16 : 4, &write_count, write_datas, offsets);
3970
3971 for (unsigned i = 0; i < write_count; i++) {
3972 unsigned const_offset = offsets[i] + base_const_offset;
3973 emit_single_mubuf_store(ctx, descriptor, voffset, soffset, write_datas[i], const_offset, reorder, slc);
3974 }
3975 }
3976
3977 void load_vmem_mubuf(isel_context *ctx, Temp dst, Temp descriptor, Temp voffset, Temp soffset,
3978 unsigned base_const_offset, unsigned elem_size_bytes, unsigned num_components,
3979 unsigned stride = 0u, bool allow_combining = true, bool allow_reorder = true)
3980 {
3981 assert(elem_size_bytes == 2 || elem_size_bytes == 4 || elem_size_bytes == 8);
3982 assert((num_components * elem_size_bytes) == dst.bytes());
3983 assert(!!stride != allow_combining);
3984
3985 Builder bld(ctx->program, ctx->block);
3986
3987 LoadEmitInfo info = {Operand(voffset), dst, num_components, elem_size_bytes, descriptor};
3988 info.component_stride = allow_combining ? 0 : stride;
3989 info.glc = true;
3990 info.swizzle_component_size = allow_combining ? 0 : 4;
3991 info.align_mul = MIN2(elem_size_bytes, 4);
3992 info.align_offset = 0;
3993 info.soffset = soffset;
3994 info.const_offset = base_const_offset;
3995 emit_mubuf_load(ctx, bld, &info);
3996 }
3997
3998 std::pair<Temp, unsigned> offset_add_from_nir(isel_context *ctx, const std::pair<Temp, unsigned> &base_offset, nir_src *off_src, unsigned stride = 1u)
3999 {
4000 Builder bld(ctx->program, ctx->block);
4001 Temp offset = base_offset.first;
4002 unsigned const_offset = base_offset.second;
4003
4004 if (!nir_src_is_const(*off_src)) {
4005 Temp indirect_offset_arg = get_ssa_temp(ctx, off_src->ssa);
4006 Temp with_stride;
4007
4008 /* Calculate indirect offset with stride */
4009 if (likely(indirect_offset_arg.regClass() == v1))
4010 with_stride = bld.v_mul24_imm(bld.def(v1), indirect_offset_arg, stride);
4011 else if (indirect_offset_arg.regClass() == s1)
4012 with_stride = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(stride), indirect_offset_arg);
4013 else
4014 unreachable("Unsupported register class of indirect offset");
4015
4016 /* Add to the supplied base offset */
4017 if (offset.id() == 0)
4018 offset = with_stride;
4019 else if (unlikely(offset.regClass() == s1 && with_stride.regClass() == s1))
4020 offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), with_stride, offset);
4021 else if (offset.size() == 1 && with_stride.size() == 1)
4022 offset = bld.vadd32(bld.def(v1), with_stride, offset);
4023 else
4024 unreachable("Unsupported register class of indirect offset");
4025 } else {
4026 unsigned const_offset_arg = nir_src_as_uint(*off_src);
4027 const_offset += const_offset_arg * stride;
4028 }
4029
4030 return std::make_pair(offset, const_offset);
4031 }
4032
4033 std::pair<Temp, unsigned> offset_add(isel_context *ctx, const std::pair<Temp, unsigned> &off1, const std::pair<Temp, unsigned> &off2)
4034 {
4035 Builder bld(ctx->program, ctx->block);
4036 Temp offset;
4037
4038 if (off1.first.id() && off2.first.id()) {
4039 if (unlikely(off1.first.regClass() == s1 && off2.first.regClass() == s1))
4040 offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), off1.first, off2.first);
4041 else if (off1.first.size() == 1 && off2.first.size() == 1)
4042 offset = bld.vadd32(bld.def(v1), off1.first, off2.first);
4043 else
4044 unreachable("Unsupported register class of indirect offset");
4045 } else {
4046 offset = off1.first.id() ? off1.first : off2.first;
4047 }
4048
4049 return std::make_pair(offset, off1.second + off2.second);
4050 }
4051
4052 std::pair<Temp, unsigned> offset_mul(isel_context *ctx, const std::pair<Temp, unsigned> &offs, unsigned multiplier)
4053 {
4054 Builder bld(ctx->program, ctx->block);
4055 unsigned const_offset = offs.second * multiplier;
4056
4057 if (!offs.first.id())
4058 return std::make_pair(offs.first, const_offset);
4059
4060 Temp offset = unlikely(offs.first.regClass() == s1)
4061 ? bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(multiplier), offs.first)
4062 : bld.v_mul24_imm(bld.def(v1), offs.first, multiplier);
4063
4064 return std::make_pair(offset, const_offset);
4065 }
4066
4067 std::pair<Temp, unsigned> get_intrinsic_io_basic_offset(isel_context *ctx, nir_intrinsic_instr *instr, unsigned base_stride, unsigned component_stride)
4068 {
4069 Builder bld(ctx->program, ctx->block);
4070
4071 /* base is the driver_location, which is already multiplied by 4, so is in dwords */
4072 unsigned const_offset = nir_intrinsic_base(instr) * base_stride;
4073 /* component is in bytes */
4074 const_offset += nir_intrinsic_component(instr) * component_stride;
4075
4076 /* offset should be interpreted in relation to the base, so the instruction effectively reads/writes another input/output when it has an offset */
4077 nir_src *off_src = nir_get_io_offset_src(instr);
4078 return offset_add_from_nir(ctx, std::make_pair(Temp(), const_offset), off_src, 4u * base_stride);
4079 }
4080
4081 std::pair<Temp, unsigned> get_intrinsic_io_basic_offset(isel_context *ctx, nir_intrinsic_instr *instr, unsigned stride = 1u)
4082 {
4083 return get_intrinsic_io_basic_offset(ctx, instr, stride, stride);
4084 }
4085
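/* Returns the patch index relative to the current threadgroup:
 * the low 8 bits of tcs_rel_ids for TCS, or a dedicated argument for TES. */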
4086 Temp get_tess_rel_patch_id(isel_context *ctx)
4087 {
4088 Builder bld(ctx->program, ctx->block);
4089
4090 switch (ctx->shader->info.stage) {
4091 case MESA_SHADER_TESS_CTRL:
4092 return bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xffu),
4093 get_arg(ctx, ctx->args->ac.tcs_rel_ids));
4094 case MESA_SHADER_TESS_EVAL:
4095 return get_arg(ctx, ctx->args->tes_rel_patch_id);
4096 default:
4097 unreachable("Unsupported stage in get_tess_rel_patch_id");
4098 }
4099 }
4100
4101 std::pair<Temp, unsigned> get_tcs_per_vertex_input_lds_offset(isel_context *ctx, nir_intrinsic_instr *instr)
4102 {
4103 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
4104 Builder bld(ctx->program, ctx->block);
4105
4106 uint32_t tcs_in_patch_stride = ctx->args->options->key.tcs.input_vertices * ctx->tcs_num_inputs * 4;
4107 uint32_t tcs_in_vertex_stride = ctx->tcs_num_inputs * 4;
4108
4109 std::pair<Temp, unsigned> offs = get_intrinsic_io_basic_offset(ctx, instr);
4110
4111 nir_src *vertex_index_src = nir_get_io_vertex_index_src(instr);
4112 offs = offset_add_from_nir(ctx, offs, vertex_index_src, tcs_in_vertex_stride);
4113
4114 Temp rel_patch_id = get_tess_rel_patch_id(ctx);
4115 Temp tcs_in_current_patch_offset = bld.v_mul24_imm(bld.def(v1), rel_patch_id, tcs_in_patch_stride);
4116 offs = offset_add(ctx, offs, std::make_pair(tcs_in_current_patch_offset, 0));
4117
4118 return offset_mul(ctx, offs, 4u);
4119 }
4120
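/* Calculates the LDS offset of a TCS output. In LDS, all input patches are
 * stored first; after them, each patch stores its per-vertex outputs
 * followed by its per-patch outputs, so output_patch_stride covers both. */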
4121 std::pair<Temp, unsigned> get_tcs_output_lds_offset(isel_context *ctx, nir_intrinsic_instr *instr = nullptr, bool per_vertex = false)
4122 {
4123 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
4124 Builder bld(ctx->program, ctx->block);
4125
4126 uint32_t input_patch_size = ctx->args->options->key.tcs.input_vertices * ctx->tcs_num_inputs * 16;
4127 uint32_t output_vertex_size = ctx->tcs_num_outputs * 16;
4128 uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
4129 uint32_t output_patch_stride = pervertex_output_patch_size + ctx->tcs_num_patch_outputs * 16;
4130
4131 std::pair<Temp, unsigned> offs = instr
4132 ? get_intrinsic_io_basic_offset(ctx, instr, 4u)
4133 : std::make_pair(Temp(), 0u);
4134
4135 Temp rel_patch_id = get_tess_rel_patch_id(ctx);
4136 Temp patch_off = bld.v_mul24_imm(bld.def(v1), rel_patch_id, output_patch_stride);
4137
4138 if (per_vertex) {
4139 assert(instr);
4140
4141 nir_src *vertex_index_src = nir_get_io_vertex_index_src(instr);
4142 offs = offset_add_from_nir(ctx, offs, vertex_index_src, output_vertex_size);
4143
4144 uint32_t output_patch0_offset = (input_patch_size * ctx->tcs_num_patches);
4145 offs = offset_add(ctx, offs, std::make_pair(patch_off, output_patch0_offset));
4146 } else {
4147 uint32_t output_patch0_patch_data_offset = (input_patch_size * ctx->tcs_num_patches + pervertex_output_patch_size);
4148 offs = offset_add(ctx, offs, std::make_pair(patch_off, output_patch0_patch_data_offset));
4149 }
4150
4151 return offs;
4152 }
4153
4154 std::pair<Temp, unsigned> get_tcs_per_vertex_output_vmem_offset(isel_context *ctx, nir_intrinsic_instr *instr)
4155 {
4156 Builder bld(ctx->program, ctx->block);
4157
4158 unsigned vertices_per_patch = ctx->shader->info.tess.tcs_vertices_out;
4159 unsigned attr_stride = vertices_per_patch * ctx->tcs_num_patches;
4160
4161 std::pair<Temp, unsigned> offs = get_intrinsic_io_basic_offset(ctx, instr, attr_stride * 4u, 4u);
4162
4163 Temp rel_patch_id = get_tess_rel_patch_id(ctx);
4164 Temp patch_off = bld.v_mul24_imm(bld.def(v1), rel_patch_id, vertices_per_patch * 16u);
4165 offs = offset_add(ctx, offs, std::make_pair(patch_off, 0u));
4166
4167 nir_src *vertex_index_src = nir_get_io_vertex_index_src(instr);
4168 offs = offset_add_from_nir(ctx, offs, vertex_index_src, 16u);
4169
4170 return offs;
4171 }
4172
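/* Calculates the offchip (VMEM) offset of a TCS per-patch output. The
 * offchip buffer stores the per-vertex outputs of all patches first,
 * followed by the per-patch outputs, laid out attribute-major with one
 * 16-byte slot per patch. */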
4173 std::pair<Temp, unsigned> get_tcs_per_patch_output_vmem_offset(isel_context *ctx, nir_intrinsic_instr *instr = nullptr, unsigned const_base_offset = 0u)
4174 {
4175 Builder bld(ctx->program, ctx->block);
4176
4177 unsigned output_vertex_size = ctx->tcs_num_outputs * 16;
4178 unsigned per_vertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
4179 unsigned per_patch_data_offset = per_vertex_output_patch_size * ctx->tcs_num_patches;
4180 unsigned attr_stride = ctx->tcs_num_patches;
4181
4182 std::pair<Temp, unsigned> offs = instr
4183 ? get_intrinsic_io_basic_offset(ctx, instr, attr_stride * 4u, 4u)
4184 : std::make_pair(Temp(), 0u);
4185
4186 if (const_base_offset)
4187 offs.second += const_base_offset * attr_stride;
4188
4189 Temp rel_patch_id = get_tess_rel_patch_id(ctx);
4190 Temp patch_off = bld.v_mul24_imm(bld.def(v1), rel_patch_id, 16u);
4191 offs = offset_add(ctx, offs, std::make_pair(patch_off, per_patch_data_offset));
4192
4193 return offs;
4194 }
4195
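/* Returns whether the varying slot written by this intrinsic is set in the
 * given mask. Indirect offsets can't be matched statically, so they are
 * reported through *indirect instead. */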
4196 bool tcs_driver_location_matches_api_mask(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex, uint64_t mask, bool *indirect)
4197 {
4198 assert(per_vertex || ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
4199
4200 if (mask == 0)
4201 return false;
4202
4203 unsigned drv_loc = nir_intrinsic_base(instr);
4204 nir_src *off_src = nir_get_io_offset_src(instr);
4205
4206 if (!nir_src_is_const(*off_src)) {
4207 *indirect = true;
4208 return false;
4209 }
4210
4211 *indirect = false;
4212 uint64_t slot = per_vertex
4213 ? ctx->output_drv_loc_to_var_slot[ctx->shader->info.stage][drv_loc / 4]
4214 : (ctx->output_tcs_patch_drv_loc_to_var_slot[drv_loc / 4] - VARYING_SLOT_PATCH0);
4215 return (((uint64_t) 1) << slot) & mask;
4216 }
4217
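/* Tries to record the output in the ctx->outputs temporaries instead of
 * emitting memory instructions; only possible when the offset is a
 * constant. */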
4218 bool store_output_to_temps(isel_context *ctx, nir_intrinsic_instr *instr)
4219 {
4220 unsigned write_mask = nir_intrinsic_write_mask(instr);
4221 unsigned component = nir_intrinsic_component(instr);
4222 unsigned idx = nir_intrinsic_base(instr) + component;
4223
4224 nir_instr *off_instr = instr->src[1].ssa->parent_instr;
4225 if (off_instr->type != nir_instr_type_load_const)
4226 return false;
4227
4228 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
4229 idx += nir_src_as_uint(instr->src[1]) * 4u;
4230
4231 if (instr->src[0].ssa->bit_size == 64)
4232 write_mask = widen_mask(write_mask, 2);
4233
4234 RegClass rc = instr->src[0].ssa->bit_size == 16 ? v2b : v1;
4235
4236 for (unsigned i = 0; i < 8; ++i) {
4237 if (write_mask & (1 << i)) {
4238 ctx->outputs.mask[idx / 4u] |= 1 << (idx % 4u);
4239 ctx->outputs.temps[idx] = emit_extract_vector(ctx, src, i, rc);
4240 }
4241 idx++;
4242 }
4243
4244 return true;
4245 }
4246
4247 bool load_input_from_temps(isel_context *ctx, nir_intrinsic_instr *instr, Temp dst)
4248 {
4249 /* Only TCS per-vertex inputs are supported by this function.
4250  * Per-vertex inputs only match the VS outputs of the same invocation id when the VS and TCS invocation counts are equal.
4251 */
4252 if (ctx->shader->info.stage != MESA_SHADER_TESS_CTRL || !ctx->tcs_in_out_eq)
4253 return false;
4254
4255 nir_src *off_src = nir_get_io_offset_src(instr);
4256 nir_src *vertex_index_src = nir_get_io_vertex_index_src(instr);
4257 nir_instr *vertex_index_instr = vertex_index_src->ssa->parent_instr;
4258 bool can_use_temps = nir_src_is_const(*off_src) &&
4259 vertex_index_instr->type == nir_instr_type_intrinsic &&
4260 nir_instr_as_intrinsic(vertex_index_instr)->intrinsic == nir_intrinsic_load_invocation_id;
4261
4262 if (!can_use_temps)
4263 return false;
4264
4265 unsigned idx = nir_intrinsic_base(instr) + nir_intrinsic_component(instr) + 4 * nir_src_as_uint(*off_src);
4266 Temp *src = &ctx->inputs.temps[idx];
4267 create_vec_from_array(ctx, src, dst.size(), dst.regClass().type(), 4u, 0, dst);
4268
4269 return true;
4270 }
4271
4272 void visit_store_ls_or_es_output(isel_context *ctx, nir_intrinsic_instr *instr)
4273 {
4274 Builder bld(ctx->program, ctx->block);
4275
4276 if (ctx->tcs_in_out_eq && store_output_to_temps(ctx, instr)) {
4277 /* When the TCS only reads this output directly and for the same vertices as its invocation id, it is unnecessary to store the VS output to LDS. */
4278 bool indirect_write;
4279 bool temp_only_input = tcs_driver_location_matches_api_mask(ctx, instr, true, ctx->tcs_temp_only_inputs, &indirect_write);
4280 if (temp_only_input && !indirect_write)
4281 return;
4282 }
4283
4284 std::pair<Temp, unsigned> offs = get_intrinsic_io_basic_offset(ctx, instr, 4u);
4285 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
4286 unsigned write_mask = nir_intrinsic_write_mask(instr);
4287 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8u;
4288
4289 if (ctx->stage == vertex_es || ctx->stage == tess_eval_es) {
4290 /* GFX6-8: ES stage is not merged into GS, data is passed from ES to GS in VMEM. */
4291 Temp esgs_ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_ESGS_VS * 16u));
4292 Temp es2gs_offset = get_arg(ctx, ctx->args->es2gs_offset);
4293 store_vmem_mubuf(ctx, src, esgs_ring, offs.first, es2gs_offset, offs.second, elem_size_bytes, write_mask, false, true, true);
4294 } else {
4295 Temp lds_base;
4296
4297 if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs) {
4298 /* GFX9+: ES stage is merged into GS, data is passed between them using LDS. */
4299 unsigned itemsize = ctx->stage == vertex_geometry_gs
4300 ? ctx->program->info->vs.es_info.esgs_itemsize
4301 : ctx->program->info->tes.es_info.esgs_itemsize;
4302 Temp thread_id = emit_mbcnt(ctx, bld.def(v1));
4303 Temp wave_idx = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), get_arg(ctx, ctx->args->merged_wave_info), Operand(4u << 16 | 24));
4304 Temp vertex_idx = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), thread_id,
4305 bld.v_mul24_imm(bld.def(v1), as_vgpr(ctx, wave_idx), ctx->program->wave_size));
4306 lds_base = bld.v_mul24_imm(bld.def(v1), vertex_idx, itemsize);
4307 } else if (ctx->stage == vertex_ls || ctx->stage == vertex_tess_control_hs) {
4308 /* GFX6-8: VS runs on LS stage when tessellation is used, but LS shares LDS space with HS.
4309 * GFX9+: LS is merged into HS, but still uses the same LDS layout.
4310 */
4311 Temp vertex_idx = get_arg(ctx, ctx->args->rel_auto_id);
4312 lds_base = bld.v_mul24_imm(bld.def(v1), vertex_idx, ctx->tcs_num_inputs * 16u);
4313 } else {
4314 unreachable("Invalid LS or ES stage");
4315 }
4316
4317 offs = offset_add(ctx, offs, std::make_pair(lds_base, 0u));
4318 unsigned lds_align = calculate_lds_alignment(ctx, offs.second);
4319 store_lds(ctx, elem_size_bytes, src, write_mask, offs.first, offs.second, lds_align);
4320 }
4321 }
4322
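/* Tess factors are per-patch outputs at two fixed driver locations; they
 * are kept in LDS and handled separately instead of being written to the
 * offchip buffer for the TES. */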
4323 bool tcs_output_is_tess_factor(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex)
4324 {
4325 if (per_vertex)
4326 return false;
4327
4328 unsigned off = nir_intrinsic_base(instr) * 4u;
4329 return off == ctx->tcs_tess_lvl_out_loc ||
4330 off == ctx->tcs_tess_lvl_in_loc;
4332 }
4333
4334 bool tcs_output_is_read_by_tes(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex)
4335 {
4336 uint64_t mask = per_vertex
4337 ? ctx->program->info->tcs.tes_inputs_read
4338 : ctx->program->info->tcs.tes_patch_inputs_read;
4339
4340 bool indirect_write = false;
4341 bool output_read_by_tes = tcs_driver_location_matches_api_mask(ctx, instr, per_vertex, mask, &indirect_write);
4342 return indirect_write || output_read_by_tes;
4343 }
4344
4345 bool tcs_output_is_read_by_tcs(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex)
4346 {
4347 uint64_t mask = per_vertex
4348 ? ctx->shader->info.outputs_read
4349 : ctx->shader->info.patch_outputs_read;
4350
4351 bool indirect_write = false;
4352 bool output_read = tcs_driver_location_matches_api_mask(ctx, instr, per_vertex, mask, &indirect_write);
4353 return indirect_write || output_read;
4354 }
4355
4356 void visit_store_tcs_output(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex)
4357 {
4358 assert(ctx->stage == tess_control_hs || ctx->stage == vertex_tess_control_hs);
4359 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
4360
4361 Builder bld(ctx->program, ctx->block);
4362
4363 Temp store_val = get_ssa_temp(ctx, instr->src[0].ssa);
4364 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
4365 unsigned write_mask = nir_intrinsic_write_mask(instr);
4366
4367 bool is_tess_factor = tcs_output_is_tess_factor(ctx, instr, per_vertex);
4368 bool write_to_vmem = !is_tess_factor && tcs_output_is_read_by_tes(ctx, instr, per_vertex);
4369 bool write_to_lds = is_tess_factor || tcs_output_is_read_by_tcs(ctx, instr, per_vertex);
4370
4371 if (write_to_vmem) {
4372 std::pair<Temp, unsigned> vmem_offs = per_vertex
4373 ? get_tcs_per_vertex_output_vmem_offset(ctx, instr)
4374 : get_tcs_per_patch_output_vmem_offset(ctx, instr);
4375
4376 Temp hs_ring_tess_offchip = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_HS_TESS_OFFCHIP * 16u));
4377 Temp oc_lds = get_arg(ctx, ctx->args->oc_lds);
4378 store_vmem_mubuf(ctx, store_val, hs_ring_tess_offchip, vmem_offs.first, oc_lds, vmem_offs.second, elem_size_bytes, write_mask, true, false);
4379 }
4380
4381 if (write_to_lds) {
4382 std::pair<Temp, unsigned> lds_offs = get_tcs_output_lds_offset(ctx, instr, per_vertex);
4383 unsigned lds_align = calculate_lds_alignment(ctx, lds_offs.second);
4384 store_lds(ctx, elem_size_bytes, store_val, write_mask, lds_offs.first, lds_offs.second, lds_align);
4385 }
4386 }
4387
4388 void visit_load_tcs_output(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex)
4389 {
4390 assert(ctx->stage == tess_control_hs || ctx->stage == vertex_tess_control_hs);
4391 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
4392
4393 Builder bld(ctx->program, ctx->block);
4394
4395 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
4396 std::pair<Temp, unsigned> lds_offs = get_tcs_output_lds_offset(ctx, instr, per_vertex);
4397 unsigned lds_align = calculate_lds_alignment(ctx, lds_offs.second);
4398 unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8;
4399
4400 load_lds(ctx, elem_size_bytes, dst, lds_offs.first, lds_offs.second, lds_align);
4401 }
4402
4403 void visit_store_output(isel_context *ctx, nir_intrinsic_instr *instr)
4404 {
4405 if (ctx->stage == vertex_vs ||
4406 ctx->stage == tess_eval_vs ||
4407 ctx->stage == fragment_fs ||
4408 ctx->stage == ngg_vertex_gs ||
4409 ctx->stage == ngg_tess_eval_gs ||
4410 ctx->shader->info.stage == MESA_SHADER_GEOMETRY) {
4411 bool stored_to_temps = store_output_to_temps(ctx, instr);
4412 if (!stored_to_temps) {
4413 fprintf(stderr, "Unimplemented output offset instruction:\n");
4414 nir_print_instr(instr->src[1].ssa->parent_instr, stderr);
4415 fprintf(stderr, "\n");
4416 abort();
4417 }
4418 } else if (ctx->stage == vertex_es ||
4419 ctx->stage == vertex_ls ||
4420 ctx->stage == tess_eval_es ||
4421 (ctx->stage == vertex_tess_control_hs && ctx->shader->info.stage == MESA_SHADER_VERTEX) ||
4422 (ctx->stage == vertex_geometry_gs && ctx->shader->info.stage == MESA_SHADER_VERTEX) ||
4423 (ctx->stage == tess_eval_geometry_gs && ctx->shader->info.stage == MESA_SHADER_TESS_EVAL)) {
4424 visit_store_ls_or_es_output(ctx, instr);
4425 } else if (ctx->shader->info.stage == MESA_SHADER_TESS_CTRL) {
4426 visit_store_tcs_output(ctx, instr, false);
4427 } else {
4428 unreachable("Shader stage not implemented");
4429 }
4430 }
4431
4432 void visit_load_output(isel_context *ctx, nir_intrinsic_instr *instr)
4433 {
4434 visit_load_tcs_output(ctx, instr, false);
4435 }
4436
4437 void emit_interp_instr(isel_context *ctx, unsigned idx, unsigned component, Temp src, Temp dst, Temp prim_mask)
4438 {
4439 Temp coord1 = emit_extract_vector(ctx, src, 0, v1);
4440 Temp coord2 = emit_extract_vector(ctx, src, 1, v1);
4441
4442 Builder bld(ctx->program, ctx->block);
4443
4444 if (dst.regClass() == v2b) {
4445 if (ctx->program->has_16bank_lds) {
4446 assert(ctx->options->chip_class <= GFX8);
4447 Builder::Result interp_p1 =
4448 bld.vintrp(aco_opcode::v_interp_mov_f32, bld.def(v1),
4449 Operand(2u) /* P0 */, bld.m0(prim_mask), idx, component);
4450 interp_p1 = bld.vintrp(aco_opcode::v_interp_p1lv_f16, bld.def(v2b),
4451 coord1, bld.m0(prim_mask), interp_p1, idx, component);
4452 bld.vintrp(aco_opcode::v_interp_p2_legacy_f16, Definition(dst), coord2,
4453 bld.m0(prim_mask), interp_p1, idx, component);
4454 } else {
4455 aco_opcode interp_p2_op = aco_opcode::v_interp_p2_f16;
4456
4457 if (ctx->options->chip_class == GFX8)
4458 interp_p2_op = aco_opcode::v_interp_p2_legacy_f16;
4459
4460 Builder::Result interp_p1 =
4461 bld.vintrp(aco_opcode::v_interp_p1ll_f16, bld.def(v1),
4462 coord1, bld.m0(prim_mask), idx, component);
4463 bld.vintrp(interp_p2_op, Definition(dst), coord2, bld.m0(prim_mask),
4464 interp_p1, idx, component);
4465 }
4466 } else {
4467 Builder::Result interp_p1 =
4468 bld.vintrp(aco_opcode::v_interp_p1_f32, bld.def(v1), coord1,
4469 bld.m0(prim_mask), idx, component);
4470
4471 if (ctx->program->has_16bank_lds)
4472 interp_p1.instr->operands[0].setLateKill(true);
4473
4474 bld.vintrp(aco_opcode::v_interp_p2_f32, Definition(dst), coord2,
4475 bld.m0(prim_mask), interp_p1, idx, component);
4476 }
4477 }
4478
4479 void emit_load_frag_coord(isel_context *ctx, Temp dst, unsigned num_components)
4480 {
4481 aco_ptr<Pseudo_instruction> vec(create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1));
4482 for (unsigned i = 0; i < num_components; i++)
4483 vec->operands[i] = Operand(get_arg(ctx, ctx->args->ac.frag_pos[i]));
4484 if (G_0286CC_POS_W_FLOAT_ENA(ctx->program->config->spi_ps_input_ena)) {
4485 assert(num_components == 4);
4486 Builder bld(ctx->program, ctx->block);
4487 vec->operands[3] = bld.vop1(aco_opcode::v_rcp_f32, bld.def(v1), get_arg(ctx, ctx->args->ac.frag_pos[3]));
4488 }
4489
4490 for (Operand& op : vec->operands)
4491 op = op.isUndefined() ? Operand(0u) : op;
4492
4493 vec->definitions[0] = Definition(dst);
4494 ctx->block->instructions.emplace_back(std::move(vec));
4495 emit_split_vector(ctx, dst, num_components);
4497 }
4498
4499 void visit_load_interpolated_input(isel_context *ctx, nir_intrinsic_instr *instr)
4500 {
4501 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
4502 Temp coords = get_ssa_temp(ctx, instr->src[0].ssa);
4503 unsigned idx = nir_intrinsic_base(instr);
4504 unsigned component = nir_intrinsic_component(instr);
4505 Temp prim_mask = get_arg(ctx, ctx->args->ac.prim_mask);
4506
4507 nir_const_value* offset = nir_src_as_const_value(instr->src[1]);
4508 if (offset) {
4509 assert(offset->u32 == 0);
4510 } else {
4511 /* the lower 15 bits of the prim_mask contain the offset into LDS
4512 * while the upper bits contain the number of prims */
4513 Temp offset_src = get_ssa_temp(ctx, instr->src[1].ssa);
4514 assert(offset_src.regClass() == s1 && "TODO: divergent offsets...");
4515 Builder bld(ctx->program, ctx->block);
4516 Temp stride = bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc), prim_mask, Operand(16u));
4517 stride = bld.sop1(aco_opcode::s_bcnt1_i32_b32, bld.def(s1), bld.def(s1, scc), stride);
4518 stride = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), stride, Operand(48u));
4519 offset_src = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), stride, offset_src);
4520 prim_mask = bld.sop2(aco_opcode::s_add_i32, bld.def(s1, m0), bld.def(s1, scc), offset_src, prim_mask);
4521 }
4522
4523 if (instr->dest.ssa.num_components == 1) {
4524 emit_interp_instr(ctx, idx, component, coords, dst, prim_mask);
4525 } else {
4526 aco_ptr<Pseudo_instruction> vec(create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, instr->dest.ssa.num_components, 1));
4527 for (unsigned i = 0; i < instr->dest.ssa.num_components; i++)
4528 {
4529 Temp tmp = {ctx->program->allocateId(), v1};
4530 emit_interp_instr(ctx, idx, component+i, coords, tmp, prim_mask);
4531 vec->operands[i] = Operand(tmp);
4532 }
4533 vec->definitions[0] = Definition(dst);
4534 ctx->block->instructions.emplace_back(std::move(vec));
4535 }
4536 }
4537
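/* Returns whether a typed fetch of the given channel count is usable:
 * 3-channel formats only exist with 4-byte channels, and GFX6/GFX10 also
 * need the offset and stride to be aligned to the fetched vertex size. */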
4538 bool check_vertex_fetch_size(isel_context *ctx, const ac_data_format_info *vtx_info,
4539 unsigned offset, unsigned stride, unsigned channels)
4540 {
4541 unsigned vertex_byte_size = vtx_info->chan_byte_size * channels;
4542 if (vtx_info->chan_byte_size != 4 && channels == 3)
4543 return false;
4544 return (ctx->options->chip_class != GFX6 && ctx->options->chip_class != GFX10) ||
4545 (offset % vertex_byte_size == 0 && stride % vertex_byte_size == 0);
4546 }
4547
4548 uint8_t get_fetch_data_format(isel_context *ctx, const ac_data_format_info *vtx_info,
4549 unsigned offset, unsigned stride, unsigned *channels)
4550 {
4551 if (!vtx_info->chan_byte_size) {
4552 *channels = vtx_info->num_channels;
4553 return vtx_info->chan_format;
4554 }
4555
4556 unsigned num_channels = *channels;
4557 if (!check_vertex_fetch_size(ctx, vtx_info, offset, stride, *channels)) {
4558 unsigned new_channels = num_channels + 1;
4559 /* first, assume more loads are worse and try using a larger data format */
4560 while (new_channels <= 4 && !check_vertex_fetch_size(ctx, vtx_info, offset, stride, new_channels)) {
4561 new_channels++;
4562 /* don't make the attribute potentially out-of-bounds */
4563 if (offset + new_channels * vtx_info->chan_byte_size > stride)
4564 new_channels = 5;
4565 }
4566
4567 if (new_channels == 5) {
4568 /* then try decreasing load size (at the cost of more loads) */
4569 new_channels = *channels;
4570 while (new_channels > 1 && !check_vertex_fetch_size(ctx, vtx_info, offset, stride, new_channels))
4571 new_channels--;
4572 }
4573
4574 if (new_channels < *channels)
4575 *channels = new_channels;
4576 num_channels = new_channels;
4577 }
4578
4579 switch (vtx_info->chan_format) {
4580 case V_008F0C_BUF_DATA_FORMAT_8:
4581 return (uint8_t[]){V_008F0C_BUF_DATA_FORMAT_8, V_008F0C_BUF_DATA_FORMAT_8_8,
4582 V_008F0C_BUF_DATA_FORMAT_INVALID, V_008F0C_BUF_DATA_FORMAT_8_8_8_8}[num_channels - 1];
4583 case V_008F0C_BUF_DATA_FORMAT_16:
4584 return (uint8_t[]){V_008F0C_BUF_DATA_FORMAT_16, V_008F0C_BUF_DATA_FORMAT_16_16,
4585 V_008F0C_BUF_DATA_FORMAT_INVALID, V_008F0C_BUF_DATA_FORMAT_16_16_16_16}[num_channels - 1];
4586 case V_008F0C_BUF_DATA_FORMAT_32:
4587 return (uint8_t[]){V_008F0C_BUF_DATA_FORMAT_32, V_008F0C_BUF_DATA_FORMAT_32_32,
4588 V_008F0C_BUF_DATA_FORMAT_32_32_32, V_008F0C_BUF_DATA_FORMAT_32_32_32_32}[num_channels - 1];
4589 }
4590 unreachable("shouldn't reach here");
4591 return V_008F0C_BUF_DATA_FORMAT_INVALID;
4592 }
4593
4594 /* For 2_10_10_10 formats the alpha is handled as unsigned by pre-Vega HW,
4595  * so we may need to fix it up. */
4596 Temp adjust_vertex_fetch_alpha(isel_context *ctx, unsigned adjustment, Temp alpha)
4597 {
4598 Builder bld(ctx->program, ctx->block);
4599
4600 if (adjustment == RADV_ALPHA_ADJUST_SSCALED)
4601 alpha = bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), alpha);
4602
4603 /* For the integer-like cases, do a natural sign extension.
4604 *
4605 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
4606 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
4607 * exponent.
4608 */
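/* The shift pair below sign-extends the two interesting bits, e.g. an
 * alpha field containing 3 (0b11) becomes (3 << 30) >> 30 = -1. For SNORM,
 * the left shift is 7 so that the exponent's two LSBs (bits 23-24) land
 * in bits 30-31 first. */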
4609 alpha = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(adjustment == RADV_ALPHA_ADJUST_SNORM ? 7u : 30u), alpha);
4610 alpha = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(30u), alpha);
4611
4612 /* Convert back to the right type. */
4613 if (adjustment == RADV_ALPHA_ADJUST_SNORM) {
4614 alpha = bld.vop1(aco_opcode::v_cvt_f32_i32, bld.def(v1), alpha);
4615 Temp clamp = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0xbf800000u), alpha);
4616 alpha = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0xbf800000u), alpha, clamp);
4617 } else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) {
4618 alpha = bld.vop1(aco_opcode::v_cvt_f32_i32, bld.def(v1), alpha);
4619 }
4620
4621 return alpha;
4622 }
4623
4624 void visit_load_input(isel_context *ctx, nir_intrinsic_instr *instr)
4625 {
4626 Builder bld(ctx->program, ctx->block);
4627 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
4628 if (ctx->shader->info.stage == MESA_SHADER_VERTEX) {
4629
4630 nir_instr *off_instr = instr->src[0].ssa->parent_instr;
4631 if (off_instr->type != nir_instr_type_load_const) {
4632 fprintf(stderr, "Unimplemented nir_intrinsic_load_input offset\n");
4633 nir_print_instr(off_instr, stderr);
4634 fprintf(stderr, "\n");
4635 }
4636 uint32_t offset = nir_instr_as_load_const(off_instr)->value[0].u32;
4637
4638 Temp vertex_buffers = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->vertex_buffers));
4639
4640 unsigned location = nir_intrinsic_base(instr) / 4 - VERT_ATTRIB_GENERIC0 + offset;
4641 unsigned component = nir_intrinsic_component(instr);
4642 unsigned bitsize = instr->dest.ssa.bit_size;
4643 unsigned attrib_binding = ctx->options->key.vs.vertex_attribute_bindings[location];
4644 uint32_t attrib_offset = ctx->options->key.vs.vertex_attribute_offsets[location];
4645 uint32_t attrib_stride = ctx->options->key.vs.vertex_attribute_strides[location];
4646 unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[location];
4647
4648 unsigned dfmt = attrib_format & 0xf;
4649 unsigned nfmt = (attrib_format >> 4) & 0x7;
4650 const struct ac_data_format_info *vtx_info = ac_get_data_format_info(dfmt);
4651
4652 unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa) << component;
4653 unsigned num_channels = MIN2(util_last_bit(mask), vtx_info->num_channels);
4654 unsigned alpha_adjust = (ctx->options->key.vs.alpha_adjust >> (location * 2)) & 3;
4655 bool post_shuffle = ctx->options->key.vs.post_shuffle & (1 << location);
4656 if (post_shuffle)
4657 num_channels = MAX2(num_channels, 3);
4658
4659 Operand off = bld.copy(bld.def(s1), Operand(attrib_binding * 16u));
4660 Temp list = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), vertex_buffers, off);
4661
4662 Temp index;
4663 if (ctx->options->key.vs.instance_rate_inputs & (1u << location)) {
4664 uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[location];
4665 Temp start_instance = get_arg(ctx, ctx->args->ac.start_instance);
4666 if (divisor) {
4667 Temp instance_id = get_arg(ctx, ctx->args->ac.instance_id);
4668 if (divisor != 1) {
4669 Temp divided = bld.tmp(v1);
4670 emit_v_div_u32(ctx, divided, as_vgpr(ctx, instance_id), divisor);
4671 index = bld.vadd32(bld.def(v1), start_instance, divided);
4672 } else {
4673 index = bld.vadd32(bld.def(v1), start_instance, instance_id);
4674 }
4675 } else {
4676 index = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), start_instance);
4677 }
4678 } else {
4679 index = bld.vadd32(bld.def(v1),
4680 get_arg(ctx, ctx->args->ac.base_vertex),
4681 get_arg(ctx, ctx->args->ac.vertex_id));
4682 }
4683
4684 Temp channels[num_channels];
4685 unsigned channel_start = 0;
4686 bool direct_fetch = false;
4687
4688 /* skip unused channels at the start */
4689 if (vtx_info->chan_byte_size && !post_shuffle) {
4690 channel_start = ffs(mask) - 1;
4691 for (unsigned i = 0; i < channel_start; i++)
4692 channels[i] = Temp(0, s1);
4693 } else if (vtx_info->chan_byte_size && post_shuffle && !(mask & 0x8)) {
4694 num_channels = 3 - (ffs(mask) - 1);
4695 }
4696
4697 /* load channels */
4698 while (channel_start < num_channels) {
4699 unsigned fetch_component = num_channels - channel_start;
4700 unsigned fetch_offset = attrib_offset + channel_start * vtx_info->chan_byte_size;
4701 bool expanded = false;
4702
4703 /* use MUBUF when possible to avoid potential alignment issues */
4704 /* TODO: we could use SDWA to unpack 8/16-bit attributes without extra instructions */
4705 bool use_mubuf = (nfmt == V_008F0C_BUF_NUM_FORMAT_FLOAT ||
4706 nfmt == V_008F0C_BUF_NUM_FORMAT_UINT ||
4707 nfmt == V_008F0C_BUF_NUM_FORMAT_SINT) &&
4708 vtx_info->chan_byte_size == 4;
4709 unsigned fetch_dfmt = V_008F0C_BUF_DATA_FORMAT_INVALID;
4710 if (!use_mubuf) {
4711 fetch_dfmt = get_fetch_data_format(ctx, vtx_info, fetch_offset, attrib_stride, &fetch_component);
4712 } else {
4713 if (fetch_component == 3 && ctx->options->chip_class == GFX6) {
4714 /* GFX6 only supports loading vec3 with MTBUF, expand to vec4. */
4715 fetch_component = 4;
4716 expanded = true;
4717 }
4718 }
4719
4720 unsigned fetch_bytes = fetch_component * bitsize / 8;
4721
4722 Temp fetch_index = index;
4723 if (attrib_stride != 0 && fetch_offset > attrib_stride) {
4724 fetch_index = bld.vadd32(bld.def(v1), Operand(fetch_offset / attrib_stride), fetch_index);
4725 fetch_offset = fetch_offset % attrib_stride;
4726 }
4727
4728 Operand soffset(0u);
4729 if (fetch_offset >= 4096) {
4730 soffset = bld.copy(bld.def(s1), Operand(fetch_offset / 4096 * 4096));
4731 fetch_offset %= 4096;
4732 }
4733
4734 aco_opcode opcode;
4735 switch (fetch_bytes) {
4736 case 2:
4737 assert(!use_mubuf && bitsize == 16);
4738 opcode = aco_opcode::tbuffer_load_format_d16_x;
4739 break;
4740 case 4:
4741 if (bitsize == 16) {
4742 assert(!use_mubuf);
4743 opcode = aco_opcode::tbuffer_load_format_d16_xy;
4744 } else {
4745 opcode = use_mubuf ? aco_opcode::buffer_load_dword : aco_opcode::tbuffer_load_format_x;
4746 }
4747 break;
4748 case 6:
4749 assert(!use_mubuf && bitsize == 16);
4750 opcode = aco_opcode::tbuffer_load_format_d16_xyz;
4751 break;
4752 case 8:
4753 if (bitsize == 16) {
4754 assert(!use_mubuf);
4755 opcode = aco_opcode::tbuffer_load_format_d16_xyzw;
4756 } else {
4757 opcode = use_mubuf ? aco_opcode::buffer_load_dwordx2 : aco_opcode::tbuffer_load_format_xy;
4758 }
4759 break;
4760 case 12:
4761 assert(ctx->options->chip_class >= GFX7 ||
4762 (!use_mubuf && ctx->options->chip_class == GFX6));
4763 opcode = use_mubuf ? aco_opcode::buffer_load_dwordx3 : aco_opcode::tbuffer_load_format_xyz;
4764 break;
4765 case 16:
4766 opcode = use_mubuf ? aco_opcode::buffer_load_dwordx4 : aco_opcode::tbuffer_load_format_xyzw;
4767 break;
4768 default:
4769 unreachable("Unimplemented load_input vector size");
4770 }
4771
4772 Temp fetch_dst;
4773 if (channel_start == 0 && fetch_bytes == dst.bytes() && !post_shuffle &&
4774 !expanded && (alpha_adjust == RADV_ALPHA_ADJUST_NONE ||
4775 num_channels <= 3)) {
4776 direct_fetch = true;
4777 fetch_dst = dst;
4778 } else {
4779 fetch_dst = bld.tmp(RegClass::get(RegType::vgpr, fetch_bytes));
4780 }
4781
4782 if (use_mubuf) {
4783 Instruction *mubuf = bld.mubuf(opcode,
4784 Definition(fetch_dst), list, fetch_index, soffset,
4785 fetch_offset, false, true).instr;
4786 static_cast<MUBUF_instruction*>(mubuf)->can_reorder = true;
4787 } else {
4788 Instruction *mtbuf = bld.mtbuf(opcode,
4789 Definition(fetch_dst), list, fetch_index, soffset,
4790 fetch_dfmt, nfmt, fetch_offset, false, true).instr;
4791 static_cast<MTBUF_instruction*>(mtbuf)->can_reorder = true;
4792 }
4793
4794 emit_split_vector(ctx, fetch_dst, fetch_dst.size());
4795
4796 if (fetch_component == 1) {
4797 channels[channel_start] = fetch_dst;
4798 } else {
4799 for (unsigned i = 0; i < MIN2(fetch_component, num_channels - channel_start); i++)
4800 channels[channel_start + i] = emit_extract_vector(ctx, fetch_dst, i,
4801 bitsize == 16 ? v2b : v1);
4802 }
4803
4804 channel_start += fetch_component;
4805 }
4806
4807 if (!direct_fetch) {
4808 bool is_float = nfmt != V_008F0C_BUF_NUM_FORMAT_UINT &&
4809 nfmt != V_008F0C_BUF_NUM_FORMAT_SINT;
4810
4811 static const unsigned swizzle_normal[4] = {0, 1, 2, 3};
4812 static const unsigned swizzle_post_shuffle[4] = {2, 1, 0, 3};
4813 const unsigned *swizzle = post_shuffle ? swizzle_post_shuffle : swizzle_normal;
4814
4815 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
4816 std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
4817 unsigned num_temp = 0;
4818 for (unsigned i = 0; i < dst.size(); i++) {
4819 unsigned idx = i + component;
4820 if (swizzle[idx] < num_channels && channels[swizzle[idx]].id()) {
4821 Temp channel = channels[swizzle[idx]];
4822 if (idx == 3 && alpha_adjust != RADV_ALPHA_ADJUST_NONE)
4823 channel = adjust_vertex_fetch_alpha(ctx, alpha_adjust, channel);
4824 vec->operands[i] = Operand(channel);
4825
4826 num_temp++;
4827 elems[i] = channel;
4828 } else if (is_float && idx == 3) {
4829 vec->operands[i] = Operand(0x3f800000u);
4830 } else if (!is_float && idx == 3) {
4831 vec->operands[i] = Operand(1u);
4832 } else {
4833 vec->operands[i] = Operand(0u);
4834 }
4835 }
4836 vec->definitions[0] = Definition(dst);
4837 ctx->block->instructions.emplace_back(std::move(vec));
4838 emit_split_vector(ctx, dst, dst.size());
4839
4840 if (num_temp == dst.size())
4841 ctx->allocated_vec.emplace(dst.id(), elems);
4842 }
4843 } else if (ctx->shader->info.stage == MESA_SHADER_FRAGMENT) {
4844 unsigned offset_idx = instr->intrinsic == nir_intrinsic_load_input ? 0 : 1;
4845 nir_instr *off_instr = instr->src[offset_idx].ssa->parent_instr;
4846 if (off_instr->type != nir_instr_type_load_const ||
4847 nir_instr_as_load_const(off_instr)->value[0].u32 != 0) {
4848 fprintf(stderr, "Unimplemented nir_intrinsic_load_input offset\n");
4849 nir_print_instr(off_instr, stderr);
4850 fprintf(stderr, "\n");
4851 }
4852
4853 Temp prim_mask = get_arg(ctx, ctx->args->ac.prim_mask);
4854 nir_const_value* offset = nir_src_as_const_value(instr->src[offset_idx]);
4855 if (offset) {
4856 assert(offset->u32 == 0);
4857 } else {
4858 /* the lower 15 bits of the prim_mask contain the offset into LDS
4859 * while the upper bits contain the number of prims */
4860 Temp offset_src = get_ssa_temp(ctx, instr->src[offset_idx].ssa);
4861 assert(offset_src.regClass() == s1 && "TODO: divergent offsets...");
4862 Builder bld(ctx->program, ctx->block);
4863 Temp stride = bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc), prim_mask, Operand(16u));
4864 stride = bld.sop1(aco_opcode::s_bcnt1_i32_b32, bld.def(s1), bld.def(s1, scc), stride);
4865 stride = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), stride, Operand(48u));
4866 offset_src = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), stride, offset_src);
4867 prim_mask = bld.sop2(aco_opcode::s_add_i32, bld.def(s1, m0), bld.def(s1, scc), offset_src, prim_mask);
4868 }
4869
4870 unsigned idx = nir_intrinsic_base(instr);
4871 unsigned component = nir_intrinsic_component(instr);
4872 unsigned vertex_id = 2; /* P0 */
4873
4874 if (instr->intrinsic == nir_intrinsic_load_input_vertex) {
4875 nir_const_value* src0 = nir_src_as_const_value(instr->src[0]);
4876 switch (src0->u32) {
4877 case 0:
4878 vertex_id = 2; /* P0 */
4879 break;
4880 case 1:
4881 vertex_id = 0; /* P10 */
4882 break;
4883 case 2:
4884 vertex_id = 1; /* P20 */
4885 break;
4886 default:
4887 unreachable("invalid vertex index");
4888 }
4889 }
4890
4891 if (dst.size() == 1) {
4892 bld.vintrp(aco_opcode::v_interp_mov_f32, Definition(dst), Operand(vertex_id), bld.m0(prim_mask), idx, component);
4893 } else {
4894 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
4895 for (unsigned i = 0; i < dst.size(); i++)
4896 vec->operands[i] = bld.vintrp(aco_opcode::v_interp_mov_f32, bld.def(v1), Operand(vertex_id), bld.m0(prim_mask), idx, component + i);
4897 vec->definitions[0] = Definition(dst);
4898 bld.insert(std::move(vec));
4899 }
4900
4901 } else if (ctx->shader->info.stage == MESA_SHADER_TESS_EVAL) {
4902 Temp ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_HS_TESS_OFFCHIP * 16u));
4903 Temp soffset = get_arg(ctx, ctx->args->oc_lds);
4904 std::pair<Temp, unsigned> offs = get_tcs_per_patch_output_vmem_offset(ctx, instr);
4905 unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8u;
4906
4907 load_vmem_mubuf(ctx, dst, ring, offs.first, soffset, offs.second, elem_size_bytes, instr->dest.ssa.num_components);
4908 } else {
4909 unreachable("Shader stage not implemented");
4910 }
4911 }
4912
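/* Calculates the offset of a GS per-vertex input in the ESGS ring or in
 * LDS. With merged ES/GS (GFX9+), two 16-bit vertex offsets are packed
 * into each gs_vtx_offset argument, hence the 16-bit extractions below. */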
4913 std::pair<Temp, unsigned> get_gs_per_vertex_input_offset(isel_context *ctx, nir_intrinsic_instr *instr, unsigned base_stride = 1u)
4914 {
4915 assert(ctx->shader->info.stage == MESA_SHADER_GEOMETRY);
4916
4917 Builder bld(ctx->program, ctx->block);
4918 nir_src *vertex_src = nir_get_io_vertex_index_src(instr);
4919 Temp vertex_offset;
4920
4921 if (!nir_src_is_const(*vertex_src)) {
4922 /* better code could be created, but this case probably doesn't happen
4923 * much in practice */
4924 Temp indirect_vertex = as_vgpr(ctx, get_ssa_temp(ctx, vertex_src->ssa));
4925 for (unsigned i = 0; i < ctx->shader->info.gs.vertices_in; i++) {
4926 Temp elem;
4927
4928 if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs) {
4929 elem = get_arg(ctx, ctx->args->gs_vtx_offset[i / 2u * 2u]);
4930 if (i % 2u)
4931 elem = bld.vop2(aco_opcode::v_lshrrev_b32, bld.def(v1), Operand(16u), elem);
4932 } else {
4933 elem = get_arg(ctx, ctx->args->gs_vtx_offset[i]);
4934 }
4935
4936 if (vertex_offset.id()) {
4937 Temp cond = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.hint_vcc(bld.def(bld.lm)),
4938 Operand(i), indirect_vertex);
4939 vertex_offset = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), vertex_offset, elem, cond);
4940 } else {
4941 vertex_offset = elem;
4942 }
4943 }
4944
4945 if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs)
4946 vertex_offset = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xffffu), vertex_offset);
4947 } else {
4948 unsigned vertex = nir_src_as_uint(*vertex_src);
4949 if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs)
4950 vertex_offset = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1),
4951 get_arg(ctx, ctx->args->gs_vtx_offset[vertex / 2u * 2u]),
4952 Operand((vertex % 2u) * 16u), Operand(16u));
4953 else
4954 vertex_offset = get_arg(ctx, ctx->args->gs_vtx_offset[vertex]);
4955 }
4956
4957 std::pair<Temp, unsigned> offs = get_intrinsic_io_basic_offset(ctx, instr, base_stride);
4958 offs = offset_add(ctx, offs, std::make_pair(vertex_offset, 0u));
4959 return offset_mul(ctx, offs, 4u);
4960 }
4961
4962 void visit_load_gs_per_vertex_input(isel_context *ctx, nir_intrinsic_instr *instr)
4963 {
4964 assert(ctx->shader->info.stage == MESA_SHADER_GEOMETRY);
4965
4966 Builder bld(ctx->program, ctx->block);
4967 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
4968 unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8;
4969
4970 if (ctx->stage == geometry_gs) {
4971 std::pair<Temp, unsigned> offs = get_gs_per_vertex_input_offset(ctx, instr, ctx->program->wave_size);
4972 Temp ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_ESGS_GS * 16u));
4973 load_vmem_mubuf(ctx, dst, ring, offs.first, Temp(), offs.second, elem_size_bytes, instr->dest.ssa.num_components, 4u * ctx->program->wave_size, false, true);
4974 } else if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs) {
4975 std::pair<Temp, unsigned> offs = get_gs_per_vertex_input_offset(ctx, instr);
4976 unsigned lds_align = calculate_lds_alignment(ctx, offs.second);
4977 load_lds(ctx, elem_size_bytes, dst, offs.first, offs.second, lds_align);
4978 } else {
4979 unreachable("Unsupported GS stage.");
4980 }
4981 }
4982
4983 void visit_load_tcs_per_vertex_input(isel_context *ctx, nir_intrinsic_instr *instr)
4984 {
4985 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
4986
4987 Builder bld(ctx->program, ctx->block);
4988 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
4989
4990 if (load_input_from_temps(ctx, instr, dst))
4991 return;
4992
4993 std::pair<Temp, unsigned> offs = get_tcs_per_vertex_input_lds_offset(ctx, instr);
4994 unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8;
4995 unsigned lds_align = calculate_lds_alignment(ctx, offs.second);
4996
4997 load_lds(ctx, elem_size_bytes, dst, offs.first, offs.second, lds_align);
4998 }
4999
5000 void visit_load_tes_per_vertex_input(isel_context *ctx, nir_intrinsic_instr *instr)
5001 {
5002 assert(ctx->shader->info.stage == MESA_SHADER_TESS_EVAL);
5003
5004 Builder bld(ctx->program, ctx->block);
5005
5006 Temp ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_HS_TESS_OFFCHIP * 16u));
5007 Temp oc_lds = get_arg(ctx, ctx->args->oc_lds);
5008 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5009
5010 unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8;
5011 std::pair<Temp, unsigned> offs = get_tcs_per_vertex_output_vmem_offset(ctx, instr);
5012
5013 load_vmem_mubuf(ctx, dst, ring, offs.first, oc_lds, offs.second, elem_size_bytes, instr->dest.ssa.num_components, 0u, true, true);
5014 }
5015
5016 void visit_load_per_vertex_input(isel_context *ctx, nir_intrinsic_instr *instr)
5017 {
5018 switch (ctx->shader->info.stage) {
5019 case MESA_SHADER_GEOMETRY:
5020 visit_load_gs_per_vertex_input(ctx, instr);
5021 break;
5022 case MESA_SHADER_TESS_CTRL:
5023 visit_load_tcs_per_vertex_input(ctx, instr);
5024 break;
5025 case MESA_SHADER_TESS_EVAL:
5026 visit_load_tes_per_vertex_input(ctx, instr);
5027 break;
5028 default:
5029 unreachable("Unimplemented shader stage");
5030 }
5031 }
5032
5033 void visit_load_per_vertex_output(isel_context *ctx, nir_intrinsic_instr *instr)
5034 {
5035 visit_load_tcs_output(ctx, instr, true);
5036 }
5037
5038 void visit_store_per_vertex_output(isel_context *ctx, nir_intrinsic_instr *instr)
5039 {
5040 assert(ctx->stage == tess_control_hs || ctx->stage == vertex_tess_control_hs);
5041 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
5042
5043 visit_store_tcs_output(ctx, instr, true);
5044 }
5045
5046 void visit_load_tess_coord(isel_context *ctx, nir_intrinsic_instr *instr)
5047 {
5048 assert(ctx->shader->info.stage == MESA_SHADER_TESS_EVAL);
5049
5050 Builder bld(ctx->program, ctx->block);
5051 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5052
5053 Operand tes_u(get_arg(ctx, ctx->args->tes_u));
5054 Operand tes_v(get_arg(ctx, ctx->args->tes_v));
5055 Operand tes_w(0u);
5056
5057 if (ctx->shader->info.tess.primitive_mode == GL_TRIANGLES) {
5058 Temp tmp = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), tes_u, tes_v);
5059 tmp = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), Operand(0x3f800000u /* 1.0f */), tmp);
5060 tes_w = Operand(tmp);
5061 }
5062
5063 Temp tess_coord = bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tes_u, tes_v, tes_w);
5064 emit_split_vector(ctx, tess_coord, 3);
5065 }
5066
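/* Returns a pointer to the given descriptor set: either a user SGPR
 * argument, or a pointer loaded from an indirect table when the sets
 * can't all be passed in user SGPRs. */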
5067 Temp load_desc_ptr(isel_context *ctx, unsigned desc_set)
5068 {
5069 if (ctx->program->info->need_indirect_descriptor_sets) {
5070 Builder bld(ctx->program, ctx->block);
5071 Temp ptr64 = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->descriptor_sets[0]));
5072 Operand off = bld.copy(bld.def(s1), Operand(desc_set << 2));
5073 return bld.smem(aco_opcode::s_load_dword, bld.def(s1), ptr64, off);
5074 }
5075
5076 return get_arg(ctx, ctx->args->descriptor_sets[desc_set]);
5077 }
5078
5079
5080 void visit_load_resource(isel_context *ctx, nir_intrinsic_instr *instr)
5081 {
5082 Builder bld(ctx->program, ctx->block);
5083 Temp index = get_ssa_temp(ctx, instr->src[0].ssa);
5084 if (!nir_dest_is_divergent(instr->dest))
5085 index = bld.as_uniform(index);
5086 unsigned desc_set = nir_intrinsic_desc_set(instr);
5087 unsigned binding = nir_intrinsic_binding(instr);
5088
5089 Temp desc_ptr;
5090 radv_pipeline_layout *pipeline_layout = ctx->options->layout;
5091 radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
5092 unsigned offset = layout->binding[binding].offset;
5093 unsigned stride;
5094 if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
5095 layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
5096 unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start + layout->binding[binding].dynamic_offset_offset;
5097 desc_ptr = get_arg(ctx, ctx->args->ac.push_constants);
5098 offset = pipeline_layout->push_constant_size + 16 * idx;
5099 stride = 16;
5100 } else {
5101 desc_ptr = load_desc_ptr(ctx, desc_set);
5102 stride = layout->binding[binding].size;
5103 }
5104
5105 nir_const_value* nir_const_index = nir_src_as_const_value(instr->src[0]);
5106 unsigned const_index = nir_const_index ? nir_const_index->u32 : 0;
5107 if (stride != 1) {
5108 if (nir_const_index) {
5109 const_index = const_index * stride;
5110 } else if (index.type() == RegType::vgpr) {
5111 bool index24bit = layout->binding[binding].array_size <= 0x1000000;
5112 index = bld.v_mul_imm(bld.def(v1), index, stride, index24bit);
5113 } else {
5114 index = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(stride), Operand(index));
5115 }
5116 }
5117 if (offset) {
5118 if (nir_const_index) {
5119 const_index = const_index + offset;
5120 } else if (index.type() == RegType::vgpr) {
5121 index = bld.vadd32(bld.def(v1), Operand(offset), index);
5122 } else {
5123 index = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), Operand(offset), Operand(index));
5124 }
5125 }
5126
5127 if (nir_const_index && const_index == 0) {
5128 index = desc_ptr;
5129 } else if (index.type() == RegType::vgpr) {
5130 index = bld.vadd32(bld.def(v1),
5131 nir_const_index ? Operand(const_index) : Operand(index),
5132 Operand(desc_ptr));
5133 } else {
5134 index = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
5135 nir_const_index ? Operand(const_index) : Operand(index),
5136 Operand(desc_ptr));
5137 }
5138
5139 bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), index);
5140 }
5141
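/* Loads from a buffer resource, preferring SMEM when the result is
 * uniform. Writable buffers only use SMEM on GFX8+ with dword-sized
 * channels (presumably since older hardware lacks coherent/subdword SMEM
 * access); everything else falls back to MUBUF. */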
5142 void load_buffer(isel_context *ctx, unsigned num_components, unsigned component_size,
5143 Temp dst, Temp rsrc, Temp offset, unsigned align_mul, unsigned align_offset,
5144 bool glc=false, bool readonly=true)
5145 {
5146 Builder bld(ctx->program, ctx->block);
5147
5148 bool use_smem = dst.type() != RegType::vgpr && ((ctx->options->chip_class >= GFX8 && component_size >= 4) || readonly);
5149 if (use_smem)
5150 offset = bld.as_uniform(offset);
5151
5152 LoadEmitInfo info = {Operand(offset), dst, num_components, component_size, rsrc};
5153 info.glc = glc;
5154 info.barrier = readonly ? barrier_none : barrier_buffer;
5155 info.can_reorder = readonly;
5156 info.align_mul = align_mul;
5157 info.align_offset = align_offset;
5158 if (use_smem)
5159 emit_smem_load(ctx, bld, &info);
5160 else
5161 emit_mubuf_load(ctx, bld, &info);
5162 }
5163
5164 void visit_load_ubo(isel_context *ctx, nir_intrinsic_instr *instr)
5165 {
5166 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5167 Temp rsrc = get_ssa_temp(ctx, instr->src[0].ssa);
5168
5169 Builder bld(ctx->program, ctx->block);
5170
5171 nir_intrinsic_instr* idx_instr = nir_instr_as_intrinsic(instr->src[0].ssa->parent_instr);
5172 unsigned desc_set = nir_intrinsic_desc_set(idx_instr);
5173 unsigned binding = nir_intrinsic_binding(idx_instr);
5174 radv_descriptor_set_layout *layout = ctx->options->layout->set[desc_set].layout;
5175
5176 if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
5177 uint32_t desc_type = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
5178 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
5179 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
5180 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
5181 if (ctx->options->chip_class >= GFX10) {
5182 desc_type |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
5183 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
5184 S_008F0C_RESOURCE_LEVEL(1);
5185 } else {
5186 desc_type |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
5187 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
5188 }
5189 Temp upper_dwords = bld.pseudo(aco_opcode::p_create_vector, bld.def(s3),
5190 Operand(S_008F04_BASE_ADDRESS_HI(ctx->options->address32_hi)),
5191 Operand(0xFFFFFFFFu),
5192 Operand(desc_type));
5193 rsrc = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4),
5194 rsrc, upper_dwords);
5195 } else {
5196 rsrc = convert_pointer_to_64_bit(ctx, rsrc);
5197 rsrc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), rsrc, Operand(0u));
5198 }
5199 unsigned size = instr->dest.ssa.bit_size / 8;
5200 load_buffer(ctx, instr->num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa),
5201 nir_intrinsic_align_mul(instr), nir_intrinsic_align_offset(instr));
5202 }
5203
5204 void visit_load_push_constant(isel_context *ctx, nir_intrinsic_instr *instr)
5205 {
5206 Builder bld(ctx->program, ctx->block);
5207 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5208 unsigned offset = nir_intrinsic_base(instr);
5209 unsigned count = instr->dest.ssa.num_components;
5210 nir_const_value *index_cv = nir_src_as_const_value(instr->src[0]);
5211
5212 if (index_cv && instr->dest.ssa.bit_size == 32) {
5213 unsigned start = (offset + index_cv->u32) / 4u;
5214 start -= ctx->args->ac.base_inline_push_consts;
5215 if (start + count <= ctx->args->ac.num_inline_push_consts) {
5216 std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
5217 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
5218 for (unsigned i = 0; i < count; ++i) {
5219 elems[i] = get_arg(ctx, ctx->args->ac.inline_push_consts[start + i]);
5220 vec->operands[i] = Operand{elems[i]};
5221 }
5222 vec->definitions[0] = Definition(dst);
5223 ctx->block->instructions.emplace_back(std::move(vec));
5224 ctx->allocated_vec.emplace(dst.id(), elems);
5225 return;
5226 }
5227 }
5228
5229 Temp index = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
5230 if (offset != 0) // TODO check if index != 0 as well
5231 index = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), Operand(offset), index);
5232 Temp ptr = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->ac.push_constants));
5233 Temp vec = dst;
5234 bool trim = false;
5235 bool aligned = true;
5236
5237 if (instr->dest.ssa.bit_size == 8) {
5238 aligned = index_cv && (offset + index_cv->u32) % 4 == 0;
5239 bool fits_in_dword = count == 1 || (index_cv && ((offset + index_cv->u32) % 4 + count) <= 4);
5240 if (!aligned)
5241 vec = fits_in_dword ? bld.tmp(s1) : bld.tmp(s2);
5242 } else if (instr->dest.ssa.bit_size == 16) {
5243 aligned = index_cv && (offset + index_cv->u32) % 4 == 0;
5244 if (!aligned)
5245 vec = count == 4 ? bld.tmp(s4) : count > 1 ? bld.tmp(s2) : bld.tmp(s1);
5246 }
5247
5248 aco_opcode op;
5249
5250 switch (vec.size()) {
5251 case 1:
5252 op = aco_opcode::s_load_dword;
5253 break;
5254 case 2:
5255 op = aco_opcode::s_load_dwordx2;
5256 break;
5257 case 3:
5258 vec = bld.tmp(s4);
5259 trim = true;
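/* fallthrough */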
5260 case 4:
5261 op = aco_opcode::s_load_dwordx4;
5262 break;
5263 case 6:
5264 vec = bld.tmp(s8);
5265 trim = true;
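/* fallthrough */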
5266 case 8:
5267 op = aco_opcode::s_load_dwordx8;
5268 break;
5269 default:
5270 unreachable("unimplemented or forbidden load_push_constant.");
5271 }
5272
5273 bld.smem(op, Definition(vec), ptr, index);
5274
5275 if (!aligned) {
5276 Operand byte_offset = index_cv ? Operand((offset + index_cv->u32) % 4) : Operand(index);
5277 byte_align_scalar(ctx, vec, byte_offset, dst);
5278 return;
5279 }
5280
5281 if (trim) {
5282 emit_split_vector(ctx, vec, 4);
5283 RegClass rc = dst.size() == 3 ? s1 : s2;
5284 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
5285 emit_extract_vector(ctx, vec, 0, rc),
5286 emit_extract_vector(ctx, vec, 1, rc),
5287 emit_extract_vector(ctx, vec, 2, rc));
5288
5289 }
5290 emit_split_vector(ctx, dst, instr->dest.ssa.num_components);
5291 }
5292
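/* Loads from the shader's embedded constant data. A buffer descriptor is
 * built around p_constaddr, with its size clamped to constant_data_size so
 * that out-of-bounds offsets are handled by the buffer's range checking. */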
5293 void visit_load_constant(isel_context *ctx, nir_intrinsic_instr *instr)
5294 {
5295 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5296
5297 Builder bld(ctx->program, ctx->block);
5298
5299 uint32_t desc_type = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
5300 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
5301 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
5302 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
5303 if (ctx->options->chip_class >= GFX10) {
5304 desc_type |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
5305 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
5306 S_008F0C_RESOURCE_LEVEL(1);
5307 } else {
5308 desc_type |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
5309 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
5310 }
5311
5312 unsigned base = nir_intrinsic_base(instr);
5313 unsigned range = nir_intrinsic_range(instr);
5314
5315 Temp offset = get_ssa_temp(ctx, instr->src[0].ssa);
5316 if (base && offset.type() == RegType::sgpr)
5317 offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset, Operand(base));
5318 else if (base && offset.type() == RegType::vgpr)
5319 offset = bld.vadd32(bld.def(v1), Operand(base), offset);
5320
5321 Temp rsrc = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4),
5322 bld.sop1(aco_opcode::p_constaddr, bld.def(s2), bld.def(s1, scc), Operand(ctx->constant_data_offset)),
5323 Operand(MIN2(base + range, ctx->shader->constant_data_size)),
5324 Operand(desc_type));
5325 unsigned size = instr->dest.ssa.bit_size / 8;
5326 // TODO: get alignment information for subdword constants
5327 load_buffer(ctx, instr->num_components, size, dst, rsrc, offset, size, 0);
5328 }
5329
5330 void visit_discard_if(isel_context *ctx, nir_intrinsic_instr *instr)
5331 {
5332 if (ctx->cf_info.loop_nest_depth || ctx->cf_info.parent_if.is_divergent)
5333 ctx->cf_info.exec_potentially_empty_discard = true;
5334
5335 ctx->program->needs_exact = true;
5336
5337 // TODO: optimize uniform conditions
5338 Builder bld(ctx->program, ctx->block);
5339 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
5340 assert(src.regClass() == bld.lm);
5341 src = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
5342 bld.pseudo(aco_opcode::p_discard_if, src);
5343 ctx->block->kind |= block_kind_uses_discard_if;
5344 return;
5345 }
5346
5347 void visit_discard(isel_context* ctx, nir_intrinsic_instr *instr)
5348 {
5349 Builder bld(ctx->program, ctx->block);
5350
5351 if (ctx->cf_info.loop_nest_depth || ctx->cf_info.parent_if.is_divergent)
5352 ctx->cf_info.exec_potentially_empty_discard = true;
5353
5354 bool divergent = ctx->cf_info.parent_if.is_divergent ||
5355 ctx->cf_info.parent_loop.has_divergent_continue;
5356
5357 if (ctx->block->loop_nest_depth &&
5358 ((nir_instr_is_last(&instr->instr) && !divergent) || divergent)) {
5359 /* we handle discards the same way as jump instructions */
5360 append_logical_end(ctx->block);
5361
5362 /* in loops, discard behaves like break */
5363 Block *linear_target = ctx->cf_info.parent_loop.exit;
5364 ctx->block->kind |= block_kind_discard;
5365
5366 if (!divergent) {
5367 /* uniform discard - loop ends here */
5368 assert(nir_instr_is_last(&instr->instr));
5369 ctx->block->kind |= block_kind_uniform;
5370 ctx->cf_info.has_branch = true;
5371 bld.branch(aco_opcode::p_branch);
5372 add_linear_edge(ctx->block->index, linear_target);
5373 return;
5374 }
5375
5376 /* we add a break right behind the discard() instructions */
5377 ctx->block->kind |= block_kind_break;
5378 unsigned idx = ctx->block->index;
5379
5380 ctx->cf_info.parent_loop.has_divergent_branch = true;
5381 ctx->cf_info.nir_to_aco[instr->instr.block->index] = idx;
5382
5383 /* remove critical edges from linear CFG */
5384 bld.branch(aco_opcode::p_branch);
5385 Block* break_block = ctx->program->create_and_insert_block();
5386 break_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
5387 break_block->kind |= block_kind_uniform;
5388 add_linear_edge(idx, break_block);
5389 add_linear_edge(break_block->index, linear_target);
5390 bld.reset(break_block);
5391 bld.branch(aco_opcode::p_branch);
5392
5393 Block* continue_block = ctx->program->create_and_insert_block();
5394 continue_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
5395 add_linear_edge(idx, continue_block);
5396 append_logical_start(continue_block);
5397 ctx->block = continue_block;
5398
5399 return;
5400 }
5401
5402 /* it can currently happen that NIR doesn't remove the unreachable code */
5403 if (!nir_instr_is_last(&instr->instr)) {
5404 ctx->program->needs_exact = true;
5405 /* save exec to a temporary so that it doesn't get
5406  * overwritten by the outer exec masks before the discard */
5407 Temp cond = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), Operand(0xFFFFFFFF), Operand(exec, bld.lm));
5408 bld.pseudo(aco_opcode::p_discard_if, cond);
5409 ctx->block->kind |= block_kind_uses_discard_if;
5410 return;
5411 }
5412
5413 /* This condition is incorrect for uniformly branched discards in a loop
5414 * predicated by a divergent condition, but the above code catches that case
5415 * and the discard would end up turning into a discard_if.
5416 * For example:
5417 * if (divergent) {
5418 * while (...) {
5419 * if (uniform) {
5420 * discard;
5421 * }
5422 * }
5423 * }
5424 */
5425 if (!ctx->cf_info.parent_if.is_divergent) {
5426 /* program just ends here */
5427 ctx->block->kind |= block_kind_uniform;
5428 bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
5429 0 /* enabled mask */, 9 /* dest */,
5430 false /* compressed */, true/* done */, true /* valid mask */);
5431 bld.sopp(aco_opcode::s_endpgm);
5432 // TODO: it will potentially be followed by a branch which is dead code to sanitize NIR phis
5433 } else {
5434 ctx->block->kind |= block_kind_discard;
5435 /* branch and linear edge is added by visit_if() */
5436 }
5437 }
5438
5439 enum aco_descriptor_type {
5440 ACO_DESC_IMAGE,
5441 ACO_DESC_FMASK,
5442 ACO_DESC_SAMPLER,
5443 ACO_DESC_BUFFER,
5444 ACO_DESC_PLANE_0,
5445 ACO_DESC_PLANE_1,
5446 ACO_DESC_PLANE_2,
5447 };
5448
5449 static bool
5450 should_declare_array(isel_context *ctx, enum glsl_sampler_dim sampler_dim, bool is_array) {
5451 if (sampler_dim == GLSL_SAMPLER_DIM_BUF)
5452 return false;
5453 ac_image_dim dim = ac_get_sampler_dim(ctx->options->chip_class, sampler_dim, is_array);
5454 return dim == ac_image_cube ||
5455 dim == ac_image_1darray ||
5456 dim == ac_image_2darray ||
5457 dim == ac_image_2darraymsaa;
5458 }
5459
5460 Temp get_sampler_desc(isel_context *ctx, nir_deref_instr *deref_instr,
5461 enum aco_descriptor_type desc_type,
5462 const nir_tex_instr *tex_instr, bool image, bool write)
5463 {
5464 /* FIXME: we should lower the deref with some new nir_intrinsic_load_desc
5465 std::unordered_map<uint64_t, Temp>::iterator it = ctx->tex_desc.find((uint64_t) desc_type << 32 | deref_instr->dest.ssa.index);
5466 if (it != ctx->tex_desc.end())
5467 return it->second;
5468 */
5469 Temp index = Temp();
5470 bool index_set = false;
5471 unsigned constant_index = 0;
5472 unsigned descriptor_set;
5473 unsigned base_index;
5474 Builder bld(ctx->program, ctx->block);
5475
5476 if (!deref_instr) {
5477 assert(tex_instr && !image);
5478 descriptor_set = 0;
5479 base_index = tex_instr->sampler_index;
5480 } else {
5481 while(deref_instr->deref_type != nir_deref_type_var) {
5482 unsigned array_size = glsl_get_aoa_size(deref_instr->type);
5483 if (!array_size)
5484 array_size = 1;
5485
5486 assert(deref_instr->deref_type == nir_deref_type_array);
5487 nir_const_value *const_value = nir_src_as_const_value(deref_instr->arr.index);
5488 if (const_value) {
5489 constant_index += array_size * const_value->u32;
5490 } else {
5491 Temp indirect = get_ssa_temp(ctx, deref_instr->arr.index.ssa);
5492 if (indirect.type() == RegType::vgpr)
5493 indirect = bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), indirect);
5494
5495 if (array_size != 1)
5496 indirect = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(array_size), indirect);
5497
5498 if (!index_set) {
5499 index = indirect;
5500 index_set = true;
5501 } else {
5502 index = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), index, indirect);
5503 }
5504 }
5505
5506 deref_instr = nir_src_as_deref(deref_instr->parent);
5507 }
5508 descriptor_set = deref_instr->var->data.descriptor_set;
5509 base_index = deref_instr->var->data.binding;
5510 }
5511
5512 Temp list = load_desc_ptr(ctx, descriptor_set);
5513 list = convert_pointer_to_64_bit(ctx, list);
5514
5515 struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
5516 struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
5517 unsigned offset = binding->offset;
5518 unsigned stride = binding->size;
5519 aco_opcode opcode;
5520 RegClass type;
5521
5522 assert(base_index < layout->binding_count);
5523
5524 switch (desc_type) {
5525 case ACO_DESC_IMAGE:
5526 type = s8;
5527 opcode = aco_opcode::s_load_dwordx8;
5528 break;
5529 case ACO_DESC_FMASK:
5530 type = s8;
5531 opcode = aco_opcode::s_load_dwordx8;
5532 offset += 32;
5533 break;
5534 case ACO_DESC_SAMPLER:
5535 type = s4;
5536 opcode = aco_opcode::s_load_dwordx4;
5537 if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
5538 offset += radv_combined_image_descriptor_sampler_offset(binding);
5539 break;
5540 case ACO_DESC_BUFFER:
5541 type = s4;
5542 opcode = aco_opcode::s_load_dwordx4;
5543 break;
5544 case ACO_DESC_PLANE_0:
5545 case ACO_DESC_PLANE_1:
5546 type = s8;
5547 opcode = aco_opcode::s_load_dwordx8;
5548 offset += 32 * (desc_type - ACO_DESC_PLANE_0);
5549 break;
5550 case ACO_DESC_PLANE_2:
5551 type = s4;
5552 opcode = aco_opcode::s_load_dwordx4;
5553 offset += 64;
5554 break;
5555 default:
5556 unreachable("invalid desc_type\n");
5557 }
5558
5559 offset += constant_index * stride;
5560
5561 if (desc_type == ACO_DESC_SAMPLER && binding->immutable_samplers_offset &&
5562 (!index_set || binding->immutable_samplers_equal)) {
5563 if (binding->immutable_samplers_equal)
5564 constant_index = 0;
5565
5566 const uint32_t *samplers = radv_immutable_samplers(layout, binding);
5567 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4),
5568 Operand(samplers[constant_index * 4 + 0]),
5569 Operand(samplers[constant_index * 4 + 1]),
5570 Operand(samplers[constant_index * 4 + 2]),
5571 Operand(samplers[constant_index * 4 + 3]));
5572 }
5573
5574 Operand off;
5575 if (!index_set) {
5576 off = bld.copy(bld.def(s1), Operand(offset));
5577 } else {
5578 off = Operand((Temp)bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), Operand(offset),
5579 bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(stride), index)));
5580 }
5581
5582 Temp res = bld.smem(opcode, bld.def(type), list, off);
5583
5584 if (desc_type == ACO_DESC_PLANE_2) {
5585 Temp components[8];
5586 for (unsigned i = 0; i < 8; i++)
5587 components[i] = bld.tmp(s1);
5588 bld.pseudo(aco_opcode::p_split_vector,
5589 Definition(components[0]),
5590 Definition(components[1]),
5591 Definition(components[2]),
5592 Definition(components[3]),
5593 res);
5594
5595 Temp desc2 = get_sampler_desc(ctx, deref_instr, ACO_DESC_PLANE_1, tex_instr, image, write);
5596 bld.pseudo(aco_opcode::p_split_vector,
5597 bld.def(s1), bld.def(s1), bld.def(s1), bld.def(s1),
5598 Definition(components[4]),
5599 Definition(components[5]),
5600 Definition(components[6]),
5601 Definition(components[7]),
5602 desc2);
5603
5604 res = bld.pseudo(aco_opcode::p_create_vector, bld.def(s8),
5605 components[0], components[1], components[2], components[3],
5606 components[4], components[5], components[6], components[7]);
5607 }
5608
5609 return res;
5610 }
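/* Net effect of the address computation above (a sketch, ignoring the
 * immutable-sampler early-out): the descriptor is read from
 *
 *   list + binding->offset + constant_index * stride + index * stride
 *
 * where the constant part is folded into the SMEM offset at compile
 * time and only the dynamic part needs s_mul_i32/s_add_i32. */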
5611
5612 static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
5613 {
5614 switch (dim) {
5615 case GLSL_SAMPLER_DIM_BUF:
5616 return 1;
5617 case GLSL_SAMPLER_DIM_1D:
5618 return array ? 2 : 1;
5619 case GLSL_SAMPLER_DIM_2D:
5620 return array ? 3 : 2;
5621 case GLSL_SAMPLER_DIM_MS:
5622 return array ? 4 : 3;
5623 case GLSL_SAMPLER_DIM_3D:
5624 case GLSL_SAMPLER_DIM_CUBE:
5625 return 3;
5626 case GLSL_SAMPLER_DIM_RECT:
5627 case GLSL_SAMPLER_DIM_SUBPASS:
5628 return 2;
5629 case GLSL_SAMPLER_DIM_SUBPASS_MS:
5630 return 3;
5631 default:
5632 break;
5633 }
5634 return 0;
5635 }
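/* For example: a 2D image needs (x, y), a 2D array adds a layer
 * (x, y, layer), and a multisampled 2D array needs four coordinates
 * (x, y, layer, sample); hence GLSL_SAMPLER_DIM_MS returns
 * array ? 4 : 3 above. */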
5636
5637
5638 /* Adjust the sample index according to FMASK.
5639 *
5640 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
5641 * which is the identity mapping. Each nibble says which physical sample
5642 * should be fetched to get that sample.
5643 *
5644 * For example, 0x11111100 means there are only 2 samples stored and
5645 * the second sample covers 3/4 of the pixel. When reading samples 0
5646 * and 1, return physical sample 0 (determined by the first two 0s
5647 * in FMASK), otherwise return physical sample 1.
5648 *
5649 * The sample index should be adjusted as follows:
5650 * sample_index = (fmask >> (sample_index * 4)) & 0xF;
5651 */
5652 static Temp adjust_sample_index_using_fmask(isel_context *ctx, bool da, std::vector<Temp>& coords, Operand sample_index, Temp fmask_desc_ptr)
5653 {
5654 Builder bld(ctx->program, ctx->block);
5655 Temp fmask = bld.tmp(v1);
5656 unsigned dim = ctx->options->chip_class >= GFX10
5657 ? ac_get_sampler_dim(ctx->options->chip_class, GLSL_SAMPLER_DIM_2D, da)
5658 : 0;
5659
5660 Temp coord = da ? bld.pseudo(aco_opcode::p_create_vector, bld.def(v3), coords[0], coords[1], coords[2]) :
5661 bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), coords[0], coords[1]);
5662 aco_ptr<MIMG_instruction> load{create_instruction<MIMG_instruction>(aco_opcode::image_load, Format::MIMG, 3, 1)};
5663 load->operands[0] = Operand(fmask_desc_ptr);
5664 load->operands[1] = Operand(s4); /* no sampler */
5665 load->operands[2] = Operand(coord);
5666 load->definitions[0] = Definition(fmask);
5667 load->glc = false;
5668 load->dlc = false;
5669 load->dmask = 0x1;
5670 load->unrm = true;
5671 load->da = da;
5672 load->dim = dim;
5673 load->can_reorder = true; /* fmask images shouldn't be modified */
5674 ctx->block->instructions.emplace_back(std::move(load));
5675
5676 Operand sample_index4;
5677 if (sample_index.isConstant()) {
5678 if (sample_index.constantValue() < 16) {
5679 sample_index4 = Operand(sample_index.constantValue() << 2);
5680 } else {
5681 sample_index4 = Operand(0u);
5682 }
5683 } else if (sample_index.regClass() == s1) {
5684 sample_index4 = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), sample_index, Operand(2u));
5685 } else {
5686 assert(sample_index.regClass() == v1);
5687 sample_index4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), sample_index);
5688 }
5689
5690 Temp final_sample;
5691 if (sample_index4.isConstant() && sample_index4.constantValue() == 0)
5692 final_sample = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(15u), fmask);
5693 else if (sample_index4.isConstant() && sample_index4.constantValue() == 28)
5694 final_sample = bld.vop2(aco_opcode::v_lshrrev_b32, bld.def(v1), Operand(28u), fmask);
5695 else
5696 final_sample = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), fmask, sample_index4, Operand(4u));
5697
5698 /* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
5699 * resource descriptor is 0 (invalid).
5700 */
5701 Temp compare = bld.tmp(bld.lm);
5702 bld.vopc_e64(aco_opcode::v_cmp_lg_u32, Definition(compare),
5703 Operand(0u), emit_extract_vector(ctx, fmask_desc_ptr, 1, s1)).def(0).setHint(vcc);
5704
5705 Temp sample_index_v = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), sample_index);
5706
5707 /* Replace the MSAA sample index. */
5708 return bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), sample_index_v, final_sample, compare);
5709 }
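/* A host-side sketch of the remapping done above (illustration only,
 * not part of the compiler):
 *
 *   static unsigned remap_sample(uint32_t fmask, unsigned sample)
 *   {
 *      return (fmask >> (sample * 4)) & 0xF;
 *   }
 *
 * With the 0x11111100 example from the header comment,
 * remap_sample(0x11111100, 3) == 1: logical sample 3 reads physical
 * sample 1. The v_bfe_u32 above extracts exactly this 4-bit field
 * (offset sample_index * 4, width 4). */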
5710
5711 static Temp get_image_coords(isel_context *ctx, const nir_intrinsic_instr *instr, const struct glsl_type *type)
5712 {
5713
5714 Temp src0 = get_ssa_temp(ctx, instr->src[1].ssa);
5715 enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
5716 bool is_array = glsl_sampler_type_is_array(type);
5717 ASSERTED bool add_frag_pos = (dim == GLSL_SAMPLER_DIM_SUBPASS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
5718 assert(!add_frag_pos && "Input attachments should be lowered.");
5719 bool is_ms = (dim == GLSL_SAMPLER_DIM_MS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
5720 bool gfx9_1d = ctx->options->chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D;
5721 int count = image_type_to_components_count(dim, is_array);
5722 std::vector<Temp> coords(count);
5723 Builder bld(ctx->program, ctx->block);
5724
5725 if (is_ms) {
5726 count--;
5727 Temp src2 = get_ssa_temp(ctx, instr->src[2].ssa);
5728 /* get sample index */
5729 if (instr->intrinsic == nir_intrinsic_image_deref_load) {
5730 nir_const_value *sample_cv = nir_src_as_const_value(instr->src[2]);
5731 Operand sample_index = sample_cv ? Operand(sample_cv->u32) : Operand(emit_extract_vector(ctx, src2, 0, v1));
5732 std::vector<Temp> fmask_load_address;
5733 for (unsigned i = 0; i < (is_array ? 3 : 2); i++)
5734 fmask_load_address.emplace_back(emit_extract_vector(ctx, src0, i, v1));
5735
5736 Temp fmask_desc_ptr = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_FMASK, nullptr, false, false);
5737 coords[count] = adjust_sample_index_using_fmask(ctx, is_array, fmask_load_address, sample_index, fmask_desc_ptr);
5738 } else {
5739 coords[count] = emit_extract_vector(ctx, src2, 0, v1);
5740 }
5741 }
5742
5743 if (gfx9_1d) {
5744 coords[0] = emit_extract_vector(ctx, src0, 0, v1);
5745 coords.resize(coords.size() + 1);
5746 coords[1] = bld.copy(bld.def(v1), Operand(0u));
5747 if (is_array)
5748 coords[2] = emit_extract_vector(ctx, src0, 1, v1);
5749 } else {
5750 for (int i = 0; i < count; i++)
5751 coords[i] = emit_extract_vector(ctx, src0, i, v1);
5752 }
5753
5754 if (instr->intrinsic == nir_intrinsic_image_deref_load ||
5755 instr->intrinsic == nir_intrinsic_image_deref_store) {
5756 int lod_index = instr->intrinsic == nir_intrinsic_image_deref_load ? 3 : 4;
5757 bool level_zero = nir_src_is_const(instr->src[lod_index]) && nir_src_as_uint(instr->src[lod_index]) == 0;
5758
5759 if (!level_zero)
5760 coords.emplace_back(get_ssa_temp(ctx, instr->src[lod_index].ssa));
5761 }
5762
5763 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, coords.size(), 1)};
5764 for (unsigned i = 0; i < coords.size(); i++)
5765 vec->operands[i] = Operand(coords[i]);
5766 Temp res = {ctx->program->allocateId(), RegClass(RegType::vgpr, coords.size())};
5767 vec->definitions[0] = Definition(res);
5768 ctx->block->instructions.emplace_back(std::move(vec));
5769 return res;
5770 }
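/* Note on the gfx9_1d path above: GFX9 addresses 1D images as 2D
 * internally, so a constant y = 0 is inserted and, for arrays, the
 * layer index moves from coords[1] to coords[2]. */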
5771
5772
5773 void visit_image_load(isel_context *ctx, nir_intrinsic_instr *instr)
5774 {
5775 Builder bld(ctx->program, ctx->block);
5776 const nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
5777 const struct glsl_type *type = glsl_without_array(var->type);
5778 const enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
5779 bool is_array = glsl_sampler_type_is_array(type);
5780 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5781
5782 if (dim == GLSL_SAMPLER_DIM_BUF) {
5783 unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
5784 unsigned num_channels = util_last_bit(mask);
5785 Temp rsrc = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_BUFFER, nullptr, true, true);
5786 Temp vindex = emit_extract_vector(ctx, get_ssa_temp(ctx, instr->src[1].ssa), 0, v1);
5787
5788 aco_opcode opcode;
5789 switch (num_channels) {
5790 case 1:
5791 opcode = aco_opcode::buffer_load_format_x;
5792 break;
5793 case 2:
5794 opcode = aco_opcode::buffer_load_format_xy;
5795 break;
5796 case 3:
5797 opcode = aco_opcode::buffer_load_format_xyz;
5798 break;
5799 case 4:
5800 opcode = aco_opcode::buffer_load_format_xyzw;
5801 break;
5802 default:
5803 unreachable(">4 channel buffer image load");
5804 }
5805 aco_ptr<MUBUF_instruction> load{create_instruction<MUBUF_instruction>(opcode, Format::MUBUF, 3, 1)};
5806 load->operands[0] = Operand(rsrc);
5807 load->operands[1] = Operand(vindex);
5808 load->operands[2] = Operand((uint32_t) 0);
5809 Temp tmp;
5810 if (num_channels == instr->dest.ssa.num_components && dst.type() == RegType::vgpr)
5811 tmp = dst;
5812 else
5813 tmp = {ctx->program->allocateId(), RegClass(RegType::vgpr, num_channels)};
5814 load->definitions[0] = Definition(tmp);
5815 load->idxen = true;
5816 load->glc = var->data.access & (ACCESS_VOLATILE | ACCESS_COHERENT);
5817 load->dlc = load->glc && ctx->options->chip_class >= GFX10;
5818 load->barrier = barrier_image;
5819 ctx->block->instructions.emplace_back(std::move(load));
5820
5821 expand_vector(ctx, tmp, dst, instr->dest.ssa.num_components, (1 << num_channels) - 1);
5822 return;
5823 }
5824
5825 Temp coords = get_image_coords(ctx, instr, type);
5826 Temp resource = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_IMAGE, nullptr, true, true);
5827
5828 unsigned dmask = nir_ssa_def_components_read(&instr->dest.ssa);
5829 unsigned num_components = util_bitcount(dmask);
5830 Temp tmp;
5831 if (num_components == instr->dest.ssa.num_components && dst.type() == RegType::vgpr)
5832 tmp = dst;
5833 else
5834 tmp = {ctx->program->allocateId(), RegClass(RegType::vgpr, num_components)};
5835
5836 bool level_zero = nir_src_is_const(instr->src[3]) && nir_src_as_uint(instr->src[3]) == 0;
5837 aco_opcode opcode = level_zero ? aco_opcode::image_load : aco_opcode::image_load_mip;
5838
5839 aco_ptr<MIMG_instruction> load{create_instruction<MIMG_instruction>(opcode, Format::MIMG, 3, 1)};
5840 load->operands[0] = Operand(resource);
5841 load->operands[1] = Operand(s4); /* no sampler */
5842 load->operands[2] = Operand(coords);
5843 load->definitions[0] = Definition(tmp);
5844 load->glc = var->data.access & (ACCESS_VOLATILE | ACCESS_COHERENT) ? 1 : 0;
5845 load->dlc = load->glc && ctx->options->chip_class >= GFX10;
5846 load->dim = ac_get_image_dim(ctx->options->chip_class, dim, is_array);
5847 load->dmask = dmask;
5848 load->unrm = true;
5849 load->da = should_declare_array(ctx, dim, glsl_sampler_type_is_array(type));
5850 load->barrier = barrier_image;
5851 ctx->block->instructions.emplace_back(std::move(load));
5852
5853 expand_vector(ctx, tmp, dst, instr->dest.ssa.num_components, dmask);
5854 return;
5855 }
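/* dmask example: if the shader only reads .x and .z of the result,
 * nir_ssa_def_components_read() yields 0b0101, the MIMG load returns
 * just those two dwords back-to-back, and expand_vector() scatters
 * them into a full num_components-wide vector for the NIR def. */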
5856
5857 void visit_image_store(isel_context *ctx, nir_intrinsic_instr *instr)
5858 {
5859 const nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
5860 const struct glsl_type *type = glsl_without_array(var->type);
5861 const enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
5862 bool is_array = glsl_sampler_type_is_array(type);
5863 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[3].ssa));
5864
5865 bool glc = ctx->options->chip_class == GFX6 || (var->data.access & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE));
5866
5867 if (dim == GLSL_SAMPLER_DIM_BUF) {
5868 Temp rsrc = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_BUFFER, nullptr, true, true);
5869 Temp vindex = emit_extract_vector(ctx, get_ssa_temp(ctx, instr->src[1].ssa), 0, v1);
5870 aco_opcode opcode;
5871 switch (data.size()) {
5872 case 1:
5873 opcode = aco_opcode::buffer_store_format_x;
5874 break;
5875 case 2:
5876 opcode = aco_opcode::buffer_store_format_xy;
5877 break;
5878 case 3:
5879 opcode = aco_opcode::buffer_store_format_xyz;
5880 break;
5881 case 4:
5882 opcode = aco_opcode::buffer_store_format_xyzw;
5883 break;
5884 default:
5885 unreachable(">4 channel buffer image store");
5886 }
5887 aco_ptr<MUBUF_instruction> store{create_instruction<MUBUF_instruction>(opcode, Format::MUBUF, 4, 0)};
5888 store->operands[0] = Operand(rsrc);
5889 store->operands[1] = Operand(vindex);
5890 store->operands[2] = Operand((uint32_t) 0);
5891 store->operands[3] = Operand(data);
5892 store->idxen = true;
5893 store->glc = glc;
5894 store->dlc = false;
5895 store->disable_wqm = true;
5896 store->barrier = barrier_image;
5897 ctx->program->needs_exact = true;
5898 ctx->block->instructions.emplace_back(std::move(store));
5899 return;
5900 }
5901
5902 assert(data.type() == RegType::vgpr);
5903 Temp coords = get_image_coords(ctx, instr, type);
5904 Temp resource = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_IMAGE, nullptr, true, true);
5905
5906 bool level_zero = nir_src_is_const(instr->src[4]) && nir_src_as_uint(instr->src[4]) == 0;
5907 aco_opcode opcode = level_zero ? aco_opcode::image_store : aco_opcode::image_store_mip;
5908
5909 aco_ptr<MIMG_instruction> store{create_instruction<MIMG_instruction>(opcode, Format::MIMG, 3, 0)};
5910 store->operands[0] = Operand(resource);
5911 store->operands[1] = Operand(data);
5912 store->operands[2] = Operand(coords);
5913 store->glc = glc;
5914 store->dlc = false;
5915 store->dim = ac_get_image_dim(ctx->options->chip_class, dim, is_array);
5916 store->dmask = (1 << data.size()) - 1;
5917 store->unrm = true;
5918 store->da = should_declare_array(ctx, dim, glsl_sampler_type_is_array(type));
5919 store->disable_wqm = true;
5920 store->barrier = barrier_image;
5921 ctx->program->needs_exact = true;
5922 ctx->block->instructions.emplace_back(std::move(store));
5923 return;
5924 }
5925
5926 void visit_image_atomic(isel_context *ctx, nir_intrinsic_instr *instr)
5927 {
5928 /* return the previous value if dest is ever used */
5929 bool return_previous = false;
5930 nir_foreach_use_safe(use_src, &instr->dest.ssa) {
5931 return_previous = true;
5932 break;
5933 }
5934 nir_foreach_if_use_safe(use_src, &instr->dest.ssa) {
5935 return_previous = true;
5936 break;
5937 }
5938
5939 const nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
5940 const struct glsl_type *type = glsl_without_array(var->type);
5941 const enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
5942 bool is_array = glsl_sampler_type_is_array(type);
5943 Builder bld(ctx->program, ctx->block);
5944
5945 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[3].ssa));
5946 assert(data.size() == 1 && "64bit ssbo atomics not yet implemented.");
5947
5948 if (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap)
5949 data = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), get_ssa_temp(ctx, instr->src[4].ssa), data);
5950
5951 aco_opcode buf_op, image_op;
5952 switch (instr->intrinsic) {
5953 case nir_intrinsic_image_deref_atomic_add:
5954 buf_op = aco_opcode::buffer_atomic_add;
5955 image_op = aco_opcode::image_atomic_add;
5956 break;
5957 case nir_intrinsic_image_deref_atomic_umin:
5958 buf_op = aco_opcode::buffer_atomic_umin;
5959 image_op = aco_opcode::image_atomic_umin;
5960 break;
5961 case nir_intrinsic_image_deref_atomic_imin:
5962 buf_op = aco_opcode::buffer_atomic_smin;
5963 image_op = aco_opcode::image_atomic_smin;
5964 break;
5965 case nir_intrinsic_image_deref_atomic_umax:
5966 buf_op = aco_opcode::buffer_atomic_umax;
5967 image_op = aco_opcode::image_atomic_umax;
5968 break;
5969 case nir_intrinsic_image_deref_atomic_imax:
5970 buf_op = aco_opcode::buffer_atomic_smax;
5971 image_op = aco_opcode::image_atomic_smax;
5972 break;
5973 case nir_intrinsic_image_deref_atomic_and:
5974 buf_op = aco_opcode::buffer_atomic_and;
5975 image_op = aco_opcode::image_atomic_and;
5976 break;
5977 case nir_intrinsic_image_deref_atomic_or:
5978 buf_op = aco_opcode::buffer_atomic_or;
5979 image_op = aco_opcode::image_atomic_or;
5980 break;
5981 case nir_intrinsic_image_deref_atomic_xor:
5982 buf_op = aco_opcode::buffer_atomic_xor;
5983 image_op = aco_opcode::image_atomic_xor;
5984 break;
5985 case nir_intrinsic_image_deref_atomic_exchange:
5986 buf_op = aco_opcode::buffer_atomic_swap;
5987 image_op = aco_opcode::image_atomic_swap;
5988 break;
5989 case nir_intrinsic_image_deref_atomic_comp_swap:
5990 buf_op = aco_opcode::buffer_atomic_cmpswap;
5991 image_op = aco_opcode::image_atomic_cmpswap;
5992 break;
5993 default:
5994 unreachable("visit_image_atomic should only be called with nir_intrinsic_image_deref_atomic_* instructions.");
5995 }
5996
5997 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5998
5999 if (dim == GLSL_SAMPLER_DIM_BUF) {
6000 Temp vindex = emit_extract_vector(ctx, get_ssa_temp(ctx, instr->src[1].ssa), 0, v1);
6001 Temp resource = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_BUFFER, nullptr, true, true);
6002 //assert(ctx->options->chip_class < GFX9 && "GFX9 stride size workaround not yet implemented.");
6003 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(buf_op, Format::MUBUF, 4, return_previous ? 1 : 0)};
6004 mubuf->operands[0] = Operand(resource);
6005 mubuf->operands[1] = Operand(vindex);
6006 mubuf->operands[2] = Operand((uint32_t)0);
6007 mubuf->operands[3] = Operand(data);
6008 if (return_previous)
6009 mubuf->definitions[0] = Definition(dst);
6010 mubuf->offset = 0;
6011 mubuf->idxen = true;
6012 mubuf->glc = return_previous;
6013 mubuf->dlc = false; /* Not needed for atomics */
6014 mubuf->disable_wqm = true;
6015 mubuf->barrier = barrier_image;
6016 ctx->program->needs_exact = true;
6017 ctx->block->instructions.emplace_back(std::move(mubuf));
6018 return;
6019 }
6020
6021 Temp coords = get_image_coords(ctx, instr, type);
6022 Temp resource = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_IMAGE, nullptr, true, true);
6023 aco_ptr<MIMG_instruction> mimg{create_instruction<MIMG_instruction>(image_op, Format::MIMG, 3, return_previous ? 1 : 0)};
6024 mimg->operands[0] = Operand(resource);
6025 mimg->operands[1] = Operand(data);
6026 mimg->operands[2] = Operand(coords);
6027 if (return_previous)
6028 mimg->definitions[0] = Definition(dst);
6029 mimg->glc = return_previous;
6030 mimg->dlc = false; /* Not needed for atomics */
6031 mimg->dim = ac_get_image_dim(ctx->options->chip_class, dim, is_array);
6032 mimg->dmask = (1 << data.size()) - 1;
6033 mimg->unrm = true;
6034 mimg->da = should_declare_array(ctx, dim, glsl_sampler_type_is_array(type));
6035 mimg->disable_wqm = true;
6036 mimg->barrier = barrier_image;
6037 ctx->program->needs_exact = true;
6038 ctx->block->instructions.emplace_back(std::move(mimg));
6039 return;
6040 }
6041
6042 void get_buffer_size(isel_context *ctx, Temp desc, Temp dst, bool in_elements)
6043 {
6044 if (in_elements && ctx->options->chip_class == GFX8) {
6045 /* we only have to divide by 1, 2, 4, 8, 12 or 16 */
6046 Builder bld(ctx->program, ctx->block);
6047
6048 Temp size = emit_extract_vector(ctx, desc, 2, s1);
6049
6050 Temp size_div3 = bld.vop3(aco_opcode::v_mul_hi_u32, bld.def(v1), bld.copy(bld.def(v1), Operand(0xaaaaaaabu)), size);
6051 size_div3 = bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc), bld.as_uniform(size_div3), Operand(1u));
6052
6053 Temp stride = emit_extract_vector(ctx, desc, 1, s1);
6054 stride = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), stride, Operand((5u << 16) | 16u));
6055
6056 Temp is12 = bld.sopc(aco_opcode::s_cmp_eq_i32, bld.def(s1, scc), stride, Operand(12u));
6057 size = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), size_div3, size, bld.scc(is12));
6058
6059 Temp shr_dst = dst.type() == RegType::vgpr ? bld.tmp(s1) : dst;
6060 bld.sop2(aco_opcode::s_lshr_b32, Definition(shr_dst), bld.def(s1, scc),
6061 size, bld.sop1(aco_opcode::s_ff1_i32_b32, bld.def(s1), stride));
6062 if (dst.type() == RegType::vgpr)
6063 bld.copy(Definition(dst), shr_dst);
6064
6065 /* TODO: we can probably calculate this faster with v_skip when stride != 12 */
6066 } else {
6067 emit_extract_vector(ctx, desc, 2, dst);
6068 }
6069 }
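/* The stride division above uses a fixed-point reciprocal:
 * 0xaaaaaaab == ceil(2^33 / 3), so for any 32-bit size
 *
 *   size / 3 == mul_hi(size, 0xaaaaaaab) >> 1
 *
 * Power-of-two strides reduce to the final shift by
 * s_ff1_i32_b32(stride), and stride 12 combines both paths:
 * e.g. size = 120 -> size_div3 = 40, ff1(12) = 2, 40 >> 2 = 10,
 * matching 120 / 12 elements. */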
6070
6071 void visit_image_size(isel_context *ctx, nir_intrinsic_instr *instr)
6072 {
6073 const nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
6074 const struct glsl_type *type = glsl_without_array(var->type);
6075 const enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
6076 bool is_array = glsl_sampler_type_is_array(type);
6077 Builder bld(ctx->program, ctx->block);
6078
6079 if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_BUF) {
6080 Temp desc = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_BUFFER, NULL, true, false);
6081 return get_buffer_size(ctx, desc, get_ssa_temp(ctx, &instr->dest.ssa), true);
6082 }
6083
6084 /* LOD */
6085 Temp lod = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0u));
6086
6087 /* Resource */
6088 Temp resource = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_IMAGE, NULL, true, false);
6089
6090 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6091
6092 aco_ptr<MIMG_instruction> mimg{create_instruction<MIMG_instruction>(aco_opcode::image_get_resinfo, Format::MIMG, 3, 1)};
6093 mimg->operands[0] = Operand(resource);
6094 mimg->operands[1] = Operand(s4); /* no sampler */
6095 mimg->operands[2] = Operand(lod);
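/* dmask and the definition are patched after emplace_back() below; the
 * references taken here stay valid because std::move() only transfers
 * the owning pointer, not the MIMG_instruction object itself. */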
6096 uint8_t& dmask = mimg->dmask;
6097 mimg->dim = ac_get_image_dim(ctx->options->chip_class, dim, is_array);
6098 mimg->dmask = (1 << instr->dest.ssa.num_components) - 1;
6099 mimg->da = glsl_sampler_type_is_array(type);
6100 mimg->can_reorder = true;
6101 Definition& def = mimg->definitions[0];
6102 ctx->block->instructions.emplace_back(std::move(mimg));
6103
6104 if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_CUBE &&
6105 glsl_sampler_type_is_array(type)) {
6106
6107 assert(instr->dest.ssa.num_components == 3);
6108 Temp tmp = {ctx->program->allocateId(), v3};
6109 def = Definition(tmp);
6110 emit_split_vector(ctx, tmp, 3);
6111
6112 /* divide 3rd value by 6 by multiplying with magic number */
6113 Temp c = bld.copy(bld.def(s1), Operand((uint32_t) 0x2AAAAAAB));
6114 Temp by_6 = bld.vop3(aco_opcode::v_mul_hi_i32, bld.def(v1), emit_extract_vector(ctx, tmp, 2, v1), c);
6115
6116 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
6117 emit_extract_vector(ctx, tmp, 0, v1),
6118 emit_extract_vector(ctx, tmp, 1, v1),
6119 by_6);
6120
6121 } else if (ctx->options->chip_class == GFX9 &&
6122 glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_1D &&
6123 glsl_sampler_type_is_array(type)) {
6124 assert(instr->dest.ssa.num_components == 2);
6125 def = Definition(dst);
6126 dmask = 0x5;
6127 } else {
6128 def = Definition(dst);
6129 }
6130
6131 emit_split_vector(ctx, dst, instr->dest.ssa.num_components);
6132 }
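/* Cube array note: image_get_resinfo reports layers * 6, and
 * 0x2AAAAAAB == ceil(2^32 / 6), so mul_hi(layers6, 0x2AAAAAAB) divides
 * by 6 exactly for any multiple of 6 (e.g. (12 * 0x2AAAAAAB) >> 32 == 2).
 * The GFX9 1D-array case instead uses dmask = 0x5 to fetch width and
 * layer count while skipping the dummy height component. */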
6133
6134 void visit_load_ssbo(isel_context *ctx, nir_intrinsic_instr *instr)
6135 {
6136 Builder bld(ctx->program, ctx->block);
6137 unsigned num_components = instr->num_components;
6138
6139 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6140 Temp rsrc = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6141 rsrc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), rsrc, Operand(0u));
6142
6143 bool glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT);
6144 unsigned size = instr->dest.ssa.bit_size / 8;
6145 load_buffer(ctx, num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa),
6146 nir_intrinsic_align_mul(instr), nir_intrinsic_align_offset(instr), glc, false);
6147 }
6148
6149 void visit_store_ssbo(isel_context *ctx, nir_intrinsic_instr *instr)
6150 {
6151 Builder bld(ctx->program, ctx->block);
6152 Temp data = get_ssa_temp(ctx, instr->src[0].ssa);
6153 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
6154 unsigned writemask = widen_mask(nir_intrinsic_write_mask(instr), elem_size_bytes);
6155 Temp offset = get_ssa_temp(ctx, instr->src[2].ssa);
6156
6157 Temp rsrc = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
6158 rsrc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), rsrc, Operand(0u));
6159
6160 bool smem = !nir_src_is_divergent(instr->src[2]) &&
6161 ctx->options->chip_class >= GFX8 &&
6162 elem_size_bytes >= 4;
6163 if (smem)
6164 offset = bld.as_uniform(offset);
6165 bool smem_nonfs = smem && ctx->stage != fragment_fs;
6166
6167 unsigned write_count = 0;
6168 Temp write_datas[32];
6169 unsigned offsets[32];
6170 split_buffer_store(ctx, instr, smem, smem_nonfs ? RegType::sgpr : (smem ? data.type() : RegType::vgpr),
6171 data, writemask, 16, &write_count, write_datas, offsets);
6172
6173 for (unsigned i = 0; i < write_count; i++) {
6174 aco_opcode op = get_buffer_store_op(smem, write_datas[i].bytes());
6175 if (smem && ctx->stage == fragment_fs)
6176 op = aco_opcode::p_fs_buffer_store_smem;
6177
6178 if (smem) {
6179 aco_ptr<SMEM_instruction> store{create_instruction<SMEM_instruction>(op, Format::SMEM, 3, 0)};
6180 store->operands[0] = Operand(rsrc);
6181 if (offsets[i]) {
6182 Temp off = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
6183 offset, Operand(offsets[i]));
6184 store->operands[1] = Operand(off);
6185 } else {
6186 store->operands[1] = Operand(offset);
6187 }
6188 if (op != aco_opcode::p_fs_buffer_store_smem)
6189 store->operands[1].setFixed(m0);
6190 store->operands[2] = Operand(write_datas[i]);
6191 store->glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
6192 store->dlc = false;
6193 store->disable_wqm = true;
6194 store->barrier = barrier_buffer;
6195 ctx->block->instructions.emplace_back(std::move(store));
6196 ctx->program->wb_smem_l1_on_end = true;
6197 if (op == aco_opcode::p_fs_buffer_store_smem) {
6198 ctx->block->kind |= block_kind_needs_lowering;
6199 ctx->program->needs_exact = true;
6200 }
6201 } else {
6202 aco_ptr<MUBUF_instruction> store{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, 0)};
6203 store->operands[0] = Operand(rsrc);
6204 store->operands[1] = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
6205 store->operands[2] = offset.type() == RegType::sgpr ? Operand(offset) : Operand((uint32_t) 0);
6206 store->operands[3] = Operand(write_datas[i]);
6207 store->offset = offsets[i];
6208 store->offen = (offset.type() == RegType::vgpr);
6209 store->glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
6210 store->dlc = false;
6211 store->disable_wqm = true;
6212 store->barrier = barrier_buffer;
6213 ctx->program->needs_exact = true;
6214 ctx->block->instructions.emplace_back(std::move(store));
6215 }
6216 }
6217 }
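/* split_buffer_store() splits the (dword-widened) writemask into
 * contiguous runs of at most 16 bytes. A sketch of the outcome,
 * assuming 32-bit components: writemask 0b1011 becomes two stores,
 * 8 bytes at offset 0 and 4 bytes at offset 12, each mapped to the
 * matching store opcode by get_buffer_store_op(). */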
6218
6219 void visit_atomic_ssbo(isel_context *ctx, nir_intrinsic_instr *instr)
6220 {
6221 /* return the previous value if dest is ever used */
6222 bool return_previous = false;
6223 nir_foreach_use_safe(use_src, &instr->dest.ssa) {
6224 return_previous = true;
6225 break;
6226 }
6227 nir_foreach_if_use_safe(use_src, &instr->dest.ssa) {
6228 return_previous = true;
6229 break;
6230 }
6231
6232 Builder bld(ctx->program, ctx->block);
6233 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[2].ssa));
6234
6235 if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap)
6236 data = bld.pseudo(aco_opcode::p_create_vector, bld.def(RegType::vgpr, data.size() * 2),
6237 get_ssa_temp(ctx, instr->src[3].ssa), data);
6238
6239 Temp offset = get_ssa_temp(ctx, instr->src[1].ssa);
6240 Temp rsrc = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6241 rsrc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), rsrc, Operand(0u));
6242
6243 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6244
6245 aco_opcode op32, op64;
6246 switch (instr->intrinsic) {
6247 case nir_intrinsic_ssbo_atomic_add:
6248 op32 = aco_opcode::buffer_atomic_add;
6249 op64 = aco_opcode::buffer_atomic_add_x2;
6250 break;
6251 case nir_intrinsic_ssbo_atomic_imin:
6252 op32 = aco_opcode::buffer_atomic_smin;
6253 op64 = aco_opcode::buffer_atomic_smin_x2;
6254 break;
6255 case nir_intrinsic_ssbo_atomic_umin:
6256 op32 = aco_opcode::buffer_atomic_umin;
6257 op64 = aco_opcode::buffer_atomic_umin_x2;
6258 break;
6259 case nir_intrinsic_ssbo_atomic_imax:
6260 op32 = aco_opcode::buffer_atomic_smax;
6261 op64 = aco_opcode::buffer_atomic_smax_x2;
6262 break;
6263 case nir_intrinsic_ssbo_atomic_umax:
6264 op32 = aco_opcode::buffer_atomic_umax;
6265 op64 = aco_opcode::buffer_atomic_umax_x2;
6266 break;
6267 case nir_intrinsic_ssbo_atomic_and:
6268 op32 = aco_opcode::buffer_atomic_and;
6269 op64 = aco_opcode::buffer_atomic_and_x2;
6270 break;
6271 case nir_intrinsic_ssbo_atomic_or:
6272 op32 = aco_opcode::buffer_atomic_or;
6273 op64 = aco_opcode::buffer_atomic_or_x2;
6274 break;
6275 case nir_intrinsic_ssbo_atomic_xor:
6276 op32 = aco_opcode::buffer_atomic_xor;
6277 op64 = aco_opcode::buffer_atomic_xor_x2;
6278 break;
6279 case nir_intrinsic_ssbo_atomic_exchange:
6280 op32 = aco_opcode::buffer_atomic_swap;
6281 op64 = aco_opcode::buffer_atomic_swap_x2;
6282 break;
6283 case nir_intrinsic_ssbo_atomic_comp_swap:
6284 op32 = aco_opcode::buffer_atomic_cmpswap;
6285 op64 = aco_opcode::buffer_atomic_cmpswap_x2;
6286 break;
6287 default:
6288 unreachable("visit_atomic_ssbo should only be called with nir_intrinsic_ssbo_atomic_* instructions.");
6289 }
6290 aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
6291 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, return_previous ? 1 : 0)};
6292 mubuf->operands[0] = Operand(rsrc);
6293 mubuf->operands[1] = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
6294 mubuf->operands[2] = offset.type() == RegType::sgpr ? Operand(offset) : Operand((uint32_t) 0);
6295 mubuf->operands[3] = Operand(data);
6296 if (return_previous)
6297 mubuf->definitions[0] = Definition(dst);
6298 mubuf->offset = 0;
6299 mubuf->offen = (offset.type() == RegType::vgpr);
6300 mubuf->glc = return_previous;
6301 mubuf->dlc = false; /* Not needed for atomics */
6302 mubuf->disable_wqm = true;
6303 mubuf->barrier = barrier_buffer;
6304 ctx->program->needs_exact = true;
6305 ctx->block->instructions.emplace_back(std::move(mubuf));
6306 }
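/* For MUBUF atomics the glc bit doubles as "return the pre-op value",
 * so it is only set when the NIR destination is actually used
 * (return_previous); otherwise the atomic is fire-and-forget and no
 * definition is allocated. */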
6307
6308 void visit_get_buffer_size(isel_context *ctx, nir_intrinsic_instr *instr) {
6309
6310 Temp index = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6311 Builder bld(ctx->program, ctx->block);
6312 Temp desc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), index, Operand(0u));
6313 get_buffer_size(ctx, desc, get_ssa_temp(ctx, &instr->dest.ssa), false);
6314 }
6315
6316 void visit_load_global(isel_context *ctx, nir_intrinsic_instr *instr)
6317 {
6318 Builder bld(ctx->program, ctx->block);
6319 unsigned num_components = instr->num_components;
6320 unsigned component_size = instr->dest.ssa.bit_size / 8;
6321
6322 LoadEmitInfo info = {Operand(get_ssa_temp(ctx, instr->src[0].ssa)),
6323 get_ssa_temp(ctx, &instr->dest.ssa),
6324 num_components, component_size};
6325 info.glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT);
6326 info.align_mul = nir_intrinsic_align_mul(instr);
6327 info.align_offset = nir_intrinsic_align_offset(instr);
6328 info.barrier = barrier_buffer;
6329 info.can_reorder = false;
6330 /* VMEM stores don't update the SMEM cache and it's difficult to prove that
6331 * it's safe to use SMEM */
6332 bool can_use_smem = nir_intrinsic_access(instr) & ACCESS_NON_WRITEABLE;
6333 if (info.dst.type() == RegType::vgpr || (info.glc && ctx->options->chip_class < GFX8) || !can_use_smem) {
6334 emit_global_load(ctx, bld, &info);
6335 } else {
6336 info.offset = Operand(bld.as_uniform(info.offset));
6337 emit_smem_load(ctx, bld, &info);
6338 }
6339 }
6340
6341 void visit_store_global(isel_context *ctx, nir_intrinsic_instr *instr)
6342 {
6343 Builder bld(ctx->program, ctx->block);
6344 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
6345 unsigned writemask = widen_mask(nir_intrinsic_write_mask(instr), elem_size_bytes);
6346
6347 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6348 Temp addr = get_ssa_temp(ctx, instr->src[1].ssa);
6349 bool glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
6350
6351 if (ctx->options->chip_class >= GFX7)
6352 addr = as_vgpr(ctx, addr);
6353
6354 unsigned write_count = 0;
6355 Temp write_datas[32];
6356 unsigned offsets[32];
6357 split_buffer_store(ctx, instr, false, RegType::vgpr, data, writemask,
6358 16, &write_count, write_datas, offsets);
6359
6360 for (unsigned i = 0; i < write_count; i++) {
6361 if (ctx->options->chip_class >= GFX7) {
6362 unsigned offset = offsets[i];
6363 Temp store_addr = addr;
6364 if (offset > 0 && ctx->options->chip_class < GFX9) {
6365 Temp addr0 = bld.tmp(v1), addr1 = bld.tmp(v1);
6366 Temp new_addr0 = bld.tmp(v1), new_addr1 = bld.tmp(v1);
6367 Temp carry = bld.tmp(bld.lm);
6368 bld.pseudo(aco_opcode::p_split_vector, Definition(addr0), Definition(addr1), addr);
6369
6370 bld.vop2(aco_opcode::v_add_co_u32, Definition(new_addr0), bld.hint_vcc(Definition(carry)),
6371 Operand(offset), addr0);
6372 bld.vop2(aco_opcode::v_addc_co_u32, Definition(new_addr1), bld.def(bld.lm),
6373 Operand(0u), addr1,
6374 carry).def(1).setHint(vcc);
6375
6376 store_addr = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), new_addr0, new_addr1);
6377
6378 offset = 0;
6379 }
6380
6381 bool global = ctx->options->chip_class >= GFX9;
6382 aco_opcode op;
6383 switch (write_datas[i].bytes()) {
6384 case 1:
6385 op = global ? aco_opcode::global_store_byte : aco_opcode::flat_store_byte;
6386 break;
6387 case 2:
6388 op = global ? aco_opcode::global_store_short : aco_opcode::flat_store_short;
6389 break;
6390 case 4:
6391 op = global ? aco_opcode::global_store_dword : aco_opcode::flat_store_dword;
6392 break;
6393 case 8:
6394 op = global ? aco_opcode::global_store_dwordx2 : aco_opcode::flat_store_dwordx2;
6395 break;
6396 case 12:
6397 op = global ? aco_opcode::global_store_dwordx3 : aco_opcode::flat_store_dwordx3;
6398 break;
6399 case 16:
6400 op = global ? aco_opcode::global_store_dwordx4 : aco_opcode::flat_store_dwordx4;
6401 break;
6402 default:
6403 unreachable("store_global not implemented for this size.");
6404 }
6405
6406 aco_ptr<FLAT_instruction> flat{create_instruction<FLAT_instruction>(op, global ? Format::GLOBAL : Format::FLAT, 3, 0)};
6407 flat->operands[0] = Operand(store_addr);
6408 flat->operands[1] = Operand(s1);
6409 flat->operands[2] = Operand(write_datas[i]);
6410 flat->glc = glc;
6411 flat->dlc = false;
6412 flat->offset = offset;
6413 flat->disable_wqm = true;
6414 flat->barrier = barrier_buffer;
6415 ctx->program->needs_exact = true;
6416 ctx->block->instructions.emplace_back(std::move(flat));
6417 } else {
6418 assert(ctx->options->chip_class == GFX6);
6419
6420 aco_opcode op = get_buffer_store_op(false, write_datas[i].bytes());
6421
6422 Temp rsrc = get_gfx6_global_rsrc(bld, addr);
6423
6424 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, 0)};
6425 mubuf->operands[0] = Operand(rsrc);
6426 mubuf->operands[1] = addr.type() == RegType::vgpr ? Operand(addr) : Operand(v1);
6427 mubuf->operands[2] = Operand(0u);
6428 mubuf->operands[3] = Operand(write_datas[i]);
6429 mubuf->glc = glc;
6430 mubuf->dlc = false;
6431 mubuf->offset = offsets[i];
6432 mubuf->addr64 = addr.type() == RegType::vgpr;
6433 mubuf->disable_wqm = true;
6434 mubuf->barrier = barrier_buffer;
6435 ctx->program->needs_exact = true;
6436 ctx->block->instructions.emplace_back(std::move(mubuf));
6437 }
6438 }
6439 }
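/* The GFX7/GFX8 path above folds a nonzero constant offset into the
 * 64-bit address manually, since FLAT on those chips has no immediate
 * offset field (GLOBAL on GFX9+ does). The two-instruction sequence is
 * plain 64-bit addition:
 *
 *   new_lo = lo + offset        // v_add_co_u32, writes carry
 *   new_hi = hi + 0 + carry     // v_addc_co_u32
 */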
6440
6441 void visit_global_atomic(isel_context *ctx, nir_intrinsic_instr *instr)
6442 {
6443 /* return the previous value if dest is ever used */
6444 bool return_previous = false;
6445 nir_foreach_use_safe(use_src, &instr->dest.ssa) {
6446 return_previous = true;
6447 break;
6448 }
6449 nir_foreach_if_use_safe(use_src, &instr->dest.ssa) {
6450 return_previous = true;
6451 break;
6452 }
6453
6454 Builder bld(ctx->program, ctx->block);
6455 Temp addr = get_ssa_temp(ctx, instr->src[0].ssa);
6456 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
6457
6458 if (ctx->options->chip_class >= GFX7)
6459 addr = as_vgpr(ctx, addr);
6460
6461 if (instr->intrinsic == nir_intrinsic_global_atomic_comp_swap)
6462 data = bld.pseudo(aco_opcode::p_create_vector, bld.def(RegType::vgpr, data.size() * 2),
6463 get_ssa_temp(ctx, instr->src[2].ssa), data);
6464
6465 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6466
6467 aco_opcode op32, op64;
6468
6469 if (ctx->options->chip_class >= GFX7) {
6470 bool global = ctx->options->chip_class >= GFX9;
6471 switch (instr->intrinsic) {
6472 case nir_intrinsic_global_atomic_add:
6473 op32 = global ? aco_opcode::global_atomic_add : aco_opcode::flat_atomic_add;
6474 op64 = global ? aco_opcode::global_atomic_add_x2 : aco_opcode::flat_atomic_add_x2;
6475 break;
6476 case nir_intrinsic_global_atomic_imin:
6477 op32 = global ? aco_opcode::global_atomic_smin : aco_opcode::flat_atomic_smin;
6478 op64 = global ? aco_opcode::global_atomic_smin_x2 : aco_opcode::flat_atomic_smin_x2;
6479 break;
6480 case nir_intrinsic_global_atomic_umin:
6481 op32 = global ? aco_opcode::global_atomic_umin : aco_opcode::flat_atomic_umin;
6482 op64 = global ? aco_opcode::global_atomic_umin_x2 : aco_opcode::flat_atomic_umin_x2;
6483 break;
6484 case nir_intrinsic_global_atomic_imax:
6485 op32 = global ? aco_opcode::global_atomic_smax : aco_opcode::flat_atomic_smax;
6486 op64 = global ? aco_opcode::global_atomic_smax_x2 : aco_opcode::flat_atomic_smax_x2;
6487 break;
6488 case nir_intrinsic_global_atomic_umax:
6489 op32 = global ? aco_opcode::global_atomic_umax : aco_opcode::flat_atomic_umax;
6490 op64 = global ? aco_opcode::global_atomic_umax_x2 : aco_opcode::flat_atomic_umax_x2;
6491 break;
6492 case nir_intrinsic_global_atomic_and:
6493 op32 = global ? aco_opcode::global_atomic_and : aco_opcode::flat_atomic_and;
6494 op64 = global ? aco_opcode::global_atomic_and_x2 : aco_opcode::flat_atomic_and_x2;
6495 break;
6496 case nir_intrinsic_global_atomic_or:
6497 op32 = global ? aco_opcode::global_atomic_or : aco_opcode::flat_atomic_or;
6498 op64 = global ? aco_opcode::global_atomic_or_x2 : aco_opcode::flat_atomic_or_x2;
6499 break;
6500 case nir_intrinsic_global_atomic_xor:
6501 op32 = global ? aco_opcode::global_atomic_xor : aco_opcode::flat_atomic_xor;
6502 op64 = global ? aco_opcode::global_atomic_xor_x2 : aco_opcode::flat_atomic_xor_x2;
6503 break;
6504 case nir_intrinsic_global_atomic_exchange:
6505 op32 = global ? aco_opcode::global_atomic_swap : aco_opcode::flat_atomic_swap;
6506 op64 = global ? aco_opcode::global_atomic_swap_x2 : aco_opcode::flat_atomic_swap_x2;
6507 break;
6508 case nir_intrinsic_global_atomic_comp_swap:
6509 op32 = global ? aco_opcode::global_atomic_cmpswap : aco_opcode::flat_atomic_cmpswap;
6510 op64 = global ? aco_opcode::global_atomic_cmpswap_x2 : aco_opcode::flat_atomic_cmpswap_x2;
6511 break;
6512 default:
6513 unreachable("visit_atomic_global should only be called with nir_intrinsic_global_atomic_* instructions.");
6514 }
6515
6516 aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
6517 aco_ptr<FLAT_instruction> flat{create_instruction<FLAT_instruction>(op, global ? Format::GLOBAL : Format::FLAT, 3, return_previous ? 1 : 0)};
6518 flat->operands[0] = Operand(addr);
6519 flat->operands[1] = Operand(s1);
6520 flat->operands[2] = Operand(data);
6521 if (return_previous)
6522 flat->definitions[0] = Definition(dst);
6523 flat->glc = return_previous;
6524 flat->dlc = false; /* Not needed for atomics */
6525 flat->offset = 0;
6526 flat->disable_wqm = true;
6527 flat->barrier = barrier_buffer;
6528 ctx->program->needs_exact = true;
6529 ctx->block->instructions.emplace_back(std::move(flat));
6530 } else {
6531 assert(ctx->options->chip_class == GFX6);
6532
6533 switch (instr->intrinsic) {
6534 case nir_intrinsic_global_atomic_add:
6535 op32 = aco_opcode::buffer_atomic_add;
6536 op64 = aco_opcode::buffer_atomic_add_x2;
6537 break;
6538 case nir_intrinsic_global_atomic_imin:
6539 op32 = aco_opcode::buffer_atomic_smin;
6540 op64 = aco_opcode::buffer_atomic_smin_x2;
6541 break;
6542 case nir_intrinsic_global_atomic_umin:
6543 op32 = aco_opcode::buffer_atomic_umin;
6544 op64 = aco_opcode::buffer_atomic_umin_x2;
6545 break;
6546 case nir_intrinsic_global_atomic_imax:
6547 op32 = aco_opcode::buffer_atomic_smax;
6548 op64 = aco_opcode::buffer_atomic_smax_x2;
6549 break;
6550 case nir_intrinsic_global_atomic_umax:
6551 op32 = aco_opcode::buffer_atomic_umax;
6552 op64 = aco_opcode::buffer_atomic_umax_x2;
6553 break;
6554 case nir_intrinsic_global_atomic_and:
6555 op32 = aco_opcode::buffer_atomic_and;
6556 op64 = aco_opcode::buffer_atomic_and_x2;
6557 break;
6558 case nir_intrinsic_global_atomic_or:
6559 op32 = aco_opcode::buffer_atomic_or;
6560 op64 = aco_opcode::buffer_atomic_or_x2;
6561 break;
6562 case nir_intrinsic_global_atomic_xor:
6563 op32 = aco_opcode::buffer_atomic_xor;
6564 op64 = aco_opcode::buffer_atomic_xor_x2;
6565 break;
6566 case nir_intrinsic_global_atomic_exchange:
6567 op32 = aco_opcode::buffer_atomic_swap;
6568 op64 = aco_opcode::buffer_atomic_swap_x2;
6569 break;
6570 case nir_intrinsic_global_atomic_comp_swap:
6571 op32 = aco_opcode::buffer_atomic_cmpswap;
6572 op64 = aco_opcode::buffer_atomic_cmpswap_x2;
6573 break;
6574 default:
6575 unreachable("visit_atomic_global should only be called with nir_intrinsic_global_atomic_* instructions.");
6576 }
6577
6578 Temp rsrc = get_gfx6_global_rsrc(bld, addr);
6579
6580 aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
6581
6582 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, return_previous ? 1 : 0)};
6583 mubuf->operands[0] = Operand(rsrc);
6584 mubuf->operands[1] = addr.type() == RegType::vgpr ? Operand(addr) : Operand(v1);
6585 mubuf->operands[2] = Operand(0u);
6586 mubuf->operands[3] = Operand(data);
6587 if (return_previous)
6588 mubuf->definitions[0] = Definition(dst);
6589 mubuf->glc = return_previous;
6590 mubuf->dlc = false;
6591 mubuf->offset = 0;
6592 mubuf->addr64 = addr.type() == RegType::vgpr;
6593 mubuf->disable_wqm = true;
6594 mubuf->barrier = barrier_buffer;
6595 ctx->program->needs_exact = true;
6596 ctx->block->instructions.emplace_back(std::move(mubuf));
6597 }
6598 }
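/* On GFX6 there are no FLAT/GLOBAL instructions, so global atomics
 * fall back to MUBUF with addr64 = 1 against a buffer resource
 * synthesized by get_gfx6_global_rsrc(), letting the 64-bit VGPR
 * address act as the buffer offset. */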
6599
6600 void emit_memory_barrier(isel_context *ctx, nir_intrinsic_instr *instr) {
6601 Builder bld(ctx->program, ctx->block);
6602 switch(instr->intrinsic) {
6603 case nir_intrinsic_group_memory_barrier:
6604 case nir_intrinsic_memory_barrier:
6605 bld.barrier(aco_opcode::p_memory_barrier_common);
6606 break;
6607 case nir_intrinsic_memory_barrier_buffer:
6608 bld.barrier(aco_opcode::p_memory_barrier_buffer);
6609 break;
6610 case nir_intrinsic_memory_barrier_image:
6611 bld.barrier(aco_opcode::p_memory_barrier_image);
6612 break;
6613 case nir_intrinsic_memory_barrier_tcs_patch:
6614 case nir_intrinsic_memory_barrier_shared:
6615 bld.barrier(aco_opcode::p_memory_barrier_shared);
6616 break;
6617 default:
6618 unreachable("Unimplemented memory barrier intrinsic");
6619 break;
6620 }
6621 }
6622
6623 void visit_load_shared(isel_context *ctx, nir_intrinsic_instr *instr)
6624 {
6625 // TODO: implement sparse reads using ds_read2_b32 and nir_ssa_def_components_read()
6626 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6627 Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6628 Builder bld(ctx->program, ctx->block);
6629
6630 unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8;
6631 unsigned align = nir_intrinsic_align_mul(instr) ? nir_intrinsic_align(instr) : elem_size_bytes;
6632 load_lds(ctx, elem_size_bytes, dst, address, nir_intrinsic_base(instr), align);
6633 }
6634
6635 void visit_store_shared(isel_context *ctx, nir_intrinsic_instr *instr)
6636 {
6637 unsigned writemask = nir_intrinsic_write_mask(instr);
6638 Temp data = get_ssa_temp(ctx, instr->src[0].ssa);
6639 Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
6640 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
6641
6642 unsigned align = nir_intrinsic_align_mul(instr) ? nir_intrinsic_align(instr) : elem_size_bytes;
6643 store_lds(ctx, elem_size_bytes, data, writemask, address, nir_intrinsic_base(instr), align);
6644 }
6645
6646 void visit_shared_atomic(isel_context *ctx, nir_intrinsic_instr *instr)
6647 {
6648 unsigned offset = nir_intrinsic_base(instr);
6649 Builder bld(ctx->program, ctx->block);
6650 Operand m = load_lds_size_m0(bld);
6651 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
6652 Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6653
6654 unsigned num_operands = 3;
6655 aco_opcode op32, op64, op32_rtn, op64_rtn;
6656 switch(instr->intrinsic) {
6657 case nir_intrinsic_shared_atomic_add:
6658 op32 = aco_opcode::ds_add_u32;
6659 op64 = aco_opcode::ds_add_u64;
6660 op32_rtn = aco_opcode::ds_add_rtn_u32;
6661 op64_rtn = aco_opcode::ds_add_rtn_u64;
6662 break;
6663 case nir_intrinsic_shared_atomic_imin:
6664 op32 = aco_opcode::ds_min_i32;
6665 op64 = aco_opcode::ds_min_i64;
6666 op32_rtn = aco_opcode::ds_min_rtn_i32;
6667 op64_rtn = aco_opcode::ds_min_rtn_i64;
6668 break;
6669 case nir_intrinsic_shared_atomic_umin:
6670 op32 = aco_opcode::ds_min_u32;
6671 op64 = aco_opcode::ds_min_u64;
6672 op32_rtn = aco_opcode::ds_min_rtn_u32;
6673 op64_rtn = aco_opcode::ds_min_rtn_u64;
6674 break;
6675 case nir_intrinsic_shared_atomic_imax:
6676 op32 = aco_opcode::ds_max_i32;
6677 op64 = aco_opcode::ds_max_i64;
6678 op32_rtn = aco_opcode::ds_max_rtn_i32;
6679 op64_rtn = aco_opcode::ds_max_rtn_i64;
6680 break;
6681 case nir_intrinsic_shared_atomic_umax:
6682 op32 = aco_opcode::ds_max_u32;
6683 op64 = aco_opcode::ds_max_u64;
6684 op32_rtn = aco_opcode::ds_max_rtn_u32;
6685 op64_rtn = aco_opcode::ds_max_rtn_u64;
6686 break;
6687 case nir_intrinsic_shared_atomic_and:
6688 op32 = aco_opcode::ds_and_b32;
6689 op64 = aco_opcode::ds_and_b64;
6690 op32_rtn = aco_opcode::ds_and_rtn_b32;
6691 op64_rtn = aco_opcode::ds_and_rtn_b64;
6692 break;
6693 case nir_intrinsic_shared_atomic_or:
6694 op32 = aco_opcode::ds_or_b32;
6695 op64 = aco_opcode::ds_or_b64;
6696 op32_rtn = aco_opcode::ds_or_rtn_b32;
6697 op64_rtn = aco_opcode::ds_or_rtn_b64;
6698 break;
6699 case nir_intrinsic_shared_atomic_xor:
6700 op32 = aco_opcode::ds_xor_b32;
6701 op64 = aco_opcode::ds_xor_b64;
6702 op32_rtn = aco_opcode::ds_xor_rtn_b32;
6703 op64_rtn = aco_opcode::ds_xor_rtn_b64;
6704 break;
6705 case nir_intrinsic_shared_atomic_exchange:
6706 op32 = aco_opcode::ds_write_b32;
6707 op64 = aco_opcode::ds_write_b64;
6708 op32_rtn = aco_opcode::ds_wrxchg_rtn_b32;
6709 op64_rtn = aco_opcode::ds_wrxchg_rtn_b64;
6710 break;
6711 case nir_intrinsic_shared_atomic_comp_swap:
6712 op32 = aco_opcode::ds_cmpst_b32;
6713 op64 = aco_opcode::ds_cmpst_b64;
6714 op32_rtn = aco_opcode::ds_cmpst_rtn_b32;
6715 op64_rtn = aco_opcode::ds_cmpst_rtn_b64;
6716 num_operands = 4;
6717 break;
6718 default:
6719 unreachable("Unhandled shared atomic intrinsic");
6720 }
6721
6722 /* return the previous value if dest is ever used */
6723 bool return_previous = false;
6724 nir_foreach_use_safe(use_src, &instr->dest.ssa) {
6725 return_previous = true;
6726 break;
6727 }
6728 nir_foreach_if_use_safe(use_src, &instr->dest.ssa) {
6729 return_previous = true;
6730 break;
6731 }
6732
6733 aco_opcode op;
6734 if (data.size() == 1) {
6735 assert(instr->dest.ssa.bit_size == 32);
6736 op = return_previous ? op32_rtn : op32;
6737 } else {
6738 assert(instr->dest.ssa.bit_size == 64);
6739 op = return_previous ? op64_rtn : op64;
6740 }
6741
6742 if (offset > 65535) {
6743 address = bld.vadd32(bld.def(v1), Operand(offset), address);
6744 offset = 0;
6745 }
6746
6747 aco_ptr<DS_instruction> ds;
6748 ds.reset(create_instruction<DS_instruction>(op, Format::DS, num_operands, return_previous ? 1 : 0));
6749 ds->operands[0] = Operand(address);
6750 ds->operands[1] = Operand(data);
6751 if (num_operands == 4)
6752 ds->operands[2] = Operand(get_ssa_temp(ctx, instr->src[2].ssa));
6753 ds->operands[num_operands - 1] = m;
6754 ds->offset0 = offset;
6755 if (return_previous)
6756 ds->definitions[0] = Definition(get_ssa_temp(ctx, &instr->dest.ssa));
6757 ctx->block->instructions.emplace_back(std::move(ds));
6758 }
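/* The DS offset0 field is only 16 bits wide, hence the offset > 65535
 * fallback above: larger constant offsets are added into the address
 * VGPR with vadd32 and offset0 is reset to 0. */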
6759
6760 Temp get_scratch_resource(isel_context *ctx)
6761 {
6762 Builder bld(ctx->program, ctx->block);
6763 Temp scratch_addr = ctx->program->private_segment_buffer;
6764 if (ctx->stage != compute_cs)
6765 scratch_addr = bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), scratch_addr, Operand(0u));
6766
6767 uint32_t rsrc_conf = S_008F0C_ADD_TID_ENABLE(1) |
6768 S_008F0C_INDEX_STRIDE(ctx->program->wave_size == 64 ? 3 : 2);
6769
6770 if (ctx->program->chip_class >= GFX10) {
6771 rsrc_conf |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
6772 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
6773 S_008F0C_RESOURCE_LEVEL(1);
6774 } else if (ctx->program->chip_class <= GFX7) { /* dfmt modifies stride on GFX8/GFX9 when ADD_TID_EN=1 */
6775 rsrc_conf |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
6776 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
6777 }
6778
6779 /* older generations need element size = 16 bytes; the element size field was removed in GFX9 */
6780 if (ctx->program->chip_class <= GFX8)
6781 rsrc_conf |= S_008F0C_ELEMENT_SIZE(3);
6782
6783 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), scratch_addr, Operand(-1u), Operand(rsrc_conf));
6784 }
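/* The resulting V#: base = per-wave scratch address, num_records = ~0,
 * and a config dword enabling swizzled per-lane addressing
 * (ADD_TID_ENABLE) with an index stride matching the wave size
 * (INDEX_STRIDE 3 = 64 lanes, 2 = 32 lanes). The FORMAT/OOB fields
 * differ per generation, as selected above. */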
6785
6786 void visit_load_scratch(isel_context *ctx, nir_intrinsic_instr *instr) {
6787 Builder bld(ctx->program, ctx->block);
6788 Temp rsrc = get_scratch_resource(ctx);
6789 Temp offset = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6790 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6791
6792 LoadEmitInfo info = {Operand(offset), dst, instr->dest.ssa.num_components,
6793 instr->dest.ssa.bit_size / 8u, rsrc};
6794 info.align_mul = nir_intrinsic_align_mul(instr);
6795 info.align_offset = nir_intrinsic_align_offset(instr);
6796 info.swizzle_component_size = 16;
6797 info.can_reorder = false;
6798 info.soffset = ctx->program->scratch_offset;
6799 emit_mubuf_load(ctx, bld, &info);
6800 }
6801
6802 void visit_store_scratch(isel_context *ctx, nir_intrinsic_instr *instr) {
6803 Builder bld(ctx->program, ctx->block);
6804 Temp rsrc = get_scratch_resource(ctx);
6805 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6806 Temp offset = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
6807
6808 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
6809 unsigned writemask = widen_mask(nir_intrinsic_write_mask(instr), elem_size_bytes);
6810
6811 unsigned write_count = 0;
6812 Temp write_datas[32];
6813 unsigned offsets[32];
6814 split_buffer_store(ctx, instr, false, RegType::vgpr, data, writemask,
6815 16, &write_count, write_datas, offsets);
6816
6817 for (unsigned i = 0; i < write_count; i++) {
6818 aco_opcode op = get_buffer_store_op(false, write_datas[i].bytes());
6819 bld.mubuf(op, rsrc, offset, ctx->program->scratch_offset, write_datas[i], offsets[i], true);
6820 }
6821 }
6822
6823 void visit_load_sample_mask_in(isel_context *ctx, nir_intrinsic_instr *instr) {
6824 uint8_t log2_ps_iter_samples;
6825 if (ctx->program->info->ps.force_persample) {
6826 log2_ps_iter_samples =
6827 util_logbase2(ctx->options->key.fs.num_samples);
6828 } else {
6829 log2_ps_iter_samples = ctx->options->key.fs.log2_ps_iter_samples;
6830 }
6831
6832 /* The bit pattern matches that used by fixed function fragment
6833 * processing. */
6834 static const unsigned ps_iter_masks[] = {
6835 0xffff, /* not used */
6836 0x5555,
6837 0x1111,
6838 0x0101,
6839 0x0001,
6840 };
6841 assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));
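   /* e.g. log2_ps_iter_samples = 2 gives 0x1111 (every 4th sample); shifted
    * left by sample_id and ANDed with sample_coverage below, only the
    * coverage bits this per-sample invocation is responsible for remain. */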
6842
6843 Builder bld(ctx->program, ctx->block);
6844
6845 Temp sample_id = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1),
6846 get_arg(ctx, ctx->args->ac.ancillary), Operand(8u), Operand(4u));
6847 Temp ps_iter_mask = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(ps_iter_masks[log2_ps_iter_samples]));
6848 Temp mask = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), sample_id, ps_iter_mask);
6849 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6850 bld.vop2(aco_opcode::v_and_b32, Definition(dst), mask, get_arg(ctx, ctx->args->ac.sample_coverage));
6851 }
6852
6853 void visit_emit_vertex_with_counter(isel_context *ctx, nir_intrinsic_instr *instr) {
6854 Builder bld(ctx->program, ctx->block);
6855
6856 unsigned stream = nir_intrinsic_stream_id(instr);
6857 Temp next_vertex = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6858 next_vertex = bld.v_mul_imm(bld.def(v1), next_vertex, 4u);
6859 nir_const_value *next_vertex_cv = nir_src_as_const_value(instr->src[0]);
6860
6861 /* get GSVS ring */
6862 Temp gsvs_ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_GSVS_GS * 16u));
6863
6864 unsigned num_components =
6865 ctx->program->info->gs.num_stream_output_components[stream];
6866 assert(num_components);
6867
6868 unsigned stride = 4u * num_components * ctx->shader->info.gs.vertices_out;
6869 unsigned stream_offset = 0;
6870 for (unsigned i = 0; i < stream; i++) {
6871 unsigned prev_stride = 4u * ctx->program->info->gs.num_stream_output_components[i] * ctx->shader->info.gs.vertices_out;
6872 stream_offset += prev_stride * ctx->program->wave_size;
6873 }
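   /* Each stream occupies stride bytes per lane in the GSVS ring, so stream
    * regions are laid out back to back at multiples of stride * wave_size. */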
6874
6875 /* Limit on the stride field for <= GFX7. */
6876 assert(stride < (1 << 14));
6877
6878 Temp gsvs_dwords[4];
6879 for (unsigned i = 0; i < 4; i++)
6880 gsvs_dwords[i] = bld.tmp(s1);
6881 bld.pseudo(aco_opcode::p_split_vector,
6882 Definition(gsvs_dwords[0]),
6883 Definition(gsvs_dwords[1]),
6884 Definition(gsvs_dwords[2]),
6885 Definition(gsvs_dwords[3]),
6886 gsvs_ring);
6887
6888 if (stream_offset) {
6889 Temp stream_offset_tmp = bld.copy(bld.def(s1), Operand(stream_offset));
6890
6891 Temp carry = bld.tmp(s1);
6892 gsvs_dwords[0] = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), gsvs_dwords[0], stream_offset_tmp);
6893 gsvs_dwords[1] = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.def(s1, scc), gsvs_dwords[1], Operand(0u), bld.scc(carry));
6894 }
6895
6896 gsvs_dwords[1] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), gsvs_dwords[1], Operand(S_008F04_STRIDE(stride)));
6897 gsvs_dwords[2] = bld.copy(bld.def(s1), Operand((uint32_t)ctx->program->wave_size));
6898
6899 gsvs_ring = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4),
6900 gsvs_dwords[0], gsvs_dwords[1], gsvs_dwords[2], gsvs_dwords[3]);
6901
6902 unsigned offset = 0;
6903 for (unsigned i = 0; i <= VARYING_SLOT_VAR31; i++) {
6904 if (ctx->program->info->gs.output_streams[i] != stream)
6905 continue;
6906
6907 for (unsigned j = 0; j < 4; j++) {
6908 if (!(ctx->program->info->gs.output_usage_mask[i] & (1 << j)))
6909 continue;
6910
6911 if (ctx->outputs.mask[i] & (1 << j)) {
6912 Operand vaddr_offset = next_vertex_cv ? Operand(v1) : Operand(next_vertex);
6913 unsigned const_offset = (offset + (next_vertex_cv ? next_vertex_cv->u32 : 0u)) * 4u;
6914 if (const_offset >= 4096u) {
6915 if (vaddr_offset.isUndefined())
6916 vaddr_offset = bld.copy(bld.def(v1), Operand(const_offset / 4096u * 4096u));
6917 else
6918 vaddr_offset = bld.vadd32(bld.def(v1), Operand(const_offset / 4096u * 4096u), vaddr_offset);
6919 const_offset %= 4096u;
6920 }
6921
6922 aco_ptr<MTBUF_instruction> mtbuf{create_instruction<MTBUF_instruction>(aco_opcode::tbuffer_store_format_x, Format::MTBUF, 4, 0)};
6923 mtbuf->operands[0] = Operand(gsvs_ring);
6924 mtbuf->operands[1] = vaddr_offset;
6925 mtbuf->operands[2] = Operand(get_arg(ctx, ctx->args->gs2vs_offset));
6926 mtbuf->operands[3] = Operand(ctx->outputs.temps[i * 4u + j]);
6927 mtbuf->offen = !vaddr_offset.isUndefined();
6928 mtbuf->dfmt = V_008F0C_BUF_DATA_FORMAT_32;
6929 mtbuf->nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
6930 mtbuf->offset = const_offset;
6931 mtbuf->glc = true;
6932 mtbuf->slc = true;
6933 mtbuf->barrier = barrier_gs_data;
6934 mtbuf->can_reorder = true;
6935 bld.insert(std::move(mtbuf));
6936 }
6937
6938 offset += ctx->shader->info.gs.vertices_out;
6939 }
6940
6941 /* outputs for the next vertex are undefined and keeping them around can
6942 * create invalid IR with control flow */
6943 ctx->outputs.mask[i] = 0;
6944 }
6945
6946 bld.sopp(aco_opcode::s_sendmsg, bld.m0(ctx->gs_wave_id), -1, sendmsg_gs(false, true, stream));
6947 }
6948
6949 Temp emit_boolean_reduce(isel_context *ctx, nir_op op, unsigned cluster_size, Temp src)
6950 {
6951 Builder bld(ctx->program, ctx->block);
6952
6953 if (cluster_size == 1) {
6954 return src;
6955 	} else if (op == nir_op_iand && cluster_size == 4) {
6956 //subgroupClusteredAnd(val, 4) -> ~wqm(exec & ~val)
6957 Temp tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src);
6958 return bld.sop1(Builder::s_not, bld.def(bld.lm), bld.def(s1, scc),
6959 bld.sop1(Builder::s_wqm, bld.def(bld.lm), bld.def(s1, scc), tmp));
6960 } else if (op == nir_op_ior && cluster_size == 4) {
6961 //subgroupClusteredOr(val, 4) -> wqm(val & exec)
6962 return bld.sop1(Builder::s_wqm, bld.def(bld.lm), bld.def(s1, scc),
6963 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm)));
6964 } else if (op == nir_op_iand && cluster_size == ctx->program->wave_size) {
6965 //subgroupAnd(val) -> (exec & ~val) == 0
6966 Temp tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src).def(1).getTemp();
6967 Temp cond = bool_to_vector_condition(ctx, emit_wqm(ctx, tmp));
6968 return bld.sop1(Builder::s_not, bld.def(bld.lm), bld.def(s1, scc), cond);
6969 } else if (op == nir_op_ior && cluster_size == ctx->program->wave_size) {
6970 //subgroupOr(val) -> (val & exec) != 0
6971 Temp tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm)).def(1).getTemp();
6972 return bool_to_vector_condition(ctx, tmp);
6973 } else if (op == nir_op_ixor && cluster_size == ctx->program->wave_size) {
6974 //subgroupXor(val) -> s_bcnt1_i32_b64(val & exec) & 1
6975 Temp tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
6976 tmp = bld.sop1(Builder::s_bcnt1_i32, bld.def(s1), bld.def(s1, scc), tmp);
6977 tmp = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), tmp, Operand(1u)).def(1).getTemp();
6978 return bool_to_vector_condition(ctx, tmp);
6979 } else {
6980 //subgroupClustered{And,Or,Xor}(val, n) ->
6981 //lane_id = v_mbcnt_hi_u32_b32(-1, v_mbcnt_lo_u32_b32(-1, 0)) ; just v_mbcnt_lo_u32_b32 on wave32
6982 //cluster_offset = ~(n - 1) & lane_id
6983 //cluster_mask = ((1 << n) - 1)
6984 //subgroupClusteredAnd():
6985 // return ((val | ~exec) >> cluster_offset) & cluster_mask == cluster_mask
6986 //subgroupClusteredOr():
6987 // return ((val & exec) >> cluster_offset) & cluster_mask != 0
6988 //subgroupClusteredXor():
6989 	   // return v_bcnt_u32_b32(((val & exec) >> cluster_offset) & cluster_mask, 0) & 1 != 0
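	   //e.g. cluster_size = 4, lane_id = 6: cluster_offset = ~3 & 6 = 4 and
	   //cluster_mask = 0xf, so the shift+and below extracts the ballot bits
	   //of lanes 4..7, the cluster containing lane 6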
6990 Temp lane_id = emit_mbcnt(ctx, bld.def(v1));
6991 Temp cluster_offset = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(~uint32_t(cluster_size - 1)), lane_id);
6992
6993 Temp tmp;
6994 if (op == nir_op_iand)
6995 tmp = bld.sop2(Builder::s_orn2, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
6996 else
6997 tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
6998
6999 uint32_t cluster_mask = cluster_size == 32 ? -1 : (1u << cluster_size) - 1u;
7000
7001 if (ctx->program->chip_class <= GFX7)
7002 tmp = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), tmp, cluster_offset);
7003 else if (ctx->program->wave_size == 64)
7004 tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), cluster_offset, tmp);
7005 else
7006 tmp = bld.vop2_e64(aco_opcode::v_lshrrev_b32, bld.def(v1), cluster_offset, tmp);
7007 tmp = emit_extract_vector(ctx, tmp, 0, v1);
7008 if (cluster_mask != 0xffffffff)
7009 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(cluster_mask), tmp);
7010
7011 Definition cmp_def = Definition();
7012 if (op == nir_op_iand) {
7013 cmp_def = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(bld.lm), Operand(cluster_mask), tmp).def(0);
7014 } else if (op == nir_op_ior) {
7015 cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), tmp).def(0);
7016 } else if (op == nir_op_ixor) {
7017 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(1u),
7018 bld.vop3(aco_opcode::v_bcnt_u32_b32, bld.def(v1), tmp, Operand(0u)));
7019 cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), tmp).def(0);
7020 }
7021 cmp_def.setHint(vcc);
7022 return cmp_def.getTemp();
7023 }
7024 }
7025
7026 Temp emit_boolean_exclusive_scan(isel_context *ctx, nir_op op, Temp src)
7027 {
7028 Builder bld(ctx->program, ctx->block);
7029
7030 //subgroupExclusiveAnd(val) -> mbcnt(exec & ~val) == 0
7031 //subgroupExclusiveOr(val) -> mbcnt(val & exec) != 0
7032 //subgroupExclusiveXor(val) -> mbcnt(val & exec) & 1 != 0
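   //e.g. exclusiveOr with val set on lanes {0, 2} and all lanes active:
   //mbcnt(val & exec) is 0 on lane 0, 1 on lanes 1-2 and 2 afterwards,
   //so the scan is false on lane 0 and true on every lane above it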
7033 Temp tmp;
7034 if (op == nir_op_iand)
7035 tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src);
7036 else
7037 	   tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
7038
7039 Builder::Result lohi = bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), bld.def(s1), tmp);
7040 Temp lo = lohi.def(0).getTemp();
7041 Temp hi = lohi.def(1).getTemp();
7042 Temp mbcnt = emit_mbcnt(ctx, bld.def(v1), Operand(lo), Operand(hi));
7043
7044 Definition cmp_def = Definition();
7045 if (op == nir_op_iand)
7046 cmp_def = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(bld.lm), Operand(0u), mbcnt).def(0);
7047 else if (op == nir_op_ior)
7048 cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), mbcnt).def(0);
7049 else if (op == nir_op_ixor)
7050 cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u),
7051 bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(1u), mbcnt)).def(0);
7052 cmp_def.setHint(vcc);
7053 return cmp_def.getTemp();
7054 }
7055
7056 Temp emit_boolean_inclusive_scan(isel_context *ctx, nir_op op, Temp src)
7057 {
7058 Builder bld(ctx->program, ctx->block);
7059
7060 //subgroupInclusiveAnd(val) -> subgroupExclusiveAnd(val) && val
7061 //subgroupInclusiveOr(val) -> subgroupExclusiveOr(val) || val
7062 //subgroupInclusiveXor(val) -> subgroupExclusiveXor(val) ^^ val
7063 Temp tmp = emit_boolean_exclusive_scan(ctx, op, src);
7064 if (op == nir_op_iand)
7065 return bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), tmp, src);
7066 else if (op == nir_op_ior)
7067 return bld.sop2(Builder::s_or, bld.def(bld.lm), bld.def(s1, scc), tmp, src);
7068 else if (op == nir_op_ixor)
7069 return bld.sop2(Builder::s_xor, bld.def(bld.lm), bld.def(s1, scc), tmp, src);
7070
7071 assert(false);
7072 return Temp();
7073 }
7074
7075 void emit_uniform_subgroup(isel_context *ctx, nir_intrinsic_instr *instr, Temp src)
7076 {
7077 Builder bld(ctx->program, ctx->block);
7078 Definition dst(get_ssa_temp(ctx, &instr->dest.ssa));
7079 if (src.regClass().type() == RegType::vgpr) {
7080 bld.pseudo(aco_opcode::p_as_uniform, dst, src);
7081 } else if (src.regClass() == s1) {
7082 bld.sop1(aco_opcode::s_mov_b32, dst, src);
7083 } else if (src.regClass() == s2) {
7084 bld.sop1(aco_opcode::s_mov_b64, dst, src);
7085 } else {
7086 fprintf(stderr, "Unimplemented NIR instr bit size: ");
7087 nir_print_instr(&instr->instr, stderr);
7088 fprintf(stderr, "\n");
7089 }
7090 }
7091
7092 void emit_interp_center(isel_context *ctx, Temp dst, Temp pos1, Temp pos2)
7093 {
7094 Builder bld(ctx->program, ctx->block);
7095 Temp persp_center = get_arg(ctx, ctx->args->ac.persp_center);
7096 Temp p1 = emit_extract_vector(ctx, persp_center, 0, v1);
7097 Temp p2 = emit_extract_vector(ctx, persp_center, 1, v1);
7098
7099 Temp ddx_1, ddx_2, ddy_1, ddy_2;
7100 uint32_t dpp_ctrl0 = dpp_quad_perm(0, 0, 0, 0);
7101 uint32_t dpp_ctrl1 = dpp_quad_perm(1, 1, 1, 1);
7102 uint32_t dpp_ctrl2 = dpp_quad_perm(2, 2, 2, 2);
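   /* dpp_quad_perm(n, n, n, n) broadcasts lane n of each quad: lane 0 is the
    * top-left pixel, lane 1 the top-right and lane 2 the bottom-left, which
    * yields the horizontal and vertical deltas below. */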
7103
7104 /* Build DD X/Y */
7105 if (ctx->program->chip_class >= GFX8) {
7106 Temp tl_1 = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), p1, dpp_ctrl0);
7107 ddx_1 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p1, tl_1, dpp_ctrl1);
7108 ddy_1 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p1, tl_1, dpp_ctrl2);
7109 Temp tl_2 = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), p2, dpp_ctrl0);
7110 ddx_2 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p2, tl_2, dpp_ctrl1);
7111 ddy_2 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p2, tl_2, dpp_ctrl2);
7112 } else {
7113 Temp tl_1 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p1, (1 << 15) | dpp_ctrl0);
7114 ddx_1 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p1, (1 << 15) | dpp_ctrl1);
7115 ddx_1 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddx_1, tl_1);
7116 ddx_2 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p1, (1 << 15) | dpp_ctrl2);
7117 ddx_2 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddx_2, tl_1);
7118 Temp tl_2 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p2, (1 << 15) | dpp_ctrl0);
7119 ddy_1 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p2, (1 << 15) | dpp_ctrl1);
7120 ddy_1 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddy_1, tl_2);
7121 ddy_2 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p2, (1 << 15) | dpp_ctrl2);
7122 ddy_2 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddy_2, tl_2);
7123 }
7124
7125 /* res_k = p_k + ddx_k * pos1 + ddy_k * pos2 */
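   /* i.e. a first-order extrapolation of the interpolants by (pos1, pos2)
    * from the current position, using the per-quad derivatives above. */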
7126 Temp tmp1 = bld.vop3(aco_opcode::v_mad_f32, bld.def(v1), ddx_1, pos1, p1);
7127 Temp tmp2 = bld.vop3(aco_opcode::v_mad_f32, bld.def(v1), ddx_2, pos1, p2);
7128 tmp1 = bld.vop3(aco_opcode::v_mad_f32, bld.def(v1), ddy_1, pos2, tmp1);
7129 tmp2 = bld.vop3(aco_opcode::v_mad_f32, bld.def(v1), ddy_2, pos2, tmp2);
7130 Temp wqm1 = bld.tmp(v1);
7131 emit_wqm(ctx, tmp1, wqm1, true);
7132 Temp wqm2 = bld.tmp(v1);
7133 emit_wqm(ctx, tmp2, wqm2, true);
7134 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), wqm1, wqm2);
7135 return;
7136 }
7137
7138 void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr)
7139 {
7140 Builder bld(ctx->program, ctx->block);
7141 switch(instr->intrinsic) {
7142 case nir_intrinsic_load_barycentric_sample:
7143 case nir_intrinsic_load_barycentric_pixel:
7144 case nir_intrinsic_load_barycentric_centroid: {
7145 glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(instr);
7146 Temp bary = Temp(0, s2);
7147 switch (mode) {
7148 case INTERP_MODE_SMOOTH:
7149 case INTERP_MODE_NONE:
7150 if (instr->intrinsic == nir_intrinsic_load_barycentric_pixel)
7151 bary = get_arg(ctx, ctx->args->ac.persp_center);
7152 else if (instr->intrinsic == nir_intrinsic_load_barycentric_centroid)
7153 bary = ctx->persp_centroid;
7154 else if (instr->intrinsic == nir_intrinsic_load_barycentric_sample)
7155 bary = get_arg(ctx, ctx->args->ac.persp_sample);
7156 break;
7157 case INTERP_MODE_NOPERSPECTIVE:
7158 if (instr->intrinsic == nir_intrinsic_load_barycentric_pixel)
7159 bary = get_arg(ctx, ctx->args->ac.linear_center);
7160 else if (instr->intrinsic == nir_intrinsic_load_barycentric_centroid)
7161 bary = ctx->linear_centroid;
7162 else if (instr->intrinsic == nir_intrinsic_load_barycentric_sample)
7163 bary = get_arg(ctx, ctx->args->ac.linear_sample);
7164 break;
7165 default:
7166 break;
7167 }
7168 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7169 Temp p1 = emit_extract_vector(ctx, bary, 0, v1);
7170 Temp p2 = emit_extract_vector(ctx, bary, 1, v1);
7171 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
7172 Operand(p1), Operand(p2));
7173 emit_split_vector(ctx, dst, 2);
7174 break;
7175 }
7176 case nir_intrinsic_load_barycentric_model: {
7177 Temp model = get_arg(ctx, ctx->args->ac.pull_model);
7178
7179 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7180 Temp p1 = emit_extract_vector(ctx, model, 0, v1);
7181 Temp p2 = emit_extract_vector(ctx, model, 1, v1);
7182 Temp p3 = emit_extract_vector(ctx, model, 2, v1);
7183 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
7184 Operand(p1), Operand(p2), Operand(p3));
7185 emit_split_vector(ctx, dst, 3);
7186 break;
7187 }
7188 case nir_intrinsic_load_barycentric_at_sample: {
7189 uint32_t sample_pos_offset = RING_PS_SAMPLE_POSITIONS * 16;
7190 switch (ctx->options->key.fs.num_samples) {
7191 case 2: sample_pos_offset += 1 << 3; break;
7192 case 4: sample_pos_offset += 3 << 3; break;
7193 case 8: sample_pos_offset += 7 << 3; break;
7194 default: break;
7195 }
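      /* The sample position tables for 1x/2x/4x/8x are stored back to back,
       * one pair of floats (8 bytes) per sample, so the table for N samples
       * starts (N - 1) * 8 bytes into the ring. */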
7196 Temp sample_pos;
7197 Temp addr = get_ssa_temp(ctx, instr->src[0].ssa);
7198 nir_const_value* const_addr = nir_src_as_const_value(instr->src[0]);
7199 Temp private_segment_buffer = ctx->program->private_segment_buffer;
7200 if (addr.type() == RegType::sgpr) {
7201 Operand offset;
7202 if (const_addr) {
7203 sample_pos_offset += const_addr->u32 << 3;
7204 offset = Operand(sample_pos_offset);
7205 } else if (ctx->options->chip_class >= GFX9) {
7206 offset = bld.sop2(aco_opcode::s_lshl3_add_u32, bld.def(s1), bld.def(s1, scc), addr, Operand(sample_pos_offset));
7207 } else {
7208 offset = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), addr, Operand(3u));
7209 	         offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset, Operand(sample_pos_offset));
7210 }
7211
7212 Operand off = bld.copy(bld.def(s1), Operand(offset));
7213 sample_pos = bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), private_segment_buffer, off);
7214
7215 } else if (ctx->options->chip_class >= GFX9) {
7216 addr = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(3u), addr);
7217 sample_pos = bld.global(aco_opcode::global_load_dwordx2, bld.def(v2), addr, private_segment_buffer, sample_pos_offset);
7218 } else if (ctx->options->chip_class >= GFX7) {
7219 /* addr += private_segment_buffer + sample_pos_offset */
7220 Temp tmp0 = bld.tmp(s1);
7221 Temp tmp1 = bld.tmp(s1);
7222 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp0), Definition(tmp1), private_segment_buffer);
7223 Definition scc_tmp = bld.def(s1, scc);
7224 tmp0 = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), scc_tmp, tmp0, Operand(sample_pos_offset));
7225 tmp1 = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.def(s1, scc), tmp1, Operand(0u), bld.scc(scc_tmp.getTemp()));
7226 addr = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(3u), addr);
7227 Temp pck0 = bld.tmp(v1);
7228 Temp carry = bld.vadd32(Definition(pck0), tmp0, addr, true).def(1).getTemp();
7229 tmp1 = as_vgpr(ctx, tmp1);
7230 Temp pck1 = bld.vop2_e64(aco_opcode::v_addc_co_u32, bld.def(v1), bld.hint_vcc(bld.def(bld.lm)), tmp1, Operand(0u), carry);
7231 addr = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), pck0, pck1);
7232
7233 /* sample_pos = flat_load_dwordx2 addr */
7234 sample_pos = bld.flat(aco_opcode::flat_load_dwordx2, bld.def(v2), addr, Operand(s1));
7235 } else {
7236 assert(ctx->options->chip_class == GFX6);
7237
7238 uint32_t rsrc_conf = S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
7239 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
7240 Temp rsrc = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), private_segment_buffer, Operand(0u), Operand(rsrc_conf));
7241
7242 addr = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(3u), addr);
7243 addr = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), addr, Operand(0u));
7244
7245 sample_pos = bld.tmp(v2);
7246
7247 aco_ptr<MUBUF_instruction> load{create_instruction<MUBUF_instruction>(aco_opcode::buffer_load_dwordx2, Format::MUBUF, 3, 1)};
7248 load->definitions[0] = Definition(sample_pos);
7249 load->operands[0] = Operand(rsrc);
7250 load->operands[1] = Operand(addr);
7251 load->operands[2] = Operand(0u);
7252 load->offset = sample_pos_offset;
7253 load->offen = 0;
7254 load->addr64 = true;
7255 load->glc = false;
7256 load->dlc = false;
7257 load->disable_wqm = false;
7258 load->barrier = barrier_none;
7259 load->can_reorder = true;
7260 ctx->block->instructions.emplace_back(std::move(load));
7261 }
7262
7263 /* sample_pos -= 0.5 */
7264 Temp pos1 = bld.tmp(RegClass(sample_pos.type(), 1));
7265 Temp pos2 = bld.tmp(RegClass(sample_pos.type(), 1));
7266 bld.pseudo(aco_opcode::p_split_vector, Definition(pos1), Definition(pos2), sample_pos);
7267 pos1 = bld.vop2_e64(aco_opcode::v_sub_f32, bld.def(v1), pos1, Operand(0x3f000000u));
7268 pos2 = bld.vop2_e64(aco_opcode::v_sub_f32, bld.def(v1), pos2, Operand(0x3f000000u));
7269
7270 emit_interp_center(ctx, get_ssa_temp(ctx, &instr->dest.ssa), pos1, pos2);
7271 break;
7272 }
7273 case nir_intrinsic_load_barycentric_at_offset: {
7274 Temp offset = get_ssa_temp(ctx, instr->src[0].ssa);
7275 RegClass rc = RegClass(offset.type(), 1);
7276 Temp pos1 = bld.tmp(rc), pos2 = bld.tmp(rc);
7277 bld.pseudo(aco_opcode::p_split_vector, Definition(pos1), Definition(pos2), offset);
7278 emit_interp_center(ctx, get_ssa_temp(ctx, &instr->dest.ssa), pos1, pos2);
7279 break;
7280 }
7281 case nir_intrinsic_load_front_face: {
7282 bld.vopc(aco_opcode::v_cmp_lg_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
7283 Operand(0u), get_arg(ctx, ctx->args->ac.front_face)).def(0).setHint(vcc);
7284 break;
7285 }
7286 case nir_intrinsic_load_view_index: {
7287 if (ctx->stage & (sw_vs | sw_gs | sw_tcs | sw_tes)) {
7288 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7289 bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.view_index)));
7290 break;
7291 }
7292
7293 /* fallthrough */
7294 }
7295 case nir_intrinsic_load_layer_id: {
7296 unsigned idx = nir_intrinsic_base(instr);
7297 bld.vintrp(aco_opcode::v_interp_mov_f32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
7298 Operand(2u), bld.m0(get_arg(ctx, ctx->args->ac.prim_mask)), idx, 0);
7299 break;
7300 }
7301 case nir_intrinsic_load_frag_coord: {
7302 emit_load_frag_coord(ctx, get_ssa_temp(ctx, &instr->dest.ssa), 4);
7303 break;
7304 }
7305 case nir_intrinsic_load_sample_pos: {
7306 Temp posx = get_arg(ctx, ctx->args->ac.frag_pos[0]);
7307 Temp posy = get_arg(ctx, ctx->args->ac.frag_pos[1]);
7308 bld.pseudo(aco_opcode::p_create_vector, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
7309 posx.id() ? bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posx) : Operand(0u),
7310 posy.id() ? bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posy) : Operand(0u));
7311 break;
7312 }
7313 case nir_intrinsic_load_tess_coord:
7314 visit_load_tess_coord(ctx, instr);
7315 break;
7316 case nir_intrinsic_load_interpolated_input:
7317 visit_load_interpolated_input(ctx, instr);
7318 break;
7319 case nir_intrinsic_store_output:
7320 visit_store_output(ctx, instr);
7321 break;
7322 case nir_intrinsic_load_input:
7323 case nir_intrinsic_load_input_vertex:
7324 visit_load_input(ctx, instr);
7325 break;
7326 case nir_intrinsic_load_output:
7327 visit_load_output(ctx, instr);
7328 break;
7329 case nir_intrinsic_load_per_vertex_input:
7330 visit_load_per_vertex_input(ctx, instr);
7331 break;
7332 case nir_intrinsic_load_per_vertex_output:
7333 visit_load_per_vertex_output(ctx, instr);
7334 break;
7335 case nir_intrinsic_store_per_vertex_output:
7336 visit_store_per_vertex_output(ctx, instr);
7337 break;
7338 case nir_intrinsic_load_ubo:
7339 visit_load_ubo(ctx, instr);
7340 break;
7341 case nir_intrinsic_load_push_constant:
7342 visit_load_push_constant(ctx, instr);
7343 break;
7344 case nir_intrinsic_load_constant:
7345 visit_load_constant(ctx, instr);
7346 break;
7347 case nir_intrinsic_vulkan_resource_index:
7348 visit_load_resource(ctx, instr);
7349 break;
7350 case nir_intrinsic_discard:
7351 visit_discard(ctx, instr);
7352 break;
7353 case nir_intrinsic_discard_if:
7354 visit_discard_if(ctx, instr);
7355 break;
7356 case nir_intrinsic_load_shared:
7357 visit_load_shared(ctx, instr);
7358 break;
7359 case nir_intrinsic_store_shared:
7360 visit_store_shared(ctx, instr);
7361 break;
7362 case nir_intrinsic_shared_atomic_add:
7363 case nir_intrinsic_shared_atomic_imin:
7364 case nir_intrinsic_shared_atomic_umin:
7365 case nir_intrinsic_shared_atomic_imax:
7366 case nir_intrinsic_shared_atomic_umax:
7367 case nir_intrinsic_shared_atomic_and:
7368 case nir_intrinsic_shared_atomic_or:
7369 case nir_intrinsic_shared_atomic_xor:
7370 case nir_intrinsic_shared_atomic_exchange:
7371 case nir_intrinsic_shared_atomic_comp_swap:
7372 visit_shared_atomic(ctx, instr);
7373 break;
7374 case nir_intrinsic_image_deref_load:
7375 visit_image_load(ctx, instr);
7376 break;
7377 case nir_intrinsic_image_deref_store:
7378 visit_image_store(ctx, instr);
7379 break;
7380 case nir_intrinsic_image_deref_atomic_add:
7381 case nir_intrinsic_image_deref_atomic_umin:
7382 case nir_intrinsic_image_deref_atomic_imin:
7383 case nir_intrinsic_image_deref_atomic_umax:
7384 case nir_intrinsic_image_deref_atomic_imax:
7385 case nir_intrinsic_image_deref_atomic_and:
7386 case nir_intrinsic_image_deref_atomic_or:
7387 case nir_intrinsic_image_deref_atomic_xor:
7388 case nir_intrinsic_image_deref_atomic_exchange:
7389 case nir_intrinsic_image_deref_atomic_comp_swap:
7390 visit_image_atomic(ctx, instr);
7391 break;
7392 case nir_intrinsic_image_deref_size:
7393 visit_image_size(ctx, instr);
7394 break;
7395 case nir_intrinsic_load_ssbo:
7396 visit_load_ssbo(ctx, instr);
7397 break;
7398 case nir_intrinsic_store_ssbo:
7399 visit_store_ssbo(ctx, instr);
7400 break;
7401 case nir_intrinsic_load_global:
7402 visit_load_global(ctx, instr);
7403 break;
7404 case nir_intrinsic_store_global:
7405 visit_store_global(ctx, instr);
7406 break;
7407 case nir_intrinsic_global_atomic_add:
7408 case nir_intrinsic_global_atomic_imin:
7409 case nir_intrinsic_global_atomic_umin:
7410 case nir_intrinsic_global_atomic_imax:
7411 case nir_intrinsic_global_atomic_umax:
7412 case nir_intrinsic_global_atomic_and:
7413 case nir_intrinsic_global_atomic_or:
7414 case nir_intrinsic_global_atomic_xor:
7415 case nir_intrinsic_global_atomic_exchange:
7416 case nir_intrinsic_global_atomic_comp_swap:
7417 visit_global_atomic(ctx, instr);
7418 break;
7419 case nir_intrinsic_ssbo_atomic_add:
7420 case nir_intrinsic_ssbo_atomic_imin:
7421 case nir_intrinsic_ssbo_atomic_umin:
7422 case nir_intrinsic_ssbo_atomic_imax:
7423 case nir_intrinsic_ssbo_atomic_umax:
7424 case nir_intrinsic_ssbo_atomic_and:
7425 case nir_intrinsic_ssbo_atomic_or:
7426 case nir_intrinsic_ssbo_atomic_xor:
7427 case nir_intrinsic_ssbo_atomic_exchange:
7428 case nir_intrinsic_ssbo_atomic_comp_swap:
7429 visit_atomic_ssbo(ctx, instr);
7430 break;
7431 case nir_intrinsic_load_scratch:
7432 visit_load_scratch(ctx, instr);
7433 break;
7434 case nir_intrinsic_store_scratch:
7435 visit_store_scratch(ctx, instr);
7436 break;
7437 case nir_intrinsic_get_buffer_size:
7438 visit_get_buffer_size(ctx, instr);
7439 break;
7440 case nir_intrinsic_control_barrier: {
7441 if (ctx->program->chip_class == GFX6 && ctx->shader->info.stage == MESA_SHADER_TESS_CTRL) {
7442 /* GFX6 only (thanks to a hw bug workaround):
7443 	       * The real barrier instruction isn't needed, because an entire patch
7444 * always fits into a single wave.
7445 */
7446 break;
7447 }
7448
7449 if (ctx->program->workgroup_size > ctx->program->wave_size)
7450 bld.sopp(aco_opcode::s_barrier);
7451
7452 break;
7453 }
7454 case nir_intrinsic_memory_barrier_tcs_patch:
7455 case nir_intrinsic_group_memory_barrier:
7456 case nir_intrinsic_memory_barrier:
7457 case nir_intrinsic_memory_barrier_buffer:
7458 case nir_intrinsic_memory_barrier_image:
7459 case nir_intrinsic_memory_barrier_shared:
7460 emit_memory_barrier(ctx, instr);
7461 break;
7462 case nir_intrinsic_load_num_work_groups: {
7463 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7464 bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.num_work_groups)));
7465 emit_split_vector(ctx, dst, 3);
7466 break;
7467 }
7468 case nir_intrinsic_load_local_invocation_id: {
7469 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7470 bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.local_invocation_ids)));
7471 emit_split_vector(ctx, dst, 3);
7472 break;
7473 }
7474 case nir_intrinsic_load_work_group_id: {
7475 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7476 struct ac_arg *args = ctx->args->ac.workgroup_ids;
7477 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
7478 args[0].used ? Operand(get_arg(ctx, args[0])) : Operand(0u),
7479 args[1].used ? Operand(get_arg(ctx, args[1])) : Operand(0u),
7480 args[2].used ? Operand(get_arg(ctx, args[2])) : Operand(0u));
7481 emit_split_vector(ctx, dst, 3);
7482 break;
7483 }
7484 case nir_intrinsic_load_local_invocation_index: {
7485 Temp id = emit_mbcnt(ctx, bld.def(v1));
7486
7487 	      /* The tg_size bits [6:11] contain the subgroup id;
7488 	       * multiply it by the wave size, then OR the thread id into it.
7489 	       */
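	      /* e.g. on wave64, subgroup id 2 and lane 5: (2 << 6) | 5 = 133 = 2 * 64 + 5 */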
7490 if (ctx->program->wave_size == 64) {
7491 	         /* After the s_and, the bits are already multiplied by 64 (left-shifted by 6), so we can feed the result straight to v_or. */
7492 Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0xfc0u),
7493 get_arg(ctx, ctx->args->ac.tg_size));
7494 bld.vop2(aco_opcode::v_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), tg_num, id);
7495 } else {
7496 /* Extract the bit field and multiply the result by 32 (left shift by 5), then do the OR */
7497 Temp tg_num = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
7498 get_arg(ctx, ctx->args->ac.tg_size), Operand(0x6u | (0x6u << 16)));
7499 bld.vop3(aco_opcode::v_lshl_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), tg_num, Operand(0x5u), id);
7500 }
7501 break;
7502 }
7503 case nir_intrinsic_load_subgroup_id: {
7504 if (ctx->stage == compute_cs) {
7505 bld.sop2(aco_opcode::s_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), bld.def(s1, scc),
7506 get_arg(ctx, ctx->args->ac.tg_size), Operand(0x6u | (0x6u << 16)));
7507 } else {
7508 bld.sop1(aco_opcode::s_mov_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), Operand(0x0u));
7509 }
7510 break;
7511 }
7512 case nir_intrinsic_load_subgroup_invocation: {
7513 emit_mbcnt(ctx, Definition(get_ssa_temp(ctx, &instr->dest.ssa)));
7514 break;
7515 }
7516 case nir_intrinsic_load_num_subgroups: {
7517 if (ctx->stage == compute_cs)
7518 bld.sop2(aco_opcode::s_and_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), bld.def(s1, scc), Operand(0x3fu),
7519 get_arg(ctx, ctx->args->ac.tg_size));
7520 else
7521 bld.sop1(aco_opcode::s_mov_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), Operand(0x1u));
7522 break;
7523 }
7524 case nir_intrinsic_ballot: {
7525 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7526 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7527 Definition tmp = bld.def(dst.regClass());
7528 Definition lanemask_tmp = dst.size() == bld.lm.size() ? tmp : bld.def(src.regClass());
7529 if (instr->src[0].ssa->bit_size == 1) {
7530 assert(src.regClass() == bld.lm);
7531 bld.sop2(Builder::s_and, lanemask_tmp, bld.def(s1, scc), Operand(exec, bld.lm), src);
7532 } else if (instr->src[0].ssa->bit_size == 32 && src.regClass() == v1) {
7533 bld.vopc(aco_opcode::v_cmp_lg_u32, lanemask_tmp, Operand(0u), src);
7534 } else if (instr->src[0].ssa->bit_size == 64 && src.regClass() == v2) {
7535 bld.vopc(aco_opcode::v_cmp_lg_u64, lanemask_tmp, Operand(0u), src);
7536 } else {
7537 fprintf(stderr, "Unimplemented NIR instr bit size: ");
7538 nir_print_instr(&instr->instr, stderr);
7539 fprintf(stderr, "\n");
7540 }
7541 if (dst.size() != bld.lm.size()) {
7542 /* Wave32 with ballot size set to 64 */
7543 bld.pseudo(aco_opcode::p_create_vector, Definition(tmp), lanemask_tmp.getTemp(), Operand(0u));
7544 }
7545 emit_wqm(ctx, tmp.getTemp(), dst);
7546 break;
7547 }
7548 case nir_intrinsic_shuffle:
7549 case nir_intrinsic_read_invocation: {
7550 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7551 if (!nir_src_is_divergent(instr->src[0])) {
7552 emit_uniform_subgroup(ctx, instr, src);
7553 } else {
7554 Temp tid = get_ssa_temp(ctx, instr->src[1].ssa);
7555 if (instr->intrinsic == nir_intrinsic_read_invocation || !nir_src_is_divergent(instr->src[1]))
7556 tid = bld.as_uniform(tid);
7557 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7558 if (src.regClass() == v1b || src.regClass() == v2b) {
7559 Temp tmp = bld.tmp(v1);
7560 tmp = emit_wqm(ctx, emit_bpermute(ctx, bld, tid, src), tmp);
7561 if (dst.type() == RegType::vgpr)
7562 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(src.regClass() == v1b ? v3b : v2b), tmp);
7563 else
7564 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
7565 } else if (src.regClass() == v1) {
7566 emit_wqm(ctx, emit_bpermute(ctx, bld, tid, src), dst);
7567 } else if (src.regClass() == v2) {
7568 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
7569 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
7570 lo = emit_wqm(ctx, emit_bpermute(ctx, bld, tid, lo));
7571 hi = emit_wqm(ctx, emit_bpermute(ctx, bld, tid, hi));
7572 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
7573 emit_split_vector(ctx, dst, 2);
7574 } else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == s1) {
7575 assert(src.regClass() == bld.lm);
7576 Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src, tid);
7577 bool_to_vector_condition(ctx, emit_wqm(ctx, tmp), dst);
7578 } else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == v1) {
7579 assert(src.regClass() == bld.lm);
7580 Temp tmp;
7581 if (ctx->program->chip_class <= GFX7)
7582 tmp = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), src, tid);
7583 else if (ctx->program->wave_size == 64)
7584 tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), tid, src);
7585 else
7586 tmp = bld.vop2_e64(aco_opcode::v_lshrrev_b32, bld.def(v1), tid, src);
7587 tmp = emit_extract_vector(ctx, tmp, 0, v1);
7588 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(1u), tmp);
7589 emit_wqm(ctx, bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), tmp), dst);
7590 } else {
7591 fprintf(stderr, "Unimplemented NIR instr bit size: ");
7592 nir_print_instr(&instr->instr, stderr);
7593 fprintf(stderr, "\n");
7594 }
7595 }
7596 break;
7597 }
7598 case nir_intrinsic_load_sample_id: {
7599 bld.vop3(aco_opcode::v_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
7600 get_arg(ctx, ctx->args->ac.ancillary), Operand(8u), Operand(4u));
7601 break;
7602 }
7603 case nir_intrinsic_load_sample_mask_in: {
7604 visit_load_sample_mask_in(ctx, instr);
7605 break;
7606 }
7607 case nir_intrinsic_read_first_invocation: {
7608 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7609 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7610 if (src.regClass() == v1b || src.regClass() == v2b || src.regClass() == v1) {
7611 emit_wqm(ctx,
7612 bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), src),
7613 dst);
7614 } else if (src.regClass() == v2) {
7615 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
7616 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
7617 lo = emit_wqm(ctx, bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), lo));
7618 hi = emit_wqm(ctx, bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), hi));
7619 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
7620 emit_split_vector(ctx, dst, 2);
7621 } else if (instr->dest.ssa.bit_size == 1) {
7622 assert(src.regClass() == bld.lm);
7623 Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src,
7624 bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm)));
7625 bool_to_vector_condition(ctx, emit_wqm(ctx, tmp), dst);
7626 } else if (src.regClass() == s1) {
7627 bld.sop1(aco_opcode::s_mov_b32, Definition(dst), src);
7628 } else if (src.regClass() == s2) {
7629 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src);
7630 } else {
7631 fprintf(stderr, "Unimplemented NIR instr bit size: ");
7632 nir_print_instr(&instr->instr, stderr);
7633 fprintf(stderr, "\n");
7634 }
7635 break;
7636 }
7637 case nir_intrinsic_vote_all: {
7638 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7639 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7640 assert(src.regClass() == bld.lm);
7641 assert(dst.regClass() == bld.lm);
7642
7643 Temp tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src).def(1).getTemp();
7644 Temp cond = bool_to_vector_condition(ctx, emit_wqm(ctx, tmp));
7645 bld.sop1(Builder::s_not, Definition(dst), bld.def(s1, scc), cond);
7646 break;
7647 }
7648 case nir_intrinsic_vote_any: {
7649 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7650 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7651 assert(src.regClass() == bld.lm);
7652 assert(dst.regClass() == bld.lm);
7653
7654 Temp tmp = bool_to_scalar_condition(ctx, src);
7655 bool_to_vector_condition(ctx, emit_wqm(ctx, tmp), dst);
7656 break;
7657 }
7658 case nir_intrinsic_reduce:
7659 case nir_intrinsic_inclusive_scan:
7660 case nir_intrinsic_exclusive_scan: {
7661 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7662 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7663 nir_op op = (nir_op) nir_intrinsic_reduction_op(instr);
7664 unsigned cluster_size = instr->intrinsic == nir_intrinsic_reduce ?
7665 nir_intrinsic_cluster_size(instr) : 0;
7666 cluster_size = util_next_power_of_two(MIN2(cluster_size ? cluster_size : ctx->program->wave_size, ctx->program->wave_size));
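      /* e.g. a clustered reduce over 6 lanes is rounded up to 8, and cluster
       * size 0 (a plain subgroup reduce) becomes the full wave size. */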
7667
7668 if (!nir_src_is_divergent(instr->src[0]) && (op == nir_op_ior || op == nir_op_iand)) {
7669 emit_uniform_subgroup(ctx, instr, src);
7670 } else if (instr->dest.ssa.bit_size == 1) {
7671 if (op == nir_op_imul || op == nir_op_umin || op == nir_op_imin)
7672 op = nir_op_iand;
7673 else if (op == nir_op_iadd)
7674 op = nir_op_ixor;
7675 else if (op == nir_op_umax || op == nir_op_imax)
7676 op = nir_op_ior;
7677 assert(op == nir_op_iand || op == nir_op_ior || op == nir_op_ixor);
7678
7679 switch (instr->intrinsic) {
7680 case nir_intrinsic_reduce:
7681 emit_wqm(ctx, emit_boolean_reduce(ctx, op, cluster_size, src), dst);
7682 break;
7683 case nir_intrinsic_exclusive_scan:
7684 emit_wqm(ctx, emit_boolean_exclusive_scan(ctx, op, src), dst);
7685 break;
7686 case nir_intrinsic_inclusive_scan:
7687 emit_wqm(ctx, emit_boolean_inclusive_scan(ctx, op, src), dst);
7688 break;
7689 default:
7690 assert(false);
7691 }
7692 } else if (cluster_size == 1) {
7693 bld.copy(Definition(dst), src);
7694 } else {
7695 unsigned bit_size = instr->src[0].ssa->bit_size;
7696
7697 src = emit_extract_vector(ctx, src, 0, RegClass::get(RegType::vgpr, bit_size / 8));
7698
7699 ReduceOp reduce_op;
7700 switch (op) {
7701 #define CASEI(name) case nir_op_##name: reduce_op = (bit_size == 32) ? name##32 : (bit_size == 16) ? name##16 : (bit_size == 8) ? name##8 : name##64; break;
7702 #define CASEF(name) case nir_op_##name: reduce_op = (bit_size == 32) ? name##32 : (bit_size == 16) ? name##16 : name##64; break;
7703 CASEI(iadd)
7704 CASEI(imul)
7705 CASEI(imin)
7706 CASEI(umin)
7707 CASEI(imax)
7708 CASEI(umax)
7709 CASEI(iand)
7710 CASEI(ior)
7711 CASEI(ixor)
7712 CASEF(fadd)
7713 CASEF(fmul)
7714 CASEF(fmin)
7715 CASEF(fmax)
7716 default:
7717 unreachable("unknown reduction op");
7718 #undef CASEI
7719 #undef CASEF
7720 }
7721
7722 aco_opcode aco_op;
7723 switch (instr->intrinsic) {
7724 case nir_intrinsic_reduce: aco_op = aco_opcode::p_reduce; break;
7725 case nir_intrinsic_inclusive_scan: aco_op = aco_opcode::p_inclusive_scan; break;
7726 case nir_intrinsic_exclusive_scan: aco_op = aco_opcode::p_exclusive_scan; break;
7727 default:
7728 unreachable("unknown reduce intrinsic");
7729 }
7730
7731 aco_ptr<Pseudo_reduction_instruction> reduce{create_instruction<Pseudo_reduction_instruction>(aco_op, Format::PSEUDO_REDUCTION, 3, 5)};
7732 reduce->operands[0] = Operand(src);
7733 // filled in by aco_reduce_assign.cpp, used internally as part of the
7734 // reduce sequence
7735 assert(dst.size() == 1 || dst.size() == 2);
7736 reduce->operands[1] = Operand(RegClass(RegType::vgpr, dst.size()).as_linear());
7737 reduce->operands[2] = Operand(v1.as_linear());
7738
7739 Temp tmp_dst = bld.tmp(dst.regClass());
7740 reduce->definitions[0] = Definition(tmp_dst);
7741 reduce->definitions[1] = bld.def(ctx->program->lane_mask); // used internally
7742 reduce->definitions[2] = Definition();
7743 reduce->definitions[3] = Definition(scc, s1);
7744 reduce->definitions[4] = Definition();
7745 reduce->reduce_op = reduce_op;
7746 reduce->cluster_size = cluster_size;
7747 ctx->block->instructions.emplace_back(std::move(reduce));
7748
7749 emit_wqm(ctx, tmp_dst, dst);
7750 }
7751 break;
7752 }
7753 case nir_intrinsic_quad_broadcast: {
7754 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7755 if (!nir_dest_is_divergent(instr->dest)) {
7756 emit_uniform_subgroup(ctx, instr, src);
7757 } else {
7758 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7759 unsigned lane = nir_src_as_const_value(instr->src[1])->u32;
7760 uint32_t dpp_ctrl = dpp_quad_perm(lane, lane, lane, lane);
7761
7762 if (instr->dest.ssa.bit_size == 1) {
7763 assert(src.regClass() == bld.lm);
7764 assert(dst.regClass() == bld.lm);
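            /* half_mask selects bit <lane> of every quad (e.g. lane = 2 gives
             * 0x44444444); s_wqm below then replicates that bit to all four
             * lanes of each quad, broadcasting directly on the mask. */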
7765 uint32_t half_mask = 0x11111111u << lane;
7766 Temp mask_tmp = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(half_mask), Operand(half_mask));
7767 Temp tmp = bld.tmp(bld.lm);
7768 bld.sop1(Builder::s_wqm, Definition(tmp),
7769 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), mask_tmp,
7770 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm))));
7771 emit_wqm(ctx, tmp, dst);
7772 } else if (instr->dest.ssa.bit_size == 8) {
7773 Temp tmp = bld.tmp(v1);
7774 if (ctx->program->chip_class >= GFX8)
7775 emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl), tmp);
7776 else
7777 emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl), tmp);
7778 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v3b), tmp);
7779 } else if (instr->dest.ssa.bit_size == 16) {
7780 Temp tmp = bld.tmp(v1);
7781 if (ctx->program->chip_class >= GFX8)
7782 emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl), tmp);
7783 else
7784 emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl), tmp);
7785 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp);
7786 } else if (instr->dest.ssa.bit_size == 32) {
7787 if (ctx->program->chip_class >= GFX8)
7788 emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl), dst);
7789 else
7790 emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl), dst);
7791 } else if (instr->dest.ssa.bit_size == 64) {
7792 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
7793 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
7794 if (ctx->program->chip_class >= GFX8) {
7795 lo = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), lo, dpp_ctrl));
7796 hi = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), hi, dpp_ctrl));
7797 } else {
7798 lo = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), lo, (1 << 15) | dpp_ctrl));
7799 hi = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), hi, (1 << 15) | dpp_ctrl));
7800 }
7801 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
7802 emit_split_vector(ctx, dst, 2);
7803 } else {
7804 fprintf(stderr, "Unimplemented NIR instr bit size: ");
7805 nir_print_instr(&instr->instr, stderr);
7806 fprintf(stderr, "\n");
7807 }
7808 }
7809 break;
7810 }
7811 case nir_intrinsic_quad_swap_horizontal:
7812 case nir_intrinsic_quad_swap_vertical:
7813 case nir_intrinsic_quad_swap_diagonal:
7814 case nir_intrinsic_quad_swizzle_amd: {
7815 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7816 if (!nir_dest_is_divergent(instr->dest)) {
7817 emit_uniform_subgroup(ctx, instr, src);
7818 break;
7819 }
7820 uint16_t dpp_ctrl = 0;
7821 switch (instr->intrinsic) {
7822 case nir_intrinsic_quad_swap_horizontal:
7823 dpp_ctrl = dpp_quad_perm(1, 0, 3, 2);
7824 break;
7825 case nir_intrinsic_quad_swap_vertical:
7826 dpp_ctrl = dpp_quad_perm(2, 3, 0, 1);
7827 break;
7828 case nir_intrinsic_quad_swap_diagonal:
7829 dpp_ctrl = dpp_quad_perm(3, 2, 1, 0);
7830 break;
7831 case nir_intrinsic_quad_swizzle_amd:
7832 dpp_ctrl = nir_intrinsic_swizzle_mask(instr);
7833 break;
7834 default:
7835 break;
7836 }
7837 if (ctx->program->chip_class < GFX8)
7838 dpp_ctrl |= (1 << 15);
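      /* Pre-GFX8 has no DPP; bit 15 of the ds_swizzle offset selects its
       * quad-permute mode, which takes the same four 2-bit lane selects as
       * DPP's quad_perm. */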
7839
7840 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7841 if (instr->dest.ssa.bit_size == 1) {
7842 assert(src.regClass() == bld.lm);
7843 src = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), Operand((uint32_t)-1), src);
7844 if (ctx->program->chip_class >= GFX8)
7845 src = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl);
7846 else
7847 src = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, dpp_ctrl);
7848 Temp tmp = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), src);
7849 emit_wqm(ctx, tmp, dst);
7850 } else if (instr->dest.ssa.bit_size == 8) {
7851 Temp tmp = bld.tmp(v1);
7852 if (ctx->program->chip_class >= GFX8)
7853 emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl), tmp);
7854 else
7855 emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, dpp_ctrl), tmp);
7856 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v3b), tmp);
7857 } else if (instr->dest.ssa.bit_size == 16) {
7858 Temp tmp = bld.tmp(v1);
7859 if (ctx->program->chip_class >= GFX8)
7860 emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl), tmp);
7861 else
7862 emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, dpp_ctrl), tmp);
7863 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp);
7864 } else if (instr->dest.ssa.bit_size == 32) {
7865 Temp tmp;
7866 if (ctx->program->chip_class >= GFX8)
7867 tmp = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl);
7868 else
7869 tmp = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, dpp_ctrl);
7870 emit_wqm(ctx, tmp, dst);
7871 } else if (instr->dest.ssa.bit_size == 64) {
7872 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
7873 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
7874 if (ctx->program->chip_class >= GFX8) {
7875 lo = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), lo, dpp_ctrl));
7876 hi = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), hi, dpp_ctrl));
7877 } else {
7878 lo = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), lo, dpp_ctrl));
7879 hi = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), hi, dpp_ctrl));
7880 }
7881 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
7882 emit_split_vector(ctx, dst, 2);
7883 } else {
7884 fprintf(stderr, "Unimplemented NIR instr bit size: ");
7885 nir_print_instr(&instr->instr, stderr);
7886 fprintf(stderr, "\n");
7887 }
7888 break;
7889 }
7890 case nir_intrinsic_masked_swizzle_amd: {
7891 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7892 if (!nir_dest_is_divergent(instr->dest)) {
7893 emit_uniform_subgroup(ctx, instr, src);
7894 break;
7895 }
7896 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7897 uint32_t mask = nir_intrinsic_swizzle_mask(instr);
7898 if (dst.regClass() == v1) {
7899 emit_wqm(ctx,
7900 bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, mask, 0, false),
7901 dst);
7902 } else if (dst.regClass() == v2) {
7903 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
7904 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
7905 lo = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), lo, mask, 0, false));
7906 hi = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), hi, mask, 0, false));
7907 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
7908 emit_split_vector(ctx, dst, 2);
7909 } else {
7910 fprintf(stderr, "Unimplemented NIR instr bit size: ");
7911 nir_print_instr(&instr->instr, stderr);
7912 fprintf(stderr, "\n");
7913 }
7914 break;
7915 }
7916 case nir_intrinsic_write_invocation_amd: {
7917 Temp src = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
7918 Temp val = bld.as_uniform(get_ssa_temp(ctx, instr->src[1].ssa));
7919 Temp lane = bld.as_uniform(get_ssa_temp(ctx, instr->src[2].ssa));
7920 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7921 if (dst.regClass() == v1) {
7922 /* src2 is ignored for writelane. RA assigns the same reg for dst */
7923 emit_wqm(ctx, bld.writelane(bld.def(v1), val, lane, src), dst);
7924 } else if (dst.regClass() == v2) {
7925 Temp src_lo = bld.tmp(v1), src_hi = bld.tmp(v1);
7926 Temp val_lo = bld.tmp(s1), val_hi = bld.tmp(s1);
7927 bld.pseudo(aco_opcode::p_split_vector, Definition(src_lo), Definition(src_hi), src);
7928 bld.pseudo(aco_opcode::p_split_vector, Definition(val_lo), Definition(val_hi), val);
7929 	         Temp lo = emit_wqm(ctx, bld.writelane(bld.def(v1), val_lo, lane, src_lo));
7930 Temp hi = emit_wqm(ctx, bld.writelane(bld.def(v1), val_hi, lane, src_hi));
7931 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
7932 emit_split_vector(ctx, dst, 2);
7933 } else {
7934 fprintf(stderr, "Unimplemented NIR instr bit size: ");
7935 nir_print_instr(&instr->instr, stderr);
7936 fprintf(stderr, "\n");
7937 }
7938 break;
7939 }
7940 case nir_intrinsic_mbcnt_amd: {
7941 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7942 RegClass rc = RegClass(src.type(), 1);
7943 Temp mask_lo = bld.tmp(rc), mask_hi = bld.tmp(rc);
7944 bld.pseudo(aco_opcode::p_split_vector, Definition(mask_lo), Definition(mask_hi), src);
7945 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7946 Temp wqm_tmp = emit_mbcnt(ctx, bld.def(v1), Operand(mask_lo), Operand(mask_hi));
7947 emit_wqm(ctx, wqm_tmp, dst);
7948 break;
7949 }
7950 case nir_intrinsic_load_helper_invocation: {
7951 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7952 bld.pseudo(aco_opcode::p_load_helper, Definition(dst));
7953 ctx->block->kind |= block_kind_needs_lowering;
7954 ctx->program->needs_exact = true;
7955 break;
7956 }
7957 case nir_intrinsic_is_helper_invocation: {
7958 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7959 bld.pseudo(aco_opcode::p_is_helper, Definition(dst));
7960 ctx->block->kind |= block_kind_needs_lowering;
7961 ctx->program->needs_exact = true;
7962 break;
7963 }
7964 case nir_intrinsic_demote:
7965 bld.pseudo(aco_opcode::p_demote_to_helper, Operand(-1u));
7966
7967 if (ctx->cf_info.loop_nest_depth || ctx->cf_info.parent_if.is_divergent)
7968 ctx->cf_info.exec_potentially_empty_discard = true;
7969 ctx->block->kind |= block_kind_uses_demote;
7970 ctx->program->needs_exact = true;
7971 break;
7972 case nir_intrinsic_demote_if: {
7973 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7974 assert(src.regClass() == bld.lm);
7975 Temp cond = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
7976 bld.pseudo(aco_opcode::p_demote_to_helper, cond);
7977
7978 if (ctx->cf_info.loop_nest_depth || ctx->cf_info.parent_if.is_divergent)
7979 ctx->cf_info.exec_potentially_empty_discard = true;
7980 ctx->block->kind |= block_kind_uses_demote;
7981 ctx->program->needs_exact = true;
7982 break;
7983 }
7984 case nir_intrinsic_first_invocation: {
7985 emit_wqm(ctx, bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm)),
7986 get_ssa_temp(ctx, &instr->dest.ssa));
7987 break;
7988 }
7989 case nir_intrinsic_shader_clock: {
7990 aco_opcode opcode =
7991 nir_intrinsic_memory_scope(instr) == NIR_SCOPE_DEVICE ?
7992 aco_opcode::s_memrealtime : aco_opcode::s_memtime;
7993 bld.smem(opcode, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), false);
7994 emit_split_vector(ctx, get_ssa_temp(ctx, &instr->dest.ssa), 2);
7995 break;
7996 }
7997 case nir_intrinsic_load_vertex_id_zero_base: {
7998 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7999 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.vertex_id));
8000 break;
8001 }
8002 case nir_intrinsic_load_first_vertex: {
8003 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8004 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.base_vertex));
8005 break;
8006 }
8007 case nir_intrinsic_load_base_instance: {
8008 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8009 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.start_instance));
8010 break;
8011 }
8012 case nir_intrinsic_load_instance_id: {
8013 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8014 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.instance_id));
8015 break;
8016 }
8017 case nir_intrinsic_load_draw_id: {
8018 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8019 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.draw_id));
8020 break;
8021 }
8022 case nir_intrinsic_load_invocation_id: {
8023 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8024
8025 if (ctx->shader->info.stage == MESA_SHADER_GEOMETRY) {
8026 if (ctx->options->chip_class >= GFX10)
8027 bld.vop2_e64(aco_opcode::v_and_b32, Definition(dst), Operand(127u), get_arg(ctx, ctx->args->ac.gs_invocation_id));
8028 else
8029 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.gs_invocation_id));
8030 } else if (ctx->shader->info.stage == MESA_SHADER_TESS_CTRL) {
8031 bld.vop3(aco_opcode::v_bfe_u32, Definition(dst),
8032 get_arg(ctx, ctx->args->ac.tcs_rel_ids), Operand(8u), Operand(5u));
8033 } else {
8034 unreachable("Unsupported stage for load_invocation_id");
8035 }
8036
8037 break;
8038 }
8039 case nir_intrinsic_load_primitive_id: {
8040 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8041
8042 switch (ctx->shader->info.stage) {
8043 case MESA_SHADER_GEOMETRY:
8044 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.gs_prim_id));
8045 break;
8046 case MESA_SHADER_TESS_CTRL:
8047 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.tcs_patch_id));
8048 break;
8049 case MESA_SHADER_TESS_EVAL:
8050 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.tes_patch_id));
8051 break;
8052 default:
8053 unreachable("Unimplemented shader stage for nir_intrinsic_load_primitive_id");
8054 }
8055
8056 break;
8057 }
8058 case nir_intrinsic_load_patch_vertices_in: {
8059 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL ||
8060 ctx->shader->info.stage == MESA_SHADER_TESS_EVAL);
8061
8062 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8063 bld.copy(Definition(dst), Operand(ctx->args->options->key.tcs.input_vertices));
8064 break;
8065 }
8066 case nir_intrinsic_emit_vertex_with_counter: {
8067 visit_emit_vertex_with_counter(ctx, instr);
8068 break;
8069 }
8070 case nir_intrinsic_end_primitive_with_counter: {
8071 unsigned stream = nir_intrinsic_stream_id(instr);
8072 bld.sopp(aco_opcode::s_sendmsg, bld.m0(ctx->gs_wave_id), -1, sendmsg_gs(true, false, stream));
8073 break;
8074 }
8075 case nir_intrinsic_set_vertex_count: {
8076 /* unused, the HW keeps track of this for us */
8077 break;
8078 }
8079 default:
8080 fprintf(stderr, "Unimplemented intrinsic instr: ");
8081 nir_print_instr(&instr->instr, stderr);
8082 fprintf(stderr, "\n");
8083 abort();
8084
8085 break;
8086 }
8087 }
8088
8089
8090 void tex_fetch_ptrs(isel_context *ctx, nir_tex_instr *instr,
8091 Temp *res_ptr, Temp *samp_ptr, Temp *fmask_ptr,
8092 enum glsl_base_type *stype)
8093 {
8094 nir_deref_instr *texture_deref_instr = NULL;
8095 nir_deref_instr *sampler_deref_instr = NULL;
8096 int plane = -1;
8097
8098 for (unsigned i = 0; i < instr->num_srcs; i++) {
8099 switch (instr->src[i].src_type) {
8100 case nir_tex_src_texture_deref:
8101 texture_deref_instr = nir_src_as_deref(instr->src[i].src);
8102 break;
8103 case nir_tex_src_sampler_deref:
8104 sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
8105 break;
8106 case nir_tex_src_plane:
8107 plane = nir_src_as_int(instr->src[i].src);
8108 break;
8109 default:
8110 break;
8111 }
8112 }
8113
8114 *stype = glsl_get_sampler_result_type(texture_deref_instr->type);
8115
8116 if (!sampler_deref_instr)
8117 sampler_deref_instr = texture_deref_instr;
8118
8119 if (plane >= 0) {
8120 assert(instr->op != nir_texop_txf_ms &&
8121 instr->op != nir_texop_samples_identical);
8122 assert(instr->sampler_dim != GLSL_SAMPLER_DIM_BUF);
8123 *res_ptr = get_sampler_desc(ctx, texture_deref_instr, (aco_descriptor_type)(ACO_DESC_PLANE_0 + plane), instr, false, false);
8124 } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
8125 *res_ptr = get_sampler_desc(ctx, texture_deref_instr, ACO_DESC_BUFFER, instr, false, false);
8126 } else if (instr->op == nir_texop_fragment_mask_fetch) {
8127 *res_ptr = get_sampler_desc(ctx, texture_deref_instr, ACO_DESC_FMASK, instr, false, false);
8128 } else {
8129 *res_ptr = get_sampler_desc(ctx, texture_deref_instr, ACO_DESC_IMAGE, instr, false, false);
8130 }
8131 if (samp_ptr) {
8132 *samp_ptr = get_sampler_desc(ctx, sampler_deref_instr, ACO_DESC_SAMPLER, instr, false, false);
8133
8134 if (instr->sampler_dim < GLSL_SAMPLER_DIM_RECT && ctx->options->chip_class < GFX8) {
8135 /* fix sampler aniso on SI/CI: samp[0] = samp[0] & img[7] */
8136 Builder bld(ctx->program, ctx->block);
8137
8138 /* to avoid unnecessary moves, we split and recombine sampler and image */
8139 Temp img[8] = {bld.tmp(s1), bld.tmp(s1), bld.tmp(s1), bld.tmp(s1),
8140 bld.tmp(s1), bld.tmp(s1), bld.tmp(s1), bld.tmp(s1)};
8141 Temp samp[4] = {bld.tmp(s1), bld.tmp(s1), bld.tmp(s1), bld.tmp(s1)};
8142 bld.pseudo(aco_opcode::p_split_vector, Definition(img[0]), Definition(img[1]),
8143 Definition(img[2]), Definition(img[3]), Definition(img[4]),
8144 Definition(img[5]), Definition(img[6]), Definition(img[7]), *res_ptr);
8145 bld.pseudo(aco_opcode::p_split_vector, Definition(samp[0]), Definition(samp[1]),
8146 Definition(samp[2]), Definition(samp[3]), *samp_ptr);
8147
8148 samp[0] = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), samp[0], img[7]);
8149 *res_ptr = bld.pseudo(aco_opcode::p_create_vector, bld.def(s8),
8150 img[0], img[1], img[2], img[3],
8151 img[4], img[5], img[6], img[7]);
8152 *samp_ptr = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4),
8153 samp[0], samp[1], samp[2], samp[3]);
8154 }
8155 }
8156 if (fmask_ptr && (instr->op == nir_texop_txf_ms ||
8157 instr->op == nir_texop_samples_identical))
8158 *fmask_ptr = get_sampler_desc(ctx, texture_deref_instr, ACO_DESC_FMASK, instr, false, false);
8159 }
8160
8161 void build_cube_select(isel_context *ctx, Temp ma, Temp id, Temp deriv,
8162 Temp *out_ma, Temp *out_sc, Temp *out_tc)
8163 {
8164 Builder bld(ctx->program, ctx->block);
8165
8166 Temp deriv_x = emit_extract_vector(ctx, deriv, 0, v1);
8167 Temp deriv_y = emit_extract_vector(ctx, deriv, 1, v1);
8168 Temp deriv_z = emit_extract_vector(ctx, deriv, 2, v1);
8169
8170 Operand neg_one(0xbf800000u);
8171 Operand one(0x3f800000u);
8172 Operand two(0x40000000u);
8173 Operand four(0x40800000u);
8174
8175 Temp is_ma_positive = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), ma);
8176 Temp sgn_ma = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), neg_one, one, is_ma_positive);
8177 Temp neg_sgn_ma = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), Operand(0u), sgn_ma);
8178
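      /* v_cubeid_f32 encodes the major axis in the face id:
       * 0/1 = +X/-X, 2/3 = +Y/-Y, 4/5 = +Z/-Z, so comparing the id
       * against 2.0 and 4.0 classifies the major axis. */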
8179 Temp is_ma_z = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), four, id);
8180 Temp is_ma_y = bld.vopc(aco_opcode::v_cmp_le_f32, bld.def(bld.lm), two, id);
8181 is_ma_y = bld.sop2(Builder::s_andn2, bld.hint_vcc(bld.def(bld.lm)), is_ma_y, is_ma_z);
8182            Temp is_not_ma_x = bld.sop2(Builder::s_or, bld.hint_vcc(bld.def(bld.lm)), bld.def(s1, scc), is_ma_z, is_ma_y); /* wave-size aware, like s_andn2 above */
8183
8184 // select sc
8185 Temp tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), deriv_z, deriv_x, is_not_ma_x);
8186 Temp sgn = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1),
8187 bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), neg_sgn_ma, sgn_ma, is_ma_z),
8188 one, is_ma_y);
8189 *out_sc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), tmp, sgn);
8190
8191 // select tc
8192 tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), deriv_y, deriv_z, is_ma_y);
8193 sgn = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), neg_one, sgn_ma, is_ma_y);
8194 *out_tc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), tmp, sgn);
8195
8196 // select ma
8197 tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
8198 bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), deriv_x, deriv_y, is_ma_y),
8199 deriv_z, is_ma_z);
8200 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7fffffffu), tmp);
8201 *out_ma = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), two, tmp);
8202 }
8203
8204 void prepare_cube_coords(isel_context *ctx, std::vector<Temp>& coords, Temp* ddx, Temp* ddy, bool is_deriv, bool is_array)
8205 {
8206 Builder bld(ctx->program, ctx->block);
8207 Temp ma, tc, sc, id;
8208
8209 if (is_array) {
8210 coords[3] = bld.vop1(aco_opcode::v_rndne_f32, bld.def(v1), coords[3]);
8211
8212 // see comment in ac_prepare_cube_coords()
8213 if (ctx->options->chip_class <= GFX8)
8214 coords[3] = bld.vop2(aco_opcode::v_max_f32, bld.def(v1), Operand(0u), coords[3]);
8215 }
8216
8217 ma = bld.vop3(aco_opcode::v_cubema_f32, bld.def(v1), coords[0], coords[1], coords[2]);
8218
8219 aco_ptr<VOP3A_instruction> vop3a{create_instruction<VOP3A_instruction>(aco_opcode::v_rcp_f32, asVOP3(Format::VOP1), 1, 1)};
8220 vop3a->operands[0] = Operand(ma);
8221 vop3a->abs[0] = true;
8222 Temp invma = bld.tmp(v1);
8223 vop3a->definitions[0] = Definition(invma);
8224 ctx->block->instructions.emplace_back(std::move(vop3a));
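      /* v_cubema_f32 returns twice the major axis, so invma = 1/(2*|ma|) and
       * sc*invma/tc*invma land in [-0.5,0.5]; the +1.5 applied below shifts
       * them into the [1.0,2.0] window the sampler expects. */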
8225
8226 sc = bld.vop3(aco_opcode::v_cubesc_f32, bld.def(v1), coords[0], coords[1], coords[2]);
8227 if (!is_deriv)
8228 sc = bld.vop2(aco_opcode::v_madak_f32, bld.def(v1), sc, invma, Operand(0x3fc00000u/*1.5*/));
8229
8230 tc = bld.vop3(aco_opcode::v_cubetc_f32, bld.def(v1), coords[0], coords[1], coords[2]);
8231 if (!is_deriv)
8232 tc = bld.vop2(aco_opcode::v_madak_f32, bld.def(v1), tc, invma, Operand(0x3fc00000u/*1.5*/));
8233
8234 id = bld.vop3(aco_opcode::v_cubeid_f32, bld.def(v1), coords[0], coords[1], coords[2]);
8235
8236 if (is_deriv) {
8237 sc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), sc, invma);
8238 tc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), tc, invma);
8239
8240 for (unsigned i = 0; i < 2; i++) {
8241 // see comment in ac_prepare_cube_coords()
8242 Temp deriv_ma;
8243 Temp deriv_sc, deriv_tc;
8244 build_cube_select(ctx, ma, id, i ? *ddy : *ddx,
8245 &deriv_ma, &deriv_sc, &deriv_tc);
8246
8247 deriv_ma = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_ma, invma);
8248
8249 Temp x = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1),
8250 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_sc, invma),
8251 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_ma, sc));
8252 Temp y = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1),
8253 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_tc, invma),
8254 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_ma, tc));
8255 *(i ? ddy : ddx) = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), x, y);
8256 }
8257
8258 sc = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), Operand(0x3fc00000u/*1.5*/), sc);
8259 tc = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), Operand(0x3fc00000u/*1.5*/), tc);
8260 }
8261
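      /* cube array slices are addressed as face_id + layer * 8 */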
8262 if (is_array)
8263 id = bld.vop2(aco_opcode::v_madmk_f32, bld.def(v1), coords[3], id, Operand(0x41000000u/*8.0*/));
8264 coords.resize(3);
8265 coords[0] = sc;
8266 coords[1] = tc;
8267 coords[2] = id;
8268 }
8269
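/* If the vector is built by a nir vecN whose channels are constant, return
 * those constants (entries stay NULL otherwise); used to fold constant texel
 * offsets at compile time. */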
8270 void get_const_vec(nir_ssa_def *vec, nir_const_value *cv[4])
8271 {
8272 if (vec->parent_instr->type != nir_instr_type_alu)
8273 return;
8274 nir_alu_instr *vec_instr = nir_instr_as_alu(vec->parent_instr);
8275 if (vec_instr->op != nir_op_vec(vec->num_components))
8276 return;
8277
8278 for (unsigned i = 0; i < vec->num_components; i++) {
8279 cv[i] = vec_instr->src[i].swizzle[0] == 0 ?
8280 nir_src_as_const_value(vec_instr->src[i].src) : NULL;
8281 }
8282 }
8283
8284 void visit_tex(isel_context *ctx, nir_tex_instr *instr)
8285 {
8286 Builder bld(ctx->program, ctx->block);
8287 bool has_bias = false, has_lod = false, level_zero = false, has_compare = false,
8288 has_offset = false, has_ddx = false, has_ddy = false, has_derivs = false, has_sample_index = false,
8289 has_clamped_lod = false;
8290 Temp resource, sampler, fmask_ptr, bias = Temp(), compare = Temp(), sample_index = Temp(),
8291 lod = Temp(), offset = Temp(), ddx = Temp(), ddy = Temp(),
8292 clamped_lod = Temp();
8293 std::vector<Temp> coords;
8294 std::vector<Temp> derivs;
8295 nir_const_value *sample_index_cv = NULL;
8296 nir_const_value *const_offset[4] = {NULL, NULL, NULL, NULL};
8297 enum glsl_base_type stype;
8298 tex_fetch_ptrs(ctx, instr, &resource, &sampler, &fmask_ptr, &stype);
8299
8300 bool tg4_integer_workarounds = ctx->options->chip_class <= GFX8 && instr->op == nir_texop_tg4 &&
8301 (stype == GLSL_TYPE_UINT || stype == GLSL_TYPE_INT);
8302 bool tg4_integer_cube_workaround = tg4_integer_workarounds &&
8303 instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE;
8304
8305 for (unsigned i = 0; i < instr->num_srcs; i++) {
8306 switch (instr->src[i].src_type) {
8307 case nir_tex_src_coord: {
8308 Temp coord = get_ssa_temp(ctx, instr->src[i].src.ssa);
8309 for (unsigned i = 0; i < coord.size(); i++)
8310 coords.emplace_back(emit_extract_vector(ctx, coord, i, v1));
8311 break;
8312 }
8313 case nir_tex_src_bias:
8314 bias = get_ssa_temp(ctx, instr->src[i].src.ssa);
8315 has_bias = true;
8316 break;
8317 case nir_tex_src_lod: {
8318 nir_const_value *val = nir_src_as_const_value(instr->src[i].src);
8319
8320 if (val && val->f32 <= 0.0) {
8321 level_zero = true;
8322 } else {
8323 lod = get_ssa_temp(ctx, instr->src[i].src.ssa);
8324 has_lod = true;
8325 }
8326 break;
8327 }
8328 case nir_tex_src_min_lod:
8329 clamped_lod = get_ssa_temp(ctx, instr->src[i].src.ssa);
8330 has_clamped_lod = true;
8331 break;
8332 case nir_tex_src_comparator:
8333 if (instr->is_shadow) {
8334 compare = get_ssa_temp(ctx, instr->src[i].src.ssa);
8335 has_compare = true;
8336 }
8337 break;
8338 case nir_tex_src_offset:
8339 offset = get_ssa_temp(ctx, instr->src[i].src.ssa);
8340 get_const_vec(instr->src[i].src.ssa, const_offset);
8341 has_offset = true;
8342 break;
8343 case nir_tex_src_ddx:
8344 ddx = get_ssa_temp(ctx, instr->src[i].src.ssa);
8345 has_ddx = true;
8346 break;
8347 case nir_tex_src_ddy:
8348 ddy = get_ssa_temp(ctx, instr->src[i].src.ssa);
8349 has_ddy = true;
8350 break;
8351 case nir_tex_src_ms_index:
8352 sample_index = get_ssa_temp(ctx, instr->src[i].src.ssa);
8353 sample_index_cv = nir_src_as_const_value(instr->src[i].src);
8354 has_sample_index = true;
8355 break;
8356 case nir_tex_src_texture_offset:
8357 case nir_tex_src_sampler_offset:
8358 default:
8359 break;
8360 }
8361 }
8362
8363 if (instr->op == nir_texop_txs && instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
8364 return get_buffer_size(ctx, resource, get_ssa_temp(ctx, &instr->dest.ssa), true);
8365
8366 if (instr->op == nir_texop_texture_samples) {
8367 Temp dword3 = emit_extract_vector(ctx, resource, 3, s1);
8368
8369 Temp samples_log2 = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), dword3, Operand(16u | 4u<<16));
8370 Temp samples = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), Operand(1u), samples_log2);
8371 Temp type = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), dword3, Operand(28u | 4u<<16 /* offset=28, width=4 */));
8372
8373 Operand default_sample = Operand(1u);
8374 if (ctx->options->robust_buffer_access) {
8375 /* Extract the second dword of the descriptor, if it's
8376 * all zero, then it's a null descriptor.
8377 */
8378 Temp dword1 = emit_extract_vector(ctx, resource, 1, s1);
8379 Temp is_non_null_descriptor = bld.sopc(aco_opcode::s_cmp_gt_u32, bld.def(s1, scc), dword1, Operand(0u));
8380 default_sample = Operand(is_non_null_descriptor);
8381 }
8382
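      /* resource types 14 (2D MSAA) and 15 (2D MSAA array) are the only
       * multisampled ones, so type >= 14 means the image is MSAA */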
8383 Temp is_msaa = bld.sopc(aco_opcode::s_cmp_ge_u32, bld.def(s1, scc), type, Operand(14u));
8384 bld.sop2(aco_opcode::s_cselect_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8385 samples, default_sample, bld.scc(is_msaa));
8386 return;
8387 }
8388
8389 if (has_offset && instr->op != nir_texop_txf && instr->op != nir_texop_txf_ms) {
8390 aco_ptr<Instruction> tmp_instr;
8391 Temp acc, pack = Temp();
8392
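      /* MIMG offsets are packed into one dword: 6-bit signed fields for
       * x, y and z at bits [5:0], [13:8] and [21:16] */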
8393 uint32_t pack_const = 0;
8394 for (unsigned i = 0; i < offset.size(); i++) {
8395 if (!const_offset[i])
8396 continue;
8397 pack_const |= (const_offset[i]->u32 & 0x3Fu) << (8u * i);
8398 }
8399
8400 if (offset.type() == RegType::sgpr) {
8401 for (unsigned i = 0; i < offset.size(); i++) {
8402 if (const_offset[i])
8403 continue;
8404
8405 acc = emit_extract_vector(ctx, offset, i, s1);
8406 acc = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), acc, Operand(0x3Fu));
8407
8408 if (i) {
8409 acc = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), acc, Operand(8u * i));
8410 }
8411
8412 if (pack == Temp()) {
8413 pack = acc;
8414 } else {
8415 pack = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), pack, acc);
8416 }
8417 }
8418
8419 if (pack_const && pack != Temp())
8420 pack = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), Operand(pack_const), pack);
8421 } else {
8422 for (unsigned i = 0; i < offset.size(); i++) {
8423 if (const_offset[i])
8424 continue;
8425
8426 acc = emit_extract_vector(ctx, offset, i, v1);
8427 acc = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x3Fu), acc);
8428
8429 if (i) {
8430 acc = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(8u * i), acc);
8431 }
8432
8433 if (pack == Temp()) {
8434 pack = acc;
8435 } else {
8436 pack = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), pack, acc);
8437 }
8438 }
8439
8440 if (pack_const && pack != Temp())
8441                pack = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand(pack_const), pack);
8442 }
8443 if (pack_const && pack == Temp())
8444 offset = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(pack_const));
8445 else if (pack == Temp())
8446 has_offset = false;
8447 else
8448 offset = pack;
8449 }
8450
8451 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && instr->coord_components)
8452 prepare_cube_coords(ctx, coords, &ddx, &ddy, instr->op == nir_texop_txd, instr->is_array && instr->op != nir_texop_lod);
8453
8454 /* pack derivatives */
8455 if (has_ddx || has_ddy) {
8456 if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D && ctx->options->chip_class == GFX9) {
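         /* GFX9 has no real 1D images, they are sampled as 2D, so each
          * gradient is padded with a zero y component */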
8457 assert(has_ddx && has_ddy && ddx.size() == 1 && ddy.size() == 1);
8458 Temp zero = bld.copy(bld.def(v1), Operand(0u));
8459 derivs = {ddx, zero, ddy, zero};
8460 } else {
8461 for (unsigned i = 0; has_ddx && i < ddx.size(); i++)
8462 derivs.emplace_back(emit_extract_vector(ctx, ddx, i, v1));
8463 for (unsigned i = 0; has_ddy && i < ddy.size(); i++)
8464 derivs.emplace_back(emit_extract_vector(ctx, ddy, i, v1));
8465 }
8466 has_derivs = true;
8467 }
8468
8469 if (instr->coord_components > 1 &&
8470 instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
8471 instr->is_array &&
8472 instr->op != nir_texop_txf)
8473 coords[1] = bld.vop1(aco_opcode::v_rndne_f32, bld.def(v1), coords[1]);
8474
8475 if (instr->coord_components > 2 &&
8476 (instr->sampler_dim == GLSL_SAMPLER_DIM_2D ||
8477 instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
8478 instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS ||
8479 instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS) &&
8480 instr->is_array &&
8481 instr->op != nir_texop_txf &&
8482 instr->op != nir_texop_txf_ms &&
8483 instr->op != nir_texop_fragment_fetch &&
8484 instr->op != nir_texop_fragment_mask_fetch)
8485 coords[2] = bld.vop1(aco_opcode::v_rndne_f32, bld.def(v1), coords[2]);
8486
8487 if (ctx->options->chip_class == GFX9 &&
8488 instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
8489 instr->op != nir_texop_lod && instr->coord_components) {
8490 assert(coords.size() > 0 && coords.size() < 3);
8491
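      /* GFX9 samples 1D as 2D: insert a y coordinate of 0 for fetches
       * and 0.5 (the texel center) for filtered accesses */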
8492 coords.insert(std::next(coords.begin()), bld.copy(bld.def(v1), instr->op == nir_texop_txf ?
8493 Operand((uint32_t) 0) :
8494 Operand((uint32_t) 0x3f000000)));
8495 }
8496
8497 bool da = should_declare_array(ctx, instr->sampler_dim, instr->is_array);
8498
8499 if (instr->op == nir_texop_samples_identical)
8500 resource = fmask_ptr;
8501
8502 else if ((instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
8503 instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS) &&
8504 instr->op != nir_texop_txs &&
8505 instr->op != nir_texop_fragment_fetch &&
8506 instr->op != nir_texop_fragment_mask_fetch) {
8507 assert(has_sample_index);
8508 Operand op(sample_index);
8509 if (sample_index_cv)
8510 op = Operand(sample_index_cv->u32);
8511 sample_index = adjust_sample_index_using_fmask(ctx, da, coords, op, fmask_ptr);
8512 }
8513
8514 if (has_offset && (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)) {
8515 for (unsigned i = 0; i < std::min(offset.size(), instr->coord_components); i++) {
8516 Temp off = emit_extract_vector(ctx, offset, i, v1);
8517 coords[i] = bld.vadd32(bld.def(v1), coords[i], off);
8518 }
8519 has_offset = false;
8520 }
8521
8522 /* Build tex instruction */
8523 unsigned dmask = nir_ssa_def_components_read(&instr->dest.ssa);
8524 unsigned dim = ctx->options->chip_class >= GFX10 && instr->sampler_dim != GLSL_SAMPLER_DIM_BUF
8525 ? ac_get_sampler_dim(ctx->options->chip_class, instr->sampler_dim, instr->is_array)
8526 : 0;
8527 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8528 Temp tmp_dst = dst;
8529
8530 /* gather4 selects the component by dmask and always returns vec4 */
8531 if (instr->op == nir_texop_tg4) {
8532 assert(instr->dest.ssa.num_components == 4);
8533 if (instr->is_shadow)
8534 dmask = 1;
8535 else
8536 dmask = 1 << instr->component;
8537 if (tg4_integer_cube_workaround || dst.type() == RegType::sgpr)
8538 tmp_dst = bld.tmp(v4);
8539 } else if (instr->op == nir_texop_samples_identical) {
8540 tmp_dst = bld.tmp(v1);
8541 } else if (util_bitcount(dmask) != instr->dest.ssa.num_components || dst.type() == RegType::sgpr) {
8542 tmp_dst = bld.tmp(RegClass(RegType::vgpr, util_bitcount(dmask)));
8543 }
8544
8545 aco_ptr<MIMG_instruction> tex;
8546 if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels) {
8547 if (!has_lod)
8548 lod = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0u));
8549
8550 bool div_by_6 = instr->op == nir_texop_txs &&
8551 instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
8552 instr->is_array &&
8553 (dmask & (1 << 2));
8554 if (tmp_dst.id() == dst.id() && div_by_6)
8555 tmp_dst = bld.tmp(tmp_dst.regClass());
8556
8557 tex.reset(create_instruction<MIMG_instruction>(aco_opcode::image_get_resinfo, Format::MIMG, 3, 1));
8558 tex->operands[0] = Operand(resource);
8559 tex->operands[1] = Operand(s4); /* no sampler */
8560        tex->operands[2] = Operand(as_vgpr(ctx, lod));
8561 if (ctx->options->chip_class == GFX9 &&
8562 instr->op == nir_texop_txs &&
8563 instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
8564 instr->is_array) {
8565 tex->dmask = (dmask & 0x1) | ((dmask & 0x2) << 1);
8566 } else if (instr->op == nir_texop_query_levels) {
8567 tex->dmask = 1 << 3;
8568 } else {
8569 tex->dmask = dmask;
8570 }
8571 tex->da = da;
8572 tex->definitions[0] = Definition(tmp_dst);
8573 tex->dim = dim;
8574 tex->can_reorder = true;
8575 ctx->block->instructions.emplace_back(std::move(tex));
8576
8577 if (div_by_6) {
8578 /* divide 3rd value by 6 by multiplying with magic number */
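         /* 0x2AAAAAAB * 6 == 2^32 + 2, so for z == 6 * k the high dword of
          * z * 0x2AAAAAAB is exactly k: the descriptor stores layer-faces
          * while the API expects the number of whole cubes */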
8579 emit_split_vector(ctx, tmp_dst, tmp_dst.size());
8580 Temp c = bld.copy(bld.def(s1), Operand((uint32_t) 0x2AAAAAAB));
8581 Temp by_6 = bld.vop3(aco_opcode::v_mul_hi_i32, bld.def(v1), emit_extract_vector(ctx, tmp_dst, 2, v1), c);
8582 assert(instr->dest.ssa.num_components == 3);
8583 Temp tmp = dst.type() == RegType::vgpr ? dst : bld.tmp(v3);
8584 tmp_dst = bld.pseudo(aco_opcode::p_create_vector, Definition(tmp),
8585 emit_extract_vector(ctx, tmp_dst, 0, v1),
8586 emit_extract_vector(ctx, tmp_dst, 1, v1),
8587 by_6);
8588
8589 }
8590
8591 expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, dmask);
8592 return;
8593 }
8594
8595 Temp tg4_compare_cube_wa64 = Temp();
8596
8597 if (tg4_integer_workarounds) {
8598 tex.reset(create_instruction<MIMG_instruction>(aco_opcode::image_get_resinfo, Format::MIMG, 3, 1));
8599 tex->operands[0] = Operand(resource);
8600 tex->operands[1] = Operand(s4); /* no sampler */
8601 tex->operands[2] = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0u));
8602 tex->dim = dim;
8603 tex->dmask = 0x3;
8604 tex->da = da;
8605 Temp size = bld.tmp(v2);
8606 tex->definitions[0] = Definition(size);
8607 tex->can_reorder = true;
8608 ctx->block->instructions.emplace_back(std::move(tex));
8609 emit_split_vector(ctx, size, size.size());
8610
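      /* GFX8 and earlier round coordinates differently for gather4 on
       * integer formats; shift the coordinates by -0.5/size, half a texel
       * towards the origin (see lower_gather4_integer in ac_nir_to_llvm) */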
8611 Temp half_texel[2];
8612 for (unsigned i = 0; i < 2; i++) {
8613 half_texel[i] = emit_extract_vector(ctx, size, i, v1);
8614 half_texel[i] = bld.vop1(aco_opcode::v_cvt_f32_i32, bld.def(v1), half_texel[i]);
8615 half_texel[i] = bld.vop1(aco_opcode::v_rcp_iflag_f32, bld.def(v1), half_texel[i]);
8616 half_texel[i] = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0xbf000000/*-0.5*/), half_texel[i]);
8617 }
8618
8619 Temp new_coords[2] = {
8620 bld.vop2(aco_opcode::v_add_f32, bld.def(v1), coords[0], half_texel[0]),
8621 bld.vop2(aco_opcode::v_add_f32, bld.def(v1), coords[1], half_texel[1])
8622 };
8623
8624 if (tg4_integer_cube_workaround) {
8625 // see comment in ac_nir_to_llvm.c's lower_gather4_integer()
8626 Temp desc[resource.size()];
8627 aco_ptr<Instruction> split{create_instruction<Pseudo_instruction>(aco_opcode::p_split_vector,
8628 Format::PSEUDO, 1, resource.size())};
8629 split->operands[0] = Operand(resource);
8630 for (unsigned i = 0; i < resource.size(); i++) {
8631 desc[i] = bld.tmp(s1);
8632 split->definitions[i] = Definition(desc[i]);
8633 }
8634 ctx->block->instructions.emplace_back(std::move(split));
8635
8636 Temp dfmt = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), desc[1], Operand(20u | (6u << 16)));
8637 Temp compare_cube_wa = bld.sopc(aco_opcode::s_cmp_eq_u32, bld.def(s1, scc), dfmt,
8638 Operand((uint32_t)V_008F14_IMG_DATA_FORMAT_8_8_8_8));
8639
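         /* for an integer 8_8_8_8 view, replace UINT/SINT with
          * USCALED/SSCALED while gathering; the results are converted
          * back to integers after the gather */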
8640 Temp nfmt;
8641 if (stype == GLSL_TYPE_UINT) {
8642 nfmt = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1),
8643 Operand((uint32_t)V_008F14_IMG_NUM_FORMAT_USCALED),
8644 Operand((uint32_t)V_008F14_IMG_NUM_FORMAT_UINT),
8645 bld.scc(compare_cube_wa));
8646 } else {
8647 nfmt = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1),
8648 Operand((uint32_t)V_008F14_IMG_NUM_FORMAT_SSCALED),
8649 Operand((uint32_t)V_008F14_IMG_NUM_FORMAT_SINT),
8650 bld.scc(compare_cube_wa));
8651 }
8652 tg4_compare_cube_wa64 = bld.tmp(bld.lm);
8653 bool_to_vector_condition(ctx, compare_cube_wa, tg4_compare_cube_wa64);
8654
8655 nfmt = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), nfmt, Operand(26u));
8656
8657 desc[1] = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), desc[1],
8658 Operand((uint32_t)C_008F14_NUM_FORMAT));
8659 desc[1] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), desc[1], nfmt);
8660
8661 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector,
8662 Format::PSEUDO, resource.size(), 1)};
8663 for (unsigned i = 0; i < resource.size(); i++)
8664 vec->operands[i] = Operand(desc[i]);
8665 resource = bld.tmp(resource.regClass());
8666 vec->definitions[0] = Definition(resource);
8667 ctx->block->instructions.emplace_back(std::move(vec));
8668
8669 new_coords[0] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
8670 new_coords[0], coords[0], tg4_compare_cube_wa64);
8671 new_coords[1] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
8672 new_coords[1], coords[1], tg4_compare_cube_wa64);
8673 }
8674 coords[0] = new_coords[0];
8675 coords[1] = new_coords[1];
8676 }
8677
8678 if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
8679 //FIXME: if (ctx->abi->gfx9_stride_size_workaround) return ac_build_buffer_load_format_gfx9_safe()
8680
8681 assert(coords.size() == 1);
8682 unsigned last_bit = util_last_bit(nir_ssa_def_components_read(&instr->dest.ssa));
8683 aco_opcode op;
8684 switch (last_bit) {
8685 case 1:
8686 op = aco_opcode::buffer_load_format_x; break;
8687 case 2:
8688 op = aco_opcode::buffer_load_format_xy; break;
8689 case 3:
8690 op = aco_opcode::buffer_load_format_xyz; break;
8691 case 4:
8692 op = aco_opcode::buffer_load_format_xyzw; break;
8693 default:
8694 unreachable("Tex instruction loads more than 4 components.");
8695 }
8696
8697 /* if the instruction return value matches exactly the nir dest ssa, we can use it directly */
8698 if (last_bit == instr->dest.ssa.num_components && dst.type() == RegType::vgpr)
8699 tmp_dst = dst;
8700 else
8701 tmp_dst = bld.tmp(RegType::vgpr, last_bit);
8702
8703 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
8704 mubuf->operands[0] = Operand(resource);
8705 mubuf->operands[1] = Operand(coords[0]);
8706 mubuf->operands[2] = Operand((uint32_t) 0);
8707 mubuf->definitions[0] = Definition(tmp_dst);
8708 mubuf->idxen = true;
8709 mubuf->can_reorder = true;
8710 ctx->block->instructions.emplace_back(std::move(mubuf));
8711
8712 expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, (1 << last_bit) - 1);
8713 return;
8714 }
8715
8716 /* gather MIMG address components */
8717 std::vector<Temp> args;
8718 if (has_offset)
8719 args.emplace_back(offset);
8720 if (has_bias)
8721 args.emplace_back(bias);
8722 if (has_compare)
8723 args.emplace_back(compare);
8724 if (has_derivs)
8725 args.insert(args.end(), derivs.begin(), derivs.end());
8726
8727 args.insert(args.end(), coords.begin(), coords.end());
8728 if (has_sample_index)
8729 args.emplace_back(sample_index);
8730 if (has_lod)
8731 args.emplace_back(lod);
8732 if (has_clamped_lod)
8733 args.emplace_back(clamped_lod);
8734
8735 Temp arg = bld.tmp(RegClass(RegType::vgpr, args.size()));
8736 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, args.size(), 1)};
8737 vec->definitions[0] = Definition(arg);
8738 for (unsigned i = 0; i < args.size(); i++)
8739 vec->operands[i] = Operand(args[i]);
8740 ctx->block->instructions.emplace_back(std::move(vec));
8741
8742
8743 if (instr->op == nir_texop_txf ||
8744 instr->op == nir_texop_txf_ms ||
8745 instr->op == nir_texop_samples_identical ||
8746 instr->op == nir_texop_fragment_fetch ||
8747 instr->op == nir_texop_fragment_mask_fetch) {
8748 aco_opcode op = level_zero || instr->sampler_dim == GLSL_SAMPLER_DIM_MS || instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS ? aco_opcode::image_load : aco_opcode::image_load_mip;
8749 tex.reset(create_instruction<MIMG_instruction>(op, Format::MIMG, 3, 1));
8750 tex->operands[0] = Operand(resource);
8751 tex->operands[1] = Operand(s4); /* no sampler */
8752 tex->operands[2] = Operand(arg);
8753 tex->dim = dim;
8754 tex->dmask = dmask;
8755 tex->unrm = true;
8756 tex->da = da;
8757 tex->definitions[0] = Definition(tmp_dst);
8758 tex->can_reorder = true;
8759 ctx->block->instructions.emplace_back(std::move(tex));
8760
8761 if (instr->op == nir_texop_samples_identical) {
8762 assert(dmask == 1 && dst.regClass() == v1);
8763 assert(dst.id() != tmp_dst.id());
8764
8765 Temp tmp = bld.tmp(bld.lm);
8766 bld.vopc(aco_opcode::v_cmp_eq_u32, Definition(tmp), Operand(0u), tmp_dst).def(0).setHint(vcc);
8767 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand((uint32_t)-1), tmp);
8768
8769 } else {
8770 expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, dmask);
8771 }
8772 return;
8773 }
8774
8775     // TODO: it would be better to select these opcodes by adding an offset to a base opcode, but that needs the opcodes ordered accordingly.
8776 aco_opcode opcode = aco_opcode::image_sample;
8777 if (has_offset) { /* image_sample_*_o */
8778 if (has_clamped_lod) {
8779 if (has_compare) {
8780 opcode = aco_opcode::image_sample_c_cl_o;
8781 if (has_derivs)
8782 opcode = aco_opcode::image_sample_c_d_cl_o;
8783 if (has_bias)
8784 opcode = aco_opcode::image_sample_c_b_cl_o;
8785 } else {
8786 opcode = aco_opcode::image_sample_cl_o;
8787 if (has_derivs)
8788 opcode = aco_opcode::image_sample_d_cl_o;
8789 if (has_bias)
8790 opcode = aco_opcode::image_sample_b_cl_o;
8791 }
8792 } else if (has_compare) {
8793 opcode = aco_opcode::image_sample_c_o;
8794 if (has_derivs)
8795 opcode = aco_opcode::image_sample_c_d_o;
8796 if (has_bias)
8797 opcode = aco_opcode::image_sample_c_b_o;
8798 if (level_zero)
8799 opcode = aco_opcode::image_sample_c_lz_o;
8800 if (has_lod)
8801 opcode = aco_opcode::image_sample_c_l_o;
8802 } else {
8803 opcode = aco_opcode::image_sample_o;
8804 if (has_derivs)
8805 opcode = aco_opcode::image_sample_d_o;
8806 if (has_bias)
8807 opcode = aco_opcode::image_sample_b_o;
8808 if (level_zero)
8809 opcode = aco_opcode::image_sample_lz_o;
8810 if (has_lod)
8811 opcode = aco_opcode::image_sample_l_o;
8812 }
8813 } else if (has_clamped_lod) { /* image_sample_*_cl */
8814 if (has_compare) {
8815 opcode = aco_opcode::image_sample_c_cl;
8816 if (has_derivs)
8817 opcode = aco_opcode::image_sample_c_d_cl;
8818 if (has_bias)
8819 opcode = aco_opcode::image_sample_c_b_cl;
8820 } else {
8821 opcode = aco_opcode::image_sample_cl;
8822 if (has_derivs)
8823 opcode = aco_opcode::image_sample_d_cl;
8824 if (has_bias)
8825 opcode = aco_opcode::image_sample_b_cl;
8826 }
8827 } else { /* no offset */
8828 if (has_compare) {
8829 opcode = aco_opcode::image_sample_c;
8830 if (has_derivs)
8831 opcode = aco_opcode::image_sample_c_d;
8832 if (has_bias)
8833 opcode = aco_opcode::image_sample_c_b;
8834 if (level_zero)
8835 opcode = aco_opcode::image_sample_c_lz;
8836 if (has_lod)
8837 opcode = aco_opcode::image_sample_c_l;
8838 } else {
8839 opcode = aco_opcode::image_sample;
8840 if (has_derivs)
8841 opcode = aco_opcode::image_sample_d;
8842 if (has_bias)
8843 opcode = aco_opcode::image_sample_b;
8844 if (level_zero)
8845 opcode = aco_opcode::image_sample_lz;
8846 if (has_lod)
8847 opcode = aco_opcode::image_sample_l;
8848 }
8849 }
8850
8851 if (instr->op == nir_texop_tg4) {
8852 if (has_offset) { /* image_gather4_*_o */
8853 if (has_compare) {
8854 opcode = aco_opcode::image_gather4_c_lz_o;
8855 if (has_lod)
8856 opcode = aco_opcode::image_gather4_c_l_o;
8857 if (has_bias)
8858 opcode = aco_opcode::image_gather4_c_b_o;
8859 } else {
8860 opcode = aco_opcode::image_gather4_lz_o;
8861 if (has_lod)
8862 opcode = aco_opcode::image_gather4_l_o;
8863 if (has_bias)
8864 opcode = aco_opcode::image_gather4_b_o;
8865 }
8866 } else {
8867 if (has_compare) {
8868 opcode = aco_opcode::image_gather4_c_lz;
8869 if (has_lod)
8870 opcode = aco_opcode::image_gather4_c_l;
8871 if (has_bias)
8872 opcode = aco_opcode::image_gather4_c_b;
8873 } else {
8874 opcode = aco_opcode::image_gather4_lz;
8875 if (has_lod)
8876 opcode = aco_opcode::image_gather4_l;
8877 if (has_bias)
8878 opcode = aco_opcode::image_gather4_b;
8879 }
8880 }
8881 } else if (instr->op == nir_texop_lod) {
8882 opcode = aco_opcode::image_get_lod;
8883 }
8884
8885     /* we don't need the bias, sample index, compare value, or offset to be
8886      * computed in WQM, but if the p_create_vector copies the coordinates, then it
8887      * needs to be in WQM */
8888 if (ctx->stage == fragment_fs &&
8889 !has_derivs && !has_lod && !level_zero &&
8890 instr->sampler_dim != GLSL_SAMPLER_DIM_MS &&
8891 instr->sampler_dim != GLSL_SAMPLER_DIM_SUBPASS_MS)
8892 arg = emit_wqm(ctx, arg, bld.tmp(arg.regClass()), true);
8893
8894 tex.reset(create_instruction<MIMG_instruction>(opcode, Format::MIMG, 3, 1));
8895 tex->operands[0] = Operand(resource);
8896 tex->operands[1] = Operand(sampler);
8897 tex->operands[2] = Operand(arg);
8898 tex->dim = dim;
8899 tex->dmask = dmask;
8900 tex->da = da;
8901 tex->definitions[0] = Definition(tmp_dst);
8902 tex->can_reorder = true;
8903 ctx->block->instructions.emplace_back(std::move(tex));
8904
8905 if (tg4_integer_cube_workaround) {
8906 assert(tmp_dst.id() != dst.id());
8907 assert(tmp_dst.size() == dst.size() && dst.size() == 4);
8908
8909 emit_split_vector(ctx, tmp_dst, tmp_dst.size());
8910 Temp val[4];
8911 for (unsigned i = 0; i < dst.size(); i++) {
8912 val[i] = emit_extract_vector(ctx, tmp_dst, i, v1);
8913 Temp cvt_val;
8914 if (stype == GLSL_TYPE_UINT)
8915 cvt_val = bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), val[i]);
8916 else
8917 cvt_val = bld.vop1(aco_opcode::v_cvt_i32_f32, bld.def(v1), val[i]);
8918 val[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), val[i], cvt_val, tg4_compare_cube_wa64);
8919 }
8920 Temp tmp = dst.regClass() == v4 ? dst : bld.tmp(v4);
8921 tmp_dst = bld.pseudo(aco_opcode::p_create_vector, Definition(tmp),
8922 val[0], val[1], val[2], val[3]);
8923 }
8924 unsigned mask = instr->op == nir_texop_tg4 ? 0xF : dmask;
8925 expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, mask);
8926
8927 }
8928
8929
8930 Operand get_phi_operand(isel_context *ctx, nir_ssa_def *ssa, RegClass rc)
8931 {
8932 Temp tmp = get_ssa_temp(ctx, ssa);
8933 if (ssa->parent_instr->type == nir_instr_type_ssa_undef)
8934 return Operand(rc);
8935 else
8936 return Operand(tmp);
8937 }
8938
8939 void visit_phi(isel_context *ctx, nir_phi_instr *instr)
8940 {
8941 aco_ptr<Pseudo_instruction> phi;
8942 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8943 assert(instr->dest.ssa.bit_size != 1 || dst.regClass() == ctx->program->lane_mask);
8944
8945 bool logical = !dst.is_linear() || nir_dest_is_divergent(instr->dest);
8946 logical |= ctx->block->kind & block_kind_merge;
8947 aco_opcode opcode = logical ? aco_opcode::p_phi : aco_opcode::p_linear_phi;
8948
8949 /* we want a sorted list of sources, since the predecessor list is also sorted */
8950 std::map<unsigned, nir_ssa_def*> phi_src;
8951 nir_foreach_phi_src(src, instr)
8952 phi_src[src->pred->index] = src->src.ssa;
8953
8954 std::vector<unsigned>& preds = logical ? ctx->block->logical_preds : ctx->block->linear_preds;
8955 unsigned num_operands = 0;
8956 Operand operands[std::max(exec_list_length(&instr->srcs), (unsigned)preds.size()) + 1];
8957 unsigned num_defined = 0;
8958 unsigned cur_pred_idx = 0;
8959 for (std::pair<unsigned, nir_ssa_def *> src : phi_src) {
8960 if (cur_pred_idx < preds.size()) {
8961 /* handle missing preds (IF merges with discard/break) and extra preds (loop exit with discard) */
8962 unsigned block = ctx->cf_info.nir_to_aco[src.first];
8963 unsigned skipped = 0;
8964 while (cur_pred_idx + skipped < preds.size() && preds[cur_pred_idx + skipped] != block)
8965 skipped++;
8966 if (cur_pred_idx + skipped < preds.size()) {
8967 for (unsigned i = 0; i < skipped; i++)
8968 operands[num_operands++] = Operand(dst.regClass());
8969 cur_pred_idx += skipped;
8970 } else {
8971 continue;
8972 }
8973 }
8974       /* Handle missing predecessors at the end. This shouldn't happen with loop
8975        * headers, and we can't ignore these sources for loop header phis. */
8976 if (!(ctx->block->kind & block_kind_loop_header) && cur_pred_idx >= preds.size())
8977 continue;
8978 cur_pred_idx++;
8979 Operand op = get_phi_operand(ctx, src.second, dst.regClass());
8980 operands[num_operands++] = op;
8981 num_defined += !op.isUndefined();
8982 }
8983 /* handle block_kind_continue_or_break at loop exit blocks */
8984 while (cur_pred_idx++ < preds.size())
8985 operands[num_operands++] = Operand(dst.regClass());
8986
8987 /* If the loop ends with a break, still add a linear continue edge in case
8988 * that break is divergent or continue_or_break is used. We'll either remove
8989 * this operand later in visit_loop() if it's not necessary or replace the
8990 * undef with something correct. */
8991 if (!logical && ctx->block->kind & block_kind_loop_header) {
8992 nir_loop *loop = nir_cf_node_as_loop(instr->instr.block->cf_node.parent);
8993 nir_block *last = nir_loop_last_block(loop);
8994 if (last->successors[0] != instr->instr.block)
8995 operands[num_operands++] = Operand(RegClass());
8996 }
8997
8998 if (num_defined == 0) {
8999 Builder bld(ctx->program, ctx->block);
9000 if (dst.regClass() == s1) {
9001 bld.sop1(aco_opcode::s_mov_b32, Definition(dst), Operand(0u));
9002 } else if (dst.regClass() == v1) {
9003 bld.vop1(aco_opcode::v_mov_b32, Definition(dst), Operand(0u));
9004 } else {
9005 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
9006 for (unsigned i = 0; i < dst.size(); i++)
9007 vec->operands[i] = Operand(0u);
9008 vec->definitions[0] = Definition(dst);
9009 ctx->block->instructions.emplace_back(std::move(vec));
9010 }
9011 return;
9012 }
9013
9014 /* we can use a linear phi in some cases if one src is undef */
9015 if (dst.is_linear() && ctx->block->kind & block_kind_merge && num_defined == 1) {
9016 phi.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, num_operands, 1));
9017
9018 Block *linear_else = &ctx->program->blocks[ctx->block->linear_preds[1]];
9019 Block *invert = &ctx->program->blocks[linear_else->linear_preds[0]];
9020 assert(invert->kind & block_kind_invert);
9021
9022 unsigned then_block = invert->linear_preds[0];
9023
9024 Block* insert_block = NULL;
9025 for (unsigned i = 0; i < num_operands; i++) {
9026 Operand op = operands[i];
9027 if (op.isUndefined())
9028 continue;
9029 insert_block = ctx->block->logical_preds[i] == then_block ? invert : ctx->block;
9030 phi->operands[0] = op;
9031 break;
9032 }
9033 assert(insert_block); /* should be handled by the "num_defined == 0" case above */
9034 phi->operands[1] = Operand(dst.regClass());
9035 phi->definitions[0] = Definition(dst);
9036 insert_block->instructions.emplace(insert_block->instructions.begin(), std::move(phi));
9037 return;
9038 }
9039
9040 /* try to scalarize vector phis */
9041 if (instr->dest.ssa.bit_size != 1 && dst.size() > 1) {
9042 // TODO: scalarize linear phis on divergent ifs
9043 bool can_scalarize = (opcode == aco_opcode::p_phi || !(ctx->block->kind & block_kind_merge));
9044 std::array<Temp, NIR_MAX_VEC_COMPONENTS> new_vec;
9045 for (unsigned i = 0; can_scalarize && (i < num_operands); i++) {
9046 Operand src = operands[i];
9047 if (src.isTemp() && ctx->allocated_vec.find(src.tempId()) == ctx->allocated_vec.end())
9048 can_scalarize = false;
9049 }
9050 if (can_scalarize) {
9051 unsigned num_components = instr->dest.ssa.num_components;
9052 assert(dst.size() % num_components == 0);
9053 RegClass rc = RegClass(dst.type(), dst.size() / num_components);
9054
9055 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
9056 for (unsigned k = 0; k < num_components; k++) {
9057 phi.reset(create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, num_operands, 1));
9058 for (unsigned i = 0; i < num_operands; i++) {
9059 Operand src = operands[i];
9060 phi->operands[i] = src.isTemp() ? Operand(ctx->allocated_vec[src.tempId()][k]) : Operand(rc);
9061 }
9062 Temp phi_dst = {ctx->program->allocateId(), rc};
9063 phi->definitions[0] = Definition(phi_dst);
9064 ctx->block->instructions.emplace(ctx->block->instructions.begin(), std::move(phi));
9065 new_vec[k] = phi_dst;
9066 vec->operands[k] = Operand(phi_dst);
9067 }
9068 vec->definitions[0] = Definition(dst);
9069 ctx->block->instructions.emplace_back(std::move(vec));
9070 ctx->allocated_vec.emplace(dst.id(), new_vec);
9071 return;
9072 }
9073 }
9074
9075 phi.reset(create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, num_operands, 1));
9076 for (unsigned i = 0; i < num_operands; i++)
9077 phi->operands[i] = operands[i];
9078 phi->definitions[0] = Definition(dst);
9079 ctx->block->instructions.emplace(ctx->block->instructions.begin(), std::move(phi));
9080 }
9081
9082
9083 void visit_undef(isel_context *ctx, nir_ssa_undef_instr *instr)
9084 {
9085 Temp dst = get_ssa_temp(ctx, &instr->def);
9086
9087 assert(dst.type() == RegType::sgpr);
9088
9089 if (dst.size() == 1) {
9090 Builder(ctx->program, ctx->block).copy(Definition(dst), Operand(0u));
9091 } else {
9092 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
9093 for (unsigned i = 0; i < dst.size(); i++)
9094 vec->operands[i] = Operand(0u);
9095 vec->definitions[0] = Definition(dst);
9096 ctx->block->instructions.emplace_back(std::move(vec));
9097 }
9098 }
9099
9100 void visit_jump(isel_context *ctx, nir_jump_instr *instr)
9101 {
9102 Builder bld(ctx->program, ctx->block);
9103 Block *logical_target;
9104 append_logical_end(ctx->block);
9105 unsigned idx = ctx->block->index;
9106
9107 switch (instr->type) {
9108 case nir_jump_break:
9109 logical_target = ctx->cf_info.parent_loop.exit;
9110 add_logical_edge(idx, logical_target);
9111 ctx->block->kind |= block_kind_break;
9112
9113 if (!ctx->cf_info.parent_if.is_divergent &&
9114 !ctx->cf_info.parent_loop.has_divergent_continue) {
9115 /* uniform break - directly jump out of the loop */
9116 ctx->block->kind |= block_kind_uniform;
9117 ctx->cf_info.has_branch = true;
9118 bld.branch(aco_opcode::p_branch);
9119 add_linear_edge(idx, logical_target);
9120 return;
9121 }
9122 ctx->cf_info.parent_loop.has_divergent_branch = true;
9123 ctx->cf_info.nir_to_aco[instr->instr.block->index] = ctx->block->index;
9124 break;
9125 case nir_jump_continue:
9126 logical_target = &ctx->program->blocks[ctx->cf_info.parent_loop.header_idx];
9127 add_logical_edge(idx, logical_target);
9128 ctx->block->kind |= block_kind_continue;
9129
9130 if (ctx->cf_info.parent_if.is_divergent) {
9131          /* for potential uniform breaks after this continue,
9132           * we must ensure that they are handled correctly */
9133 ctx->cf_info.parent_loop.has_divergent_continue = true;
9134 ctx->cf_info.parent_loop.has_divergent_branch = true;
9135 ctx->cf_info.nir_to_aco[instr->instr.block->index] = ctx->block->index;
9136 } else {
9137 /* uniform continue - directly jump to the loop header */
9138 ctx->block->kind |= block_kind_uniform;
9139 ctx->cf_info.has_branch = true;
9140 bld.branch(aco_opcode::p_branch);
9141 add_linear_edge(idx, logical_target);
9142 return;
9143 }
9144 break;
9145 default:
9146 fprintf(stderr, "Unknown NIR jump instr: ");
9147 nir_print_instr(&instr->instr, stderr);
9148 fprintf(stderr, "\n");
9149 abort();
9150 }
9151
9152 if (ctx->cf_info.parent_if.is_divergent && !ctx->cf_info.exec_potentially_empty_break) {
9153 ctx->cf_info.exec_potentially_empty_break = true;
9154 ctx->cf_info.exec_potentially_empty_break_depth = ctx->cf_info.loop_nest_depth;
9155 }
9156
9157 /* remove critical edges from linear CFG */
9158 bld.branch(aco_opcode::p_branch);
9159 Block* break_block = ctx->program->create_and_insert_block();
9160 break_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9161 break_block->kind |= block_kind_uniform;
9162 add_linear_edge(idx, break_block);
9163 /* the loop_header pointer might be invalidated by this point */
9164 if (instr->type == nir_jump_continue)
9165 logical_target = &ctx->program->blocks[ctx->cf_info.parent_loop.header_idx];
9166 add_linear_edge(break_block->index, logical_target);
9167 bld.reset(break_block);
9168 bld.branch(aco_opcode::p_branch);
9169
9170 Block* continue_block = ctx->program->create_and_insert_block();
9171 continue_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9172 add_linear_edge(idx, continue_block);
9173 append_logical_start(continue_block);
9174 ctx->block = continue_block;
9175 return;
9176 }
9177
9178 void visit_block(isel_context *ctx, nir_block *block)
9179 {
9180 nir_foreach_instr(instr, block) {
9181 switch (instr->type) {
9182 case nir_instr_type_alu:
9183 visit_alu_instr(ctx, nir_instr_as_alu(instr));
9184 break;
9185 case nir_instr_type_load_const:
9186 visit_load_const(ctx, nir_instr_as_load_const(instr));
9187 break;
9188 case nir_instr_type_intrinsic:
9189 visit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
9190 break;
9191 case nir_instr_type_tex:
9192 visit_tex(ctx, nir_instr_as_tex(instr));
9193 break;
9194 case nir_instr_type_phi:
9195 visit_phi(ctx, nir_instr_as_phi(instr));
9196 break;
9197 case nir_instr_type_ssa_undef:
9198 visit_undef(ctx, nir_instr_as_ssa_undef(instr));
9199 break;
9200 case nir_instr_type_deref:
9201 break;
9202 case nir_instr_type_jump:
9203 visit_jump(ctx, nir_instr_as_jump(instr));
9204 break;
9205 default:
9206 fprintf(stderr, "Unknown NIR instr type: ");
9207 nir_print_instr(instr, stderr);
9208 fprintf(stderr, "\n");
9209 //abort();
9210 }
9211 }
9212
9213 if (!ctx->cf_info.parent_loop.has_divergent_branch)
9214 ctx->cf_info.nir_to_aco[block->index] = ctx->block->index;
9215 }
9216
9217
9218
9219 static Operand create_continue_phis(isel_context *ctx, unsigned first, unsigned last,
9220 aco_ptr<Instruction>& header_phi, Operand *vals)
9221 {
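   /* Compute, for every block in [first, last], the value the loop header
    * phi has at that point, creating linear phis where linear control flow
    * merges; the value reaching the final continue edge is returned. */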
9222 vals[0] = Operand(header_phi->definitions[0].getTemp());
9223 RegClass rc = vals[0].regClass();
9224
9225 unsigned loop_nest_depth = ctx->program->blocks[first].loop_nest_depth;
9226
9227 unsigned next_pred = 1;
9228
9229 for (unsigned idx = first + 1; idx <= last; idx++) {
9230 Block& block = ctx->program->blocks[idx];
9231 if (block.loop_nest_depth != loop_nest_depth) {
9232 vals[idx - first] = vals[idx - 1 - first];
9233 continue;
9234 }
9235
9236 if (block.kind & block_kind_continue) {
9237 vals[idx - first] = header_phi->operands[next_pred];
9238 next_pred++;
9239 continue;
9240 }
9241
9242 bool all_same = true;
9243 for (unsigned i = 1; all_same && (i < block.linear_preds.size()); i++)
9244 all_same = vals[block.linear_preds[i] - first] == vals[block.linear_preds[0] - first];
9245
9246 Operand val;
9247 if (all_same) {
9248 val = vals[block.linear_preds[0] - first];
9249 } else {
9250 aco_ptr<Instruction> phi(create_instruction<Pseudo_instruction>(
9251 aco_opcode::p_linear_phi, Format::PSEUDO, block.linear_preds.size(), 1));
9252 for (unsigned i = 0; i < block.linear_preds.size(); i++)
9253 phi->operands[i] = vals[block.linear_preds[i] - first];
9254 val = Operand(Temp(ctx->program->allocateId(), rc));
9255 phi->definitions[0] = Definition(val.getTemp());
9256 block.instructions.emplace(block.instructions.begin(), std::move(phi));
9257 }
9258 vals[idx - first] = val;
9259 }
9260
9261 return vals[last - first];
9262 }
9263
9264 static void visit_loop(isel_context *ctx, nir_loop *loop)
9265 {
9266 //TODO: we might want to wrap the loop around a branch if exec_potentially_empty=true
9267 append_logical_end(ctx->block);
9268 ctx->block->kind |= block_kind_loop_preheader | block_kind_uniform;
9269 Builder bld(ctx->program, ctx->block);
9270 bld.branch(aco_opcode::p_branch);
9271 unsigned loop_preheader_idx = ctx->block->index;
9272
9273 Block loop_exit = Block();
9274 loop_exit.loop_nest_depth = ctx->cf_info.loop_nest_depth;
9275 loop_exit.kind |= (block_kind_loop_exit | (ctx->block->kind & block_kind_top_level));
9276
9277 Block* loop_header = ctx->program->create_and_insert_block();
9278 loop_header->loop_nest_depth = ctx->cf_info.loop_nest_depth + 1;
9279 loop_header->kind |= block_kind_loop_header;
9280 add_edge(loop_preheader_idx, loop_header);
9281 ctx->block = loop_header;
9282
9283 /* emit loop body */
9284 unsigned loop_header_idx = loop_header->index;
9285 loop_info_RAII loop_raii(ctx, loop_header_idx, &loop_exit);
9286 append_logical_start(ctx->block);
9287 bool unreachable = visit_cf_list(ctx, &loop->body);
9288
9289    //TODO: what if a loop ends with an unconditional or uniformly branched continue and this branch is never taken?
9290 if (!ctx->cf_info.has_branch) {
9291 append_logical_end(ctx->block);
9292 if (ctx->cf_info.exec_potentially_empty_discard || ctx->cf_info.exec_potentially_empty_break) {
9293 /* Discards can result in code running with an empty exec mask.
9294 * This would result in divergent breaks not ever being taken. As a
9295 * workaround, break the loop when the loop mask is empty instead of
9296 * always continuing. */
9297 ctx->block->kind |= (block_kind_continue_or_break | block_kind_uniform);
9298 unsigned block_idx = ctx->block->index;
9299
9300 /* create helper blocks to avoid critical edges */
9301 Block *break_block = ctx->program->create_and_insert_block();
9302 break_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9303 break_block->kind = block_kind_uniform;
9304 bld.reset(break_block);
9305 bld.branch(aco_opcode::p_branch);
9306 add_linear_edge(block_idx, break_block);
9307 add_linear_edge(break_block->index, &loop_exit);
9308
9309 Block *continue_block = ctx->program->create_and_insert_block();
9310 continue_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9311 continue_block->kind = block_kind_uniform;
9312 bld.reset(continue_block);
9313 bld.branch(aco_opcode::p_branch);
9314 add_linear_edge(block_idx, continue_block);
9315 add_linear_edge(continue_block->index, &ctx->program->blocks[loop_header_idx]);
9316
9317 if (!ctx->cf_info.parent_loop.has_divergent_branch)
9318 add_logical_edge(block_idx, &ctx->program->blocks[loop_header_idx]);
9319 ctx->block = &ctx->program->blocks[block_idx];
9320 } else {
9321 ctx->block->kind |= (block_kind_continue | block_kind_uniform);
9322 if (!ctx->cf_info.parent_loop.has_divergent_branch)
9323 add_edge(ctx->block->index, &ctx->program->blocks[loop_header_idx]);
9324 else
9325 add_linear_edge(ctx->block->index, &ctx->program->blocks[loop_header_idx]);
9326 }
9327
9328 bld.reset(ctx->block);
9329 bld.branch(aco_opcode::p_branch);
9330 }
9331
9332 /* Fixup phis in loop header from unreachable blocks.
9333 * has_branch/has_divergent_branch also indicates if the loop ends with a
9334 * break/continue instruction, but we don't emit those if unreachable=true */
9335 if (unreachable) {
9336 assert(ctx->cf_info.has_branch || ctx->cf_info.parent_loop.has_divergent_branch);
9337 bool linear = ctx->cf_info.has_branch;
9338 bool logical = ctx->cf_info.has_branch || ctx->cf_info.parent_loop.has_divergent_branch;
9339 for (aco_ptr<Instruction>& instr : ctx->program->blocks[loop_header_idx].instructions) {
9340 if ((logical && instr->opcode == aco_opcode::p_phi) ||
9341 (linear && instr->opcode == aco_opcode::p_linear_phi)) {
9342 /* the last operand should be the one that needs to be removed */
9343 instr->operands.pop_back();
9344 } else if (!is_phi(instr)) {
9345 break;
9346 }
9347 }
9348 }
9349
9350    /* Fixup linear phis in the loop header that expect a continue. This fixup
9351     * and the previous one should never both be needed at once, because a break
9352     * in the merge block would get CSE'd */
9353 if (nir_loop_last_block(loop)->successors[0] != nir_loop_first_block(loop)) {
9354 unsigned num_vals = ctx->cf_info.has_branch ? 1 : (ctx->block->index - loop_header_idx + 1);
9355 Operand vals[num_vals];
9356 for (aco_ptr<Instruction>& instr : ctx->program->blocks[loop_header_idx].instructions) {
9357 if (instr->opcode == aco_opcode::p_linear_phi) {
9358 if (ctx->cf_info.has_branch)
9359 instr->operands.pop_back();
9360 else
9361 instr->operands.back() = create_continue_phis(ctx, loop_header_idx, ctx->block->index, instr, vals);
9362 } else if (!is_phi(instr)) {
9363 break;
9364 }
9365 }
9366 }
9367
9368 ctx->cf_info.has_branch = false;
9369
9370    // TODO: if the loop does not have a single exit, we must add one
9371 /* emit loop successor block */
9372 ctx->block = ctx->program->insert_block(std::move(loop_exit));
9373 append_logical_start(ctx->block);
9374
9375 #if 0
9376 // TODO: check if it is beneficial to not branch on continues
9377 /* trim linear phis in loop header */
9378 for (auto&& instr : loop_entry->instructions) {
9379 if (instr->opcode == aco_opcode::p_linear_phi) {
9380 aco_ptr<Pseudo_instruction> new_phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, loop_entry->linear_predecessors.size(), 1)};
9381 new_phi->definitions[0] = instr->definitions[0];
9382 for (unsigned i = 0; i < new_phi->operands.size(); i++)
9383 new_phi->operands[i] = instr->operands[i];
9384 /* check that the remaining operands are all the same */
9385 for (unsigned i = new_phi->operands.size(); i < instr->operands.size(); i++)
9386 assert(instr->operands[i].tempId() == instr->operands.back().tempId());
9387 instr.swap(new_phi);
9388 } else if (instr->opcode == aco_opcode::p_phi) {
9389 continue;
9390 } else {
9391 break;
9392 }
9393 }
9394 #endif
9395 }
9396
9397 static void begin_divergent_if_then(isel_context *ctx, if_context *ic, Temp cond)
9398 {
9399 ic->cond = cond;
9400
9401 append_logical_end(ctx->block);
9402 ctx->block->kind |= block_kind_branch;
9403
9404 /* branch to linear then block */
9405 assert(cond.regClass() == ctx->program->lane_mask);
9406 aco_ptr<Pseudo_branch_instruction> branch;
9407 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_cbranch_z, Format::PSEUDO_BRANCH, 1, 0));
9408 branch->operands[0] = Operand(cond);
9409 ctx->block->instructions.push_back(std::move(branch));
9410
9411 ic->BB_if_idx = ctx->block->index;
9412 ic->BB_invert = Block();
9413 ic->BB_invert.loop_nest_depth = ctx->cf_info.loop_nest_depth;
9414 /* Invert blocks are intentionally not marked as top level because they
9415 * are not part of the logical cfg. */
9416 ic->BB_invert.kind |= block_kind_invert;
9417 ic->BB_endif = Block();
9418 ic->BB_endif.loop_nest_depth = ctx->cf_info.loop_nest_depth;
9419 ic->BB_endif.kind |= (block_kind_merge | (ctx->block->kind & block_kind_top_level));
9420
9421 ic->exec_potentially_empty_discard_old = ctx->cf_info.exec_potentially_empty_discard;
9422 ic->exec_potentially_empty_break_old = ctx->cf_info.exec_potentially_empty_break;
9423 ic->exec_potentially_empty_break_depth_old = ctx->cf_info.exec_potentially_empty_break_depth;
9424 ic->divergent_old = ctx->cf_info.parent_if.is_divergent;
9425 ctx->cf_info.parent_if.is_divergent = true;
9426
9427 /* divergent branches use cbranch_execz */
9428 ctx->cf_info.exec_potentially_empty_discard = false;
9429 ctx->cf_info.exec_potentially_empty_break = false;
9430 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
9431
9432 /** emit logical then block */
9433 Block* BB_then_logical = ctx->program->create_and_insert_block();
9434 BB_then_logical->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9435 add_edge(ic->BB_if_idx, BB_then_logical);
9436 ctx->block = BB_then_logical;
9437 append_logical_start(BB_then_logical);
9438 }
9439
9440 static void begin_divergent_if_else(isel_context *ctx, if_context *ic)
9441 {
9442 Block *BB_then_logical = ctx->block;
9443 append_logical_end(BB_then_logical);
9444 /* branch from logical then block to invert block */
9445 aco_ptr<Pseudo_branch_instruction> branch;
9446 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
9447 BB_then_logical->instructions.emplace_back(std::move(branch));
9448 add_linear_edge(BB_then_logical->index, &ic->BB_invert);
9449 if (!ctx->cf_info.parent_loop.has_divergent_branch)
9450 add_logical_edge(BB_then_logical->index, &ic->BB_endif);
9451 BB_then_logical->kind |= block_kind_uniform;
9452 assert(!ctx->cf_info.has_branch);
9453 ic->then_branch_divergent = ctx->cf_info.parent_loop.has_divergent_branch;
9454 ctx->cf_info.parent_loop.has_divergent_branch = false;
9455
9456 /** emit linear then block */
9457 Block* BB_then_linear = ctx->program->create_and_insert_block();
9458 BB_then_linear->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9459 BB_then_linear->kind |= block_kind_uniform;
9460 add_linear_edge(ic->BB_if_idx, BB_then_linear);
9461 /* branch from linear then block to invert block */
9462 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
9463 BB_then_linear->instructions.emplace_back(std::move(branch));
9464 add_linear_edge(BB_then_linear->index, &ic->BB_invert);
9465
9466 /** emit invert merge block */
9467 ctx->block = ctx->program->insert_block(std::move(ic->BB_invert));
9468 ic->invert_idx = ctx->block->index;
9469
9470 /* branch to linear else block (skip else) */
9471 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_cbranch_nz, Format::PSEUDO_BRANCH, 1, 0));
9472 branch->operands[0] = Operand(ic->cond);
9473 ctx->block->instructions.push_back(std::move(branch));
9474
9475 ic->exec_potentially_empty_discard_old |= ctx->cf_info.exec_potentially_empty_discard;
9476 ic->exec_potentially_empty_break_old |= ctx->cf_info.exec_potentially_empty_break;
9477 ic->exec_potentially_empty_break_depth_old =
9478 std::min(ic->exec_potentially_empty_break_depth_old, ctx->cf_info.exec_potentially_empty_break_depth);
9479 /* divergent branches use cbranch_execz */
9480 ctx->cf_info.exec_potentially_empty_discard = false;
9481 ctx->cf_info.exec_potentially_empty_break = false;
9482 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
9483
9484 /** emit logical else block */
9485 Block* BB_else_logical = ctx->program->create_and_insert_block();
9486 BB_else_logical->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9487 add_logical_edge(ic->BB_if_idx, BB_else_logical);
9488 add_linear_edge(ic->invert_idx, BB_else_logical);
9489 ctx->block = BB_else_logical;
9490 append_logical_start(BB_else_logical);
9491 }
9492
9493 static void end_divergent_if(isel_context *ctx, if_context *ic)
9494 {
9495 Block *BB_else_logical = ctx->block;
9496 append_logical_end(BB_else_logical);
9497
9498 /* branch from logical else block to endif block */
9499 aco_ptr<Pseudo_branch_instruction> branch;
9500 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
9501 BB_else_logical->instructions.emplace_back(std::move(branch));
9502 add_linear_edge(BB_else_logical->index, &ic->BB_endif);
9503 if (!ctx->cf_info.parent_loop.has_divergent_branch)
9504 add_logical_edge(BB_else_logical->index, &ic->BB_endif);
9505 BB_else_logical->kind |= block_kind_uniform;
9506
9507 assert(!ctx->cf_info.has_branch);
9508 ctx->cf_info.parent_loop.has_divergent_branch &= ic->then_branch_divergent;
9509
9510
9511 /** emit linear else block */
9512 Block* BB_else_linear = ctx->program->create_and_insert_block();
9513 BB_else_linear->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9514 BB_else_linear->kind |= block_kind_uniform;
9515 add_linear_edge(ic->invert_idx, BB_else_linear);
9516
9517 /* branch from linear else block to endif block */
9518 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
9519 BB_else_linear->instructions.emplace_back(std::move(branch));
9520 add_linear_edge(BB_else_linear->index, &ic->BB_endif);
9521
9522
9523 /** emit endif merge block */
9524 ctx->block = ctx->program->insert_block(std::move(ic->BB_endif));
9525 append_logical_start(ctx->block);
9526
9527
9528 ctx->cf_info.parent_if.is_divergent = ic->divergent_old;
9529 ctx->cf_info.exec_potentially_empty_discard |= ic->exec_potentially_empty_discard_old;
9530 ctx->cf_info.exec_potentially_empty_break |= ic->exec_potentially_empty_break_old;
9531 ctx->cf_info.exec_potentially_empty_break_depth =
9532 std::min(ic->exec_potentially_empty_break_depth_old, ctx->cf_info.exec_potentially_empty_break_depth);
9533 if (ctx->cf_info.loop_nest_depth == ctx->cf_info.exec_potentially_empty_break_depth &&
9534 !ctx->cf_info.parent_if.is_divergent) {
9535 ctx->cf_info.exec_potentially_empty_break = false;
9536 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
9537 }
9538 /* uniform control flow never has an empty exec-mask */
9539 if (!ctx->cf_info.loop_nest_depth && !ctx->cf_info.parent_if.is_divergent) {
9540 ctx->cf_info.exec_potentially_empty_discard = false;
9541 ctx->cf_info.exec_potentially_empty_break = false;
9542 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
9543 }
9544 }
9545
9546 static void begin_uniform_if_then(isel_context *ctx, if_context *ic, Temp cond)
9547 {
9548 assert(cond.regClass() == s1);
9549
9550 append_logical_end(ctx->block);
9551 ctx->block->kind |= block_kind_uniform;
9552
9553 aco_ptr<Pseudo_branch_instruction> branch;
9554 aco_opcode branch_opcode = aco_opcode::p_cbranch_z;
9555 branch.reset(create_instruction<Pseudo_branch_instruction>(branch_opcode, Format::PSEUDO_BRANCH, 1, 0));
9556 branch->operands[0] = Operand(cond);
9557 branch->operands[0].setFixed(scc);
9558 ctx->block->instructions.emplace_back(std::move(branch));
9559
9560 ic->BB_if_idx = ctx->block->index;
9561 ic->BB_endif = Block();
9562 ic->BB_endif.loop_nest_depth = ctx->cf_info.loop_nest_depth;
9563 ic->BB_endif.kind |= ctx->block->kind & block_kind_top_level;
9564
9565 ctx->cf_info.has_branch = false;
9566 ctx->cf_info.parent_loop.has_divergent_branch = false;
9567
9568 /** emit then block */
9569 Block* BB_then = ctx->program->create_and_insert_block();
9570 BB_then->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9571 add_edge(ic->BB_if_idx, BB_then);
9572 append_logical_start(BB_then);
9573 ctx->block = BB_then;
9574 }
9575
9576 static void begin_uniform_if_else(isel_context *ctx, if_context *ic)
9577 {
9578 Block *BB_then = ctx->block;
9579
9580 ic->uniform_has_then_branch = ctx->cf_info.has_branch;
9581 ic->then_branch_divergent = ctx->cf_info.parent_loop.has_divergent_branch;
9582
9583 if (!ic->uniform_has_then_branch) {
9584 append_logical_end(BB_then);
9585 /* branch from then block to endif block */
9586 aco_ptr<Pseudo_branch_instruction> branch;
9587 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
9588 BB_then->instructions.emplace_back(std::move(branch));
9589 add_linear_edge(BB_then->index, &ic->BB_endif);
9590 if (!ic->then_branch_divergent)
9591 add_logical_edge(BB_then->index, &ic->BB_endif);
9592 BB_then->kind |= block_kind_uniform;
9593 }
9594
9595 ctx->cf_info.has_branch = false;
9596 ctx->cf_info.parent_loop.has_divergent_branch = false;
9597
9598 /** emit else block */
9599 Block* BB_else = ctx->program->create_and_insert_block();
9600 BB_else->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9601 add_edge(ic->BB_if_idx, BB_else);
9602 append_logical_start(BB_else);
9603 ctx->block = BB_else;
9604 }
9605
9606 static void end_uniform_if(isel_context *ctx, if_context *ic)
9607 {
9608 Block *BB_else = ctx->block;
9609
9610 if (!ctx->cf_info.has_branch) {
9611 append_logical_end(BB_else);
9612 /* branch from else block to endif block */
9613 aco_ptr<Pseudo_branch_instruction> branch;
9614 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
9615 BB_else->instructions.emplace_back(std::move(branch));
9616 add_linear_edge(BB_else->index, &ic->BB_endif);
9617 if (!ctx->cf_info.parent_loop.has_divergent_branch)
9618 add_logical_edge(BB_else->index, &ic->BB_endif);
9619 BB_else->kind |= block_kind_uniform;
9620 }
9621
9622 ctx->cf_info.has_branch &= ic->uniform_has_then_branch;
9623 ctx->cf_info.parent_loop.has_divergent_branch &= ic->then_branch_divergent;
9624
9625 /** emit endif merge block */
9626 if (!ctx->cf_info.has_branch) {
9627 ctx->block = ctx->program->insert_block(std::move(ic->BB_endif));
9628 append_logical_start(ctx->block);
9629 }
9630 }
9631
9632 static bool visit_if(isel_context *ctx, nir_if *if_stmt)
9633 {
9634 Temp cond = get_ssa_temp(ctx, if_stmt->condition.ssa);
9635 Builder bld(ctx->program, ctx->block);
9636 aco_ptr<Pseudo_branch_instruction> branch;
9637 if_context ic;
9638
9639 if (!nir_src_is_divergent(if_stmt->condition)) { /* uniform condition */
9640 /**
9641 * Uniform conditionals are represented in the following way*) :
9642 *
9643 * The linear and logical CFG:
9644 * BB_IF
9645 * / \
9646 * BB_THEN (logical) BB_ELSE (logical)
9647 * \ /
9648 * BB_ENDIF
9649 *
9650 * *) Exceptions may be due to break and continue statements within loops.
9651 * If a break/continue happens within uniform control flow, it branches
9652 * to the loop exit/entry block. Otherwise, it branches to the next
9653 * merge block.
9654 **/
9655
9656 // TODO: in a post-RA optimizer, we could check if the condition is in VCC and omit this instruction
9657 assert(cond.regClass() == ctx->program->lane_mask);
9658 cond = bool_to_scalar_condition(ctx, cond);
9659
9660 begin_uniform_if_then(ctx, &ic, cond);
9661 visit_cf_list(ctx, &if_stmt->then_list);
9662
9663 begin_uniform_if_else(ctx, &ic);
9664 visit_cf_list(ctx, &if_stmt->else_list);
9665
9666 end_uniform_if(ctx, &ic);
9667 } else { /* non-uniform condition */
9668 /**
9669 * To maintain a logical and linear CFG without critical edges,
9670 * non-uniform conditionals are represented in the following way*) :
9671 *
9672 * The linear CFG:
9673 * BB_IF
9674 * / \
9675 * BB_THEN (logical) BB_THEN (linear)
9676 * \ /
9677 * BB_INVERT (linear)
9678 * / \
9679 * BB_ELSE (logical) BB_ELSE (linear)
9680 * \ /
9681 * BB_ENDIF
9682 *
9683 * The logical CFG:
9684 * BB_IF
9685 * / \
9686 * BB_THEN (logical) BB_ELSE (logical)
9687 * \ /
9688 * BB_ENDIF
9689 *
9690 * *) Exceptions may be due to break and continue statements within loops
9691 **/
9692
9693 begin_divergent_if_then(ctx, &ic, cond);
9694 visit_cf_list(ctx, &if_stmt->then_list);
9695
9696 begin_divergent_if_else(ctx, &ic);
9697 visit_cf_list(ctx, &if_stmt->else_list);
9698
9699 end_divergent_if(ctx, &ic);
9700 }
9701
9702 return !ctx->cf_info.has_branch && !ctx->block->logical_preds.empty();
9703 }
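/* Illustrative sketch (not part of the compiler): how the if_context helpers
 * above are meant to be chained when emitting a divergent "if (cond) A else B"
 * by hand, the same way write_tcs_tess_factors() and emit_streamout() below do:
 *
 *   if_context ic;
 *   begin_divergent_if_then(ctx, &ic, cond); // cond: lane-mask boolean
 *   ... emit the "then" side into ctx->block ...
 *   begin_divergent_if_else(ctx, &ic);
 *   ... emit the "else" side into ctx->block ...
 *   end_divergent_if(ctx, &ic);              // ctx->block is now BB_ENDIF
 *
 * Any Builder still pointing at an old block has to be re-targeted with
 * bld.reset(ctx->block) after each of these calls.
 */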
9704
9705 static bool visit_cf_list(isel_context *ctx,
9706 struct exec_list *list)
9707 {
9708 foreach_list_typed(nir_cf_node, node, node, list) {
9709 switch (node->type) {
9710 case nir_cf_node_block:
9711 visit_block(ctx, nir_cf_node_as_block(node));
9712 break;
9713 case nir_cf_node_if:
9714 if (!visit_if(ctx, nir_cf_node_as_if(node)))
9715 return true;
9716 break;
9717 case nir_cf_node_loop:
9718 visit_loop(ctx, nir_cf_node_as_loop(node));
9719 break;
9720 default:
9721 unreachable("unimplemented cf list type");
9722 }
9723 }
9724 return false;
9725 }
9726
9727 static void create_null_export(isel_context *ctx)
9728 {
9729 /* Some shader stages always need to have exports.
9730 * So when there is none, we need to add a null export.
9731 */
9732
9733 unsigned dest = (ctx->program->stage & hw_fs) ? 9 /* NULL */ : V_008DFC_SQ_EXP_POS;
9734 bool vm = (ctx->program->stage & hw_fs) || ctx->program->chip_class >= GFX10;
9735 Builder bld(ctx->program, ctx->block);
9736 bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
9737 /* enabled_mask */ 0, dest, /* compr */ false, /* done */ true, vm);
9738 }
9739
9740 static bool export_vs_varying(isel_context *ctx, int slot, bool is_pos, int *next_pos)
9741 {
9742 assert(ctx->stage == vertex_vs ||
9743 ctx->stage == tess_eval_vs ||
9744 ctx->stage == gs_copy_vs ||
9745 ctx->stage == ngg_vertex_gs ||
9746 ctx->stage == ngg_tess_eval_gs);
9747
9748 int offset = (ctx->stage & sw_tes)
9749 ? ctx->program->info->tes.outinfo.vs_output_param_offset[slot]
9750 : ctx->program->info->vs.outinfo.vs_output_param_offset[slot];
9751 uint64_t mask = ctx->outputs.mask[slot];
9752 if (!is_pos && !mask)
9753 return false;
9754 if (!is_pos && offset == AC_EXP_PARAM_UNDEFINED)
9755 return false;
9756 aco_ptr<Export_instruction> exp{create_instruction<Export_instruction>(aco_opcode::exp, Format::EXP, 4, 0)};
9757 exp->enabled_mask = mask;
9758 for (unsigned i = 0; i < 4; ++i) {
9759 if (mask & (1 << i))
9760 exp->operands[i] = Operand(ctx->outputs.temps[slot * 4u + i]);
9761 else
9762 exp->operands[i] = Operand(v1);
9763 }
9764 /* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
9765 * Setting valid_mask=1 prevents it and has no other effect.
9766 */
9767 exp->valid_mask = ctx->options->chip_class >= GFX10 && is_pos && *next_pos == 0;
9768 exp->done = false;
9769 exp->compressed = false;
9770 if (is_pos)
9771 exp->dest = V_008DFC_SQ_EXP_POS + (*next_pos)++;
9772 else
9773 exp->dest = V_008DFC_SQ_EXP_PARAM + offset;
9774 ctx->block->instructions.emplace_back(std::move(exp));
9775
9776 return true;
9777 }
9778
9779 static void export_vs_psiz_layer_viewport(isel_context *ctx, int *next_pos)
9780 {
9781 aco_ptr<Export_instruction> exp{create_instruction<Export_instruction>(aco_opcode::exp, Format::EXP, 4, 0)};
9782 exp->enabled_mask = 0;
9783 for (unsigned i = 0; i < 4; ++i)
9784 exp->operands[i] = Operand(v1);
9785 if (ctx->outputs.mask[VARYING_SLOT_PSIZ]) {
9786 exp->operands[0] = Operand(ctx->outputs.temps[VARYING_SLOT_PSIZ * 4u]);
9787 exp->enabled_mask |= 0x1;
9788 }
9789 if (ctx->outputs.mask[VARYING_SLOT_LAYER]) {
9790 exp->operands[2] = Operand(ctx->outputs.temps[VARYING_SLOT_LAYER * 4u]);
9791 exp->enabled_mask |= 0x4;
9792 }
9793 if (ctx->outputs.mask[VARYING_SLOT_VIEWPORT]) {
9794 if (ctx->options->chip_class < GFX9) {
9795 exp->operands[3] = Operand(ctx->outputs.temps[VARYING_SLOT_VIEWPORT * 4u]);
9796 exp->enabled_mask |= 0x8;
9797 } else {
9798 Builder bld(ctx->program, ctx->block);
9799
9800 Temp out = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(16u),
9801 Operand(ctx->outputs.temps[VARYING_SLOT_VIEWPORT * 4u]));
9802 if (exp->operands[2].isTemp())
9803 out = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand(out), exp->operands[2]);
9804
9805 exp->operands[2] = Operand(out);
9806 exp->enabled_mask |= 0x4;
9807 }
9808 }
9809 exp->valid_mask = ctx->options->chip_class >= GFX10 && *next_pos == 0;
9810 exp->done = false;
9811 exp->compressed = false;
9812 exp->dest = V_008DFC_SQ_EXP_POS + (*next_pos)++;
9813 ctx->block->instructions.emplace_back(std::move(exp));
9814 }
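/* For reference, the channel layout of this second position export (derived
 * from the code above): X = PointSize, Z = Layer, W = ViewportIndex on GFX8
 * and older. On GFX9+ the viewport index is folded into the layer channel
 * instead, e.g. viewport 2 with layer 1 gives Z = (2u << 16) | 1u.
 */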
9815
9816 static void create_export_phis(isel_context *ctx)
9817 {
9818 /* Used when exports are needed, but the output temps are defined in a preceding block.
9819 * This function will set up phis in order to access the outputs in the next block.
9820 */
9821
9822 assert(ctx->block->instructions.back()->opcode == aco_opcode::p_logical_start);
9823 aco_ptr<Instruction> logical_start = aco_ptr<Instruction>(ctx->block->instructions.back().release());
9824 ctx->block->instructions.pop_back();
9825
9826 Builder bld(ctx->program, ctx->block);
9827
9828 for (unsigned slot = 0; slot <= VARYING_SLOT_VAR31; ++slot) {
9829 uint64_t mask = ctx->outputs.mask[slot];
9830 for (unsigned i = 0; i < 4; ++i) {
9831 if (!(mask & (1 << i)))
9832 continue;
9833
9834 Temp old = ctx->outputs.temps[slot * 4 + i];
9835 Temp phi = bld.pseudo(aco_opcode::p_phi, bld.def(v1), old, Operand(v1));
9836 ctx->outputs.temps[slot * 4 + i] = phi;
9837 }
9838 }
9839
9840 bld.insert(std::move(logical_start));
9841 }
9842
9843 static void create_vs_exports(isel_context *ctx)
9844 {
9845 assert(ctx->stage == vertex_vs ||
9846 ctx->stage == tess_eval_vs ||
9847 ctx->stage == gs_copy_vs ||
9848 ctx->stage == ngg_vertex_gs ||
9849 ctx->stage == ngg_tess_eval_gs);
9850
9851 radv_vs_output_info *outinfo = (ctx->stage & sw_tes)
9852 ? &ctx->program->info->tes.outinfo
9853 : &ctx->program->info->vs.outinfo;
9854
9855 if (outinfo->export_prim_id && !(ctx->stage & hw_ngg_gs)) {
9856 ctx->outputs.mask[VARYING_SLOT_PRIMITIVE_ID] |= 0x1;
9857 ctx->outputs.temps[VARYING_SLOT_PRIMITIVE_ID * 4u] = get_arg(ctx, ctx->args->vs_prim_id);
9858 }
9859
9860 if (ctx->options->key.has_multiview_view_index) {
9861 ctx->outputs.mask[VARYING_SLOT_LAYER] |= 0x1;
9862 ctx->outputs.temps[VARYING_SLOT_LAYER * 4u] = as_vgpr(ctx, get_arg(ctx, ctx->args->ac.view_index));
9863 }
9864
9865 /* the order these position exports are created is important */
9866 int next_pos = 0;
9867 bool exported_pos = export_vs_varying(ctx, VARYING_SLOT_POS, true, &next_pos);
9868 if (outinfo->writes_pointsize || outinfo->writes_layer || outinfo->writes_viewport_index) {
9869 export_vs_psiz_layer_viewport(ctx, &next_pos);
9870 exported_pos = true;
9871 }
9872 if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
9873 exported_pos |= export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST0, true, &next_pos);
9874 if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
9875 exported_pos |= export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST1, true, &next_pos);
9876
9877 if (ctx->export_clip_dists) {
9878 if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
9879 export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST0, false, &next_pos);
9880 if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
9881 export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST1, false, &next_pos);
9882 }
9883
9884 for (unsigned i = 0; i <= VARYING_SLOT_VAR31; ++i) {
9885 if (i < VARYING_SLOT_VAR0 &&
9886 i != VARYING_SLOT_LAYER &&
9887 i != VARYING_SLOT_PRIMITIVE_ID &&
9888 i != VARYING_SLOT_VIEWPORT)
9889 continue;
9890
9891 export_vs_varying(ctx, i, false, NULL);
9892 }
9893
9894 if (!exported_pos)
9895 create_null_export(ctx);
9896 }
9897
9898 static bool export_fs_mrt_z(isel_context *ctx)
9899 {
9900 Builder bld(ctx->program, ctx->block);
9901 unsigned enabled_channels = 0;
9902 bool compr = false;
9903 Operand values[4];
9904
9905 for (unsigned i = 0; i < 4; ++i) {
9906 values[i] = Operand(v1);
9907 }
9908
9909 /* Both stencil and sample mask only need 16 bits. */
9910 if (!ctx->program->info->ps.writes_z &&
9911 (ctx->program->info->ps.writes_stencil ||
9912 ctx->program->info->ps.writes_sample_mask)) {
9913 compr = true; /* COMPR flag */
9914
9915 if (ctx->program->info->ps.writes_stencil) {
9916 /* Stencil should be in X[23:16]. */
9917 values[0] = Operand(ctx->outputs.temps[FRAG_RESULT_STENCIL * 4u]);
9918 values[0] = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(16u), values[0]);
9919 enabled_channels |= 0x3;
9920 }
9921
9922 if (ctx->program->info->ps.writes_sample_mask) {
9923 /* SampleMask should be in Y[15:0]. */
9924 values[1] = Operand(ctx->outputs.temps[FRAG_RESULT_SAMPLE_MASK * 4u]);
9925 enabled_channels |= 0xc;
9926 }
9927 } else {
9928 if (ctx->program->info->ps.writes_z) {
9929 values[0] = Operand(ctx->outputs.temps[FRAG_RESULT_DEPTH * 4u]);
9930 enabled_channels |= 0x1;
9931 }
9932
9933 if (ctx->program->info->ps.writes_stencil) {
9934 values[1] = Operand(ctx->outputs.temps[FRAG_RESULT_STENCIL * 4u]);
9935 enabled_channels |= 0x2;
9936 }
9937
9938 if (ctx->program->info->ps.writes_sample_mask) {
9939 values[2] = Operand(ctx->outputs.temps[FRAG_RESULT_SAMPLE_MASK * 4u]);
9940 enabled_channels |= 0x4;
9941 }
9942 }
9943
9944 /* GFX6 (except OLAND and HAINAN) has a bug where it only looks at the X
9945 * writemask component.
9946 */
9947 if (ctx->options->chip_class == GFX6 &&
9948 ctx->options->family != CHIP_OLAND &&
9949 ctx->options->family != CHIP_HAINAN) {
9950 enabled_channels |= 0x1;
9951 }
9952
9953 bld.exp(aco_opcode::exp, values[0], values[1], values[2], values[3],
9954 enabled_channels, V_008DFC_SQ_EXP_MRTZ, compr);
9955
9956 return true;
9957 }
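/* A worked example of the two MRTZ layouts above, assuming a shader that
 * writes stencil = 0x2a and sample mask = 0xf but no depth (illustrative):
 *   compressed (COMPR=1): X = 0x2a << 16 (stencil in X[23:16]),
 *                         Y = 0xf (sample mask in Y[15:0]),
 *                         enabled_channels = 0x3 | 0xc = 0xf
 * Had the shader also written depth, the uncompressed path would be taken:
 *   X = depth, Y = stencil, Z = sample mask, enabled_channels = 0x7.
 */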
9958
9959 static bool export_fs_mrt_color(isel_context *ctx, int slot)
9960 {
9961 Builder bld(ctx->program, ctx->block);
9962 unsigned write_mask = ctx->outputs.mask[slot];
9963 Operand values[4];
9964
9965 for (unsigned i = 0; i < 4; ++i) {
9966 if (write_mask & (1 << i)) {
9967 values[i] = Operand(ctx->outputs.temps[slot * 4u + i]);
9968 } else {
9969 values[i] = Operand(v1);
9970 }
9971 }
9972
9973 unsigned target, col_format;
9974 unsigned enabled_channels = 0;
9975 aco_opcode compr_op = (aco_opcode)0;
9976
9977 slot -= FRAG_RESULT_DATA0;
9978 target = V_008DFC_SQ_EXP_MRT + slot;
9979 col_format = (ctx->options->key.fs.col_format >> (4 * slot)) & 0xf;
9980
9981 bool is_int8 = (ctx->options->key.fs.is_int8 >> slot) & 1;
9982 bool is_int10 = (ctx->options->key.fs.is_int10 >> slot) & 1;
9983 bool is_16bit = values[0].regClass() == v2b;
9984
9985 switch (col_format)
9986 {
9987 case V_028714_SPI_SHADER_ZERO:
9988 enabled_channels = 0; /* writemask */
9989 target = V_008DFC_SQ_EXP_NULL;
9990 break;
9991
9992 case V_028714_SPI_SHADER_32_R:
9993 enabled_channels = 1;
9994 break;
9995
9996 case V_028714_SPI_SHADER_32_GR:
9997 enabled_channels = 0x3;
9998 break;
9999
10000 case V_028714_SPI_SHADER_32_AR:
10001 if (ctx->options->chip_class >= GFX10) {
10002 /* Special case: on GFX10, the outputs are different for 32_AR */
10003 enabled_channels = 0x3;
10004 values[1] = values[3];
10005 values[3] = Operand(v1);
10006 } else {
10007 enabled_channels = 0x9;
10008 }
10009 break;
10010
10011 case V_028714_SPI_SHADER_FP16_ABGR:
10012 enabled_channels = 0x5;
10013 compr_op = aco_opcode::v_cvt_pkrtz_f16_f32;
10014 if (is_16bit) {
10015 if (ctx->options->chip_class >= GFX9) {
10016 /* Pack the FP16 values together instead of converting them to
10017 * FP32 and back to FP16.
10018 * TODO: use p_create_vector and let the compiler optimize it.
10019 */
10020 compr_op = aco_opcode::v_pack_b32_f16;
10021 } else {
10022 for (unsigned i = 0; i < 4; i++) {
10023 if ((write_mask >> i) & 1)
10024 values[i] = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), values[i]);
10025 }
10026 }
10027 }
10028 break;
10029
10030 case V_028714_SPI_SHADER_UNORM16_ABGR:
10031 enabled_channels = 0x5;
10032 if (is_16bit && ctx->options->chip_class >= GFX9) {
10033 compr_op = aco_opcode::v_cvt_pknorm_u16_f16;
10034 } else {
10035 compr_op = aco_opcode::v_cvt_pknorm_u16_f32;
10036 }
10037 break;
10038
10039 case V_028714_SPI_SHADER_SNORM16_ABGR:
10040 enabled_channels = 0x5;
10041 if (is_16bit && ctx->options->chip_class >= GFX9) {
10042 compr_op = aco_opcode::v_cvt_pknorm_i16_f16;
10043 } else {
10044 compr_op = aco_opcode::v_cvt_pknorm_i16_f32;
10045 }
10046 break;
10047
10048 case V_028714_SPI_SHADER_UINT16_ABGR: {
10049 enabled_channels = 0x5;
10050 compr_op = aco_opcode::v_cvt_pk_u16_u32;
10051 if (is_int8 || is_int10) {
10052 /* clamp */
10053 uint32_t max_rgb = is_int8 ? 255 : is_int10 ? 1023 : 0;
10054 Temp max_rgb_val = bld.copy(bld.def(s1), Operand(max_rgb));
10055
10056 for (unsigned i = 0; i < 4; i++) {
10057 if ((write_mask >> i) & 1) {
10058 values[i] = bld.vop2(aco_opcode::v_min_u32, bld.def(v1),
10059 i == 3 && is_int10 ? Operand(3u) : Operand(max_rgb_val),
10060 values[i]);
10061 }
10062 }
10063 } else if (is_16bit) {
10064 for (unsigned i = 0; i < 4; i++) {
10065 if ((write_mask >> i) & 1) {
10066 Temp tmp = convert_int(ctx, bld, values[i].getTemp(), 16, 32, false);
10067 values[i] = Operand(tmp);
10068 }
10069 }
10070 }
10071 break;
10072 }
10073
10074 case V_028714_SPI_SHADER_SINT16_ABGR:
10075 enabled_channels = 0x5;
10076 compr_op = aco_opcode::v_cvt_pk_i16_i32;
10077 if (is_int8 || is_int10) {
10078 /* clamp */
10079 uint32_t max_rgb = is_int8 ? 127 : is_int10 ? 511 : 0;
10080 uint32_t min_rgb = is_int8 ? -128 : is_int10 ? -512 : 0;
10081 Temp max_rgb_val = bld.copy(bld.def(s1), Operand(max_rgb));
10082 Temp min_rgb_val = bld.copy(bld.def(s1), Operand(min_rgb));
10083
10084 for (unsigned i = 0; i < 4; i++) {
10085 if ((write_mask >> i) & 1) {
10086 values[i] = bld.vop2(aco_opcode::v_min_i32, bld.def(v1),
10087 i == 3 && is_int10 ? Operand(1u) : Operand(max_rgb_val),
10088 values[i]);
10089 values[i] = bld.vop2(aco_opcode::v_max_i32, bld.def(v1),
10090 i == 3 && is_int10 ? Operand(-2u) : Operand(min_rgb_val),
10091 values[i]);
10092 }
10093 }
10094 } else if (is_16bit) {
10095 for (unsigned i = 0; i < 4; i++) {
10096 if ((write_mask >> i) & 1) {
10097 Temp tmp = convert_int(ctx, bld, values[i].getTemp(), 16, 32, true);
10098 values[i] = Operand(tmp);
10099 }
10100 }
10101 }
10102 break;
10103
10104 case V_028714_SPI_SHADER_32_ABGR:
10105 enabled_channels = 0xF;
10106 break;
10107
10108 default:
10109 break;
10110 }
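/* The clamp ranges applied in the UINT16/SINT16 cases above, spelled out
 * (the alpha component of RGB10A2 formats only has 2 bits, hence the special
 * case for component 3):
 *   is_int8,  UINT16: [0, 255]         SINT16: [-128, 127]
 *   is_int10, UINT16: rgb [0, 1023], a [0, 3]
 *             SINT16: rgb [-512, 511], a [-2, 1]
 */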
10111
10112 if (target == V_008DFC_SQ_EXP_NULL)
10113 return false;
10114
10115 /* Replace NaN by zero (only 32-bit) to fix game bugs if requested. */
10116 if (ctx->options->enable_mrt_output_nan_fixup &&
10117 !is_16bit &&
10118 (col_format == V_028714_SPI_SHADER_32_R ||
10119 col_format == V_028714_SPI_SHADER_32_GR ||
10120 col_format == V_028714_SPI_SHADER_32_AR ||
10121 col_format == V_028714_SPI_SHADER_32_ABGR ||
10122 col_format == V_028714_SPI_SHADER_FP16_ABGR)) {
10123 for (int i = 0; i < 4; i++) {
10124 if (!(write_mask & (1 << i)))
10125 continue;
10126
10127 Temp isnan = bld.vopc(aco_opcode::v_cmp_class_f32,
10128 bld.hint_vcc(bld.def(bld.lm)), values[i],
10129 bld.copy(bld.def(v1), Operand(3u)));
10130 values[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), values[i],
10131 bld.copy(bld.def(v1), Operand(0u)), isnan);
10132 }
10133 }
10134
10135 if ((bool) compr_op) {
10136 for (int i = 0; i < 2; i++) {
10137 /* check if at least one of the values to be compressed is enabled */
10138 unsigned enabled = (write_mask >> (i*2) | write_mask >> (i*2+1)) & 0x1;
10139 if (enabled) {
10140 enabled_channels |= enabled << (i*2);
10141 values[i] = bld.vop3(compr_op, bld.def(v1),
10142 values[i*2].isUndefined() ? Operand(0u) : values[i*2],
10143 values[i*2+1].isUndefined() ? Operand(0u) : values[i*2+1]);
10144 } else {
10145 values[i] = Operand(v1);
10146 }
10147 }
10148 values[2] = Operand(v1);
10149 values[3] = Operand(v1);
10150 } else {
10151 for (int i = 0; i < 4; i++)
10152 values[i] = enabled_channels & (1 << i) ? values[i] : Operand(v1);
10153 }
10154
10155 bld.exp(aco_opcode::exp, values[0], values[1], values[2], values[3],
10156 enabled_channels, target, (bool) compr_op);
10157 return true;
10158 }
10159
10160 static void create_fs_exports(isel_context *ctx)
10161 {
10162 bool exported = false;
10163
10164 /* Export depth, stencil and sample mask. */
10165 if (ctx->outputs.mask[FRAG_RESULT_DEPTH] ||
10166 ctx->outputs.mask[FRAG_RESULT_STENCIL] ||
10167 ctx->outputs.mask[FRAG_RESULT_SAMPLE_MASK])
10168 exported |= export_fs_mrt_z(ctx);
10169
10170 /* Export all color render targets. */
10171 for (unsigned i = FRAG_RESULT_DATA0; i < FRAG_RESULT_DATA7 + 1; ++i)
10172 if (ctx->outputs.mask[i])
10173 exported |= export_fs_mrt_color(ctx, i);
10174
10175 if (!exported)
10176 create_null_export(ctx);
10177 }
10178
10179 static void write_tcs_tess_factors(isel_context *ctx)
10180 {
10181 unsigned outer_comps;
10182 unsigned inner_comps;
10183
10184 switch (ctx->args->options->key.tcs.primitive_mode) {
10185 case GL_ISOLINES:
10186 outer_comps = 2;
10187 inner_comps = 0;
10188 break;
10189 case GL_TRIANGLES:
10190 outer_comps = 3;
10191 inner_comps = 1;
10192 break;
10193 case GL_QUADS:
10194 outer_comps = 4;
10195 inner_comps = 2;
10196 break;
10197 default:
10198 return;
10199 }
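/* Tess factor counts per patch as selected above (the stride is in dwords
 * and matches the per-patch layout of the HS tess-factor ring):
 *   isolines:  2 outer + 0 inner -> stride 2
 *   triangles: 3 outer + 1 inner -> stride 4
 *   quads:     4 outer + 2 inner -> stride 6
 */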
10200
10201 Builder bld(ctx->program, ctx->block);
10202
10203 bld.barrier(aco_opcode::p_memory_barrier_shared);
10204 if (unlikely(ctx->program->chip_class != GFX6 && ctx->program->workgroup_size > ctx->program->wave_size))
10205 bld.sopp(aco_opcode::s_barrier);
10206
10207 Temp tcs_rel_ids = get_arg(ctx, ctx->args->ac.tcs_rel_ids);
10208 Temp invocation_id = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), tcs_rel_ids, Operand(8u), Operand(5u));
10209
10210 Temp invocation_id_is_zero = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), invocation_id);
10211 if_context ic_invocation_id_is_zero;
10212 begin_divergent_if_then(ctx, &ic_invocation_id_is_zero, invocation_id_is_zero);
10213 bld.reset(ctx->block);
10214
10215 Temp hs_ring_tess_factor = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_HS_TESS_FACTOR * 16u));
10216
10217 std::pair<Temp, unsigned> lds_base = get_tcs_output_lds_offset(ctx);
10218 unsigned stride = inner_comps + outer_comps;
10219 unsigned lds_align = calculate_lds_alignment(ctx, lds_base.second);
10220 Temp tf_inner_vec;
10221 Temp tf_outer_vec;
10222 Temp out[6];
10223 assert(stride <= (sizeof(out) / sizeof(Temp)));
10224
10225 if (ctx->args->options->key.tcs.primitive_mode == GL_ISOLINES) {
10226 // LINES reversal
10227 tf_outer_vec = load_lds(ctx, 4, bld.tmp(v2), lds_base.first, lds_base.second + ctx->tcs_tess_lvl_out_loc, lds_align);
10228 out[1] = emit_extract_vector(ctx, tf_outer_vec, 0, v1);
10229 out[0] = emit_extract_vector(ctx, tf_outer_vec, 1, v1);
10230 } else {
10231 tf_outer_vec = load_lds(ctx, 4, bld.tmp(RegClass(RegType::vgpr, outer_comps)), lds_base.first, lds_base.second + ctx->tcs_tess_lvl_out_loc, lds_align);
10232 tf_inner_vec = load_lds(ctx, 4, bld.tmp(RegClass(RegType::vgpr, inner_comps)), lds_base.first, lds_base.second + ctx->tcs_tess_lvl_in_loc, lds_align);
10233
10234 for (unsigned i = 0; i < outer_comps; ++i)
10235 out[i] = emit_extract_vector(ctx, tf_outer_vec, i, v1);
10236 for (unsigned i = 0; i < inner_comps; ++i)
10237 out[outer_comps + i] = emit_extract_vector(ctx, tf_inner_vec, i, v1);
10238 }
10239
10240 Temp rel_patch_id = get_tess_rel_patch_id(ctx);
10241 Temp tf_base = get_arg(ctx, ctx->args->tess_factor_offset);
10242 Temp byte_offset = bld.v_mul24_imm(bld.def(v1), rel_patch_id, stride * 4u);
10243 unsigned tf_const_offset = 0;
10244
10245 if (ctx->program->chip_class <= GFX8) {
10246 Temp rel_patch_id_is_zero = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), rel_patch_id);
10247 if_context ic_rel_patch_id_is_zero;
10248 begin_divergent_if_then(ctx, &ic_rel_patch_id_is_zero, rel_patch_id_is_zero);
10249 bld.reset(ctx->block);
10250
10251 /* Store the dynamic HS control word. */
10252 Temp control_word = bld.copy(bld.def(v1), Operand(0x80000000u));
10253 bld.mubuf(aco_opcode::buffer_store_dword,
10254 /* SRSRC */ hs_ring_tess_factor, /* VADDR */ Operand(v1), /* SOFFSET */ tf_base, /* VDATA */ control_word,
10255 /* immediate OFFSET */ 0, /* OFFEN */ false, /* idxen*/ false, /* addr64 */ false,
10256 /* disable_wqm */ false, /* glc */ true);
10257 tf_const_offset += 4;
10258
10259 begin_divergent_if_else(ctx, &ic_rel_patch_id_is_zero);
10260 end_divergent_if(ctx, &ic_rel_patch_id_is_zero);
10261 bld.reset(ctx->block);
10262 }
10263
10264 assert(stride == 2 || stride == 4 || stride == 6);
10265 Temp tf_vec = create_vec_from_array(ctx, out, stride, RegType::vgpr, 4u);
10266 store_vmem_mubuf(ctx, tf_vec, hs_ring_tess_factor, byte_offset, tf_base, tf_const_offset, 4, (1 << stride) - 1, true, false);
10267
10268 /* Store to offchip for TES to read - only if TES reads them */
10269 if (ctx->args->options->key.tcs.tes_reads_tess_factors) {
10270 Temp hs_ring_tess_offchip = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_HS_TESS_OFFCHIP * 16u));
10271 Temp oc_lds = get_arg(ctx, ctx->args->oc_lds);
10272
10273 std::pair<Temp, unsigned> vmem_offs_outer = get_tcs_per_patch_output_vmem_offset(ctx, nullptr, ctx->tcs_tess_lvl_out_loc);
10274 store_vmem_mubuf(ctx, tf_outer_vec, hs_ring_tess_offchip, vmem_offs_outer.first, oc_lds, vmem_offs_outer.second, 4, (1 << outer_comps) - 1, true, false);
10275
10276 if (likely(inner_comps)) {
10277 std::pair<Temp, unsigned> vmem_offs_inner = get_tcs_per_patch_output_vmem_offset(ctx, nullptr, ctx->tcs_tess_lvl_in_loc);
10278 store_vmem_mubuf(ctx, tf_inner_vec, hs_ring_tess_offchip, vmem_offs_inner.first, oc_lds, vmem_offs_inner.second, 4, (1 << inner_comps) - 1, true, false);
10279 }
10280 }
10281
10282 begin_divergent_if_else(ctx, &ic_invocation_id_is_zero);
10283 end_divergent_if(ctx, &ic_invocation_id_is_zero);
10284 }
10285
10286 static void emit_stream_output(isel_context *ctx,
10287 Temp const *so_buffers,
10288 Temp const *so_write_offset,
10289 const struct radv_stream_output *output)
10290 {
10291 unsigned num_comps = util_bitcount(output->component_mask);
10292 unsigned writemask = (1 << num_comps) - 1;
10293 unsigned loc = output->location;
10294 unsigned buf = output->buffer;
10295
10296 assert(num_comps && num_comps <= 4);
10297 if (!num_comps || num_comps > 4)
10298 return;
10299
10300 unsigned start = ffs(output->component_mask) - 1;
10301
10302 Temp out[4];
10303 bool all_undef = true;
10304 assert(ctx->stage & hw_vs);
10305 for (unsigned i = 0; i < num_comps; i++) {
10306 out[i] = ctx->outputs.temps[loc * 4 + start + i];
10307 all_undef = all_undef && !out[i].id();
10308 }
10309 if (all_undef)
10310 return;
10311
10312 while (writemask) {
10313 int start, count;
10314 u_bit_scan_consecutive_range(&writemask, &start, &count);
10315 if (count == 3 && ctx->options->chip_class == GFX6) {
10316 /* GFX6 doesn't support storing vec3, split it. */
10317 writemask |= 1u << (start + 2);
10318 count = 2;
10319 }
10320
10321 unsigned offset = output->offset + start * 4;
10322
10323 Temp write_data = {ctx->program->allocateId(), RegClass(RegType::vgpr, count)};
10324 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
10325 for (int i = 0; i < count; ++i)
10326 vec->operands[i] = (ctx->outputs.mask[loc] & 1 << (start + i)) ? Operand(out[start + i]) : Operand(0u);
10327 vec->definitions[0] = Definition(write_data);
10328 ctx->block->instructions.emplace_back(std::move(vec));
10329
10330 aco_opcode opcode;
10331 switch (count) {
10332 case 1:
10333 opcode = aco_opcode::buffer_store_dword;
10334 break;
10335 case 2:
10336 opcode = aco_opcode::buffer_store_dwordx2;
10337 break;
10338 case 3:
10339 opcode = aco_opcode::buffer_store_dwordx3;
10340 break;
10341 case 4:
10342 opcode = aco_opcode::buffer_store_dwordx4;
10343 break;
10344 default:
10345 unreachable("Unsupported dword count.");
10346 }
10347
10348 aco_ptr<MUBUF_instruction> store{create_instruction<MUBUF_instruction>(opcode, Format::MUBUF, 4, 0)};
10349 store->operands[0] = Operand(so_buffers[buf]);
10350 store->operands[1] = Operand(so_write_offset[buf]);
10351 store->operands[2] = Operand((uint32_t) 0);
10352 store->operands[3] = Operand(write_data);
10353 if (offset > 4095) {
10354 /* We don't think this can happen in RADV, but it might in GL; it's easy to handle anyway. */
10355 Builder bld(ctx->program, ctx->block);
10356 store->operands[1] = bld.vadd32(bld.def(v1), Operand(offset), Operand(so_write_offset[buf]));
10357 } else {
10358 store->offset = offset;
10359 }
10360 store->offen = true;
10361 store->glc = true;
10362 store->dlc = false;
10363 store->slc = true;
10364 store->can_reorder = true;
10365 ctx->block->instructions.emplace_back(std::move(store));
10366 }
10367 }
10368
10369 static void emit_streamout(isel_context *ctx, unsigned stream)
10370 {
10371 Builder bld(ctx->program, ctx->block);
10372
10373 Temp so_buffers[4];
10374 Temp buf_ptr = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->streamout_buffers));
10375 for (unsigned i = 0; i < 4; i++) {
10376 unsigned stride = ctx->program->info->so.strides[i];
10377 if (!stride)
10378 continue;
10379
10380 Operand off = bld.copy(bld.def(s1), Operand(i * 16u));
10381 so_buffers[i] = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), buf_ptr, off);
10382 }
10383
10384 Temp so_vtx_count = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
10385 get_arg(ctx, ctx->args->streamout_config), Operand(0x70010u));
10386
10387 Temp tid = emit_mbcnt(ctx, bld.def(v1));
10388
10389 Temp can_emit = bld.vopc(aco_opcode::v_cmp_gt_i32, bld.def(bld.lm), so_vtx_count, tid);
10390
10391 if_context ic;
10392 begin_divergent_if_then(ctx, &ic, can_emit);
10393
10394 bld.reset(ctx->block);
10395
10396 Temp so_write_index = bld.vadd32(bld.def(v1), get_arg(ctx, ctx->args->streamout_write_idx), tid);
10397
10398 Temp so_write_offset[4];
10399
10400 for (unsigned i = 0; i < 4; i++) {
10401 unsigned stride = ctx->program->info->so.strides[i];
10402 if (!stride)
10403 continue;
10404
10405 if (stride == 1) {
10406 Temp offset = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
10407 get_arg(ctx, ctx->args->streamout_write_idx),
10408 get_arg(ctx, ctx->args->streamout_offset[i]));
10409 Temp new_offset = bld.vadd32(bld.def(v1), offset, tid);
10410
10411 so_write_offset[i] = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), new_offset);
10412 } else {
10413 Temp offset = bld.v_mul_imm(bld.def(v1), so_write_index, stride * 4u);
10414 Temp offset2 = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(4u),
10415 get_arg(ctx, ctx->args->streamout_offset[i]));
10416 so_write_offset[i] = bld.vadd32(bld.def(v1), offset, offset2);
10417 }
10418 }
10419
10420 for (unsigned i = 0; i < ctx->program->info->so.num_outputs; i++) {
10421 struct radv_stream_output *output =
10422 &ctx->program->info->so.outputs[i];
10423 if (stream != output->stream)
10424 continue;
10425
10426 emit_stream_output(ctx, so_buffers, so_write_offset, output);
10427 }
10428
10429 begin_divergent_if_else(ctx, &ic);
10430 end_divergent_if(ctx, &ic);
10431 }
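/* In both branches above, the per-lane byte offset into streamout buffer i
 * works out to (a sketch of the arithmetic):
 *
 *   so_write_offset[i] = (streamout_offset[i] + so_write_index * stride) * 4
 *
 * where so_write_index = streamout_write_idx + lane id; the stride == 1 case
 * merely folds the multiply away and does the scaling with a final shift.
 */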
10432
10433 } /* end namespace */
10434
10435 void fix_ls_vgpr_init_bug(isel_context *ctx, Pseudo_instruction *startpgm)
10436 {
10437 assert(ctx->shader->info.stage == MESA_SHADER_VERTEX);
10438 Builder bld(ctx->program, ctx->block);
10439 constexpr unsigned hs_idx = 1u;
10440 Builder::Result hs_thread_count = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
10441 get_arg(ctx, ctx->args->merged_wave_info),
10442 Operand((8u << 16) | (hs_idx * 8u)));
10443 Temp ls_has_nonzero_hs_threads = bool_to_vector_condition(ctx, hs_thread_count.def(1).getTemp());
10444
10445 /* If there are no HS threads, SPI mistakenly loads the LS VGPRs starting at VGPR 0. */
10446
10447 Temp instance_id = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
10448 get_arg(ctx, ctx->args->rel_auto_id),
10449 get_arg(ctx, ctx->args->ac.instance_id),
10450 ls_has_nonzero_hs_threads);
10451 Temp rel_auto_id = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
10452 get_arg(ctx, ctx->args->ac.tcs_rel_ids),
10453 get_arg(ctx, ctx->args->rel_auto_id),
10454 ls_has_nonzero_hs_threads);
10455 Temp vertex_id = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
10456 get_arg(ctx, ctx->args->ac.tcs_patch_id),
10457 get_arg(ctx, ctx->args->ac.vertex_id),
10458 ls_has_nonzero_hs_threads);
10459
10460 ctx->arg_temps[ctx->args->ac.instance_id.arg_index] = instance_id;
10461 ctx->arg_temps[ctx->args->rel_auto_id.arg_index] = rel_auto_id;
10462 ctx->arg_temps[ctx->args->ac.vertex_id.arg_index] = vertex_id;
10463 }
10464
10465 void split_arguments(isel_context *ctx, Pseudo_instruction *startpgm)
10466 {
10467 /* Split all arguments except for the first (ring_offsets) and the last
10468 * (exec) so that the dead channels don't stay live throughout the program.
10469 */
10470 for (int i = 1; i < startpgm->definitions.size() - 1; i++) {
10471 if (startpgm->definitions[i].regClass().size() > 1) {
10472 emit_split_vector(ctx, startpgm->definitions[i].getTemp(),
10473 startpgm->definitions[i].regClass().size());
10474 }
10475 }
10476 }
10477
10478 void handle_bc_optimize(isel_context *ctx)
10479 {
10480 /* needed when SPI_PS_IN_CONTROL.BC_OPTIMIZE_DISABLE is set to 0 */
10481 Builder bld(ctx->program, ctx->block);
10482 uint32_t spi_ps_input_ena = ctx->program->config->spi_ps_input_ena;
10483 bool uses_center = G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) || G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena);
10484 bool uses_centroid = G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) || G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena);
10485 ctx->persp_centroid = get_arg(ctx, ctx->args->ac.persp_centroid);
10486 ctx->linear_centroid = get_arg(ctx, ctx->args->ac.linear_centroid);
10487 if (uses_center && uses_centroid) {
10488 Temp sel = bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.hint_vcc(bld.def(bld.lm)),
10489 get_arg(ctx, ctx->args->ac.prim_mask), Operand(0u));
10490
10491 if (G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena)) {
10492 Temp new_coord[2];
10493 for (unsigned i = 0; i < 2; i++) {
10494 Temp persp_centroid = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.persp_centroid), i, v1);
10495 Temp persp_center = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.persp_center), i, v1);
10496 new_coord[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
10497 persp_centroid, persp_center, sel);
10498 }
10499 ctx->persp_centroid = bld.tmp(v2);
10500 bld.pseudo(aco_opcode::p_create_vector, Definition(ctx->persp_centroid),
10501 Operand(new_coord[0]), Operand(new_coord[1]));
10502 emit_split_vector(ctx, ctx->persp_centroid, 2);
10503 }
10504
10505 if (G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena)) {
10506 Temp new_coord[2];
10507 for (unsigned i = 0; i < 2; i++) {
10508 Temp linear_centroid = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.linear_centroid), i, v1);
10509 Temp linear_center = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.linear_center), i, v1);
10510 new_coord[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
10511 linear_centroid, linear_center, sel);
10512 }
10513 ctx->linear_centroid = bld.tmp(v2);
10514 bld.pseudo(aco_opcode::p_create_vector, Definition(ctx->linear_centroid),
10515 Operand(new_coord[0]), Operand(new_coord[1]));
10516 emit_split_vector(ctx, ctx->linear_centroid, 2);
10517 }
10518 }
10519 }
10520
10521 void setup_fp_mode(isel_context *ctx, nir_shader *shader)
10522 {
10523 Program *program = ctx->program;
10524
10525 unsigned float_controls = shader->info.float_controls_execution_mode;
10526
10527 program->next_fp_mode.preserve_signed_zero_inf_nan32 =
10528 float_controls & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32;
10529 program->next_fp_mode.preserve_signed_zero_inf_nan16_64 =
10530 float_controls & (FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16 |
10531 FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64);
10532
10533 program->next_fp_mode.must_flush_denorms32 =
10534 float_controls & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32;
10535 program->next_fp_mode.must_flush_denorms16_64 =
10536 float_controls & (FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16 |
10537 FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64);
10538
10539 program->next_fp_mode.care_about_round32 =
10540 float_controls & (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32 | FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32);
10541
10542 program->next_fp_mode.care_about_round16_64 =
10543 float_controls & (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64 |
10544 FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64);
10545
10546 /* default to preserving fp16 and fp64 denorms, since it's free for fp64 and
10547 * the precision seems needed for Wolfenstein: Youngblood to render correctly */
10548 if (program->next_fp_mode.must_flush_denorms16_64)
10549 program->next_fp_mode.denorm16_64 = 0;
10550 else
10551 program->next_fp_mode.denorm16_64 = fp_denorm_keep;
10552
10553 /* preserving fp32 denorms is expensive, so only do it if asked */
10554 if (float_controls & FLOAT_CONTROLS_DENORM_PRESERVE_FP32)
10555 program->next_fp_mode.denorm32 = fp_denorm_keep;
10556 else
10557 program->next_fp_mode.denorm32 = 0;
10558
10559 if (float_controls & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32)
10560 program->next_fp_mode.round32 = fp_round_tz;
10561 else
10562 program->next_fp_mode.round32 = fp_round_ne;
10563
10564 if (float_controls & (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64))
10565 program->next_fp_mode.round16_64 = fp_round_tz;
10566 else
10567 program->next_fp_mode.round16_64 = fp_round_ne;
10568
10569 ctx->block->fp_mode = program->next_fp_mode;
10570 }
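/* Example of the mapping above (a sketch, assuming a SPIR-V shader that
 * declares DenormFlushToZero for FP32 and RoundingModeRTZ for FP16):
 *   must_flush_denorms32 = true, denorm32 = 0 (flush),
 *   care_about_round16_64 = true, round16_64 = fp_round_tz,
 *   denorm16_64 = fp_denorm_keep (the default, since nothing flushes it).
 */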
10571
10572 void cleanup_cfg(Program *program)
10573 {
10574 /* create linear_succs/logical_succs */
10575 for (Block& BB : program->blocks) {
10576 for (unsigned idx : BB.linear_preds)
10577 program->blocks[idx].linear_succs.emplace_back(BB.index);
10578 for (unsigned idx : BB.logical_preds)
10579 program->blocks[idx].logical_succs.emplace_back(BB.index);
10580 }
10581 }
10582
10583 Temp merged_wave_info_to_mask(isel_context *ctx, unsigned i)
10584 {
10585 Builder bld(ctx->program, ctx->block);
10586
10587 /* The s_bfm only cares about s0.u[5:0], so we need neither s_bfe nor s_and here */
10588 Temp count = i == 0
10589 ? get_arg(ctx, ctx->args->merged_wave_info)
10590 : bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc),
10591 get_arg(ctx, ctx->args->merged_wave_info), Operand(i * 8u));
10592
10593 Temp mask = bld.sop2(aco_opcode::s_bfm_b64, bld.def(s2), count, Operand(0u));
10594 Temp cond;
10595
10596 if (ctx->program->wave_size == 64) {
10597 /* Special case for 64 active invocations, because 64 doesn't work with s_bfm */
10598 Temp active_64 = bld.sopc(aco_opcode::s_bitcmp1_b32, bld.def(s1, scc), count, Operand(6u /* log2(64) */));
10599 cond = bld.sop2(Builder::s_cselect, bld.def(bld.lm), Operand(-1u), mask, bld.scc(active_64));
10600 } else {
10601 /* We use s_bfm_b64 (not _b32), which works correctly with a count of 32, but then we need to extract the lower half of the 64-bit register */
10602 cond = emit_extract_vector(ctx, mask, 0, bld.lm);
10603 }
10604
10605 return cond;
10606 }
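/* Worked example (illustrative): merged_wave_info packs one 8-bit thread
 * count per merged stage, stage i in bits [8*i+7 : 8*i]. For i == 1 and a
 * count of 10, the helper computes s_bfm_b64(10, 0) = 0x3ff, i.e. the low
 * 10 lanes enabled. The count == 64 special case exists because the bitfield
 * mask instruction only uses the low 6 bits of its size operand, so 64 would
 * wrap to 0 instead of producing an all-ones mask.
 */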
10607
10608 bool ngg_early_prim_export(isel_context *ctx)
10609 {
10610 /* TODO: Check edge flags, and if they are written, return false. (Needed for OpenGL, not for Vulkan.) */
10611 return true;
10612 }
10613
10614 void ngg_emit_sendmsg_gs_alloc_req(isel_context *ctx)
10615 {
10616 Builder bld(ctx->program, ctx->block);
10617
10618 /* It is recommended to do the GS_ALLOC_REQ as soon and as quickly as possible, so we set the maximum priority (3). */
10619 bld.sopp(aco_opcode::s_setprio, -1u, 0x3u);
10620
10621 /* Get the id of the current wave within the threadgroup (workgroup) */
10622 Builder::Result wave_id_in_tg = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
10623 get_arg(ctx, ctx->args->merged_wave_info), Operand(24u | (4u << 16)));
10624
10625 /* Execute the following code only on the first wave (wave id 0);
10626 * use the SCC def to tell whether the wave id is zero or not.
10627 */
10628 Temp cond = wave_id_in_tg.def(1).getTemp();
10629 if_context ic;
10630 begin_uniform_if_then(ctx, &ic, cond);
10631 begin_uniform_if_else(ctx, &ic);
10632 bld.reset(ctx->block);
10633
10634 /* Number of vertices output by VS/TES */
10635 Temp vtx_cnt = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
10636 get_arg(ctx, ctx->args->gs_tg_info), Operand(12u | (9u << 16u)));
10637 /* Number of primitives output by VS/TES */
10638 Temp prm_cnt = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
10639 get_arg(ctx, ctx->args->gs_tg_info), Operand(22u | (9u << 16u)));
10640
10641 /* Put the number of vertices and primitives into m0 for the GS_ALLOC_REQ */
10642 Temp tmp = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), prm_cnt, Operand(12u));
10643 tmp = bld.sop2(aco_opcode::s_or_b32, bld.m0(bld.def(s1)), bld.def(s1, scc), tmp, vtx_cnt);
10644
10645 /* Request the SPI to allocate space for the primitives and vertices that will be exported by the threadgroup. */
10646 bld.sopp(aco_opcode::s_sendmsg, bld.m0(tmp), -1, sendmsg_gs_alloc_req);
10647
10648 end_uniform_if(ctx, &ic);
10649
10650 /* After the GS_ALLOC_REQ is done, reset priority to default (0). */
10651 bld.reset(ctx->block);
10652 bld.sopp(aco_opcode::s_setprio, -1u, 0x0u);
10653 }
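/* Worked example of the m0 packing above: for a threadgroup exporting
 * 3 vertices and 1 primitive, m0 = (1 << 12) | 3 = 0x1003, i.e. the vertex
 * count lives in m0[11:0] and the primitive count starts at bit 12.
 */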
10654
10655 Temp ngg_get_prim_exp_arg(isel_context *ctx, unsigned num_vertices, const Temp vtxindex[])
10656 {
10657 Builder bld(ctx->program, ctx->block);
10658
10659 if (ctx->args->options->key.vs_common_out.as_ngg_passthrough) {
10660 return get_arg(ctx, ctx->args->gs_vtx_offset[0]);
10661 }
10662
10663 Temp gs_invocation_id = get_arg(ctx, ctx->args->ac.gs_invocation_id);
10664 Temp tmp;
10665
10666 for (unsigned i = 0; i < num_vertices; ++i) {
10667 assert(vtxindex[i].id());
10668
10669 if (i)
10670 tmp = bld.vop3(aco_opcode::v_lshl_add_u32, bld.def(v1), vtxindex[i], Operand(10u * i), tmp);
10671 else
10672 tmp = vtxindex[i];
10673
10674 /* The initial edge flag is always false in tess eval shaders. */
10675 if (ctx->stage == ngg_vertex_gs) {
10676 Temp edgeflag = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), gs_invocation_id, Operand(8 + i), Operand(1u));
10677 tmp = bld.vop3(aco_opcode::v_lshl_add_u32, bld.def(v1), edgeflag, Operand(10u * i + 9u), tmp);
10678 }
10679 }
10680
10681 /* TODO: Set isnull field in case of merged NGG VS+GS. */
10682
10683 return tmp;
10684 }
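/* The resulting primitive descriptor (non-passthrough case) packs one 10-bit
 * field per vertex: index i in bits [10*i+8 : 10*i] and its edge flag at bit
 * 10*i+9. E.g. a triangle (4, 5, 6) without edge flags becomes
 * 4 | (5 << 10) | (6 << 20) = 0x601404. (The isnull field from the TODO
 * above is left unset here.)
 */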
10685
10686 void ngg_emit_prim_export(isel_context *ctx, unsigned num_vertices_per_primitive, const Temp vtxindex[])
10687 {
10688 Builder bld(ctx->program, ctx->block);
10689 Temp prim_exp_arg = ngg_get_prim_exp_arg(ctx, num_vertices_per_primitive, vtxindex);
10690
10691 bld.exp(aco_opcode::exp, prim_exp_arg, Operand(v1), Operand(v1), Operand(v1),
10692 1 /* enabled mask */, V_008DFC_SQ_EXP_PRIM /* dest */,
10693 false /* compressed */, true/* done */, false /* valid mask */);
10694 }
10695
10696 void ngg_emit_nogs_gsthreads(isel_context *ctx)
10697 {
10698 /* Emit the things that NGG GS threads need to do, for shaders that don't have SW GS.
10699 * These must always come before VS exports.
10700 *
10701 * It is recommended to do these as early as possible. They can be at the beginning when
10702 * there is no SW GS and the shader doesn't write edge flags.
10703 */
10704
10705 if_context ic;
10706 Temp is_gs_thread = merged_wave_info_to_mask(ctx, 1);
10707 begin_divergent_if_then(ctx, &ic, is_gs_thread);
10708
10709 Builder bld(ctx->program, ctx->block);
10710 constexpr unsigned max_vertices_per_primitive = 3;
10711 unsigned num_vertices_per_primitive = max_vertices_per_primitive;
10712
10713 if (ctx->stage == ngg_vertex_gs) {
10714 /* TODO: optimize for points & lines */
10715 } else if (ctx->stage == ngg_tess_eval_gs) {
10716 if (ctx->shader->info.tess.point_mode)
10717 num_vertices_per_primitive = 1;
10718 else if (ctx->shader->info.tess.primitive_mode == GL_ISOLINES)
10719 num_vertices_per_primitive = 2;
10720 } else {
10721 unreachable("Unsupported NGG shader stage");
10722 }
10723
10724 Temp vtxindex[max_vertices_per_primitive];
10725 vtxindex[0] = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xffffu),
10726 get_arg(ctx, ctx->args->gs_vtx_offset[0]));
10727 vtxindex[1] = num_vertices_per_primitive < 2 ? Temp(0, v1) :
10728 bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1),
10729 get_arg(ctx, ctx->args->gs_vtx_offset[0]), Operand(16u), Operand(16u));
10730 vtxindex[2] = num_vertices_per_primitive < 3 ? Temp(0, v1) :
10731 bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xffffu),
10732 get_arg(ctx, ctx->args->gs_vtx_offset[2]));
10733
10734 /* Export primitive data to the index buffer. */
10735 ngg_emit_prim_export(ctx, num_vertices_per_primitive, vtxindex);
10736
10737 /* Export primitive ID. */
10738 if (ctx->stage == ngg_vertex_gs && ctx->args->options->key.vs_common_out.export_prim_id) {
10739 /* Copy Primitive IDs from GS threads to the LDS address corresponding to the ES thread of the provoking vertex. */
10740 Temp prim_id = get_arg(ctx, ctx->args->ac.gs_prim_id);
10741 Temp provoking_vtx_index = vtxindex[0];
10742 Temp addr = bld.v_mul_imm(bld.def(v1), provoking_vtx_index, 4u);
10743
10744 store_lds(ctx, 4, prim_id, 0x1u, addr, 0u, 4u);
10745 }
10746
10747 begin_divergent_if_else(ctx, &ic);
10748 end_divergent_if(ctx, &ic);
10749 }
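/* For reference, the unpacking above assumes the hw argument layout where
 * gs_vtx_offset[0] carries vertex index 0 in bits [15:0] and index 1 in
 * bits [31:16], while gs_vtx_offset[2] carries index 2 in its low 16 bits;
 * hence the v_and with 0xffff and the v_bfe(16, 16).
 */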
10750
10751 void ngg_emit_nogs_output(isel_context *ctx)
10752 {
10753 /* Emits NGG GS output, for stages that don't have SW GS. */
10754
10755 if_context ic;
10756 Builder bld(ctx->program, ctx->block);
10757 bool late_prim_export = !ngg_early_prim_export(ctx);
10758
10759 /* NGG streamout is currently disabled, so there must be no streamout outputs here. */
10760 assert(!ctx->args->shader_info->so.num_outputs);
10761
10762 if (late_prim_export) {
10763 /* VS exports are output to registers in a predecessor block. Emit phis to get them into this block. */
10764 create_export_phis(ctx);
10765 /* Do what we need to do in the GS threads. */
10766 ngg_emit_nogs_gsthreads(ctx);
10767
10768 /* What comes next should be executed on ES threads. */
10769 Temp is_es_thread = merged_wave_info_to_mask(ctx, 0);
10770 begin_divergent_if_then(ctx, &ic, is_es_thread);
10771 bld.reset(ctx->block);
10772 }
10773
10774 /* Export VS outputs */
10775 ctx->block->kind |= block_kind_export_end;
10776 create_vs_exports(ctx);
10777
10778 /* Export primitive ID */
10779 if (ctx->args->options->key.vs_common_out.export_prim_id) {
10780 Temp prim_id;
10781
10782 if (ctx->stage == ngg_vertex_gs) {
10783 /* Wait for GS threads to store primitive ID in LDS. */
10784 bld.barrier(aco_opcode::p_memory_barrier_shared);
10785 bld.sopp(aco_opcode::s_barrier);
10786
10787 /* Calculate LDS address where the GS threads stored the primitive ID. */
10788 Temp wave_id_in_tg = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
10789 get_arg(ctx, ctx->args->merged_wave_info), Operand(24u | (4u << 16)));
10790 Temp thread_id_in_wave = emit_mbcnt(ctx, bld.def(v1));
10791 Temp wave_id_mul = bld.v_mul24_imm(bld.def(v1), as_vgpr(ctx, wave_id_in_tg), ctx->program->wave_size);
10792 Temp thread_id_in_tg = bld.vadd32(bld.def(v1), Operand(wave_id_mul), Operand(thread_id_in_wave));
10793 Temp addr = bld.v_mul24_imm(bld.def(v1), thread_id_in_tg, 4u);
10794
10795 /* Load primitive ID from LDS. */
10796 prim_id = load_lds(ctx, 4, bld.tmp(v1), addr, 0u, 4u);
10797 } else if (ctx->stage == ngg_tess_eval_gs) {
10798 /* TES: Just use the patch ID as the primitive ID. */
10799 prim_id = get_arg(ctx, ctx->args->ac.tes_patch_id);
10800 } else {
10801 unreachable("unsupported NGG shader stage.");
10802 }
10803
10804 ctx->outputs.mask[VARYING_SLOT_PRIMITIVE_ID] |= 0x1;
10805 ctx->outputs.temps[VARYING_SLOT_PRIMITIVE_ID * 4u] = prim_id;
10806
10807 export_vs_varying(ctx, VARYING_SLOT_PRIMITIVE_ID, false, nullptr);
10808 }
10809
10810 if (late_prim_export) {
10811 begin_divergent_if_else(ctx, &ic);
10812 end_divergent_if(ctx, &ic);
10813 bld.reset(ctx->block);
10814 }
10815 }
10816
10817 void select_program(Program *program,
10818 unsigned shader_count,
10819 struct nir_shader *const *shaders,
10820 ac_shader_config* config,
10821 struct radv_shader_args *args)
10822 {
10823 isel_context ctx = setup_isel_context(program, shader_count, shaders, config, args, false);
10824 if_context ic_merged_wave_info;
10825 bool ngg_no_gs = ctx.stage == ngg_vertex_gs || ctx.stage == ngg_tess_eval_gs;
10826
10827 for (unsigned i = 0; i < shader_count; i++) {
10828 nir_shader *nir = shaders[i];
10829 init_context(&ctx, nir);
10830
10831 setup_fp_mode(&ctx, nir);
10832
10833 if (!i) {
10834 /* needs to be after init_context() for FS */
10835 Pseudo_instruction *startpgm = add_startpgm(&ctx);
10836 append_logical_start(ctx.block);
10837
10838 if (unlikely(args->options->has_ls_vgpr_init_bug && ctx.stage == vertex_tess_control_hs))
10839 fix_ls_vgpr_init_bug(&ctx, startpgm);
10840
10841 split_arguments(&ctx, startpgm);
10842 }
10843
10844 if (ngg_no_gs) {
10845 ngg_emit_sendmsg_gs_alloc_req(&ctx);
10846
10847 if (ngg_early_prim_export(&ctx))
10848 ngg_emit_nogs_gsthreads(&ctx);
10849 }
10850
10851 /* In a merged VS+TCS HS, the VS implementation can be completely empty. */
10852 nir_function_impl *func = nir_shader_get_entrypoint(nir);
10853 bool empty_shader = nir_cf_list_is_empty_block(&func->body) &&
10854 ((nir->info.stage == MESA_SHADER_VERTEX &&
10855 (ctx.stage == vertex_tess_control_hs || ctx.stage == vertex_geometry_gs)) ||
10856 (nir->info.stage == MESA_SHADER_TESS_EVAL &&
10857 ctx.stage == tess_eval_geometry_gs));
10858
10859 bool check_merged_wave_info = ctx.tcs_in_out_eq ? i == 0 : ((shader_count >= 2 && !empty_shader) || ngg_no_gs);
10860 bool endif_merged_wave_info = ctx.tcs_in_out_eq ? i == 1 : check_merged_wave_info;
10861 if (check_merged_wave_info) {
10862 Temp cond = merged_wave_info_to_mask(&ctx, i);
10863 begin_divergent_if_then(&ctx, &ic_merged_wave_info, cond);
10864 }
10865
10866 if (i) {
10867 Builder bld(ctx.program, ctx.block);
10868
10869 bld.barrier(aco_opcode::p_memory_barrier_shared);
10870 bld.sopp(aco_opcode::s_barrier);
10871
10872 if (ctx.stage == vertex_geometry_gs || ctx.stage == tess_eval_geometry_gs) {
10873 ctx.gs_wave_id = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1, m0), bld.def(s1, scc), get_arg(&ctx, args->merged_wave_info), Operand((8u << 16) | 16u));
10874 }
10875 } else if (ctx.stage == geometry_gs)
10876 ctx.gs_wave_id = get_arg(&ctx, args->gs_wave_id);
10877
10878 if (ctx.stage == fragment_fs)
10879 handle_bc_optimize(&ctx);
10880
10881 visit_cf_list(&ctx, &func->body);
10882
10883 if (ctx.program->info->so.num_outputs && (ctx.stage & hw_vs))
10884 emit_streamout(&ctx, 0);
10885
10886 if (ctx.stage & hw_vs) {
10887 create_vs_exports(&ctx);
10888 ctx.block->kind |= block_kind_export_end;
10889 } else if (ngg_no_gs && ngg_early_prim_export(&ctx)) {
10890 ngg_emit_nogs_output(&ctx);
10891 } else if (nir->info.stage == MESA_SHADER_GEOMETRY) {
10892 Builder bld(ctx.program, ctx.block);
10893 bld.barrier(aco_opcode::p_memory_barrier_gs_data);
10894 bld.sopp(aco_opcode::s_sendmsg, bld.m0(ctx.gs_wave_id), -1, sendmsg_gs_done(false, false, 0));
10895 } else if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
10896 write_tcs_tess_factors(&ctx);
10897 }
10898
10899 if (ctx.stage == fragment_fs) {
10900 create_fs_exports(&ctx);
10901 ctx.block->kind |= block_kind_export_end;
10902 }
10903
10904 if (endif_merged_wave_info) {
10905 begin_divergent_if_else(&ctx, &ic_merged_wave_info);
10906 end_divergent_if(&ctx, &ic_merged_wave_info);
10907 }
10908
10909 if (ngg_no_gs && !ngg_early_prim_export(&ctx))
10910 ngg_emit_nogs_output(&ctx);
10911
10912 if (i == 0 && ctx.stage == vertex_tess_control_hs && ctx.tcs_in_out_eq) {
10913 /* Outputs of the previous stage are inputs to the next stage */
10914 ctx.inputs = ctx.outputs;
10915 ctx.outputs = shader_io_state();
10916 }
10917 }
10918
10919 program->config->float_mode = program->blocks[0].fp_mode.val;
10920
10921 append_logical_end(ctx.block);
10922 ctx.block->kind |= block_kind_uniform;
10923 Builder bld(ctx.program, ctx.block);
10924 if (ctx.program->wb_smem_l1_on_end)
10925 bld.smem(aco_opcode::s_dcache_wb, false);
10926 bld.sopp(aco_opcode::s_endpgm);
10927
10928 cleanup_cfg(program);
10929 }
10930
10931 void select_gs_copy_shader(Program *program, struct nir_shader *gs_shader,
10932 ac_shader_config* config,
10933 struct radv_shader_args *args)
10934 {
10935 isel_context ctx = setup_isel_context(program, 1, &gs_shader, config, args, true);
10936
   program->next_fp_mode.preserve_signed_zero_inf_nan32 = false;
   program->next_fp_mode.preserve_signed_zero_inf_nan16_64 = false;
   program->next_fp_mode.must_flush_denorms32 = false;
   program->next_fp_mode.must_flush_denorms16_64 = false;
   program->next_fp_mode.care_about_round32 = false;
   program->next_fp_mode.care_about_round16_64 = false;
   program->next_fp_mode.denorm16_64 = fp_denorm_keep;
   program->next_fp_mode.denorm32 = 0;
   program->next_fp_mode.round32 = fp_round_ne;
   program->next_fp_mode.round16_64 = fp_round_ne;
   ctx.block->fp_mode = program->next_fp_mode;

   add_startpgm(&ctx);
   append_logical_start(ctx.block);

   Builder bld(ctx.program, ctx.block);

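   /* Load the 16-byte buffer descriptor for the GSVS ring, which holds the
    * vertex outputs the geometry shader wrote for every stream.
    */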
   Temp gsvs_ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), program->private_segment_buffer, Operand(RING_GSVS_VS * 16u));

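   /* With streamout, any of the four streams may have to be copied out; the
    * stream to process is in bits 24..25 of the streamout config (s_bfe
    * operand 0x20018 = width 2, offset 24). Otherwise only stream 0 is ever
    * read.
    */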
   Operand stream_id(0u);
   if (args->shader_info->so.num_outputs)
      stream_id = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
                           get_arg(&ctx, ctx.args->streamout_config), Operand(0x20018u));

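   /* vertex_id * 4: byte offset of this invocation's vertex within each
    * output component's slice of the GSVS ring.
    */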
   Temp vtx_offset = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), get_arg(&ctx, ctx.args->ac.vertex_id));

   std::stack<Block> endif_blocks;

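   /* Emit a copy of the outputs for each GS stream that is used. When the
    * stream id is not known at compile time, each stream's code is wrapped
    * in a uniform branch on the runtime stream id; the endif blocks are
    * stacked and merged after the loop.
    */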
   for (unsigned stream = 0; stream < 4; stream++) {
      if (stream_id.isConstant() && stream != stream_id.constantValue())
         continue;

      unsigned num_components = args->shader_info->gs.num_stream_output_components[stream];
      if (stream > 0 && (!num_components || !args->shader_info->so.num_outputs))
         continue;

      memset(ctx.outputs.mask, 0, sizeof(ctx.outputs.mask));

      unsigned BB_if_idx = ctx.block->index;
      Block BB_endif = Block();
      if (!stream_id.isConstant()) {
         /* begin IF */
         Temp cond = bld.sopc(aco_opcode::s_cmp_eq_u32, bld.def(s1, scc), stream_id, Operand(stream));
         append_logical_end(ctx.block);
         ctx.block->kind |= block_kind_uniform;
         bld.branch(aco_opcode::p_cbranch_z, cond);

         BB_endif.kind |= ctx.block->kind & block_kind_top_level;

         ctx.block = ctx.program->create_and_insert_block();
         add_edge(BB_if_idx, ctx.block);
         bld.reset(ctx.block);
         append_logical_start(ctx.block);
      }

      unsigned offset = 0;
      for (unsigned i = 0; i <= VARYING_SLOT_VAR31; ++i) {
         if (args->shader_info->gs.output_streams[i] != stream)
            continue;

         unsigned output_usage_mask = args->shader_info->gs.output_usage_mask[i];
         unsigned length = util_last_bit(output_usage_mask);
         for (unsigned j = 0; j < length; ++j) {
            if (!(output_usage_mask & (1 << j)))
               continue;

            unsigned const_offset = offset * args->shader_info->gs.vertices_out * 16 * 4;
            Temp voffset = vtx_offset;
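            /* The MUBUF immediate offset field is only 12 bits, so fold
             * anything above 4095 into the vector offset.
             */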
            if (const_offset >= 4096u) {
               voffset = bld.vadd32(bld.def(v1), Operand(const_offset / 4096u * 4096u), voffset);
               const_offset %= 4096u;
            }

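            /* Load one dword per enabled component from the GSVS ring;
             * glc/slc (and dlc on GFX10) keep the access coherent with the
             * GS stage's ring writes.
             */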
            aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(aco_opcode::buffer_load_dword, Format::MUBUF, 3, 1)};
            mubuf->definitions[0] = bld.def(v1);
            mubuf->operands[0] = Operand(gsvs_ring);
            mubuf->operands[1] = Operand(voffset);
            mubuf->operands[2] = Operand(0u);
            mubuf->offen = true;
            mubuf->offset = const_offset;
            mubuf->glc = true;
            mubuf->slc = true;
            mubuf->dlc = args->options->chip_class >= GFX10;
            mubuf->barrier = barrier_none;
            mubuf->can_reorder = true;

            ctx.outputs.mask[i] |= 1 << j;
            ctx.outputs.temps[i * 4u + j] = mubuf->definitions[0].getTemp();

            bld.insert(std::move(mubuf));

            offset++;
         }
      }

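      /* Replay streamout and, for stream 0, the VS exports with the values
       * just read back from the ring.
       */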
      if (args->shader_info->so.num_outputs) {
         emit_streamout(&ctx, stream);
         bld.reset(ctx.block);
      }

      if (stream == 0) {
         create_vs_exports(&ctx);
         ctx.block->kind |= block_kind_export_end;
      }

      if (!stream_id.isConstant()) {
         append_logical_end(ctx.block);

         /* branch from then block to endif block */
         bld.branch(aco_opcode::p_branch);
         add_edge(ctx.block->index, &BB_endif);
         ctx.block->kind |= block_kind_uniform;

         /* emit else block */
         ctx.block = ctx.program->create_and_insert_block();
         add_edge(BB_if_idx, ctx.block);
         bld.reset(ctx.block);
         append_logical_start(ctx.block);

         endif_blocks.push(std::move(BB_endif));
      }
   }

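   /* Pop the per-stream guards in reverse order, linking each else block to
    * its endif block and continuing in the merge block.
    */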
   while (!endif_blocks.empty()) {
      Block BB_endif = std::move(endif_blocks.top());
      endif_blocks.pop();

      Block *BB_else = ctx.block;

      append_logical_end(BB_else);
      /* branch from else block to endif block */
      bld.branch(aco_opcode::p_branch);
      add_edge(BB_else->index, &BB_endif);
      BB_else->kind |= block_kind_uniform;

      /* emit endif merge block */
      ctx.block = program->insert_block(std::move(BB_endif));
      bld.reset(ctx.block);
      append_logical_start(ctx.block);
   }

   program->config->float_mode = program->blocks[0].fp_mode.val;

   append_logical_end(ctx.block);
   ctx.block->kind |= block_kind_uniform;
   bld.sopp(aco_opcode::s_endpgm);

   cleanup_cfg(program);
}
}