aco: be more careful combining additions that could wrap into loads/stores
src/amd/compiler/aco_instruction_selection.cpp
1 /*
2 * Copyright © 2018 Valve Corporation
3 * Copyright © 2018 Google
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 *
24 */
25
26 #include <algorithm>
27 #include <array>
28 #include <stack>
29 #include <map>
30
31 #include "ac_shader_util.h"
32 #include "aco_ir.h"
33 #include "aco_builder.h"
34 #include "aco_interface.h"
35 #include "aco_instruction_selection_setup.cpp"
36 #include "util/fast_idiv_by_const.h"
37
38 namespace aco {
39 namespace {
40
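/* RAII helper: the constructor installs the new loop header/exit in the
 * control-flow info and clears the divergence flags, the destructor restores
 * the previous parent-loop and parent-if state, so loops can be visited
 * recursively. */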
41 class loop_info_RAII {
42 isel_context* ctx;
43 unsigned header_idx_old;
44 Block* exit_old;
45 bool divergent_cont_old;
46 bool divergent_branch_old;
47 bool divergent_if_old;
48
49 public:
50 loop_info_RAII(isel_context* ctx, unsigned loop_header_idx, Block* loop_exit)
51 : ctx(ctx),
52 header_idx_old(ctx->cf_info.parent_loop.header_idx), exit_old(ctx->cf_info.parent_loop.exit),
53 divergent_cont_old(ctx->cf_info.parent_loop.has_divergent_continue),
54 divergent_branch_old(ctx->cf_info.parent_loop.has_divergent_branch),
55 divergent_if_old(ctx->cf_info.parent_if.is_divergent)
56 {
57 ctx->cf_info.parent_loop.header_idx = loop_header_idx;
58 ctx->cf_info.parent_loop.exit = loop_exit;
59 ctx->cf_info.parent_loop.has_divergent_continue = false;
60 ctx->cf_info.parent_loop.has_divergent_branch = false;
61 ctx->cf_info.parent_if.is_divergent = false;
62 ctx->cf_info.loop_nest_depth = ctx->cf_info.loop_nest_depth + 1;
63 }
64
65 ~loop_info_RAII()
66 {
67 ctx->cf_info.parent_loop.header_idx = header_idx_old;
68 ctx->cf_info.parent_loop.exit = exit_old;
69 ctx->cf_info.parent_loop.has_divergent_continue = divergent_cont_old;
70 ctx->cf_info.parent_loop.has_divergent_branch = divergent_branch_old;
71 ctx->cf_info.parent_if.is_divergent = divergent_if_old;
72 ctx->cf_info.loop_nest_depth = ctx->cf_info.loop_nest_depth - 1;
73 if (!ctx->cf_info.loop_nest_depth && !ctx->cf_info.parent_if.is_divergent)
74 ctx->cf_info.exec_potentially_empty_discard = false;
75 }
76 };
77
78 struct if_context {
79 Temp cond;
80
81 bool divergent_old;
82 bool exec_potentially_empty_discard_old;
83 bool exec_potentially_empty_break_old;
84 uint16_t exec_potentially_empty_break_depth_old;
85
86 unsigned BB_if_idx;
87 unsigned invert_idx;
88 bool uniform_has_then_branch;
89 bool then_branch_divergent;
90 Block BB_invert;
91 Block BB_endif;
92 };
93
94 static bool visit_cf_list(struct isel_context *ctx,
95 struct exec_list *list);
96
97 static void add_logical_edge(unsigned pred_idx, Block *succ)
98 {
99 succ->logical_preds.emplace_back(pred_idx);
100 }
101
102
103 static void add_linear_edge(unsigned pred_idx, Block *succ)
104 {
105 succ->linear_preds.emplace_back(pred_idx);
106 }
107
108 static void add_edge(unsigned pred_idx, Block *succ)
109 {
110 add_logical_edge(pred_idx, succ);
111 add_linear_edge(pred_idx, succ);
112 }
113
114 static void append_logical_start(Block *b)
115 {
116 Builder(NULL, b).pseudo(aco_opcode::p_logical_start);
117 }
118
119 static void append_logical_end(Block *b)
120 {
121 Builder(NULL, b).pseudo(aco_opcode::p_logical_end);
122 }
123
124 Temp get_ssa_temp(struct isel_context *ctx, nir_ssa_def *def)
125 {
126 assert(ctx->allocated[def->index].id());
127 return ctx->allocated[def->index];
128 }
129
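/* Emits v_mbcnt_{lo,hi}: counts the bits of mask_lo/mask_hi belonging to lanes
 * below the current one. With the default all-ones masks this yields the
 * invocation's lane index within the wave; wave32 only needs the lo half, and
 * GFX6-7 use the VOP2 encoding of v_mbcnt_hi. */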
130 Temp emit_mbcnt(isel_context *ctx, Definition dst,
131 Operand mask_lo = Operand((uint32_t) -1), Operand mask_hi = Operand((uint32_t) -1))
132 {
133 Builder bld(ctx->program, ctx->block);
134 Definition lo_def = ctx->program->wave_size == 32 ? dst : bld.def(v1);
135 Temp thread_id_lo = bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, lo_def, mask_lo, Operand(0u));
136
137 if (ctx->program->wave_size == 32) {
138 return thread_id_lo;
139 } else if (ctx->program->chip_class <= GFX7) {
140 Temp thread_id_hi = bld.vop2(aco_opcode::v_mbcnt_hi_u32_b32, dst, mask_hi, thread_id_lo);
141 return thread_id_hi;
142 } else {
143 Temp thread_id_hi = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32_e64, dst, mask_hi, thread_id_lo);
144 return thread_id_hi;
145 }
146 }
147
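/* Wraps src in a p_wqm pseudo instruction so that, in fragment shaders, the
 * value is computed with whole-quad-mode exec. In other stages this is just a
 * copy. program_needs_wqm additionally marks the whole program as requiring
 * WQM setup. */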
148 Temp emit_wqm(isel_context *ctx, Temp src, Temp dst=Temp(0, s1), bool program_needs_wqm = false)
149 {
150 Builder bld(ctx->program, ctx->block);
151
152 if (!dst.id())
153 dst = bld.tmp(src.regClass());
154
155 assert(src.size() == dst.size());
156
157 if (ctx->stage != fragment_fs) {
158 if (!dst.id())
159 return src;
160
161 bld.copy(Definition(dst), src);
162 return dst;
163 }
164
165 bld.pseudo(aco_opcode::p_wqm, Definition(dst), src);
166 ctx->program->needs_wqm |= program_needs_wqm;
167 return dst;
168 }
169
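/* Cross-lane gather: returns, for every lane, the value of 'data' in the lane
 * selected by 'index'. A uniform index becomes a simple readlane. GFX6-7 have
 * no ds_bpermute, and on GFX10 wave64 the LDS-based bpermute only works within
 * a 32-lane half, so both cases emit a p_bpermute pseudo that is lowered later
 * (the GFX10 path reserves shared VGPRs for the emulation). The index is
 * shifted left by 2 because ds_bpermute addresses lanes in bytes. */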
170 static Temp emit_bpermute(isel_context *ctx, Builder &bld, Temp index, Temp data)
171 {
172 if (index.regClass() == s1)
173 return bld.readlane(bld.def(s1), data, index);
174
175 if (ctx->options->chip_class <= GFX7) {
176 /* GFX6-7: there is no bpermute instruction */
177 Operand index_op(index);
178 Operand input_data(data);
179 index_op.setLateKill(true);
180 input_data.setLateKill(true);
181
182 return bld.pseudo(aco_opcode::p_bpermute, bld.def(v1), bld.def(bld.lm), bld.def(bld.lm, vcc), index_op, input_data);
183 } else if (ctx->options->chip_class >= GFX10 && ctx->program->wave_size == 64) {
184 /* GFX10 wave64 mode: emulate full-wave bpermute */
185 if (!ctx->has_gfx10_wave64_bpermute) {
186 ctx->has_gfx10_wave64_bpermute = true;
187 ctx->program->config->num_shared_vgprs = 8; /* Shared VGPRs are allocated in groups of 8 */
188 ctx->program->vgpr_limit -= 4; /* We allocate 8 shared VGPRs, so we'll have 4 fewer normal VGPRs */
189 }
190
191 Temp index_is_lo = bld.vopc(aco_opcode::v_cmp_ge_u32, bld.def(bld.lm), Operand(31u), index);
192 Builder::Result index_is_lo_split = bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), bld.def(s1), index_is_lo);
193 Temp index_is_lo_n1 = bld.sop1(aco_opcode::s_not_b32, bld.def(s1), bld.def(s1, scc), index_is_lo_split.def(1).getTemp());
194 Operand same_half = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), index_is_lo_split.def(0).getTemp(), index_is_lo_n1);
195 Operand index_x4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), index);
196 Operand input_data(data);
197
198 index_x4.setLateKill(true);
199 input_data.setLateKill(true);
200 same_half.setLateKill(true);
201
202 return bld.pseudo(aco_opcode::p_bpermute, bld.def(v1), bld.def(s2), bld.def(s1, scc), index_x4, input_data, same_half);
203 } else {
204 /* GFX8-9 or GFX10 wave32: bpermute works normally */
205 Temp index_x4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), index);
206 return bld.ds(aco_opcode::ds_bpermute_b32, bld.def(v1), index_x4, data);
207 }
208 }
209
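/* Masked subgroup swizzle: within each group of 32 lanes the source lane is
 * computed as ((lane & and_mask) | or_mask) ^ xor_mask, with the three 5-bit
 * fields packed into 'mask'. On GFX8+ a few common patterns (quad permutes,
 * row rotate by 8, row mirror / half mirror) are emitted as cheaper DPP moves
 * instead of ds_swizzle. */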
210 static Temp emit_masked_swizzle(isel_context *ctx, Builder &bld, Temp src, unsigned mask)
211 {
212 if (ctx->options->chip_class >= GFX8) {
213 unsigned and_mask = mask & 0x1f;
214 unsigned or_mask = (mask >> 5) & 0x1f;
215 unsigned xor_mask = (mask >> 10) & 0x1f;
216
217 uint16_t dpp_ctrl = 0xffff;
218
219 // TODO: we could use DPP8 for some swizzles
220 if (and_mask == 0x1f && or_mask < 4 && xor_mask < 4) {
221 unsigned res[4] = {0, 1, 2, 3};
222 for (unsigned i = 0; i < 4; i++)
223 res[i] = ((res[i] | or_mask) ^ xor_mask) & 0x3;
224 dpp_ctrl = dpp_quad_perm(res[0], res[1], res[2], res[3]);
225 } else if (and_mask == 0x1f && !or_mask && xor_mask == 8) {
226 dpp_ctrl = dpp_row_rr(8);
227 } else if (and_mask == 0x1f && !or_mask && xor_mask == 0xf) {
228 dpp_ctrl = dpp_row_mirror;
229 } else if (and_mask == 0x1f && !or_mask && xor_mask == 0x7) {
230 dpp_ctrl = dpp_row_half_mirror;
231 }
232
233 if (dpp_ctrl != 0xffff)
234 return bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl);
235 }
236
237 return bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, mask, 0, false);
238 }
239
240 Temp as_vgpr(isel_context *ctx, Temp val)
241 {
242 if (val.type() == RegType::sgpr) {
243 Builder bld(ctx->program, ctx->block);
244 return bld.copy(bld.def(RegType::vgpr, val.size()), val);
245 }
246 assert(val.type() == RegType::vgpr);
247 return val;
248 }
249
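/* Unsigned division by the constant b via a precomputed reciprocal:
 * roughly dst = (((a >> pre_shift) + increment) * multiplier) >> (32 + post_shift),
 * with power-of-two divisors handled by a single right shift. */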
250 //assumes a != 0xffffffff
251 void emit_v_div_u32(isel_context *ctx, Temp dst, Temp a, uint32_t b)
252 {
253 assert(b != 0);
254 Builder bld(ctx->program, ctx->block);
255
256 if (util_is_power_of_two_or_zero(b)) {
257 bld.vop2(aco_opcode::v_lshrrev_b32, Definition(dst), Operand((uint32_t)util_logbase2(b)), a);
258 return;
259 }
260
261 util_fast_udiv_info info = util_compute_fast_udiv_info(b, 32, 32);
262
263 assert(info.multiplier <= 0xffffffff);
264
265 bool pre_shift = info.pre_shift != 0;
266 bool increment = info.increment != 0;
267 bool multiply = true;
268 bool post_shift = info.post_shift != 0;
269
270 if (!pre_shift && !increment && !multiply && !post_shift) {
271 bld.vop1(aco_opcode::v_mov_b32, Definition(dst), a);
272 return;
273 }
274
275 Temp pre_shift_dst = a;
276 if (pre_shift) {
277 pre_shift_dst = (increment || multiply || post_shift) ? bld.tmp(v1) : dst;
278 bld.vop2(aco_opcode::v_lshrrev_b32, Definition(pre_shift_dst), Operand((uint32_t)info.pre_shift), a);
279 }
280
281 Temp increment_dst = pre_shift_dst;
282 if (increment) {
283 increment_dst = (post_shift || multiply) ? bld.tmp(v1) : dst;
284 bld.vadd32(Definition(increment_dst), Operand((uint32_t) info.increment), pre_shift_dst);
285 }
286
287 Temp multiply_dst = increment_dst;
288 if (multiply) {
289 multiply_dst = post_shift ? bld.tmp(v1) : dst;
290 bld.vop3(aco_opcode::v_mul_hi_u32, Definition(multiply_dst), increment_dst,
291 bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand((uint32_t)info.multiplier)));
292 }
293
294 if (post_shift) {
295 bld.vop2(aco_opcode::v_lshrrev_b32, Definition(dst), Operand((uint32_t)info.post_shift), multiply_dst);
296 }
297 }
298
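/* Helpers for extracting and splitting vector components. Split results are
 * cached in ctx->allocated_vec, so later extracts of the same vector can be
 * resolved without emitting new instructions. */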
299 void emit_extract_vector(isel_context* ctx, Temp src, uint32_t idx, Temp dst)
300 {
301 Builder bld(ctx->program, ctx->block);
302 bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), src, Operand(idx));
303 }
304
305
306 Temp emit_extract_vector(isel_context* ctx, Temp src, uint32_t idx, RegClass dst_rc)
307 {
308 /* no need to extract the whole vector */
309 if (src.regClass() == dst_rc) {
310 assert(idx == 0);
311 return src;
312 }
313
314 assert(src.bytes() > (idx * dst_rc.bytes()));
315 Builder bld(ctx->program, ctx->block);
316 auto it = ctx->allocated_vec.find(src.id());
317 if (it != ctx->allocated_vec.end() && dst_rc.bytes() == it->second[idx].regClass().bytes()) {
318 if (it->second[idx].regClass() == dst_rc) {
319 return it->second[idx];
320 } else {
321 assert(!dst_rc.is_subdword());
322 assert(dst_rc.type() == RegType::vgpr && it->second[idx].type() == RegType::sgpr);
323 return bld.copy(bld.def(dst_rc), it->second[idx]);
324 }
325 }
326
327 if (dst_rc.is_subdword())
328 src = as_vgpr(ctx, src);
329
330 if (src.bytes() == dst_rc.bytes()) {
331 assert(idx == 0);
332 return bld.copy(bld.def(dst_rc), src);
333 } else {
334 Temp dst = bld.tmp(dst_rc);
335 emit_extract_vector(ctx, src, idx, dst);
336 return dst;
337 }
338 }
339
340 void emit_split_vector(isel_context* ctx, Temp vec_src, unsigned num_components)
341 {
342 if (num_components == 1)
343 return;
344 if (ctx->allocated_vec.find(vec_src.id()) != ctx->allocated_vec.end())
345 return;
346 RegClass rc;
347 if (num_components > vec_src.size()) {
348 if (vec_src.type() == RegType::sgpr) {
349 /* should still help get_alu_src() */
350 emit_split_vector(ctx, vec_src, vec_src.size());
351 return;
352 }
353 /* sub-dword split */
354 rc = RegClass(RegType::vgpr, vec_src.bytes() / num_components).as_subdword();
355 } else {
356 rc = RegClass(vec_src.type(), vec_src.size() / num_components);
357 }
358 aco_ptr<Pseudo_instruction> split{create_instruction<Pseudo_instruction>(aco_opcode::p_split_vector, Format::PSEUDO, 1, num_components)};
359 split->operands[0] = Operand(vec_src);
360 std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
361 for (unsigned i = 0; i < num_components; i++) {
362 elems[i] = {ctx->program->allocateId(), rc};
363 split->definitions[i] = Definition(elems[i]);
364 }
365 ctx->block->instructions.emplace_back(std::move(split));
366 ctx->allocated_vec.emplace(vec_src.id(), elems);
367 }
368
369 /* This vector expansion uses a mask to determine which elements in the new vector
370 * come from the original vector. The other elements are filled with zero. */
371 void expand_vector(isel_context* ctx, Temp vec_src, Temp dst, unsigned num_components, unsigned mask)
372 {
373 emit_split_vector(ctx, vec_src, util_bitcount(mask));
374
375 if (vec_src == dst)
376 return;
377
378 Builder bld(ctx->program, ctx->block);
379 if (num_components == 1) {
380 if (dst.type() == RegType::sgpr)
381 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec_src);
382 else
383 bld.copy(Definition(dst), vec_src);
384 return;
385 }
386
387 unsigned component_size = dst.size() / num_components;
388 std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
389
390 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
391 vec->definitions[0] = Definition(dst);
392 unsigned k = 0;
393 for (unsigned i = 0; i < num_components; i++) {
394 if (mask & (1 << i)) {
395 Temp src = emit_extract_vector(ctx, vec_src, k++, RegClass(vec_src.type(), component_size));
396 if (dst.type() == RegType::sgpr)
397 src = bld.as_uniform(src);
398 vec->operands[i] = Operand(src);
399 } else {
400 vec->operands[i] = Operand(0u);
401 }
402 elems[i] = vec->operands[i].getTemp();
403 }
404 ctx->block->instructions.emplace_back(std::move(vec));
405 ctx->allocated_vec.emplace(dst.id(), elems);
406 }
407
408 /* re-align data from a misaligned small-bit-size load (shift right by 8 * (offset & 3) bits) */
409 void byte_align_scalar(isel_context *ctx, Temp vec, Operand offset, Temp dst)
410 {
411 Builder bld(ctx->program, ctx->block);
412 Operand shift;
413 Temp select = Temp();
414 if (offset.isConstant()) {
415 assert(offset.constantValue() && offset.constantValue() < 4);
416 shift = Operand(offset.constantValue() * 8);
417 } else {
418 /* bit_offset = 8 * (offset & 0x3) */
419 Temp tmp = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), offset, Operand(3u));
420 select = bld.tmp(s1);
421 shift = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.scc(Definition(select)), tmp, Operand(3u));
422 }
423
424 if (vec.size() == 1) {
425 bld.sop2(aco_opcode::s_lshr_b32, Definition(dst), bld.def(s1, scc), vec, shift);
426 } else if (vec.size() == 2) {
427 Temp tmp = dst.size() == 2 ? dst : bld.tmp(s2);
428 bld.sop2(aco_opcode::s_lshr_b64, Definition(tmp), bld.def(s1, scc), vec, shift);
429 if (tmp == dst)
430 emit_split_vector(ctx, dst, 2);
431 else
432 emit_extract_vector(ctx, tmp, 0, dst);
433 } else if (vec.size() == 4) {
434 Temp lo = bld.tmp(s2), hi = bld.tmp(s2);
435 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), vec);
436 hi = bld.pseudo(aco_opcode::p_extract_vector, bld.def(s1), hi, Operand(0u));
437 if (select != Temp())
438 hi = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), hi, Operand(0u), bld.scc(select));
439 lo = bld.sop2(aco_opcode::s_lshr_b64, bld.def(s2), bld.def(s1, scc), lo, shift);
440 Temp mid = bld.tmp(s1);
441 lo = bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), Definition(mid), lo);
442 hi = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), hi, shift);
443 mid = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), hi, mid);
444 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, mid);
445 emit_split_vector(ctx, dst, 2);
446 }
447 }
448
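/* Re-aligns VGPR data that was loaded 'offset' bytes too early. For a
 * non-constant offset this funnels neighbouring dwords through
 * v_alignbyte_b32; for a constant offset the components are simply
 * re-extracted starting at the byte offset (or handled as a scalar shift when
 * dst is an SGPR). */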
449 void byte_align_vector(isel_context *ctx, Temp vec, Operand offset, Temp dst, unsigned component_size)
450 {
451 Builder bld(ctx->program, ctx->block);
452 if (offset.isTemp()) {
453 Temp tmp[4] = {vec, vec, vec, vec};
454
455 if (vec.size() == 4) {
456 tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1), tmp[3] = bld.tmp(v1);
457 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), Definition(tmp[2]), Definition(tmp[3]), vec);
458 } else if (vec.size() == 3) {
459 tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1);
460 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), Definition(tmp[2]), vec);
461 } else if (vec.size() == 2) {
462 tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = tmp[1];
463 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), vec);
464 }
465 for (unsigned i = 0; i < dst.size(); i++)
466 tmp[i] = bld.vop3(aco_opcode::v_alignbyte_b32, bld.def(v1), tmp[i + 1], tmp[i], offset);
467
468 vec = tmp[0];
469 if (dst.size() == 2)
470 vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), tmp[0], tmp[1]);
471
472 offset = Operand(0u);
473 }
474
475 unsigned num_components = dst.bytes() / component_size;
476 if (vec.regClass() == dst.regClass()) {
477 assert(offset.constantValue() == 0);
478 bld.copy(Definition(dst), vec);
479 emit_split_vector(ctx, dst, num_components);
480 return;
481 }
482
483 emit_split_vector(ctx, vec, vec.bytes() / component_size);
484 std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
485 RegClass rc = RegClass(RegType::vgpr, component_size).as_subdword();
486
487 assert(offset.constantValue() % component_size == 0);
488 unsigned skip = offset.constantValue() / component_size;
489 for (unsigned i = 0; i < num_components; i++)
490 elems[i] = emit_extract_vector(ctx, vec, i + skip, rc);
491
492 /* if dst is vgpr - split the src and create a shrunk version according to the mask. */
493 if (dst.type() == RegType::vgpr) {
494 aco_ptr<Pseudo_instruction> create_vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
495 for (unsigned i = 0; i < num_components; i++)
496 create_vec->operands[i] = Operand(elems[i]);
497 create_vec->definitions[0] = Definition(dst);
498 bld.insert(std::move(create_vec));
499
500 /* if dst is sgpr - split the src, but move the original to sgpr. */
501 } else if (skip) {
502 vec = bld.pseudo(aco_opcode::p_as_uniform, bld.def(RegClass(RegType::sgpr, vec.size())), vec);
503 byte_align_scalar(ctx, vec, offset, dst);
504 } else {
505 assert(dst.size() == vec.size());
506 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec);
507 }
508
509 ctx->allocated_vec.emplace(dst.id(), elems);
510 }
511
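/* NIR 1-bit booleans are represented as a lane mask (bld.lm, i.e. s1 or s2
 * depending on the wave size). These two helpers convert between an SCC-style
 * scalar boolean and that per-lane mask representation. */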
512 Temp bool_to_vector_condition(isel_context *ctx, Temp val, Temp dst = Temp(0, s2))
513 {
514 Builder bld(ctx->program, ctx->block);
515 if (!dst.id())
516 dst = bld.tmp(bld.lm);
517
518 assert(val.regClass() == s1);
519 assert(dst.regClass() == bld.lm);
520
521 return bld.sop2(Builder::s_cselect, Definition(dst), Operand((uint32_t) -1), Operand(0u), bld.scc(val));
522 }
523
524 Temp bool_to_scalar_condition(isel_context *ctx, Temp val, Temp dst = Temp(0, s1))
525 {
526 Builder bld(ctx->program, ctx->block);
527 if (!dst.id())
528 dst = bld.tmp(s1);
529
530 assert(val.regClass() == bld.lm);
531 assert(dst.regClass() == s1);
532
533 /* if we're currently in WQM mode, ensure that the source is also computed in WQM */
534 Temp tmp = bld.tmp(s1);
535 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.scc(Definition(tmp)), val, Operand(exec, bld.lm));
536 return emit_wqm(ctx, tmp, dst);
537 }
538
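/* Fetches a NIR ALU source as a Temp, resolving swizzles: identity swizzles
 * return the SSA temp directly, sub-dword SGPR swizzles are handled with
 * s_bfe_u32, and everything else is built by extracting the swizzled
 * components and, for size > 1, re-packing them with p_create_vector. */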
539 Temp get_alu_src(struct isel_context *ctx, nir_alu_src src, unsigned size=1)
540 {
541 if (src.src.ssa->num_components == 1 && src.swizzle[0] == 0 && size == 1)
542 return get_ssa_temp(ctx, src.src.ssa);
543
544 if (src.src.ssa->num_components == size) {
545 bool identity_swizzle = true;
546 for (unsigned i = 0; identity_swizzle && i < size; i++) {
547 if (src.swizzle[i] != i)
548 identity_swizzle = false;
549 }
550 if (identity_swizzle)
551 return get_ssa_temp(ctx, src.src.ssa);
552 }
553
554 Temp vec = get_ssa_temp(ctx, src.src.ssa);
555 unsigned elem_size = vec.bytes() / src.src.ssa->num_components;
556 assert(elem_size > 0);
557 assert(vec.bytes() % elem_size == 0);
558
559 if (elem_size < 4 && vec.type() == RegType::sgpr) {
560 assert(src.src.ssa->bit_size == 8 || src.src.ssa->bit_size == 16);
561 assert(size == 1);
562 unsigned swizzle = src.swizzle[0];
563 if (vec.size() > 1) {
564 assert(src.src.ssa->bit_size == 16);
565 vec = emit_extract_vector(ctx, vec, swizzle / 2, s1);
566 swizzle = swizzle & 1;
567 }
568 if (swizzle == 0)
569 return vec;
570
571 Temp dst{ctx->program->allocateId(), s1};
572 aco_ptr<SOP2_instruction> bfe{create_instruction<SOP2_instruction>(aco_opcode::s_bfe_u32, Format::SOP2, 2, 2)};
573 bfe->operands[0] = Operand(vec);
574 bfe->operands[1] = Operand(uint32_t((src.src.ssa->bit_size << 16) | (src.src.ssa->bit_size * swizzle)));
575 bfe->definitions[0] = Definition(dst);
576 bfe->definitions[1] = Definition(ctx->program->allocateId(), scc, s1);
577 ctx->block->instructions.emplace_back(std::move(bfe));
578 return dst;
579 }
580
581 RegClass elem_rc = elem_size < 4 ? RegClass(vec.type(), elem_size).as_subdword() : RegClass(vec.type(), elem_size / 4);
582 if (size == 1) {
583 return emit_extract_vector(ctx, vec, src.swizzle[0], elem_rc);
584 } else {
585 assert(size <= 4);
586 std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
587 aco_ptr<Pseudo_instruction> vec_instr{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, size, 1)};
588 for (unsigned i = 0; i < size; ++i) {
589 elems[i] = emit_extract_vector(ctx, vec, src.swizzle[i], elem_rc);
590 vec_instr->operands[i] = Operand{elems[i]};
591 }
592 Temp dst{ctx->program->allocateId(), RegClass(vec.type(), elem_size * size / 4)};
593 vec_instr->definitions[0] = Definition(dst);
594 ctx->block->instructions.emplace_back(std::move(vec_instr));
595 ctx->allocated_vec.emplace(dst.id(), elems);
596 return dst;
597 }
598 }
599
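/* Extends a 32-bit address to a 64-bit pointer by appending the driver's
 * constant high address bits; a VGPR address is made uniform with
 * v_readfirstlane first. */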
600 Temp convert_pointer_to_64_bit(isel_context *ctx, Temp ptr)
601 {
602 if (ptr.size() == 2)
603 return ptr;
604 Builder bld(ctx->program, ctx->block);
605 if (ptr.type() == RegType::vgpr)
606 ptr = bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), ptr);
607 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s2),
608 ptr, Operand((unsigned)ctx->options->address32_hi));
609 }
610
611 void emit_sop2_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst, bool writes_scc)
612 {
613 aco_ptr<SOP2_instruction> sop2{create_instruction<SOP2_instruction>(op, Format::SOP2, 2, writes_scc ? 2 : 1)};
614 sop2->operands[0] = Operand(get_alu_src(ctx, instr->src[0]));
615 sop2->operands[1] = Operand(get_alu_src(ctx, instr->src[1]));
616 sop2->definitions[0] = Definition(dst);
617 if (writes_scc)
618 sop2->definitions[1] = Definition(ctx->program->allocateId(), scc, s1);
619 ctx->block->instructions.emplace_back(std::move(sop2));
620 }
621
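/* VOP2 encodings can take at most one SGPR/constant operand and only in src0,
 * so an SGPR src1 is either swapped into src0 (for commutative ops) or copied
 * to a VGPR. With flush_denorms, pre-GFX9 results are additionally multiplied
 * by 1.0 so that denormals get flushed. */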
622 void emit_vop2_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst,
623 bool commutative, bool swap_srcs=false, bool flush_denorms = false)
624 {
625 Builder bld(ctx->program, ctx->block);
626 bld.is_precise = instr->exact;
627
628 Temp src0 = get_alu_src(ctx, instr->src[swap_srcs ? 1 : 0]);
629 Temp src1 = get_alu_src(ctx, instr->src[swap_srcs ? 0 : 1]);
630 if (src1.type() == RegType::sgpr) {
631 if (commutative && src0.type() == RegType::vgpr) {
632 Temp t = src0;
633 src0 = src1;
634 src1 = t;
635 } else {
636 src1 = as_vgpr(ctx, src1);
637 }
638 }
639
640 if (flush_denorms && ctx->program->chip_class < GFX9) {
641 assert(dst.size() == 1);
642 Temp tmp = bld.vop2(op, bld.def(v1), src0, src1);
643 bld.vop2(aco_opcode::v_mul_f32, Definition(dst), Operand(0x3f800000u), tmp);
644 } else {
645 bld.vop2(op, Definition(dst), src0, src1);
646 }
647 }
648
649 void emit_vop2_instruction_logic64(isel_context *ctx, nir_alu_instr *instr,
650 aco_opcode op, Temp dst)
651 {
652 Builder bld(ctx->program, ctx->block);
653 bld.is_precise = instr->exact;
654
655 Temp src0 = get_alu_src(ctx, instr->src[0]);
656 Temp src1 = get_alu_src(ctx, instr->src[1]);
657
658 if (src1.type() == RegType::sgpr) {
659 assert(src0.type() == RegType::vgpr);
660 std::swap(src0, src1);
661 }
662
663 Temp src00 = bld.tmp(src0.type(), 1);
664 Temp src01 = bld.tmp(src0.type(), 1);
665 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
666 Temp src10 = bld.tmp(v1);
667 Temp src11 = bld.tmp(v1);
668 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
669 Temp lo = bld.vop2(op, bld.def(v1), src00, src10);
670 Temp hi = bld.vop2(op, bld.def(v1), src01, src11);
671 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
672 }
673
674 void emit_vop3a_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst,
675 bool flush_denorms = false)
676 {
677 Temp src0 = get_alu_src(ctx, instr->src[0]);
678 Temp src1 = get_alu_src(ctx, instr->src[1]);
679 Temp src2 = get_alu_src(ctx, instr->src[2]);
680
681 /* ensure that the instruction has at most 1 sgpr operand
682 * The optimizer will inline constants for us */
683 if (src0.type() == RegType::sgpr && src1.type() == RegType::sgpr)
684 src0 = as_vgpr(ctx, src0);
685 if (src1.type() == RegType::sgpr && src2.type() == RegType::sgpr)
686 src1 = as_vgpr(ctx, src1);
687 if (src2.type() == RegType::sgpr && src0.type() == RegType::sgpr)
688 src2 = as_vgpr(ctx, src2);
689
690 Builder bld(ctx->program, ctx->block);
691 bld.is_precise = instr->exact;
692 if (flush_denorms && ctx->program->chip_class < GFX9) {
693 assert(dst.size() == 1);
694 Temp tmp = bld.vop3(op, Definition(dst), src0, src1, src2);
695 bld.vop2(aco_opcode::v_mul_f32, Definition(dst), Operand(0x3f800000u), tmp);
696 } else {
697 bld.vop3(op, Definition(dst), src0, src1, src2);
698 }
699 }
700
701 void emit_vop1_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst)
702 {
703 Builder bld(ctx->program, ctx->block);
704 bld.is_precise = instr->exact;
705 if (dst.type() == RegType::sgpr)
706 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
707 bld.vop1(op, bld.def(RegType::vgpr, dst.size()), get_alu_src(ctx, instr->src[0])));
708 else
709 bld.vop1(op, Definition(dst), get_alu_src(ctx, instr->src[0]));
710 }
711
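/* VOPC likewise only allows an SGPR in src0, so when src1 is an SGPR the
 * operands are swapped and the comparison is mirrored (lt<->gt, ge<->le);
 * eq/ne are symmetric and need no opcode change. */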
712 void emit_vopc_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst)
713 {
714 Temp src0 = get_alu_src(ctx, instr->src[0]);
715 Temp src1 = get_alu_src(ctx, instr->src[1]);
716 assert(src0.size() == src1.size());
717
718 aco_ptr<Instruction> vopc;
719 if (src1.type() == RegType::sgpr) {
720 if (src0.type() == RegType::vgpr) {
721 /* to swap the operands, we might also have to change the opcode */
722 switch (op) {
723 case aco_opcode::v_cmp_lt_f16:
724 op = aco_opcode::v_cmp_gt_f16;
725 break;
726 case aco_opcode::v_cmp_ge_f16:
727 op = aco_opcode::v_cmp_le_f16;
728 break;
729 case aco_opcode::v_cmp_lt_i16:
730 op = aco_opcode::v_cmp_gt_i16;
731 break;
732 case aco_opcode::v_cmp_ge_i16:
733 op = aco_opcode::v_cmp_le_i16;
734 break;
735 case aco_opcode::v_cmp_lt_u16:
736 op = aco_opcode::v_cmp_gt_u16;
737 break;
738 case aco_opcode::v_cmp_ge_u16:
739 op = aco_opcode::v_cmp_le_u16;
740 break;
741 case aco_opcode::v_cmp_lt_f32:
742 op = aco_opcode::v_cmp_gt_f32;
743 break;
744 case aco_opcode::v_cmp_ge_f32:
745 op = aco_opcode::v_cmp_le_f32;
746 break;
747 case aco_opcode::v_cmp_lt_i32:
748 op = aco_opcode::v_cmp_gt_i32;
749 break;
750 case aco_opcode::v_cmp_ge_i32:
751 op = aco_opcode::v_cmp_le_i32;
752 break;
753 case aco_opcode::v_cmp_lt_u32:
754 op = aco_opcode::v_cmp_gt_u32;
755 break;
756 case aco_opcode::v_cmp_ge_u32:
757 op = aco_opcode::v_cmp_le_u32;
758 break;
759 case aco_opcode::v_cmp_lt_f64:
760 op = aco_opcode::v_cmp_gt_f64;
761 break;
762 case aco_opcode::v_cmp_ge_f64:
763 op = aco_opcode::v_cmp_le_f64;
764 break;
765 case aco_opcode::v_cmp_lt_i64:
766 op = aco_opcode::v_cmp_gt_i64;
767 break;
768 case aco_opcode::v_cmp_ge_i64:
769 op = aco_opcode::v_cmp_le_i64;
770 break;
771 case aco_opcode::v_cmp_lt_u64:
772 op = aco_opcode::v_cmp_gt_u64;
773 break;
774 case aco_opcode::v_cmp_ge_u64:
775 op = aco_opcode::v_cmp_le_u64;
776 break;
777 default: /* eq and ne are commutative */
778 break;
779 }
780 Temp t = src0;
781 src0 = src1;
782 src1 = t;
783 } else {
784 src1 = as_vgpr(ctx, src1);
785 }
786 }
787
788 Builder bld(ctx->program, ctx->block);
789 bld.vopc(op, bld.hint_vcc(Definition(dst)), src0, src1);
790 }
791
792 void emit_sopc_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst)
793 {
794 Temp src0 = get_alu_src(ctx, instr->src[0]);
795 Temp src1 = get_alu_src(ctx, instr->src[1]);
796 Builder bld(ctx->program, ctx->block);
797
798 assert(dst.regClass() == bld.lm);
799 assert(src0.type() == RegType::sgpr);
800 assert(src1.type() == RegType::sgpr);
801 assert(src0.regClass() == src1.regClass());
802
803 /* Emit the SALU comparison instruction */
804 Temp cmp = bld.sopc(op, bld.scc(bld.def(s1)), src0, src1);
805 /* Turn the result into a per-lane bool */
806 bool_to_vector_condition(ctx, cmp, dst);
807 }
808
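/* Chooses between the scalar (SOPC) and vector (VOPC) form of a comparison:
 * the scalar form is only used when an s_* opcode exists for the bit size and
 * both the result and the sources are uniform. */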
809 void emit_comparison(isel_context *ctx, nir_alu_instr *instr, Temp dst,
810 aco_opcode v16_op, aco_opcode v32_op, aco_opcode v64_op, aco_opcode s32_op = aco_opcode::num_opcodes, aco_opcode s64_op = aco_opcode::num_opcodes)
811 {
812 aco_opcode s_op = instr->src[0].src.ssa->bit_size == 64 ? s64_op : instr->src[0].src.ssa->bit_size == 32 ? s32_op : aco_opcode::num_opcodes;
813 aco_opcode v_op = instr->src[0].src.ssa->bit_size == 64 ? v64_op : instr->src[0].src.ssa->bit_size == 32 ? v32_op : v16_op;
814 bool use_valu = s_op == aco_opcode::num_opcodes ||
815 nir_dest_is_divergent(instr->dest.dest) ||
816 ctx->allocated[instr->src[0].src.ssa->index].type() == RegType::vgpr ||
817 ctx->allocated[instr->src[1].src.ssa->index].type() == RegType::vgpr;
818 aco_opcode op = use_valu ? v_op : s_op;
819 assert(op != aco_opcode::num_opcodes);
820 assert(dst.regClass() == ctx->program->lane_mask);
821
822 if (use_valu)
823 emit_vopc_instruction(ctx, instr, op, dst);
824 else
825 emit_sopc_instruction(ctx, instr, op, dst);
826 }
827
828 void emit_boolean_logic(isel_context *ctx, nir_alu_instr *instr, Builder::WaveSpecificOpcode op, Temp dst)
829 {
830 Builder bld(ctx->program, ctx->block);
831 Temp src0 = get_alu_src(ctx, instr->src[0]);
832 Temp src1 = get_alu_src(ctx, instr->src[1]);
833
834 assert(dst.regClass() == bld.lm);
835 assert(src0.regClass() == bld.lm);
836 assert(src1.regClass() == bld.lm);
837
838 bld.sop2(op, Definition(dst), bld.def(s1, scc), src0, src1);
839 }
840
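/* Lowers nir bcsel: v_cndmask_b32 per dword for VGPR results, s_cselect for
 * uniform selections, and lane-mask arithmetic for divergent 1-bit booleans
 * (see the comment further down). */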
841 void emit_bcsel(isel_context *ctx, nir_alu_instr *instr, Temp dst)
842 {
843 Builder bld(ctx->program, ctx->block);
844 Temp cond = get_alu_src(ctx, instr->src[0]);
845 Temp then = get_alu_src(ctx, instr->src[1]);
846 Temp els = get_alu_src(ctx, instr->src[2]);
847
848 assert(cond.regClass() == bld.lm);
849
850 if (dst.type() == RegType::vgpr) {
851 aco_ptr<Instruction> bcsel;
852 if (dst.size() == 1) {
853 then = as_vgpr(ctx, then);
854 els = as_vgpr(ctx, els);
855
856 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), els, then, cond);
857 } else if (dst.size() == 2) {
858 Temp then_lo = bld.tmp(v1), then_hi = bld.tmp(v1);
859 bld.pseudo(aco_opcode::p_split_vector, Definition(then_lo), Definition(then_hi), then);
860 Temp else_lo = bld.tmp(v1), else_hi = bld.tmp(v1);
861 bld.pseudo(aco_opcode::p_split_vector, Definition(else_lo), Definition(else_hi), els);
862
863 Temp dst0 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_lo, then_lo, cond);
864 Temp dst1 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_hi, then_hi, cond);
865
866 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
867 } else {
868 fprintf(stderr, "Unimplemented NIR instr bit size: ");
869 nir_print_instr(&instr->instr, stderr);
870 fprintf(stderr, "\n");
871 }
872 return;
873 }
874
875 if (instr->dest.dest.ssa.bit_size == 1) {
876 assert(dst.regClass() == bld.lm);
877 assert(then.regClass() == bld.lm);
878 assert(els.regClass() == bld.lm);
879 }
880
881 if (!nir_src_is_divergent(instr->src[0].src)) { /* uniform condition and values in sgpr */
882 if (dst.regClass() == s1 || dst.regClass() == s2) {
883 assert((then.regClass() == s1 || then.regClass() == s2) && els.regClass() == then.regClass());
884 assert(dst.size() == then.size());
885 aco_opcode op = dst.regClass() == s1 ? aco_opcode::s_cselect_b32 : aco_opcode::s_cselect_b64;
886 bld.sop2(op, Definition(dst), then, els, bld.scc(bool_to_scalar_condition(ctx, cond)));
887 } else {
888 fprintf(stderr, "Unimplemented uniform bcsel bit size: ");
889 nir_print_instr(&instr->instr, stderr);
890 fprintf(stderr, "\n");
891 }
892 return;
893 }
894
895 /* divergent boolean bcsel
896 * this implements bcsel on bools: dst = s0 ? s1 : s2
897 * which is lowered to: dst = (s0 & s1) | (~s0 & s2) */
898 assert(instr->dest.dest.ssa.bit_size == 1);
899
900 if (cond.id() != then.id())
901 then = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), cond, then);
902
903 if (cond.id() == els.id())
904 bld.sop1(Builder::s_mov, Definition(dst), then);
905 else
906 bld.sop2(Builder::s_or, Definition(dst), bld.def(s1, scc), then,
907 bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), els, cond));
908 }
909
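/* The 32-bit rcp/rsq/sqrt/log instructions do not handle denormal inputs, so
 * when denormals are enabled the input is pre-scaled by 2^24, the operation
 * applied, and the result rescaled by 'undo' to compensate; v_cmp_class
 * selects the scaled path only for denormal inputs. */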
910 void emit_scaled_op(isel_context *ctx, Builder& bld, Definition dst, Temp val,
911 aco_opcode op, uint32_t undo)
912 {
913 /* multiply by 16777216 to handle denormals */
914 Temp is_denormal = bld.vopc(aco_opcode::v_cmp_class_f32, bld.hint_vcc(bld.def(bld.lm)),
915 as_vgpr(ctx, val), bld.copy(bld.def(v1), Operand((1u << 7) | (1u << 4))));
916 Temp scaled = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x4b800000u), val);
917 scaled = bld.vop1(op, bld.def(v1), scaled);
918 scaled = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(undo), scaled);
919
920 Temp not_scaled = bld.vop1(op, bld.def(v1), val);
921
922 bld.vop2(aco_opcode::v_cndmask_b32, dst, not_scaled, scaled, is_denormal);
923 }
924
925 void emit_rcp(isel_context *ctx, Builder& bld, Definition dst, Temp val)
926 {
927 if (ctx->block->fp_mode.denorm32 == 0) {
928 bld.vop1(aco_opcode::v_rcp_f32, dst, val);
929 return;
930 }
931
932 emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_rcp_f32, 0x4b800000u);
933 }
934
935 void emit_rsq(isel_context *ctx, Builder& bld, Definition dst, Temp val)
936 {
937 if (ctx->block->fp_mode.denorm32 == 0) {
938 bld.vop1(aco_opcode::v_rsq_f32, dst, val);
939 return;
940 }
941
942 emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_rsq_f32, 0x45800000u);
943 }
944
945 void emit_sqrt(isel_context *ctx, Builder& bld, Definition dst, Temp val)
946 {
947 if (ctx->block->fp_mode.denorm32 == 0) {
948 bld.vop1(aco_opcode::v_sqrt_f32, dst, val);
949 return;
950 }
951
952 emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_sqrt_f32, 0x39800000u);
953 }
954
955 void emit_log2(isel_context *ctx, Builder& bld, Definition dst, Temp val)
956 {
957 if (ctx->block->fp_mode.denorm32 == 0) {
958 bld.vop1(aco_opcode::v_log_f32, dst, val);
959 return;
960 }
961
962 emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_log_f32, 0xc1c00000u);
963 }
964
965 Temp emit_trunc_f64(isel_context *ctx, Builder& bld, Definition dst, Temp val)
966 {
967 if (ctx->options->chip_class >= GFX7)
968 return bld.vop1(aco_opcode::v_trunc_f64, Definition(dst), val);
969
970 /* GFX6 doesn't support V_TRUNC_F64, lower it. */
971 /* TODO: create more efficient code! */
972 if (val.type() == RegType::sgpr)
973 val = as_vgpr(ctx, val);
974
975 /* Split the input value. */
976 Temp val_lo = bld.tmp(v1), val_hi = bld.tmp(v1);
977 bld.pseudo(aco_opcode::p_split_vector, Definition(val_lo), Definition(val_hi), val);
978
979 /* Extract the exponent and compute the unbiased value. */
980 Temp exponent = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), val_hi, Operand(20u), Operand(11u));
981 exponent = bld.vsub32(bld.def(v1), exponent, Operand(1023u));
982
983 /* Extract the fractional part. */
984 Temp fract_mask = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(-1u), Operand(0x000fffffu));
985 fract_mask = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), fract_mask, exponent);
986
987 Temp fract_mask_lo = bld.tmp(v1), fract_mask_hi = bld.tmp(v1);
988 bld.pseudo(aco_opcode::p_split_vector, Definition(fract_mask_lo), Definition(fract_mask_hi), fract_mask);
989
990 Temp fract_lo = bld.tmp(v1), fract_hi = bld.tmp(v1);
991 Temp tmp = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), fract_mask_lo);
992 fract_lo = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), val_lo, tmp);
993 tmp = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), fract_mask_hi);
994 fract_hi = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), val_hi, tmp);
995
996 /* Get the sign bit. */
997 Temp sign = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x80000000u), val_hi);
998
999 /* Decide the operation to apply depending on the unbiased exponent. */
1000 Temp exp_lt0 = bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.hint_vcc(bld.def(bld.lm)), exponent, Operand(0u));
1001 Temp dst_lo = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), fract_lo, bld.copy(bld.def(v1), Operand(0u)), exp_lt0);
1002 Temp dst_hi = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), fract_hi, sign, exp_lt0);
1003 Temp exp_gt51 = bld.vopc_e64(aco_opcode::v_cmp_gt_i32, bld.def(s2), exponent, Operand(51u));
1004 dst_lo = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), dst_lo, val_lo, exp_gt51);
1005 dst_hi = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), dst_hi, val_hi, exp_gt51);
1006
1007 return bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst_lo, dst_hi);
1008 }
1009
1010 Temp emit_floor_f64(isel_context *ctx, Builder& bld, Definition dst, Temp val)
1011 {
1012 if (ctx->options->chip_class >= GFX7)
1013 return bld.vop1(aco_opcode::v_floor_f64, Definition(dst), val);
1014
1015 /* GFX6 doesn't support V_FLOOR_F64, lower it (note that it's actually
1016 * lowered at NIR level for precision reasons). */
1017 Temp src0 = as_vgpr(ctx, val);
1018
1019 Temp mask = bld.copy(bld.def(s1), Operand(3u)); /* isnan */
1020 Temp min_val = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(-1u), Operand(0x3fefffffu));
1021
1022 Temp isnan = bld.vopc_e64(aco_opcode::v_cmp_class_f64, bld.hint_vcc(bld.def(bld.lm)), src0, mask);
1023 Temp fract = bld.vop1(aco_opcode::v_fract_f64, bld.def(v2), src0);
1024 Temp min = bld.vop3(aco_opcode::v_min_f64, bld.def(v2), fract, min_val);
1025
1026 Temp then_lo = bld.tmp(v1), then_hi = bld.tmp(v1);
1027 bld.pseudo(aco_opcode::p_split_vector, Definition(then_lo), Definition(then_hi), src0);
1028 Temp else_lo = bld.tmp(v1), else_hi = bld.tmp(v1);
1029 bld.pseudo(aco_opcode::p_split_vector, Definition(else_lo), Definition(else_hi), min);
1030
1031 Temp dst0 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_lo, then_lo, isnan);
1032 Temp dst1 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_hi, then_hi, isnan);
1033
1034 Temp v = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), dst0, dst1);
1035
1036 Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), src0, v);
1037 static_cast<VOP3A_instruction*>(add)->neg[1] = true;
1038
1039 return add->definitions[0].getTemp();
1040 }
1041
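/* Integer width conversion (sign- or zero-extension, or truncation). SGPR
 * sources use s_sext/s_and, GFX8+ VGPR sources use an SDWA v_mov_b32, GFX6-7
 * fall back to v_bfe; 64-bit destinations get their high half from an
 * arithmetic shift (signed) or a constant zero. */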
1042 Temp convert_int(isel_context *ctx, Builder& bld, Temp src, unsigned src_bits, unsigned dst_bits, bool is_signed, Temp dst=Temp()) {
1043 if (!dst.id()) {
1044 if (dst_bits % 32 == 0 || src.type() == RegType::sgpr)
1045 dst = bld.tmp(src.type(), DIV_ROUND_UP(dst_bits, 32u));
1046 else
1047 dst = bld.tmp(RegClass(RegType::vgpr, dst_bits / 8u).as_subdword());
1048 }
1049
1050 if (dst.bytes() == src.bytes() && dst_bits < src_bits)
1051 return bld.copy(Definition(dst), src);
1052 else if (dst.bytes() < src.bytes())
1053 return bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), src, Operand(0u));
1054
1055 Temp tmp = dst;
1056 if (dst_bits == 64)
1057 tmp = src_bits == 32 ? src : bld.tmp(src.type(), 1);
1058
1059 if (tmp == src) {
1060 } else if (src.regClass() == s1) {
1061 if (is_signed)
1062 bld.sop1(src_bits == 8 ? aco_opcode::s_sext_i32_i8 : aco_opcode::s_sext_i32_i16, Definition(tmp), src);
1063 else
1064 bld.sop2(aco_opcode::s_and_b32, Definition(tmp), bld.def(s1, scc), Operand(src_bits == 8 ? 0xFFu : 0xFFFFu), src);
1065 } else if (ctx->options->chip_class >= GFX8) {
1066 assert(src_bits != 8 || src.regClass() == v1b);
1067 assert(src_bits != 16 || src.regClass() == v2b);
1068 aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
1069 sdwa->operands[0] = Operand(src);
1070 sdwa->definitions[0] = Definition(tmp);
1071 if (is_signed)
1072 sdwa->sel[0] = src_bits == 8 ? sdwa_sbyte : sdwa_sword;
1073 else
1074 sdwa->sel[0] = src_bits == 8 ? sdwa_ubyte : sdwa_uword;
1075 sdwa->dst_sel = tmp.bytes() == 2 ? sdwa_uword : sdwa_udword;
1076 bld.insert(std::move(sdwa));
1077 } else {
1078 assert(ctx->options->chip_class == GFX6 || ctx->options->chip_class == GFX7);
1079 aco_opcode opcode = is_signed ? aco_opcode::v_bfe_i32 : aco_opcode::v_bfe_u32;
1080 bld.vop3(opcode, Definition(tmp), src, Operand(0u), Operand(src_bits == 8 ? 8u : 16u));
1081 }
1082
1083 if (dst_bits == 64) {
1084 if (is_signed && dst.regClass() == s2) {
1085 Temp high = bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), tmp, Operand(31u));
1086 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, high);
1087 } else if (is_signed && dst.regClass() == v2) {
1088 Temp high = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), tmp);
1089 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, high);
1090 } else {
1091 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, Operand(0u));
1092 }
1093 }
1094
1095 return dst;
1096 }
1097
1098 void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr)
1099 {
1100 if (!instr->dest.dest.is_ssa) {
1101 fprintf(stderr, "nir alu dst not in ssa: ");
1102 nir_print_instr(&instr->instr, stderr);
1103 fprintf(stderr, "\n");
1104 abort();
1105 }
1106 Builder bld(ctx->program, ctx->block);
1107 bld.is_precise = instr->exact;
1108 Temp dst = get_ssa_temp(ctx, &instr->dest.dest.ssa);
1109 switch(instr->op) {
1110 case nir_op_vec2:
1111 case nir_op_vec3:
1112 case nir_op_vec4: {
1113 std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
1114 unsigned num = instr->dest.dest.ssa.num_components;
1115 for (unsigned i = 0; i < num; ++i)
1116 elems[i] = get_alu_src(ctx, instr->src[i]);
1117
1118 if (instr->dest.dest.ssa.bit_size >= 32 || dst.type() == RegType::vgpr) {
1119 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, instr->dest.dest.ssa.num_components, 1)};
1120 RegClass elem_rc = RegClass::get(RegType::vgpr, instr->dest.dest.ssa.bit_size / 8u);
1121 for (unsigned i = 0; i < num; ++i) {
1122 if (elems[i].type() == RegType::sgpr && elem_rc.is_subdword())
1123 vec->operands[i] = Operand(emit_extract_vector(ctx, elems[i], 0, elem_rc));
1124 else
1125 vec->operands[i] = Operand{elems[i]};
1126 }
1127 vec->definitions[0] = Definition(dst);
1128 ctx->block->instructions.emplace_back(std::move(vec));
1129 ctx->allocated_vec.emplace(dst.id(), elems);
1130 } else {
1131 // TODO: that is a bit suboptimal..
1132 Temp mask = bld.copy(bld.def(s1), Operand((1u << instr->dest.dest.ssa.bit_size) - 1));
1133 for (unsigned i = 0; i < num - 1; ++i)
1134 if (((i+1) * instr->dest.dest.ssa.bit_size) % 32)
1135 elems[i] = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), elems[i], mask);
1136 for (unsigned i = 0; i < num; ++i) {
1137 unsigned bit = i * instr->dest.dest.ssa.bit_size;
1138 if (bit % 32 == 0) {
1139 elems[bit / 32] = elems[i];
1140 } else {
1141 elems[i] = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc),
1142 elems[i], Operand((i * instr->dest.dest.ssa.bit_size) % 32));
1143 elems[bit / 32] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), elems[bit / 32], elems[i]);
1144 }
1145 }
1146 if (dst.size() == 1)
1147 bld.copy(Definition(dst), elems[0]);
1148 else
1149 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), elems[0], elems[1]);
1150 }
1151 break;
1152 }
1153 case nir_op_mov: {
1154 Temp src = get_alu_src(ctx, instr->src[0]);
1155 aco_ptr<Instruction> mov;
1156 if (dst.type() == RegType::sgpr) {
1157 if (src.type() == RegType::vgpr)
1158 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), src);
1159 else if (src.regClass() == s1)
1160 bld.sop1(aco_opcode::s_mov_b32, Definition(dst), src);
1161 else if (src.regClass() == s2)
1162 bld.sop1(aco_opcode::s_mov_b64, Definition(dst), src);
1163 else
1164 unreachable("wrong src register class for nir_op_mov");
1165 } else {
1166 if (dst.regClass() == v1)
1167 bld.vop1(aco_opcode::v_mov_b32, Definition(dst), src);
1168 else if (dst.regClass() == v1b ||
1169 dst.regClass() == v2b ||
1170 dst.regClass() == v2)
1171 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src);
1172 else
1173 unreachable("wrong src register class for nir_op_mov");
1174 }
1175 break;
1176 }
1177 case nir_op_inot: {
1178 Temp src = get_alu_src(ctx, instr->src[0]);
1179 if (instr->dest.dest.ssa.bit_size == 1) {
1180 assert(src.regClass() == bld.lm);
1181 assert(dst.regClass() == bld.lm);
1182 /* Don't use s_andn2 here, this allows the optimizer to make a better decision */
1183 Temp tmp = bld.sop1(Builder::s_not, bld.def(bld.lm), bld.def(s1, scc), src);
1184 bld.sop2(Builder::s_and, Definition(dst), bld.def(s1, scc), tmp, Operand(exec, bld.lm));
1185 } else if (dst.regClass() == v1) {
1186 emit_vop1_instruction(ctx, instr, aco_opcode::v_not_b32, dst);
1187 } else if (dst.regClass() == v2) {
1188 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
1189 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
1190 lo = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), lo);
1191 hi = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), hi);
1192 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
1193 } else if (dst.type() == RegType::sgpr) {
1194 aco_opcode opcode = dst.size() == 1 ? aco_opcode::s_not_b32 : aco_opcode::s_not_b64;
1195 bld.sop1(opcode, Definition(dst), bld.def(s1, scc), src);
1196 } else {
1197 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1198 nir_print_instr(&instr->instr, stderr);
1199 fprintf(stderr, "\n");
1200 }
1201 break;
1202 }
1203 case nir_op_ineg: {
1204 Temp src = get_alu_src(ctx, instr->src[0]);
1205 if (dst.regClass() == v1) {
1206 bld.vsub32(Definition(dst), Operand(0u), Operand(src));
1207 } else if (dst.regClass() == s1) {
1208 bld.sop2(aco_opcode::s_mul_i32, Definition(dst), Operand((uint32_t) -1), src);
1209 } else if (dst.size() == 2) {
1210 Temp src0 = bld.tmp(dst.type(), 1);
1211 Temp src1 = bld.tmp(dst.type(), 1);
1212 bld.pseudo(aco_opcode::p_split_vector, Definition(src0), Definition(src1), src);
1213
1214 if (dst.regClass() == s2) {
1215 Temp carry = bld.tmp(s1);
1216 Temp dst0 = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(carry)), Operand(0u), src0);
1217 Temp dst1 = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.def(s1, scc), Operand(0u), src1, carry);
1218 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
1219 } else {
1220 Temp lower = bld.tmp(v1);
1221 Temp borrow = bld.vsub32(Definition(lower), Operand(0u), src0, true).def(1).getTemp();
1222 Temp upper = bld.vsub32(bld.def(v1), Operand(0u), src1, false, borrow);
1223 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
1224 }
1225 } else {
1226 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1227 nir_print_instr(&instr->instr, stderr);
1228 fprintf(stderr, "\n");
1229 }
1230 break;
1231 }
1232 case nir_op_iabs: {
1233 if (dst.regClass() == s1) {
1234 bld.sop1(aco_opcode::s_abs_i32, Definition(dst), bld.def(s1, scc), get_alu_src(ctx, instr->src[0]));
1235 } else if (dst.regClass() == v1) {
1236 Temp src = get_alu_src(ctx, instr->src[0]);
1237 bld.vop2(aco_opcode::v_max_i32, Definition(dst), src, bld.vsub32(bld.def(v1), Operand(0u), src));
1238 } else {
1239 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1240 nir_print_instr(&instr->instr, stderr);
1241 fprintf(stderr, "\n");
1242 }
1243 break;
1244 }
1245 case nir_op_isign: {
1246 Temp src = get_alu_src(ctx, instr->src[0]);
1247 if (dst.regClass() == s1) {
1248 Temp tmp = bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, scc), src, Operand((uint32_t)-1));
1249 bld.sop2(aco_opcode::s_min_i32, Definition(dst), bld.def(s1, scc), tmp, Operand(1u));
1250 } else if (dst.regClass() == s2) {
1251 Temp neg = bld.sop2(aco_opcode::s_ashr_i64, bld.def(s2), bld.def(s1, scc), src, Operand(63u));
1252 Temp neqz;
1253 if (ctx->program->chip_class >= GFX8)
1254 neqz = bld.sopc(aco_opcode::s_cmp_lg_u64, bld.def(s1, scc), src, Operand(0u));
1255 else
1256 neqz = bld.sop2(aco_opcode::s_or_b64, bld.def(s2), bld.def(s1, scc), src, Operand(0u)).def(1).getTemp();
1257 /* SCC gets zero-extended to 64 bit */
1258 bld.sop2(aco_opcode::s_or_b64, Definition(dst), bld.def(s1, scc), neg, bld.scc(neqz));
1259 } else if (dst.regClass() == v1) {
1260 bld.vop3(aco_opcode::v_med3_i32, Definition(dst), Operand((uint32_t)-1), src, Operand(1u));
1261 } else if (dst.regClass() == v2) {
1262 Temp upper = emit_extract_vector(ctx, src, 1, v1);
1263 Temp neg = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), upper);
1264 Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
1265 Temp lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(1u), neg, gtz);
1266 upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), neg, gtz);
1267 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
1268 } else {
1269 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1270 nir_print_instr(&instr->instr, stderr);
1271 fprintf(stderr, "\n");
1272 }
1273 break;
1274 }
1275 case nir_op_imax: {
1276 if (dst.regClass() == v1) {
1277 emit_vop2_instruction(ctx, instr, aco_opcode::v_max_i32, dst, true);
1278 } else if (dst.regClass() == s1) {
1279 emit_sop2_instruction(ctx, instr, aco_opcode::s_max_i32, dst, true);
1280 } else {
1281 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1282 nir_print_instr(&instr->instr, stderr);
1283 fprintf(stderr, "\n");
1284 }
1285 break;
1286 }
1287 case nir_op_umax: {
1288 if (dst.regClass() == v1) {
1289 emit_vop2_instruction(ctx, instr, aco_opcode::v_max_u32, dst, true);
1290 } else if (dst.regClass() == s1) {
1291 emit_sop2_instruction(ctx, instr, aco_opcode::s_max_u32, dst, true);
1292 } else {
1293 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1294 nir_print_instr(&instr->instr, stderr);
1295 fprintf(stderr, "\n");
1296 }
1297 break;
1298 }
1299 case nir_op_imin: {
1300 if (dst.regClass() == v1) {
1301 emit_vop2_instruction(ctx, instr, aco_opcode::v_min_i32, dst, true);
1302 } else if (dst.regClass() == s1) {
1303 emit_sop2_instruction(ctx, instr, aco_opcode::s_min_i32, dst, true);
1304 } else {
1305 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1306 nir_print_instr(&instr->instr, stderr);
1307 fprintf(stderr, "\n");
1308 }
1309 break;
1310 }
1311 case nir_op_umin: {
1312 if (dst.regClass() == v1) {
1313 emit_vop2_instruction(ctx, instr, aco_opcode::v_min_u32, dst, true);
1314 } else if (dst.regClass() == s1) {
1315 emit_sop2_instruction(ctx, instr, aco_opcode::s_min_u32, dst, true);
1316 } else {
1317 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1318 nir_print_instr(&instr->instr, stderr);
1319 fprintf(stderr, "\n");
1320 }
1321 break;
1322 }
1323 case nir_op_ior: {
1324 if (instr->dest.dest.ssa.bit_size == 1) {
1325 emit_boolean_logic(ctx, instr, Builder::s_or, dst);
1326 } else if (dst.regClass() == v1) {
1327 emit_vop2_instruction(ctx, instr, aco_opcode::v_or_b32, dst, true);
1328 } else if (dst.regClass() == v2) {
1329 emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_or_b32, dst);
1330 } else if (dst.regClass() == s1) {
1331 emit_sop2_instruction(ctx, instr, aco_opcode::s_or_b32, dst, true);
1332 } else if (dst.regClass() == s2) {
1333 emit_sop2_instruction(ctx, instr, aco_opcode::s_or_b64, dst, true);
1334 } else {
1335 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1336 nir_print_instr(&instr->instr, stderr);
1337 fprintf(stderr, "\n");
1338 }
1339 break;
1340 }
1341 case nir_op_iand: {
1342 if (instr->dest.dest.ssa.bit_size == 1) {
1343 emit_boolean_logic(ctx, instr, Builder::s_and, dst);
1344 } else if (dst.regClass() == v1) {
1345 emit_vop2_instruction(ctx, instr, aco_opcode::v_and_b32, dst, true);
1346 } else if (dst.regClass() == v2) {
1347 emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_and_b32, dst);
1348 } else if (dst.regClass() == s1) {
1349 emit_sop2_instruction(ctx, instr, aco_opcode::s_and_b32, dst, true);
1350 } else if (dst.regClass() == s2) {
1351 emit_sop2_instruction(ctx, instr, aco_opcode::s_and_b64, dst, true);
1352 } else {
1353 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1354 nir_print_instr(&instr->instr, stderr);
1355 fprintf(stderr, "\n");
1356 }
1357 break;
1358 }
1359 case nir_op_ixor: {
1360 if (instr->dest.dest.ssa.bit_size == 1) {
1361 emit_boolean_logic(ctx, instr, Builder::s_xor, dst);
1362 } else if (dst.regClass() == v1) {
1363 emit_vop2_instruction(ctx, instr, aco_opcode::v_xor_b32, dst, true);
1364 } else if (dst.regClass() == v2) {
1365 emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_xor_b32, dst);
1366 } else if (dst.regClass() == s1) {
1367 emit_sop2_instruction(ctx, instr, aco_opcode::s_xor_b32, dst, true);
1368 } else if (dst.regClass() == s2) {
1369 emit_sop2_instruction(ctx, instr, aco_opcode::s_xor_b64, dst, true);
1370 } else {
1371 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1372 nir_print_instr(&instr->instr, stderr);
1373 fprintf(stderr, "\n");
1374 }
1375 break;
1376 }
1377 case nir_op_ushr: {
1378 if (dst.regClass() == v1) {
1379 emit_vop2_instruction(ctx, instr, aco_opcode::v_lshrrev_b32, dst, false, true);
1380 } else if (dst.regClass() == v2 && ctx->program->chip_class >= GFX8) {
1381 bld.vop3(aco_opcode::v_lshrrev_b64, Definition(dst),
1382 get_alu_src(ctx, instr->src[1]), get_alu_src(ctx, instr->src[0]));
1383 } else if (dst.regClass() == v2) {
1384 bld.vop3(aco_opcode::v_lshr_b64, Definition(dst),
1385 get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1386 } else if (dst.regClass() == s2) {
1387 emit_sop2_instruction(ctx, instr, aco_opcode::s_lshr_b64, dst, true);
1388 } else if (dst.regClass() == s1) {
1389 emit_sop2_instruction(ctx, instr, aco_opcode::s_lshr_b32, dst, true);
1390 } else {
1391 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1392 nir_print_instr(&instr->instr, stderr);
1393 fprintf(stderr, "\n");
1394 }
1395 break;
1396 }
1397 case nir_op_ishl: {
1398 if (dst.regClass() == v1) {
1399 emit_vop2_instruction(ctx, instr, aco_opcode::v_lshlrev_b32, dst, false, true);
1400 } else if (dst.regClass() == v2 && ctx->program->chip_class >= GFX8) {
1401 bld.vop3(aco_opcode::v_lshlrev_b64, Definition(dst),
1402 get_alu_src(ctx, instr->src[1]), get_alu_src(ctx, instr->src[0]));
1403 } else if (dst.regClass() == v2) {
1404 bld.vop3(aco_opcode::v_lshl_b64, Definition(dst),
1405 get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1406 } else if (dst.regClass() == s1) {
1407 emit_sop2_instruction(ctx, instr, aco_opcode::s_lshl_b32, dst, true);
1408 } else if (dst.regClass() == s2) {
1409 emit_sop2_instruction(ctx, instr, aco_opcode::s_lshl_b64, dst, true);
1410 } else {
1411 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1412 nir_print_instr(&instr->instr, stderr);
1413 fprintf(stderr, "\n");
1414 }
1415 break;
1416 }
1417 case nir_op_ishr: {
1418 if (dst.regClass() == v1) {
1419 emit_vop2_instruction(ctx, instr, aco_opcode::v_ashrrev_i32, dst, false, true);
1420 } else if (dst.regClass() == v2 && ctx->program->chip_class >= GFX8) {
1421 bld.vop3(aco_opcode::v_ashrrev_i64, Definition(dst),
1422 get_alu_src(ctx, instr->src[1]), get_alu_src(ctx, instr->src[0]));
1423 } else if (dst.regClass() == v2) {
1424 bld.vop3(aco_opcode::v_ashr_i64, Definition(dst),
1425 get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1426 } else if (dst.regClass() == s1) {
1427 emit_sop2_instruction(ctx, instr, aco_opcode::s_ashr_i32, dst, true);
1428 } else if (dst.regClass() == s2) {
1429 emit_sop2_instruction(ctx, instr, aco_opcode::s_ashr_i64, dst, true);
1430 } else {
1431 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1432 nir_print_instr(&instr->instr, stderr);
1433 fprintf(stderr, "\n");
1434 }
1435 break;
1436 }
1437 case nir_op_find_lsb: {
1438 Temp src = get_alu_src(ctx, instr->src[0]);
1439 if (src.regClass() == s1) {
1440 bld.sop1(aco_opcode::s_ff1_i32_b32, Definition(dst), src);
1441 } else if (src.regClass() == v1) {
1442 emit_vop1_instruction(ctx, instr, aco_opcode::v_ffbl_b32, dst);
1443 } else if (src.regClass() == s2) {
1444 bld.sop1(aco_opcode::s_ff1_i32_b64, Definition(dst), src);
1445 } else {
1446 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1447 nir_print_instr(&instr->instr, stderr);
1448 fprintf(stderr, "\n");
1449 }
1450 break;
1451 }
1452 case nir_op_ufind_msb:
1453 case nir_op_ifind_msb: {
1454 Temp src = get_alu_src(ctx, instr->src[0]);
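/* s_flbit/v_ffbh count from the MSB (returning -1 for a zero source); convert to an
 * LSB-based bit index via (bits - 1) - msb_rev and use the borrow of that subtraction
 * to select -1 when the source was zero. */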
1455 if (src.regClass() == s1 || src.regClass() == s2) {
1456 aco_opcode op = src.regClass() == s2 ?
1457 (instr->op == nir_op_ufind_msb ? aco_opcode::s_flbit_i32_b64 : aco_opcode::s_flbit_i32_i64) :
1458 (instr->op == nir_op_ufind_msb ? aco_opcode::s_flbit_i32_b32 : aco_opcode::s_flbit_i32);
1459 Temp msb_rev = bld.sop1(op, bld.def(s1), src);
1460
1461 Builder::Result sub = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc),
1462 Operand(src.size() * 32u - 1u), msb_rev);
1463 Temp msb = sub.def(0).getTemp();
1464 Temp carry = sub.def(1).getTemp();
1465
1466 bld.sop2(aco_opcode::s_cselect_b32, Definition(dst), Operand((uint32_t)-1), msb, bld.scc(carry));
1467 } else if (src.regClass() == v1) {
1468 aco_opcode op = instr->op == nir_op_ufind_msb ? aco_opcode::v_ffbh_u32 : aco_opcode::v_ffbh_i32;
1469 Temp msb_rev = bld.tmp(v1);
1470 emit_vop1_instruction(ctx, instr, op, msb_rev);
1471 Temp msb = bld.tmp(v1);
1472 Temp carry = bld.vsub32(Definition(msb), Operand(31u), Operand(msb_rev), true).def(1).getTemp();
1473 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), msb, Operand((uint32_t)-1), carry);
1474 } else {
1475 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1476 nir_print_instr(&instr->instr, stderr);
1477 fprintf(stderr, "\n");
1478 }
1479 break;
1480 }
1481 case nir_op_bitfield_reverse: {
1482 if (dst.regClass() == s1) {
1483 bld.sop1(aco_opcode::s_brev_b32, Definition(dst), get_alu_src(ctx, instr->src[0]));
1484 } else if (dst.regClass() == v1) {
1485 bld.vop1(aco_opcode::v_bfrev_b32, Definition(dst), get_alu_src(ctx, instr->src[0]));
1486 } else {
1487 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1488 nir_print_instr(&instr->instr, stderr);
1489 fprintf(stderr, "\n");
1490 }
1491 break;
1492 }
1493 case nir_op_iadd: {
1494 if (dst.regClass() == s1) {
1495 emit_sop2_instruction(ctx, instr, aco_opcode::s_add_u32, dst, true);
1496 break;
1497 }
1498
1499 Temp src0 = get_alu_src(ctx, instr->src[0]);
1500 Temp src1 = get_alu_src(ctx, instr->src[1]);
1501 if (dst.regClass() == v1) {
1502 bld.vadd32(Definition(dst), Operand(src0), Operand(src1));
1503 break;
1504 }
1505
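/* 64-bit additions are split into 32-bit halves: s_add_u32/s_addc_u32 for SGPRs,
 * vadd32 with carry-out/carry-in for VGPRs. */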
1506 assert(src0.size() == 2 && src1.size() == 2);
1507 Temp src00 = bld.tmp(src0.type(), 1);
1508 Temp src01 = bld.tmp(dst.type(), 1);
1509 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
1510 Temp src10 = bld.tmp(src1.type(), 1);
1511 Temp src11 = bld.tmp(dst.type(), 1);
1512 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
1513
1514 if (dst.regClass() == s2) {
1515 Temp carry = bld.tmp(s1);
1516 Temp dst0 = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), src00, src10);
1517 Temp dst1 = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.def(s1, scc), src01, src11, bld.scc(carry));
1518 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
1519 } else if (dst.regClass() == v2) {
1520 Temp dst0 = bld.tmp(v1);
1521 Temp carry = bld.vadd32(Definition(dst0), src00, src10, true).def(1).getTemp();
1522 Temp dst1 = bld.vadd32(bld.def(v1), src01, src11, false, carry);
1523 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
1524 } else {
1525 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1526 nir_print_instr(&instr->instr, stderr);
1527 fprintf(stderr, "\n");
1528 }
1529 break;
1530 }
1531 case nir_op_uadd_sat: {
1532 Temp src0 = get_alu_src(ctx, instr->src[0]);
1533 Temp src1 = get_alu_src(ctx, instr->src[1]);
1534 if (dst.regClass() == s1) {
1535 Temp tmp = bld.tmp(s1), carry = bld.tmp(s1);
1536 bld.sop2(aco_opcode::s_add_u32, Definition(tmp), bld.scc(Definition(carry)),
1537 src0, src1);
1538 bld.sop2(aco_opcode::s_cselect_b32, Definition(dst), Operand((uint32_t) -1), tmp, bld.scc(carry));
1539 } else if (dst.regClass() == v1) {
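/* GFX9+ saturates directly via the VOP3 clamp bit on v_add_u32; older chips add with
 * carry-out and select 0xffffffff on overflow. */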
1540 if (ctx->options->chip_class >= GFX9) {
1541 aco_ptr<VOP3A_instruction> add{create_instruction<VOP3A_instruction>(aco_opcode::v_add_u32, asVOP3(Format::VOP2), 2, 1)};
1542 add->operands[0] = Operand(src0);
1543 add->operands[1] = Operand(src1);
1544 add->definitions[0] = Definition(dst);
1545 add->clamp = 1;
1546 ctx->block->instructions.emplace_back(std::move(add));
1547 } else {
1548 if (src1.regClass() != v1)
1549 std::swap(src0, src1);
1550 assert(src1.regClass() == v1);
1551 Temp tmp = bld.tmp(v1);
1552 Temp carry = bld.vadd32(Definition(tmp), src0, src1, true).def(1).getTemp();
1553 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), tmp, Operand((uint32_t) -1), carry);
1554 }
1555 } else {
1556 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1557 nir_print_instr(&instr->instr, stderr);
1558 fprintf(stderr, "\n");
1559 }
1560 break;
1561 }
1562 case nir_op_uadd_carry: {
1563 Temp src0 = get_alu_src(ctx, instr->src[0]);
1564 Temp src1 = get_alu_src(ctx, instr->src[1]);
1565 if (dst.regClass() == s1) {
1566 bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(dst)), src0, src1);
1567 break;
1568 }
1569 if (dst.regClass() == v1) {
1570 Temp carry = bld.vadd32(bld.def(v1), src0, src1, true).def(1).getTemp();
1571 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand(1u), carry);
1572 break;
1573 }
1574
1575 Temp src00 = bld.tmp(src0.type(), 1);
1576 Temp src01 = bld.tmp(dst.type(), 1);
1577 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
1578 Temp src10 = bld.tmp(src1.type(), 1);
1579 Temp src11 = bld.tmp(dst.type(), 1);
1580 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
1581 if (dst.regClass() == s2) {
1582 Temp carry = bld.tmp(s1);
1583 bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), src00, src10);
1584 carry = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.scc(bld.def(s1)), src01, src11, bld.scc(carry)).def(1).getTemp();
1585 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), carry, Operand(0u));
1586 } else if (dst.regClass() == v2) {
1587 Temp carry = bld.vadd32(bld.def(v1), src00, src10, true).def(1).getTemp();
1588 carry = bld.vadd32(bld.def(v1), src01, src11, true, carry).def(1).getTemp();
1589 carry = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), Operand(1u), carry);
1590 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), carry, Operand(0u));
1591 } else {
1592 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1593 nir_print_instr(&instr->instr, stderr);
1594 fprintf(stderr, "\n");
1595 }
1596 break;
1597 }
1598 case nir_op_isub: {
1599 if (dst.regClass() == s1) {
1600 emit_sop2_instruction(ctx, instr, aco_opcode::s_sub_i32, dst, true);
1601 break;
1602 }
1603
1604 Temp src0 = get_alu_src(ctx, instr->src[0]);
1605 Temp src1 = get_alu_src(ctx, instr->src[1]);
1606 if (dst.regClass() == v1) {
1607 bld.vsub32(Definition(dst), src0, src1);
1608 break;
1609 }
1610
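/* As with 64-bit iadd, split into two 32-bit halves connected by a borrow chain. */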
1611 Temp src00 = bld.tmp(src0.type(), 1);
1612 Temp src01 = bld.tmp(dst.type(), 1);
1613 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
1614 Temp src10 = bld.tmp(src1.type(), 1);
1615 Temp src11 = bld.tmp(dst.type(), 1);
1616 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
1617 if (dst.regClass() == s2) {
1618 Temp carry = bld.tmp(s1);
1619 Temp dst0 = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(carry)), src00, src10);
1620 Temp dst1 = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.def(s1, scc), src01, src11, bld.scc(carry));
1621 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
1622 } else if (dst.regClass() == v2) {
1623 Temp lower = bld.tmp(v1);
1624 Temp borrow = bld.vsub32(Definition(lower), src00, src10, true).def(1).getTemp();
1625 Temp upper = bld.vsub32(bld.def(v1), src01, src11, false, borrow);
1626 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
1627 } else {
1628 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1629 nir_print_instr(&instr->instr, stderr);
1630 fprintf(stderr, "\n");
1631 }
1632 break;
1633 }
1634 case nir_op_usub_borrow: {
1635 Temp src0 = get_alu_src(ctx, instr->src[0]);
1636 Temp src1 = get_alu_src(ctx, instr->src[1]);
1637 if (dst.regClass() == s1) {
1638 bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(dst)), src0, src1);
1639 break;
1640 } else if (dst.regClass() == v1) {
1641 Temp borrow = bld.vsub32(bld.def(v1), src0, src1, true).def(1).getTemp();
1642 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand(1u), borrow);
1643 break;
1644 }
1645
1646 Temp src00 = bld.tmp(src0.type(), 1);
1647 Temp src01 = bld.tmp(dst.type(), 1);
1648 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
1649 Temp src10 = bld.tmp(src1.type(), 1);
1650 Temp src11 = bld.tmp(dst.type(), 1);
1651 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
1652 if (dst.regClass() == s2) {
1653 Temp borrow = bld.tmp(s1);
1654 bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(borrow)), src00, src10);
1655 borrow = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.scc(bld.def(s1)), src01, src11, bld.scc(borrow)).def(1).getTemp();
1656 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), borrow, Operand(0u));
1657 } else if (dst.regClass() == v2) {
1658 Temp borrow = bld.vsub32(bld.def(v1), src00, src10, true).def(1).getTemp();
1659 borrow = bld.vsub32(bld.def(v1), src01, src11, true, Operand(borrow)).def(1).getTemp();
1660 borrow = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), Operand(1u), borrow);
1661 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), borrow, Operand(0u));
1662 } else {
1663 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1664 nir_print_instr(&instr->instr, stderr);
1665 fprintf(stderr, "\n");
1666 }
1667 break;
1668 }
1669 case nir_op_imul: {
1670 if (dst.regClass() == v1) {
1671 bld.vop3(aco_opcode::v_mul_lo_u32, Definition(dst),
1672 get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1673 } else if (dst.regClass() == s1) {
1674 emit_sop2_instruction(ctx, instr, aco_opcode::s_mul_i32, dst, false);
1675 } else {
1676 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1677 nir_print_instr(&instr->instr, stderr);
1678 fprintf(stderr, "\n");
1679 }
1680 break;
1681 }
1682 case nir_op_umul_high: {
1683 if (dst.regClass() == v1) {
1684 bld.vop3(aco_opcode::v_mul_hi_u32, Definition(dst), get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1685 } else if (dst.regClass() == s1 && ctx->options->chip_class >= GFX9) {
1686 bld.sop2(aco_opcode::s_mul_hi_u32, Definition(dst), get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1687 } else if (dst.regClass() == s1) {
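/* There is no scalar mul_hi before GFX9, so compute it in a VGPR and copy the
 * uniform result back to an SGPR with p_as_uniform. */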
1688 Temp tmp = bld.vop3(aco_opcode::v_mul_hi_u32, bld.def(v1), get_alu_src(ctx, instr->src[0]),
1689 as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
1690 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
1691 } else {
1692 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1693 nir_print_instr(&instr->instr, stderr);
1694 fprintf(stderr, "\n");
1695 }
1696 break;
1697 }
1698 case nir_op_imul_high: {
1699 if (dst.regClass() == v1) {
1700 bld.vop3(aco_opcode::v_mul_hi_i32, Definition(dst), get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1701 } else if (dst.regClass() == s1 && ctx->options->chip_class >= GFX9) {
1702 bld.sop2(aco_opcode::s_mul_hi_i32, Definition(dst), get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1703 } else if (dst.regClass() == s1) {
1704 Temp tmp = bld.vop3(aco_opcode::v_mul_hi_i32, bld.def(v1), get_alu_src(ctx, instr->src[0]),
1705 as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
1706 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
1707 } else {
1708 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1709 nir_print_instr(&instr->instr, stderr);
1710 fprintf(stderr, "\n");
1711 }
1712 break;
1713 }
1714 case nir_op_fmul: {
1715 Temp src0 = get_alu_src(ctx, instr->src[0]);
1716 Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1]));
1717 if (dst.regClass() == v2b) {
1718 emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_f16, dst, true);
1719 } else if (dst.regClass() == v1) {
1720 emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_f32, dst, true);
1721 } else if (dst.regClass() == v2) {
1722 bld.vop3(aco_opcode::v_mul_f64, Definition(dst), src0, src1);
1723 } else {
1724 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1725 nir_print_instr(&instr->instr, stderr);
1726 fprintf(stderr, "\n");
1727 }
1728 break;
1729 }
1730 case nir_op_fadd: {
1731 Temp src0 = get_alu_src(ctx, instr->src[0]);
1732 Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1]));
1733 if (dst.regClass() == v2b) {
1734 emit_vop2_instruction(ctx, instr, aco_opcode::v_add_f16, dst, true);
1735 } else if (dst.regClass() == v1) {
1736 emit_vop2_instruction(ctx, instr, aco_opcode::v_add_f32, dst, true);
1737 } else if (dst.regClass() == v2) {
1738 bld.vop3(aco_opcode::v_add_f64, Definition(dst), src0, src1);
1739 } else {
1740 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1741 nir_print_instr(&instr->instr, stderr);
1742 fprintf(stderr, "\n");
1743 }
1744 break;
1745 }
1746 case nir_op_fsub: {
1747 Temp src0 = get_alu_src(ctx, instr->src[0]);
1748 Temp src1 = get_alu_src(ctx, instr->src[1]);
1749 if (dst.regClass() == v2b) {
1750 if (src1.type() == RegType::vgpr || src0.type() != RegType::vgpr)
1751 emit_vop2_instruction(ctx, instr, aco_opcode::v_sub_f16, dst, false);
1752 else
1753 emit_vop2_instruction(ctx, instr, aco_opcode::v_subrev_f16, dst, true);
1754 } else if (dst.regClass() == v1) {
1755 if (src1.type() == RegType::vgpr || src0.type() != RegType::vgpr)
1756 emit_vop2_instruction(ctx, instr, aco_opcode::v_sub_f32, dst, false);
1757 else
1758 emit_vop2_instruction(ctx, instr, aco_opcode::v_subrev_f32, dst, true);
1759 } else if (dst.regClass() == v2) {
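/* There is no v_sub_f64; emit v_add_f64 and negate the second source with the
 * VOP3 neg modifier instead. */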
1760 Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst),
1761 as_vgpr(ctx, src0), as_vgpr(ctx, src1));
1762 VOP3A_instruction* sub = static_cast<VOP3A_instruction*>(add);
1763 sub->neg[1] = true;
1764 } else {
1765 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1766 nir_print_instr(&instr->instr, stderr);
1767 fprintf(stderr, "\n");
1768 }
1769 break;
1770 }
1771 case nir_op_fmax: {
1772 Temp src0 = get_alu_src(ctx, instr->src[0]);
1773 Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1]));
1774 if (dst.regClass() == v2b) {
1775 // TODO: check fp_mode.must_flush_denorms16_64
1776 emit_vop2_instruction(ctx, instr, aco_opcode::v_max_f16, dst, true);
1777 } else if (dst.regClass() == v1) {
1778 emit_vop2_instruction(ctx, instr, aco_opcode::v_max_f32, dst, true, false, ctx->block->fp_mode.must_flush_denorms32);
1779 } else if (dst.regClass() == v2) {
1780 if (ctx->block->fp_mode.must_flush_denorms16_64 && ctx->program->chip_class < GFX9) {
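/* Pre-GFX9 v_max_f64 apparently does not flush denormal results itself, so
 * multiply by 1.0 afterwards to flush them. */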
1781 Temp tmp = bld.vop3(aco_opcode::v_max_f64, bld.def(v2), src0, src1);
1782 bld.vop3(aco_opcode::v_mul_f64, Definition(dst), Operand(0x3FF0000000000000lu), tmp);
1783 } else {
1784 bld.vop3(aco_opcode::v_max_f64, Definition(dst), src0, src1);
1785 }
1786 } else {
1787 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1788 nir_print_instr(&instr->instr, stderr);
1789 fprintf(stderr, "\n");
1790 }
1791 break;
1792 }
1793 case nir_op_fmin: {
1794 Temp src0 = get_alu_src(ctx, instr->src[0]);
1795 Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1]));
1796 if (dst.regClass() == v2b) {
1797 // TODO: check fp_mode.must_flush_denorms16_64
1798 emit_vop2_instruction(ctx, instr, aco_opcode::v_min_f16, dst, true);
1799 } else if (dst.regClass() == v1) {
1800 emit_vop2_instruction(ctx, instr, aco_opcode::v_min_f32, dst, true, false, ctx->block->fp_mode.must_flush_denorms32);
1801 } else if (dst.regClass() == v2) {
1802 if (ctx->block->fp_mode.must_flush_denorms16_64 && ctx->program->chip_class < GFX9) {
1803 Temp tmp = bld.vop3(aco_opcode::v_min_f64, bld.def(v2), src0, src1);
1804 bld.vop3(aco_opcode::v_mul_f64, Definition(dst), Operand(0x3FF0000000000000lu), tmp);
1805 } else {
1806 bld.vop3(aco_opcode::v_min_f64, Definition(dst), src0, src1);
1807 }
1808 } else {
1809 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1810 nir_print_instr(&instr->instr, stderr);
1811 fprintf(stderr, "\n");
1812 }
1813 break;
1814 }
1815 case nir_op_fmax3: {
1816 if (dst.regClass() == v2b) {
1817 emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_f16, dst, false);
1818 } else if (dst.regClass() == v1) {
1819 emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_f32, dst, ctx->block->fp_mode.must_flush_denorms32);
1820 } else {
1821 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1822 nir_print_instr(&instr->instr, stderr);
1823 fprintf(stderr, "\n");
1824 }
1825 break;
1826 }
1827 case nir_op_fmin3: {
1828 if (dst.regClass() == v2b) {
1829 emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_f16, dst, false);
1830 } else if (dst.regClass() == v1) {
1831 emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_f32, dst, ctx->block->fp_mode.must_flush_denorms32);
1832 } else {
1833 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1834 nir_print_instr(&instr->instr, stderr);
1835 fprintf(stderr, "\n");
1836 }
1837 break;
1838 }
1839 case nir_op_fmed3: {
1840 if (dst.regClass() == v2b) {
1841 emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_f16, dst, false);
1842 } else if (dst.regClass() == v1) {
1843 emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_f32, dst, ctx->block->fp_mode.must_flush_denorms32);
1844 } else {
1845 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1846 nir_print_instr(&instr->instr, stderr);
1847 fprintf(stderr, "\n");
1848 }
1849 break;
1850 }
1851 case nir_op_umax3: {
1852 if (dst.size() == 1) {
1853 emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_u32, dst);
1854 } else {
1855 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1856 nir_print_instr(&instr->instr, stderr);
1857 fprintf(stderr, "\n");
1858 }
1859 break;
1860 }
1861 case nir_op_umin3: {
1862 if (dst.size() == 1) {
1863 emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_u32, dst);
1864 } else {
1865 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1866 nir_print_instr(&instr->instr, stderr);
1867 fprintf(stderr, "\n");
1868 }
1869 break;
1870 }
1871 case nir_op_umed3: {
1872 if (dst.size() == 1) {
1873 emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_u32, dst);
1874 } else {
1875 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1876 nir_print_instr(&instr->instr, stderr);
1877 fprintf(stderr, "\n");
1878 }
1879 break;
1880 }
1881 case nir_op_imax3: {
1882 if (dst.size() == 1) {
1883 emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_i32, dst);
1884 } else {
1885 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1886 nir_print_instr(&instr->instr, stderr);
1887 fprintf(stderr, "\n");
1888 }
1889 break;
1890 }
1891 case nir_op_imin3: {
1892 if (dst.size() == 1) {
1893 emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_i32, dst);
1894 } else {
1895 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1896 nir_print_instr(&instr->instr, stderr);
1897 fprintf(stderr, "\n");
1898 }
1899 break;
1900 }
1901 case nir_op_imed3: {
1902 if (dst.size() == 1) {
1903 emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_i32, dst);
1904 } else {
1905 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1906 nir_print_instr(&instr->instr, stderr);
1907 fprintf(stderr, "\n");
1908 }
1909 break;
1910 }
1911 case nir_op_cube_face_coord: {
1912 Temp in = get_alu_src(ctx, instr->src[0], 3);
1913 Temp src[3] = { emit_extract_vector(ctx, in, 0, v1),
1914 emit_extract_vector(ctx, in, 1, v1),
1915 emit_extract_vector(ctx, in, 2, v1) };
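/* v_cubema_f32 returns 2*majorAxis, so sc/tc * rcp(ma) + 0.5 maps the face
 * coordinates into [0, 1]. */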
1916 Temp ma = bld.vop3(aco_opcode::v_cubema_f32, bld.def(v1), src[0], src[1], src[2]);
1917 ma = bld.vop1(aco_opcode::v_rcp_f32, bld.def(v1), ma);
1918 Temp sc = bld.vop3(aco_opcode::v_cubesc_f32, bld.def(v1), src[0], src[1], src[2]);
1919 Temp tc = bld.vop3(aco_opcode::v_cubetc_f32, bld.def(v1), src[0], src[1], src[2]);
1920 sc = bld.vop2(aco_opcode::v_madak_f32, bld.def(v1), sc, ma, Operand(0x3f000000u/*0.5*/));
1921 tc = bld.vop2(aco_opcode::v_madak_f32, bld.def(v1), tc, ma, Operand(0x3f000000u/*0.5*/));
1922 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), sc, tc);
1923 break;
1924 }
1925 case nir_op_cube_face_index: {
1926 Temp in = get_alu_src(ctx, instr->src[0], 3);
1927 Temp src[3] = { emit_extract_vector(ctx, in, 0, v1),
1928 emit_extract_vector(ctx, in, 1, v1),
1929 emit_extract_vector(ctx, in, 2, v1) };
1930 bld.vop3(aco_opcode::v_cubeid_f32, Definition(dst), src[0], src[1], src[2]);
1931 break;
1932 }
1933 case nir_op_bcsel: {
1934 emit_bcsel(ctx, instr, dst);
1935 break;
1936 }
1937 case nir_op_frsq: {
1938 Temp src = get_alu_src(ctx, instr->src[0]);
1939 if (dst.regClass() == v2b) {
1940 emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f16, dst);
1941 } else if (dst.regClass() == v1) {
1942 emit_rsq(ctx, bld, Definition(dst), src);
1943 } else if (dst.regClass() == v2) {
1944 /* Lowered at NIR level for precision reasons. */
1945 emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f64, dst);
1946 } else {
1947 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1948 nir_print_instr(&instr->instr, stderr);
1949 fprintf(stderr, "\n");
1950 }
1951 break;
1952 }
1953 case nir_op_fneg: {
1954 Temp src = get_alu_src(ctx, instr->src[0]);
1955 if (dst.regClass() == v2b) {
1956 if (ctx->block->fp_mode.must_flush_denorms16_64)
1957 src = bld.vop2(aco_opcode::v_mul_f16, bld.def(v2b), Operand((uint16_t)0x3C00), as_vgpr(ctx, src));
1958 bld.vop2(aco_opcode::v_xor_b32, Definition(dst), Operand(0x8000u), as_vgpr(ctx, src));
1959 } else if (dst.regClass() == v1) {
1960 if (ctx->block->fp_mode.must_flush_denorms32)
1961 src = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x3f800000u), as_vgpr(ctx, src));
1962 bld.vop2(aco_opcode::v_xor_b32, Definition(dst), Operand(0x80000000u), as_vgpr(ctx, src));
1963 } else if (dst.regClass() == v2) {
1964 if (ctx->block->fp_mode.must_flush_denorms16_64)
1965 src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand(0x3FF0000000000000lu), as_vgpr(ctx, src));
1966 Temp upper = bld.tmp(v1), lower = bld.tmp(v1);
1967 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
1968 upper = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), Operand(0x80000000u), upper);
1969 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
1970 } else {
1971 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1972 nir_print_instr(&instr->instr, stderr);
1973 fprintf(stderr, "\n");
1974 }
1975 break;
1976 }
1977 case nir_op_fabs: {
1978 Temp src = get_alu_src(ctx, instr->src[0]);
1979 if (dst.regClass() == v2b) {
1980 if (ctx->block->fp_mode.must_flush_denorms16_64)
1981 src = bld.vop2(aco_opcode::v_mul_f16, bld.def(v2b), Operand((uint16_t)0x3C00), as_vgpr(ctx, src));
1982 bld.vop2(aco_opcode::v_and_b32, Definition(dst), Operand(0x7FFFu), as_vgpr(ctx, src));
1983 } else if (dst.regClass() == v1) {
1984 if (ctx->block->fp_mode.must_flush_denorms32)
1985 src = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x3f800000u), as_vgpr(ctx, src));
1986 bld.vop2(aco_opcode::v_and_b32, Definition(dst), Operand(0x7FFFFFFFu), as_vgpr(ctx, src));
1987 } else if (dst.regClass() == v2) {
1988 if (ctx->block->fp_mode.must_flush_denorms16_64)
1989 src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand(0x3FF0000000000000lu), as_vgpr(ctx, src));
1990 Temp upper = bld.tmp(v1), lower = bld.tmp(v1);
1991 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
1992 upper = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7FFFFFFFu), upper);
1993 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
1994 } else {
1995 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1996 nir_print_instr(&instr->instr, stderr);
1997 fprintf(stderr, "\n");
1998 }
1999 break;
2000 }
2001 case nir_op_fsat: {
2002 Temp src = get_alu_src(ctx, instr->src[0]);
2003 if (dst.regClass() == v2b) {
2004 bld.vop3(aco_opcode::v_med3_f16, Definition(dst), Operand((uint16_t)0u), Operand((uint16_t)0x3c00), src);
2005 } else if (dst.regClass() == v1) {
2006 bld.vop3(aco_opcode::v_med3_f32, Definition(dst), Operand(0u), Operand(0x3f800000u), src);
2007 /* apparently, it is not necessary to flush denorms if this instruction is used with these operands */
2008 // TODO: confirm that this holds under any circumstances
2009 } else if (dst.regClass() == v2) {
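/* v_add_f64 with +0 and the VOP3 clamp bit clamps the result to [0, 1]. */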
2010 Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), src, Operand(0u));
2011 VOP3A_instruction* vop3 = static_cast<VOP3A_instruction*>(add);
2012 vop3->clamp = true;
2013 } else {
2014 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2015 nir_print_instr(&instr->instr, stderr);
2016 fprintf(stderr, "\n");
2017 }
2018 break;
2019 }
2020 case nir_op_flog2: {
2021 Temp src = get_alu_src(ctx, instr->src[0]);
2022 if (dst.regClass() == v2b) {
2023 emit_vop1_instruction(ctx, instr, aco_opcode::v_log_f16, dst);
2024 } else if (dst.regClass() == v1) {
2025 emit_log2(ctx, bld, Definition(dst), src);
2026 } else {
2027 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2028 nir_print_instr(&instr->instr, stderr);
2029 fprintf(stderr, "\n");
2030 }
2031 break;
2032 }
2033 case nir_op_frcp: {
2034 Temp src = get_alu_src(ctx, instr->src[0]);
2035 if (dst.regClass() == v2b) {
2036 emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f16, dst);
2037 } else if (dst.regClass() == v1) {
2038 emit_rcp(ctx, bld, Definition(dst), src);
2039 } else if (dst.regClass() == v2) {
2040 /* Lowered at NIR level for precision reasons. */
2041 emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f64, dst);
2042 } else {
2043 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2044 nir_print_instr(&instr->instr, stderr);
2045 fprintf(stderr, "\n");
2046 }
2047 break;
2048 }
2049 case nir_op_fexp2: {
2050 if (dst.regClass() == v2b) {
2051 emit_vop1_instruction(ctx, instr, aco_opcode::v_exp_f16, dst);
2052 } else if (dst.regClass() == v1) {
2053 emit_vop1_instruction(ctx, instr, aco_opcode::v_exp_f32, dst);
2054 } else {
2055 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2056 nir_print_instr(&instr->instr, stderr);
2057 fprintf(stderr, "\n");
2058 }
2059 break;
2060 }
2061 case nir_op_fsqrt: {
2062 Temp src = get_alu_src(ctx, instr->src[0]);
2063 if (dst.regClass() == v2b) {
2064 emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f16, dst);
2065 } else if (dst.regClass() == v1) {
2066 emit_sqrt(ctx, bld, Definition(dst), src);
2067 } else if (dst.regClass() == v2) {
2068 /* Lowered at NIR level for precision reasons. */
2069 emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f64, dst);
2070 } else {
2071 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2072 nir_print_instr(&instr->instr, stderr);
2073 fprintf(stderr, "\n");
2074 }
2075 break;
2076 }
2077 case nir_op_ffract: {
2078 if (dst.regClass() == v2b) {
2079 emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f16, dst);
2080 } else if (dst.regClass() == v1) {
2081 emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f32, dst);
2082 } else if (dst.regClass() == v2) {
2083 emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f64, dst);
2084 } else {
2085 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2086 nir_print_instr(&instr->instr, stderr);
2087 fprintf(stderr, "\n");
2088 }
2089 break;
2090 }
2091 case nir_op_ffloor: {
2092 Temp src = get_alu_src(ctx, instr->src[0]);
2093 if (dst.regClass() == v2b) {
2094 emit_vop1_instruction(ctx, instr, aco_opcode::v_floor_f16, dst);
2095 } else if (dst.regClass() == v1) {
2096 emit_vop1_instruction(ctx, instr, aco_opcode::v_floor_f32, dst);
2097 } else if (dst.regClass() == v2) {
2098 emit_floor_f64(ctx, bld, Definition(dst), src);
2099 } else {
2100 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2101 nir_print_instr(&instr->instr, stderr);
2102 fprintf(stderr, "\n");
2103 }
2104 break;
2105 }
2106 case nir_op_fceil: {
2107 Temp src0 = get_alu_src(ctx, instr->src[0]);
2108 if (dst.regClass() == v2b) {
2109 emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f16, dst);
2110 } else if (dst.regClass() == v1) {
2111 emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f32, dst);
2112 } else if (dst.regClass() == v2) {
2113 if (ctx->options->chip_class >= GFX7) {
2114 emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f64, dst);
2115 } else {
2116 /* GFX6 doesn't support V_CEIL_F64, lower it. */
2117 /* trunc = trunc(src0)
2118 * if (src0 > 0.0 && src0 != trunc)
2119 * trunc += 1.0
2120 */
2121 Temp trunc = emit_trunc_f64(ctx, bld, bld.def(v2), src0);
2122 Temp tmp0 = bld.vopc_e64(aco_opcode::v_cmp_gt_f64, bld.def(bld.lm), src0, Operand(0u));
2123 Temp tmp1 = bld.vopc(aco_opcode::v_cmp_lg_f64, bld.hint_vcc(bld.def(bld.lm)), src0, trunc);
2124 Temp cond = bld.sop2(aco_opcode::s_and_b64, bld.hint_vcc(bld.def(s2)), bld.def(s1, scc), tmp0, tmp1);
2125 Temp add = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), bld.copy(bld.def(v1), Operand(0u)), bld.copy(bld.def(v1), Operand(0x3ff00000u)), cond);
2126 add = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), bld.copy(bld.def(v1), Operand(0u)), add);
2127 bld.vop3(aco_opcode::v_add_f64, Definition(dst), trunc, add);
2128 }
2129 } else {
2130 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2131 nir_print_instr(&instr->instr, stderr);
2132 fprintf(stderr, "\n");
2133 }
2134 break;
2135 }
2136 case nir_op_ftrunc: {
2137 Temp src = get_alu_src(ctx, instr->src[0]);
2138 if (dst.regClass() == v2b) {
2139 emit_vop1_instruction(ctx, instr, aco_opcode::v_trunc_f16, dst);
2140 } else if (dst.regClass() == v1) {
2141 emit_vop1_instruction(ctx, instr, aco_opcode::v_trunc_f32, dst);
2142 } else if (dst.regClass() == v2) {
2143 emit_trunc_f64(ctx, bld, Definition(dst), src);
2144 } else {
2145 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2146 nir_print_instr(&instr->instr, stderr);
2147 fprintf(stderr, "\n");
2148 }
2149 break;
2150 }
2151 case nir_op_fround_even: {
2152 Temp src0 = get_alu_src(ctx, instr->src[0]);
2153 if (dst.regClass() == v2b) {
2154 emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f16, dst);
2155 } else if (dst.regClass() == v1) {
2156 emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f32, dst);
2157 } else if (dst.regClass() == v2) {
2158 if (ctx->options->chip_class >= GFX7) {
2159 emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f64, dst);
2160 } else {
2161 /* GFX6 doesn't support V_RNDNE_F64, lower it. */
2162 Temp src0_lo = bld.tmp(v1), src0_hi = bld.tmp(v1);
2163 bld.pseudo(aco_opcode::p_split_vector, Definition(src0_lo), Definition(src0_hi), src0);
2164
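/* Round to nearest even via the 2^52 trick: v_bfi_b32 builds +/-2^52 with the sign of
 * src0 (0x43300000 is the high dword of 2^52), adding and subtracting it forces the FPU
 * to round, and sources whose magnitude exceeds the largest double below 2^52 are
 * already integers and are selected unchanged. */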
2165 Temp bitmask = bld.sop1(aco_opcode::s_brev_b32, bld.def(s1), bld.copy(bld.def(s1), Operand(-2u)));
2166 Temp bfi = bld.vop3(aco_opcode::v_bfi_b32, bld.def(v1), bitmask, bld.copy(bld.def(v1), Operand(0x43300000u)), as_vgpr(ctx, src0_hi));
2167 Temp tmp = bld.vop3(aco_opcode::v_add_f64, bld.def(v2), src0, bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(0u), bfi));
2168 Instruction *sub = bld.vop3(aco_opcode::v_add_f64, bld.def(v2), tmp, bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(0u), bfi));
2169 static_cast<VOP3A_instruction*>(sub)->neg[1] = true;
2170 tmp = sub->definitions[0].getTemp();
2171
2172 Temp v = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(-1u), Operand(0x432fffffu));
2173 Instruction* vop3 = bld.vopc_e64(aco_opcode::v_cmp_gt_f64, bld.hint_vcc(bld.def(bld.lm)), src0, v);
2174 static_cast<VOP3A_instruction*>(vop3)->abs[0] = true;
2175 Temp cond = vop3->definitions[0].getTemp();
2176
2177 Temp tmp_lo = bld.tmp(v1), tmp_hi = bld.tmp(v1);
2178 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp_lo), Definition(tmp_hi), tmp);
2179 Temp dst0 = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp_lo, as_vgpr(ctx, src0_lo), cond);
2180 Temp dst1 = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp_hi, as_vgpr(ctx, src0_hi), cond);
2181
2182 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
2183 }
2184 } else {
2185 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2186 nir_print_instr(&instr->instr, stderr);
2187 fprintf(stderr, "\n");
2188 }
2189 break;
2190 }
2191 case nir_op_fsin:
2192 case nir_op_fcos: {
2193 Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0]));
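/* The hardware sin/cos expect the angle pre-scaled by 1/(2*pi); 0x3118 (f16) and
 * 0x3e22f983 (f32) both encode ~0.15915494 despite the half_pi name below. */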
2195 if (dst.regClass() == v2b) {
2196 Temp half_pi = bld.copy(bld.def(s1), Operand(0x3118u));
2197 Temp tmp = bld.vop2(aco_opcode::v_mul_f16, bld.def(v1), half_pi, src);
2198 aco_opcode opcode = instr->op == nir_op_fsin ? aco_opcode::v_sin_f16 : aco_opcode::v_cos_f16;
2199 bld.vop1(opcode, Definition(dst), tmp);
2200 } else if (dst.regClass() == v1) {
2201 Temp half_pi = bld.copy(bld.def(s1), Operand(0x3e22f983u));
2202 Temp tmp = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), half_pi, src);
2203
2204 /* before GFX9, v_sin_f32 and v_cos_f32 had a valid input domain of [-256, +256] */
2205 if (ctx->options->chip_class < GFX9)
2206 tmp = bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), tmp);
2207
2208 aco_opcode opcode = instr->op == nir_op_fsin ? aco_opcode::v_sin_f32 : aco_opcode::v_cos_f32;
2209 bld.vop1(opcode, Definition(dst), tmp);
2210 } else {
2211 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2212 nir_print_instr(&instr->instr, stderr);
2213 fprintf(stderr, "\n");
2214 }
2215 break;
2216 }
2217 case nir_op_ldexp: {
2218 Temp src0 = get_alu_src(ctx, instr->src[0]);
2219 Temp src1 = get_alu_src(ctx, instr->src[1]);
2220 if (dst.regClass() == v2b) {
2221 emit_vop2_instruction(ctx, instr, aco_opcode::v_ldexp_f16, dst, false);
2222 } else if (dst.regClass() == v1) {
2223 bld.vop3(aco_opcode::v_ldexp_f32, Definition(dst), as_vgpr(ctx, src0), src1);
2224 } else if (dst.regClass() == v2) {
2225 bld.vop3(aco_opcode::v_ldexp_f64, Definition(dst), as_vgpr(ctx, src0), src1);
2226 } else {
2227 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2228 nir_print_instr(&instr->instr, stderr);
2229 fprintf(stderr, "\n");
2230 }
2231 break;
2232 }
2233 case nir_op_frexp_sig: {
2234 Temp src = get_alu_src(ctx, instr->src[0]);
2235 if (dst.regClass() == v2b) {
2236 bld.vop1(aco_opcode::v_frexp_mant_f16, Definition(dst), src);
2237 } else if (dst.regClass() == v1) {
2238 bld.vop1(aco_opcode::v_frexp_mant_f32, Definition(dst), src);
2239 } else if (dst.regClass() == v2) {
2240 bld.vop1(aco_opcode::v_frexp_mant_f64, Definition(dst), src);
2241 } else {
2242 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2243 nir_print_instr(&instr->instr, stderr);
2244 fprintf(stderr, "\n");
2245 }
2246 break;
2247 }
2248 case nir_op_frexp_exp: {
2249 Temp src = get_alu_src(ctx, instr->src[0]);
2250 if (instr->src[0].src.ssa->bit_size == 16) {
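/* The f16 exponent fits in a byte: extract the low byte of the i16 result and
 * sign-extend it to 32 bits. */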
2251 Temp tmp = bld.vop1(aco_opcode::v_frexp_exp_i16_f16, bld.def(v1), src);
2252 tmp = bld.pseudo(aco_opcode::p_extract_vector, bld.def(v1b), tmp, Operand(0u));
2253 convert_int(ctx, bld, tmp, 8, 32, true, dst);
2254 } else if (instr->src[0].src.ssa->bit_size == 32) {
2255 bld.vop1(aco_opcode::v_frexp_exp_i32_f32, Definition(dst), src);
2256 } else if (instr->src[0].src.ssa->bit_size == 64) {
2257 bld.vop1(aco_opcode::v_frexp_exp_i32_f64, Definition(dst), src);
2258 } else {
2259 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2260 nir_print_instr(&instr->instr, stderr);
2261 fprintf(stderr, "\n");
2262 }
2263 break;
2264 }
2265 case nir_op_fsign: {
2266 Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0]));
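/* Two selects: strictly positive values become +1.0, then negative values become -1.0,
 * leaving +/-0 unchanged. */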
2267 if (dst.regClass() == v2b) {
2268 Temp one = bld.copy(bld.def(v1), Operand(0x3c00u));
2269 Temp minus_one = bld.copy(bld.def(v1), Operand(0xbc00u));
2270 Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f16, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2271 src = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), one, src, cond);
2272 cond = bld.vopc(aco_opcode::v_cmp_le_f16, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2273 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), minus_one, src, cond);
2274 } else if (dst.regClass() == v1) {
2275 Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2276 src = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0x3f800000u), src, cond);
2277 cond = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2278 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0xbf800000u), src, cond);
2279 } else if (dst.regClass() == v2) {
2280 Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2281 Temp tmp = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0x3FF00000u));
2282 Temp upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, emit_extract_vector(ctx, src, 1, v1), cond);
2283
2284 cond = bld.vopc(aco_opcode::v_cmp_le_f64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2285 tmp = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0xBFF00000u));
2286 upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, upper, cond);
2287
2288 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), Operand(0u), upper);
2289 } else {
2290 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2291 nir_print_instr(&instr->instr, stderr);
2292 fprintf(stderr, "\n");
2293 }
2294 break;
2295 }
2296 case nir_op_f2f16:
2297 case nir_op_f2f16_rtne: {
2298 Temp src = get_alu_src(ctx, instr->src[0]);
2299 if (instr->src[0].src.ssa->bit_size == 64)
2300 src = bld.vop1(aco_opcode::v_cvt_f32_f64, bld.def(v1), src);
2301 if (instr->op == nir_op_f2f16_rtne && ctx->block->fp_mode.round16_64 != fp_round_ne)
2302 /* We emit s_round_mode/s_setreg_imm32 in lower_to_hw_instr to
2303 * keep value numbering and the scheduler simpler.
2304 */
2305 bld.vop1(aco_opcode::p_cvt_f16_f32_rtne, Definition(dst), src);
2306 else
2307 bld.vop1(aco_opcode::v_cvt_f16_f32, Definition(dst), src);
2308 break;
2309 }
2310 case nir_op_f2f16_rtz: {
2311 Temp src = get_alu_src(ctx, instr->src[0]);
2312 if (instr->src[0].src.ssa->bit_size == 64)
2313 src = bld.vop1(aco_opcode::v_cvt_f32_f64, bld.def(v1), src);
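/* v_cvt_pkrtz_f16_f32 always rounds toward zero; only the low half of the packed
 * result is needed. */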
2314 bld.vop3(aco_opcode::v_cvt_pkrtz_f16_f32, Definition(dst), src, Operand(0u));
2315 break;
2316 }
2317 case nir_op_f2f32: {
2318 if (instr->src[0].src.ssa->bit_size == 16) {
2319 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f16, dst);
2320 } else if (instr->src[0].src.ssa->bit_size == 64) {
2321 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f64, dst);
2322 } else {
2323 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2324 nir_print_instr(&instr->instr, stderr);
2325 fprintf(stderr, "\n");
2326 }
2327 break;
2328 }
2329 case nir_op_f2f64: {
2330 Temp src = get_alu_src(ctx, instr->src[0]);
2331 if (instr