/*
 * Copyright © 2018 Valve Corporation
 * Copyright © 2018 Google
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <algorithm>
#include <array>
#include <stack>
#include <map>

#include "ac_shader_util.h"
#include "aco_ir.h"
#include "aco_builder.h"
#include "aco_interface.h"
#include "aco_instruction_selection_setup.cpp"
#include "util/fast_idiv_by_const.h"

namespace aco {
namespace {

class loop_info_RAII {
   isel_context* ctx;
   unsigned header_idx_old;
   Block* exit_old;
   bool divergent_cont_old;
   bool divergent_branch_old;
   bool divergent_if_old;

public:
   loop_info_RAII(isel_context* ctx, unsigned loop_header_idx, Block* loop_exit)
      : ctx(ctx),
        header_idx_old(ctx->cf_info.parent_loop.header_idx), exit_old(ctx->cf_info.parent_loop.exit),
        divergent_cont_old(ctx->cf_info.parent_loop.has_divergent_continue),
        divergent_branch_old(ctx->cf_info.parent_loop.has_divergent_branch),
        divergent_if_old(ctx->cf_info.parent_if.is_divergent)
   {
      ctx->cf_info.parent_loop.header_idx = loop_header_idx;
      ctx->cf_info.parent_loop.exit = loop_exit;
      ctx->cf_info.parent_loop.has_divergent_continue = false;
      ctx->cf_info.parent_loop.has_divergent_branch = false;
      ctx->cf_info.parent_if.is_divergent = false;
      ctx->cf_info.loop_nest_depth = ctx->cf_info.loop_nest_depth + 1;
   }

   ~loop_info_RAII()
   {
      ctx->cf_info.parent_loop.header_idx = header_idx_old;
      ctx->cf_info.parent_loop.exit = exit_old;
      ctx->cf_info.parent_loop.has_divergent_continue = divergent_cont_old;
      ctx->cf_info.parent_loop.has_divergent_branch = divergent_branch_old;
      ctx->cf_info.parent_if.is_divergent = divergent_if_old;
      ctx->cf_info.loop_nest_depth = ctx->cf_info.loop_nest_depth - 1;
      if (!ctx->cf_info.loop_nest_depth && !ctx->cf_info.parent_if.is_divergent)
         ctx->cf_info.exec_potentially_empty_discard = false;
   }
};

struct if_context {
   Temp cond;

   bool divergent_old;
   bool exec_potentially_empty_discard_old;
   bool exec_potentially_empty_break_old;
   uint16_t exec_potentially_empty_break_depth_old;

   unsigned BB_if_idx;
   unsigned invert_idx;
   bool uniform_has_then_branch;
   bool then_branch_divergent;
   Block BB_invert;
   Block BB_endif;
};

static bool visit_cf_list(struct isel_context *ctx,
                          struct exec_list *list);

static void add_logical_edge(unsigned pred_idx, Block *succ)
{
   succ->logical_preds.emplace_back(pred_idx);
}


static void add_linear_edge(unsigned pred_idx, Block *succ)
{
   succ->linear_preds.emplace_back(pred_idx);
}

static void add_edge(unsigned pred_idx, Block *succ)
{
   add_logical_edge(pred_idx, succ);
   add_linear_edge(pred_idx, succ);
}

static void append_logical_start(Block *b)
{
   Builder(NULL, b).pseudo(aco_opcode::p_logical_start);
}

static void append_logical_end(Block *b)
{
   Builder(NULL, b).pseudo(aco_opcode::p_logical_end);
}

Temp get_ssa_temp(struct isel_context *ctx, nir_ssa_def *def)
{
   assert(ctx->allocated[def->index].id());
   return ctx->allocated[def->index];
}

Temp emit_mbcnt(isel_context *ctx, Definition dst,
                Operand mask_lo = Operand((uint32_t) -1), Operand mask_hi = Operand((uint32_t) -1))
{
   Builder bld(ctx->program, ctx->block);
   Definition lo_def = ctx->program->wave_size == 32 ? dst : bld.def(v1);
   Temp thread_id_lo = bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, lo_def, mask_lo, Operand(0u));

   if (ctx->program->wave_size == 32) {
      return thread_id_lo;
   } else {
      Temp thread_id_hi = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, dst, mask_hi, thread_id_lo);
      return thread_id_hi;
   }
}
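
/* Illustration of mbcnt (editorial note, not from the original file): the
 * v_mbcnt_lo/hi pair counts the set bits of a 64-bit mask belonging to lanes
 * below the current one, added to the accumulator operand. With the default
 * all-ones masks this yields the lane id: e.g. for lane 37 in wave64,
 * v_mbcnt_lo contributes 32 (all low-half lanes are below it) and v_mbcnt_hi
 * adds the 5 high-half lanes below, giving 37. */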

Temp emit_wqm(isel_context *ctx, Temp src, Temp dst=Temp(0, s1), bool program_needs_wqm = false)
{
   Builder bld(ctx->program, ctx->block);

   if (!dst.id())
      dst = bld.tmp(src.regClass());

   assert(src.size() == dst.size());

   if (ctx->stage != fragment_fs) {
      if (!dst.id())
         return src;

      bld.copy(Definition(dst), src);
      return dst;
   }

   bld.pseudo(aco_opcode::p_wqm, Definition(dst), src);
   ctx->program->needs_wqm |= program_needs_wqm;
   return dst;
}

static Temp emit_bpermute(isel_context *ctx, Builder &bld, Temp index, Temp data)
{
   if (index.regClass() == s1)
      return bld.readlane(bld.def(s1), data, index);

   Temp index_x4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), index);

   /* Currently not implemented on GFX6-7 */
   assert(ctx->options->chip_class >= GFX8);

   if (ctx->options->chip_class <= GFX9 || ctx->program->wave_size == 32) {
      return bld.ds(aco_opcode::ds_bpermute_b32, bld.def(v1), index_x4, data);
   }

   /* GFX10, wave64 mode:
    * The bpermute instruction is limited to half-wave operation, which means that it can't
    * properly support subgroup shuffle like older generations (or wave32 mode), so we
    * emulate it here.
    */
   if (!ctx->has_gfx10_wave64_bpermute) {
      ctx->has_gfx10_wave64_bpermute = true;
      ctx->program->config->num_shared_vgprs = 8; /* Shared VGPRs are allocated in groups of 8 */
      ctx->program->vgpr_limit -= 4; /* We allocate 8 shared VGPRs, so we'll have 4 fewer normal VGPRs */
   }

   Temp lane_id = emit_mbcnt(ctx, bld.def(v1));
   Temp lane_is_hi = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x20u), lane_id);
   Temp index_is_hi = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x20u), index);
   Temp cmp = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(bld.lm, vcc), lane_is_hi, index_is_hi);

   return bld.reduction(aco_opcode::p_wave64_bpermute, bld.def(v1), bld.def(s2), bld.def(s1, scc),
                        bld.vcc(cmp), Operand(v2.as_linear()), index_x4, data, gfx10_wave64_bpermute);
}
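
/* Rough sketch of the wave64 emulation above (an inference from the operands,
 * not a statement of the backend's exact lowering): since GFX10's
 * ds_bpermute_b32 can only read within the current half-wave, bit 5 of the
 * reader's lane id is compared against bit 5 of the lane it wants to read;
 * the resulting "same half" mask in VCC plus the shared VGPRs reserved above
 * let the p_wave64_bpermute pseudo-instruction route the cross-half reads
 * when it is expanded later. */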

Temp as_vgpr(isel_context *ctx, Temp val)
{
   if (val.type() == RegType::sgpr) {
      Builder bld(ctx->program, ctx->block);
      return bld.copy(bld.def(RegType::vgpr, val.size()), val);
   }
   assert(val.type() == RegType::vgpr);
   return val;
}

//assumes a != 0xffffffff
void emit_v_div_u32(isel_context *ctx, Temp dst, Temp a, uint32_t b)
{
   assert(b != 0);
   Builder bld(ctx->program, ctx->block);

   if (util_is_power_of_two_or_zero(b)) {
      bld.vop2(aco_opcode::v_lshrrev_b32, Definition(dst), Operand((uint32_t)util_logbase2(b)), a);
      return;
   }

   util_fast_udiv_info info = util_compute_fast_udiv_info(b, 32, 32);

   assert(info.multiplier <= 0xffffffff);

   bool pre_shift = info.pre_shift != 0;
   bool increment = info.increment != 0;
   bool multiply = true;
   bool post_shift = info.post_shift != 0;

   if (!pre_shift && !increment && !multiply && !post_shift) {
      bld.vop1(aco_opcode::v_mov_b32, Definition(dst), a);
      return;
   }

   Temp pre_shift_dst = a;
   if (pre_shift) {
      pre_shift_dst = (increment || multiply || post_shift) ? bld.tmp(v1) : dst;
      bld.vop2(aco_opcode::v_lshrrev_b32, Definition(pre_shift_dst), Operand((uint32_t)info.pre_shift), a);
   }

   Temp increment_dst = pre_shift_dst;
   if (increment) {
      increment_dst = (post_shift || multiply) ? bld.tmp(v1) : dst;
      bld.vadd32(Definition(increment_dst), Operand((uint32_t) info.increment), pre_shift_dst);
   }

   Temp multiply_dst = increment_dst;
   if (multiply) {
      multiply_dst = post_shift ? bld.tmp(v1) : dst;
      bld.vop3(aco_opcode::v_mul_hi_u32, Definition(multiply_dst), increment_dst,
               bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand((uint32_t)info.multiplier)));
   }

   if (post_shift) {
      bld.vop2(aco_opcode::v_lshrrev_b32, Definition(dst), Operand((uint32_t)info.post_shift), multiply_dst);
   }
}
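
/* Worked example for the fast-division identity above (illustrative, using
 * the well-known constants for b = 3): multiplier = 0xAAAAAAAB,
 * pre_shift = 0, increment = 0, post_shift = 1, so
 * a / 3 == mul_hi(a, 0xAAAAAAAB) >> 1. E.g. for a = 100:
 * (100 * 0xAAAAAAAB) >> 32 == 66, and 66 >> 1 == 33 == 100 / 3. */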

void emit_extract_vector(isel_context* ctx, Temp src, uint32_t idx, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), src, Operand(idx));
}


Temp emit_extract_vector(isel_context* ctx, Temp src, uint32_t idx, RegClass dst_rc)
{
   /* no need to extract the whole vector */
   if (src.regClass() == dst_rc) {
      assert(idx == 0);
      return src;
   }

   assert(src.bytes() > (idx * dst_rc.bytes()));
   Builder bld(ctx->program, ctx->block);
   auto it = ctx->allocated_vec.find(src.id());
   if (it != ctx->allocated_vec.end() && dst_rc.bytes() == it->second[idx].regClass().bytes()) {
      if (it->second[idx].regClass() == dst_rc) {
         return it->second[idx];
      } else {
         assert(!dst_rc.is_subdword());
         assert(dst_rc.type() == RegType::vgpr && it->second[idx].type() == RegType::sgpr);
         return bld.copy(bld.def(dst_rc), it->second[idx]);
      }
   }

   if (dst_rc.is_subdword())
      src = as_vgpr(ctx, src);

   if (src.bytes() == dst_rc.bytes()) {
      assert(idx == 0);
      return bld.copy(bld.def(dst_rc), src);
   } else {
      Temp dst = bld.tmp(dst_rc);
      emit_extract_vector(ctx, src, idx, dst);
      return dst;
   }
}

void emit_split_vector(isel_context* ctx, Temp vec_src, unsigned num_components)
{
   if (num_components == 1)
      return;
   if (ctx->allocated_vec.find(vec_src.id()) != ctx->allocated_vec.end())
      return;
   aco_ptr<Pseudo_instruction> split{create_instruction<Pseudo_instruction>(aco_opcode::p_split_vector, Format::PSEUDO, 1, num_components)};
   split->operands[0] = Operand(vec_src);
   std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
   RegClass rc;
   if (num_components > vec_src.size()) {
      if (vec_src.type() == RegType::sgpr)
         return;

      /* sub-dword split */
      assert(vec_src.type() == RegType::vgpr);
      rc = RegClass(RegType::vgpr, vec_src.bytes() / num_components).as_subdword();
   } else {
      rc = RegClass(vec_src.type(), vec_src.size() / num_components);
   }
   for (unsigned i = 0; i < num_components; i++) {
      elems[i] = {ctx->program->allocateId(), rc};
      split->definitions[i] = Definition(elems[i]);
   }
   ctx->block->instructions.emplace_back(std::move(split));
   ctx->allocated_vec.emplace(vec_src.id(), elems);
}

/* This vector expansion uses a mask to determine which elements in the new vector
 * come from the original vector. The other elements are undefined. */
void expand_vector(isel_context* ctx, Temp vec_src, Temp dst, unsigned num_components, unsigned mask)
{
   emit_split_vector(ctx, vec_src, util_bitcount(mask));

   if (vec_src == dst)
      return;

   Builder bld(ctx->program, ctx->block);
   if (num_components == 1) {
      if (dst.type() == RegType::sgpr)
         bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec_src);
      else
         bld.copy(Definition(dst), vec_src);
      return;
   }

   unsigned component_size = dst.size() / num_components;
   std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;

   aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
   vec->definitions[0] = Definition(dst);
   unsigned k = 0;
   for (unsigned i = 0; i < num_components; i++) {
      if (mask & (1 << i)) {
         Temp src = emit_extract_vector(ctx, vec_src, k++, RegClass(vec_src.type(), component_size));
         if (dst.type() == RegType::sgpr)
            src = bld.as_uniform(src);
         vec->operands[i] = Operand(src);
      } else {
         vec->operands[i] = Operand(0u);
      }
      elems[i] = vec->operands[i].getTemp();
   }
   ctx->block->instructions.emplace_back(std::move(vec));
   ctx->allocated_vec.emplace(dst.id(), elems);
}
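
/* Example of the mask convention (illustrative): expand_vector with
 * num_components = 4 and mask = 0b0101 places the two source components at
 * positions 0 and 2 of dst; positions 1 and 3 get zero operands and their
 * contents are treated as undefined by consumers. */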

/* Adjust misaligned small-bit-size loads: shift the loaded value right by
 * 8 * (offset % 4) bits so it starts at bit 0 of dst. */
void byte_align_scalar(isel_context *ctx, Temp vec, Operand offset, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   Operand shift;
   Temp select = Temp();
   if (offset.isConstant()) {
      assert(offset.constantValue() && offset.constantValue() < 4);
      shift = Operand(offset.constantValue() * 8);
   } else {
      /* bit_offset = 8 * (offset & 0x3) */
      Temp tmp = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), offset, Operand(3u));
      select = bld.tmp(s1);
      shift = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.scc(Definition(select)), tmp, Operand(3u));
   }

   if (vec.size() == 1) {
      bld.sop2(aco_opcode::s_lshr_b32, Definition(dst), bld.def(s1, scc), vec, shift);
   } else if (vec.size() == 2) {
      Temp tmp = dst.size() == 2 ? dst : bld.tmp(s2);
      bld.sop2(aco_opcode::s_lshr_b64, Definition(tmp), bld.def(s1, scc), vec, shift);
      if (tmp == dst)
         emit_split_vector(ctx, dst, 2);
      else
         emit_extract_vector(ctx, tmp, 0, dst);
   } else if (vec.size() == 4) {
      Temp lo = bld.tmp(s2), hi = bld.tmp(s2);
      bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), vec);
      hi = bld.pseudo(aco_opcode::p_extract_vector, bld.def(s1), hi, Operand(0u));
      if (select != Temp())
         hi = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), hi, Operand(0u), select);
      lo = bld.sop2(aco_opcode::s_lshr_b64, bld.def(s2), bld.def(s1, scc), lo, shift);
      Temp mid = bld.tmp(s1);
      lo = bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), Definition(mid), lo);
      hi = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), hi, shift);
      mid = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), hi, mid);
      bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, mid);
      emit_split_vector(ctx, dst, 2);
   }
}
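
/* Illustration (not from the source): a 16-bit value loaded from byte
 * offset 2 of a dword has offset & 0x3 == 2, so the dword is shifted right
 * by 16 bits; the multi-dword paths perform the same realignment with
 * 64-bit shifts before re-assembling dst. */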

/* This function trims subdword vectors:
 * if dst is a VGPR, split the src and create a shrunk version according to the mask;
 * if dst is an SGPR, split the src, but move the original to the SGPR. */
void trim_subdword_vector(isel_context *ctx, Temp vec_src, Temp dst, unsigned num_components, unsigned mask)
{
   assert(vec_src.type() == RegType::vgpr);
   emit_split_vector(ctx, vec_src, num_components);

   Builder bld(ctx->program, ctx->block);
   std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
   unsigned component_size = vec_src.bytes() / num_components;
   RegClass rc = RegClass(RegType::vgpr, component_size).as_subdword();

   unsigned k = 0;
   for (unsigned i = 0; i < num_components; i++) {
      if (mask & (1 << i))
         elems[k++] = emit_extract_vector(ctx, vec_src, i, rc);
   }

   if (dst.type() == RegType::vgpr) {
      assert(dst.bytes() == k * component_size);
      aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, k, 1)};
      for (unsigned i = 0; i < k; i++)
         vec->operands[i] = Operand(elems[i]);
      vec->definitions[0] = Definition(dst);
      bld.insert(std::move(vec));
   } else {
      // TODO: alignbyte if mask doesn't start with 1?
      assert(mask & 1);
      assert(dst.size() == vec_src.size());
      bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec_src);
   }
   ctx->allocated_vec.emplace(dst.id(), elems);
}

Temp bool_to_vector_condition(isel_context *ctx, Temp val, Temp dst = Temp(0, s2))
{
   Builder bld(ctx->program, ctx->block);
   if (!dst.id())
      dst = bld.tmp(bld.lm);

   assert(val.regClass() == s1);
   assert(dst.regClass() == bld.lm);

   return bld.sop2(Builder::s_cselect, Definition(dst), Operand((uint32_t) -1), Operand(0u), bld.scc(val));
}

Temp bool_to_scalar_condition(isel_context *ctx, Temp val, Temp dst = Temp(0, s1))
{
   Builder bld(ctx->program, ctx->block);
   if (!dst.id())
      dst = bld.tmp(s1);

   assert(val.regClass() == bld.lm);
   assert(dst.regClass() == s1);

   /* if we're currently in WQM mode, ensure that the source is also computed in WQM */
   Temp tmp = bld.tmp(s1);
   bld.sop2(Builder::s_and, bld.def(bld.lm), bld.scc(Definition(tmp)), val, Operand(exec, bld.lm));
   return emit_wqm(ctx, tmp, dst);
}
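
/* Note on the two helpers above: ACO keeps NIR 1-bit booleans as wave lane
 * masks (bld.lm, i.e. s2 in wave64 / s1 in wave32), while uniform booleans
 * live in SCC as 0 or 1. bool_to_vector_condition broadcasts scc == 1 to an
 * all-ones lane mask via s_cselect; bool_to_scalar_condition ANDs the mask
 * with exec so only active lanes contribute. */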

Temp get_alu_src(struct isel_context *ctx, nir_alu_src src, unsigned size=1)
{
   if (src.src.ssa->num_components == 1 && src.swizzle[0] == 0 && size == 1)
      return get_ssa_temp(ctx, src.src.ssa);

   if (src.src.ssa->num_components == size) {
      bool identity_swizzle = true;
      for (unsigned i = 0; identity_swizzle && i < size; i++) {
         if (src.swizzle[i] != i)
            identity_swizzle = false;
      }
      if (identity_swizzle)
         return get_ssa_temp(ctx, src.src.ssa);
   }

   Temp vec = get_ssa_temp(ctx, src.src.ssa);
   unsigned elem_size = vec.bytes() / src.src.ssa->num_components;
   assert(elem_size > 0);
   assert(vec.bytes() % elem_size == 0);

   if (elem_size < 4 && vec.type() == RegType::sgpr) {
      assert(src.src.ssa->bit_size == 8 || src.src.ssa->bit_size == 16);
      assert(size == 1);
      unsigned swizzle = src.swizzle[0];
      if (vec.size() > 1) {
         assert(src.src.ssa->bit_size == 16);
         vec = emit_extract_vector(ctx, vec, swizzle / 2, s1);
         swizzle = swizzle & 1;
      }
      if (swizzle == 0)
         return vec;

      Temp dst{ctx->program->allocateId(), s1};
      aco_ptr<SOP2_instruction> bfe{create_instruction<SOP2_instruction>(aco_opcode::s_bfe_u32, Format::SOP2, 2, 1)};
      bfe->operands[0] = Operand(vec);
      bfe->operands[1] = Operand(uint32_t((src.src.ssa->bit_size << 16) | (src.src.ssa->bit_size * swizzle)));
      bfe->definitions[0] = Definition(dst);
      ctx->block->instructions.emplace_back(std::move(bfe));
      return dst;
   }

   RegClass elem_rc = elem_size < 4 ? RegClass(vec.type(), elem_size).as_subdword() : RegClass(vec.type(), elem_size / 4);
   if (size == 1) {
      return emit_extract_vector(ctx, vec, src.swizzle[0], elem_rc);
   } else {
      assert(size <= 4);
      std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
      aco_ptr<Pseudo_instruction> vec_instr{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, size, 1)};
      for (unsigned i = 0; i < size; ++i) {
         elems[i] = emit_extract_vector(ctx, vec, src.swizzle[i], elem_rc);
         vec_instr->operands[i] = Operand{elems[i]};
      }
      Temp dst{ctx->program->allocateId(), RegClass(vec.type(), elem_size * size / 4)};
      vec_instr->definitions[0] = Definition(dst);
      ctx->block->instructions.emplace_back(std::move(vec_instr));
      ctx->allocated_vec.emplace(dst.id(), elems);
      return dst;
   }
}
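
/* The s_bfe_u32 src1 above packs the bit field as (width << 16) | offset,
 * both in bits: e.g. extracting byte 2 of a dword uses
 * (8 << 16) | 16 == 0x80010, which is exactly what
 * (bit_size << 16) | (bit_size * swizzle) produces for bit_size == 8 and
 * swizzle == 2. */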

Temp convert_pointer_to_64_bit(isel_context *ctx, Temp ptr)
{
   if (ptr.size() == 2)
      return ptr;
   Builder bld(ctx->program, ctx->block);
   if (ptr.type() == RegType::vgpr)
      ptr = bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), ptr);
   return bld.pseudo(aco_opcode::p_create_vector, bld.def(s2),
                     ptr, Operand((unsigned)ctx->options->address32_hi));
}

void emit_sop2_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst, bool writes_scc)
{
   aco_ptr<SOP2_instruction> sop2{create_instruction<SOP2_instruction>(op, Format::SOP2, 2, writes_scc ? 2 : 1)};
   sop2->operands[0] = Operand(get_alu_src(ctx, instr->src[0]));
   sop2->operands[1] = Operand(get_alu_src(ctx, instr->src[1]));
   sop2->definitions[0] = Definition(dst);
   if (writes_scc)
      sop2->definitions[1] = Definition(ctx->program->allocateId(), scc, s1);
   ctx->block->instructions.emplace_back(std::move(sop2));
}

void emit_vop2_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst,
                           bool commutative, bool swap_srcs=false, bool flush_denorms = false)
{
   Builder bld(ctx->program, ctx->block);
   Temp src0 = get_alu_src(ctx, instr->src[swap_srcs ? 1 : 0]);
   Temp src1 = get_alu_src(ctx, instr->src[swap_srcs ? 0 : 1]);
   if (src1.type() == RegType::sgpr) {
      if (commutative && src0.type() == RegType::vgpr) {
         Temp t = src0;
         src0 = src1;
         src1 = t;
      } else if (src0.type() == RegType::vgpr &&
                 op != aco_opcode::v_madmk_f32 &&
                 op != aco_opcode::v_madak_f32 &&
                 op != aco_opcode::v_madmk_f16 &&
                 op != aco_opcode::v_madak_f16) {
         /* If the instruction is not commutative, we emit a VOP3A instruction */
         bld.vop2_e64(op, Definition(dst), src0, src1);
         return;
      } else {
         src1 = bld.copy(bld.def(RegType::vgpr, src1.size()), src1); //TODO: as_vgpr
      }
   }

   if (flush_denorms && ctx->program->chip_class < GFX9) {
      assert(dst.size() == 1);
      Temp tmp = bld.vop2(op, bld.def(v1), src0, src1);
      bld.vop2(aco_opcode::v_mul_f32, Definition(dst), Operand(0x3f800000u), tmp);
   } else {
      bld.vop2(op, Definition(dst), src0, src1);
   }
}
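
/* Rationale (sketch): the VOP2 encoding only allows an SGPR or constant in
 * src0, so when src1 is an SGPR the code swaps commutative operands, falls
 * back to the VOP3 form (which allows one SGPR in any slot), or copies src1
 * to a VGPR. The flush_denorms path multiplies the result by 1.0
 * (0x3f800000), which appears to be there to force a denormal flush for ops
 * that don't flush on their own before GFX9. */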

void emit_vop3a_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst,
                            bool flush_denorms = false)
{
   Temp src0 = get_alu_src(ctx, instr->src[0]);
   Temp src1 = get_alu_src(ctx, instr->src[1]);
   Temp src2 = get_alu_src(ctx, instr->src[2]);

   /* Ensure that the instruction has at most one SGPR operand;
    * the optimizer will inline constants for us. */
   if (src0.type() == RegType::sgpr && src1.type() == RegType::sgpr)
      src0 = as_vgpr(ctx, src0);
   if (src1.type() == RegType::sgpr && src2.type() == RegType::sgpr)
      src1 = as_vgpr(ctx, src1);
   if (src2.type() == RegType::sgpr && src0.type() == RegType::sgpr)
      src2 = as_vgpr(ctx, src2);

   Builder bld(ctx->program, ctx->block);
   if (flush_denorms && ctx->program->chip_class < GFX9) {
      assert(dst.size() == 1);
      Temp tmp = bld.vop3(op, bld.def(v1), src0, src1, src2);
      bld.vop2(aco_opcode::v_mul_f32, Definition(dst), Operand(0x3f800000u), tmp);
   } else {
      bld.vop3(op, Definition(dst), src0, src1, src2);
   }
}

void emit_vop1_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   bld.vop1(op, Definition(dst), get_alu_src(ctx, instr->src[0]));
}

void emit_vopc_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst)
{
   Temp src0 = get_alu_src(ctx, instr->src[0]);
   Temp src1 = get_alu_src(ctx, instr->src[1]);
   assert(src0.size() == src1.size());

   aco_ptr<Instruction> vopc;
   if (src1.type() == RegType::sgpr) {
      if (src0.type() == RegType::vgpr) {
         /* to swap the operands, we might also have to change the opcode */
         switch (op) {
         case aco_opcode::v_cmp_lt_f32:
            op = aco_opcode::v_cmp_gt_f32;
            break;
         case aco_opcode::v_cmp_ge_f32:
            op = aco_opcode::v_cmp_le_f32;
            break;
         case aco_opcode::v_cmp_lt_i32:
            op = aco_opcode::v_cmp_gt_i32;
            break;
         case aco_opcode::v_cmp_ge_i32:
            op = aco_opcode::v_cmp_le_i32;
            break;
         case aco_opcode::v_cmp_lt_u32:
            op = aco_opcode::v_cmp_gt_u32;
            break;
         case aco_opcode::v_cmp_ge_u32:
            op = aco_opcode::v_cmp_le_u32;
            break;
         case aco_opcode::v_cmp_lt_f64:
            op = aco_opcode::v_cmp_gt_f64;
            break;
         case aco_opcode::v_cmp_ge_f64:
            op = aco_opcode::v_cmp_le_f64;
            break;
         case aco_opcode::v_cmp_lt_i64:
            op = aco_opcode::v_cmp_gt_i64;
            break;
         case aco_opcode::v_cmp_ge_i64:
            op = aco_opcode::v_cmp_le_i64;
            break;
         case aco_opcode::v_cmp_lt_u64:
            op = aco_opcode::v_cmp_gt_u64;
            break;
         case aco_opcode::v_cmp_ge_u64:
            op = aco_opcode::v_cmp_le_u64;
            break;
         default: /* eq and ne are commutative */
            break;
         }
         Temp t = src0;
         src0 = src1;
         src1 = t;
      } else {
         src1 = as_vgpr(ctx, src1);
      }
   }

   Builder bld(ctx->program, ctx->block);
   bld.vopc(op, bld.hint_vcc(Definition(dst)), src0, src1);
}
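
/* Note on the switch above: swapping the operands of an ordered comparison
 * requires mirroring the predicate (lt<->gt, ge<->le); eq and ne compare
 * symmetrically, so they fall through unchanged. */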

void emit_sopc_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst)
{
   Temp src0 = get_alu_src(ctx, instr->src[0]);
   Temp src1 = get_alu_src(ctx, instr->src[1]);
   Builder bld(ctx->program, ctx->block);

   assert(dst.regClass() == bld.lm);
   assert(src0.type() == RegType::sgpr);
   assert(src1.type() == RegType::sgpr);
   assert(src0.regClass() == src1.regClass());

   /* Emit the SALU comparison instruction */
   Temp cmp = bld.sopc(op, bld.scc(bld.def(s1)), src0, src1);
   /* Turn the result into a per-lane bool */
   bool_to_vector_condition(ctx, cmp, dst);
}

void emit_comparison(isel_context *ctx, nir_alu_instr *instr, Temp dst,
                     aco_opcode v32_op, aco_opcode v64_op, aco_opcode s32_op = aco_opcode::num_opcodes, aco_opcode s64_op = aco_opcode::num_opcodes)
{
   aco_opcode s_op = instr->src[0].src.ssa->bit_size == 64 ? s64_op : s32_op;
   aco_opcode v_op = instr->src[0].src.ssa->bit_size == 64 ? v64_op : v32_op;
   bool divergent_vals = ctx->divergent_vals[instr->dest.dest.ssa.index];
   bool use_valu = s_op == aco_opcode::num_opcodes ||
                   divergent_vals ||
                   ctx->allocated[instr->src[0].src.ssa->index].type() == RegType::vgpr ||
                   ctx->allocated[instr->src[1].src.ssa->index].type() == RegType::vgpr;
   aco_opcode op = use_valu ? v_op : s_op;
   assert(op != aco_opcode::num_opcodes);
   assert(dst.regClass() == ctx->program->lane_mask);

   if (use_valu)
      emit_vopc_instruction(ctx, instr, op, dst);
   else
      emit_sopc_instruction(ctx, instr, op, dst);
}

void emit_boolean_logic(isel_context *ctx, nir_alu_instr *instr, Builder::WaveSpecificOpcode op, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   Temp src0 = get_alu_src(ctx, instr->src[0]);
   Temp src1 = get_alu_src(ctx, instr->src[1]);

   assert(dst.regClass() == bld.lm);
   assert(src0.regClass() == bld.lm);
   assert(src1.regClass() == bld.lm);

   bld.sop2(op, Definition(dst), bld.def(s1, scc), src0, src1);
}

void emit_bcsel(isel_context *ctx, nir_alu_instr *instr, Temp dst)
{
   Builder bld(ctx->program, ctx->block);
   Temp cond = get_alu_src(ctx, instr->src[0]);
   Temp then = get_alu_src(ctx, instr->src[1]);
   Temp els = get_alu_src(ctx, instr->src[2]);

   assert(cond.regClass() == bld.lm);

   if (dst.type() == RegType::vgpr) {
      aco_ptr<Instruction> bcsel;
      if (dst.size() == 1) {
         then = as_vgpr(ctx, then);
         els = as_vgpr(ctx, els);

         bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), els, then, cond);
      } else if (dst.size() == 2) {
         Temp then_lo = bld.tmp(v1), then_hi = bld.tmp(v1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(then_lo), Definition(then_hi), then);
         Temp else_lo = bld.tmp(v1), else_hi = bld.tmp(v1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(else_lo), Definition(else_hi), els);

         Temp dst0 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_lo, then_lo, cond);
         Temp dst1 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_hi, then_hi, cond);

         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      return;
   }

   if (instr->dest.dest.ssa.bit_size == 1) {
      assert(dst.regClass() == bld.lm);
      assert(then.regClass() == bld.lm);
      assert(els.regClass() == bld.lm);
   }

   if (!ctx->divergent_vals[instr->src[0].src.ssa->index]) { /* uniform condition and values in sgpr */
      if (dst.regClass() == s1 || dst.regClass() == s2) {
         assert((then.regClass() == s1 || then.regClass() == s2) && els.regClass() == then.regClass());
         assert(dst.size() == then.size());
         aco_opcode op = dst.regClass() == s1 ? aco_opcode::s_cselect_b32 : aco_opcode::s_cselect_b64;
         bld.sop2(op, Definition(dst), then, els, bld.scc(bool_to_scalar_condition(ctx, cond)));
      } else {
         fprintf(stderr, "Unimplemented uniform bcsel bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      return;
   }

   /* divergent boolean bcsel
    * this implements bcsel on bools: dst = s0 ? s1 : s2
    * which is computed as: dst = (s0 & s1) | (~s0 & s2) */
   assert(instr->dest.dest.ssa.bit_size == 1);

   if (cond.id() != then.id())
      then = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), cond, then);

   if (cond.id() == els.id())
      bld.sop1(Builder::s_mov, Definition(dst), then);
   else
      bld.sop2(Builder::s_or, Definition(dst), bld.def(s1, scc), then,
               bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), els, cond));
}

void emit_scaled_op(isel_context *ctx, Builder& bld, Definition dst, Temp val,
                    aco_opcode op, uint32_t undo)
{
   /* multiply by 16777216 to handle denormals */
   Temp is_denormal = bld.vopc(aco_opcode::v_cmp_class_f32, bld.hint_vcc(bld.def(bld.lm)),
                               as_vgpr(ctx, val), bld.copy(bld.def(v1), Operand((1u << 7) | (1u << 4))));
   Temp scaled = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x4b800000u), val);
   scaled = bld.vop1(op, bld.def(v1), scaled);
   scaled = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(undo), scaled);

   Temp not_scaled = bld.vop1(op, bld.def(v1), val);

   bld.vop2(aco_opcode::v_cndmask_b32, dst, not_scaled, scaled, is_denormal);
}
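
/* Constants spelled out (illustrative): 0x4b800000 is 16777216.0 == 2^24,
 * and the v_cmp_class_f32 mask (1u << 7) | (1u << 4) selects the +denormal
 * and -denormal classes. Each caller passes an "undo" factor matching its
 * op: rcp(2^24 * x) == 2^-24 * rcp(x), so emit_rcp undoes with 2^24
 * (0x4b800000); rsq scales back by 2^12 (0x45800000) and sqrt by 2^-12
 * (0x39800000). */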

void emit_rcp(isel_context *ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->block->fp_mode.denorm32 == 0) {
      bld.vop1(aco_opcode::v_rcp_f32, dst, val);
      return;
   }

   emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_rcp_f32, 0x4b800000u);
}

void emit_rsq(isel_context *ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->block->fp_mode.denorm32 == 0) {
      bld.vop1(aco_opcode::v_rsq_f32, dst, val);
      return;
   }

   emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_rsq_f32, 0x45800000u);
}

void emit_sqrt(isel_context *ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->block->fp_mode.denorm32 == 0) {
      bld.vop1(aco_opcode::v_sqrt_f32, dst, val);
      return;
   }

   emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_sqrt_f32, 0x39800000u);
}

void emit_log2(isel_context *ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->block->fp_mode.denorm32 == 0) {
      bld.vop1(aco_opcode::v_log_f32, dst, val);
      return;
   }

   emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_log_f32, 0xc1c00000u);
}

Temp emit_trunc_f64(isel_context *ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->options->chip_class >= GFX7)
      return bld.vop1(aco_opcode::v_trunc_f64, Definition(dst), val);

   /* GFX6 doesn't support V_TRUNC_F64, lower it. */
   /* TODO: create more efficient code! */
   if (val.type() == RegType::sgpr)
      val = as_vgpr(ctx, val);

   /* Split the input value. */
   Temp val_lo = bld.tmp(v1), val_hi = bld.tmp(v1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(val_lo), Definition(val_hi), val);

   /* Extract the exponent and compute the unbiased value. */
   Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f64, bld.def(v1), val);

   /* Clear the fractional part (i.e. keep only the integral bits). */
   Temp fract_mask = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(-1u), Operand(0x000fffffu));
   fract_mask = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), fract_mask, exponent);

   Temp fract_mask_lo = bld.tmp(v1), fract_mask_hi = bld.tmp(v1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(fract_mask_lo), Definition(fract_mask_hi), fract_mask);

   Temp fract_lo = bld.tmp(v1), fract_hi = bld.tmp(v1);
   Temp tmp = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), fract_mask_lo);
   fract_lo = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), val_lo, tmp);
   tmp = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), fract_mask_hi);
   fract_hi = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), val_hi, tmp);

   /* Get the sign bit. */
   Temp sign = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), val_hi);

   /* Decide the operation to apply depending on the unbiased exponent. */
   Temp exp_lt0 = bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.hint_vcc(bld.def(bld.lm)), exponent, Operand(0u));
   Temp dst_lo = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), fract_lo, bld.copy(bld.def(v1), Operand(0u)), exp_lt0);
   Temp dst_hi = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), fract_hi, sign, exp_lt0);
   Temp exp_gt51 = bld.vopc_e64(aco_opcode::v_cmp_gt_i32, bld.def(s2), exponent, Operand(51u));
   dst_lo = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), dst_lo, val_lo, exp_gt51);
   dst_hi = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), dst_hi, val_hi, exp_gt51);

   return bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst_lo, dst_hi);
}
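
/* Rough sketch of the lowering: v_frexp_exp yields the exponent e, and
 * shifting the 52-bit fraction mask right by e isolates the mantissa bits
 * that are fractional at that magnitude so they can be masked off. The two
 * v_cndmask pairs handle the out-of-range exponents: e < 0 (|val| < 1) and
 * e > 51 (the value is already integral and passes through unchanged). */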

Temp emit_floor_f64(isel_context *ctx, Builder& bld, Definition dst, Temp val)
{
   if (ctx->options->chip_class >= GFX7)
      return bld.vop1(aco_opcode::v_floor_f64, Definition(dst), val);

   /* GFX6 doesn't support V_FLOOR_F64, lower it. */
   Temp src0 = as_vgpr(ctx, val);

   Temp mask = bld.copy(bld.def(s1), Operand(3u)); /* isnan */
   Temp min_val = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(-1u), Operand(0x3fefffffu));

   Temp isnan = bld.vopc_e64(aco_opcode::v_cmp_class_f64, bld.hint_vcc(bld.def(bld.lm)), src0, mask);
   Temp fract = bld.vop1(aco_opcode::v_fract_f64, bld.def(v2), src0);
   Temp min = bld.vop3(aco_opcode::v_min_f64, bld.def(v2), fract, min_val);

   Temp then_lo = bld.tmp(v1), then_hi = bld.tmp(v1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(then_lo), Definition(then_hi), src0);
   Temp else_lo = bld.tmp(v1), else_hi = bld.tmp(v1);
   bld.pseudo(aco_opcode::p_split_vector, Definition(else_lo), Definition(else_hi), min);

   Temp dst0 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_lo, then_lo, isnan);
   Temp dst1 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_hi, then_hi, isnan);

   Temp v = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), dst0, dst1);

   Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), src0, v);
   static_cast<VOP3A_instruction*>(add)->neg[1] = true;

   return add->definitions[0].getTemp();
}
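
/* The identity used above (sketch): floor(x) == x - fract(x), implemented as
 * v_add_f64 with the negate modifier set on the second operand. fract(x) is
 * clamped against 0x3FEFFFFFFFFFFFFF (the largest double below 1.0) so a
 * rounded-up fract can't push the result past the next integer, and NaN
 * inputs (class mask 3 == signaling|quiet NaN) select the source unchanged. */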

void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr)
{
   if (!instr->dest.dest.is_ssa) {
      fprintf(stderr, "nir alu dst not in ssa: ");
      nir_print_instr(&instr->instr, stderr);
      fprintf(stderr, "\n");
      abort();
   }
   Builder bld(ctx->program, ctx->block);
   Temp dst = get_ssa_temp(ctx, &instr->dest.dest.ssa);
   switch(instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4: {
      std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
      unsigned num = instr->dest.dest.ssa.num_components;
      for (unsigned i = 0; i < num; ++i)
         elems[i] = get_alu_src(ctx, instr->src[i]);

      if (instr->dest.dest.ssa.bit_size >= 32 || dst.type() == RegType::vgpr) {
         aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, instr->dest.dest.ssa.num_components, 1)};
         for (unsigned i = 0; i < num; ++i)
            vec->operands[i] = Operand{elems[i]};
         vec->definitions[0] = Definition(dst);
         ctx->block->instructions.emplace_back(std::move(vec));
         ctx->allocated_vec.emplace(dst.id(), elems);
      } else {
         // TODO: that is a bit suboptimal..
         Temp mask = bld.copy(bld.def(s1), Operand((1u << instr->dest.dest.ssa.bit_size) - 1));
         for (unsigned i = 0; i < num - 1; ++i)
            if (((i+1) * instr->dest.dest.ssa.bit_size) % 32)
               elems[i] = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), elems[i], mask);
         for (unsigned i = 0; i < num; ++i) {
            unsigned bit = i * instr->dest.dest.ssa.bit_size;
            if (bit % 32 == 0) {
               elems[bit / 32] = elems[i];
            } else {
               elems[i] = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc),
                                   elems[i], Operand((i * instr->dest.dest.ssa.bit_size) % 32));
               elems[bit / 32] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), elems[bit / 32], elems[i]);
            }
         }
         if (dst.size() == 1)
            bld.copy(Definition(dst), elems[0]);
         else
            bld.pseudo(aco_opcode::p_create_vector, Definition(dst), elems[0], elems[1]);
      }
      break;
   }
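   /* Illustration of the sub-dword pack above (hypothetical values): a vec2
    * of 16-bit SGPR values a and b becomes one 32-bit SGPR computed as
    * (a & 0xffff) | (b << 16); the final element of a dword is left unmasked
    * because its high bits get shifted out of the register anyway. */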
   case nir_op_mov: {
      Temp src = get_alu_src(ctx, instr->src[0]);
      aco_ptr<Instruction> mov;
      if (dst.type() == RegType::sgpr) {
         if (src.type() == RegType::vgpr)
            bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), src);
         else if (src.regClass() == s1)
            bld.sop1(aco_opcode::s_mov_b32, Definition(dst), src);
         else if (src.regClass() == s2)
            bld.sop1(aco_opcode::s_mov_b64, Definition(dst), src);
         else
            unreachable("wrong src register class for nir_op_mov");
      } else if (dst.regClass() == v1) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(dst), src);
      } else if (dst.regClass() == v2) {
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src);
      } else {
         nir_print_instr(&instr->instr, stderr);
         unreachable("Should have been lowered to scalar.");
      }
      break;
   }
   case nir_op_inot: {
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (instr->dest.dest.ssa.bit_size == 1) {
         assert(src.regClass() == bld.lm);
         assert(dst.regClass() == bld.lm);
         /* Avoid s_andn2 here: emitting s_not and s_and separately lets the
          * optimizer make a better decision. */
         Temp tmp = bld.sop1(Builder::s_not, bld.def(bld.lm), bld.def(s1, scc), src);
         bld.sop2(Builder::s_and, Definition(dst), bld.def(s1, scc), tmp, Operand(exec, bld.lm));
      } else if (dst.regClass() == v1) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_not_b32, dst);
      } else if (dst.type() == RegType::sgpr) {
         aco_opcode opcode = dst.size() == 1 ? aco_opcode::s_not_b32 : aco_opcode::s_not_b64;
         bld.sop1(opcode, Definition(dst), bld.def(s1, scc), src);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
   case nir_op_ineg: {
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (dst.regClass() == v1) {
         bld.vsub32(Definition(dst), Operand(0u), Operand(src));
      } else if (dst.regClass() == s1) {
         bld.sop2(aco_opcode::s_mul_i32, Definition(dst), Operand((uint32_t) -1), src);
      } else if (dst.size() == 2) {
         Temp src0 = bld.tmp(dst.type(), 1);
         Temp src1 = bld.tmp(dst.type(), 1);
         bld.pseudo(aco_opcode::p_split_vector, Definition(src0), Definition(src1), src);

         if (dst.regClass() == s2) {
            Temp carry = bld.tmp(s1);
            Temp dst0 = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(carry)), Operand(0u), src0);
            Temp dst1 = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.def(s1, scc), Operand(0u), src1, carry);
            bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
         } else {
            Temp lower = bld.tmp(v1);
            Temp borrow = bld.vsub32(Definition(lower), Operand(0u), src0, true).def(1).getTemp();
            Temp upper = bld.vsub32(bld.def(v1), Operand(0u), src1, false, borrow);
            bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
         }
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
   case nir_op_iabs: {
      if (dst.regClass() == s1) {
         bld.sop1(aco_opcode::s_abs_i32, Definition(dst), bld.def(s1, scc), get_alu_src(ctx, instr->src[0]));
      } else if (dst.regClass() == v1) {
         Temp src = get_alu_src(ctx, instr->src[0]);
         bld.vop2(aco_opcode::v_max_i32, Definition(dst), src, bld.vsub32(bld.def(v1), Operand(0u), src));
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
   case nir_op_isign: {
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (dst.regClass() == s1) {
         Temp tmp = bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), src, Operand(31u));
         Temp gtz = bld.sopc(aco_opcode::s_cmp_gt_i32, bld.def(s1, scc), src, Operand(0u));
         bld.sop2(aco_opcode::s_add_i32, Definition(dst), bld.def(s1, scc), gtz, tmp);
      } else if (dst.regClass() == s2) {
         Temp neg = bld.sop2(aco_opcode::s_ashr_i64, bld.def(s2), bld.def(s1, scc), src, Operand(63u));
         Temp neqz;
         if (ctx->program->chip_class >= GFX8)
            neqz = bld.sopc(aco_opcode::s_cmp_lg_u64, bld.def(s1, scc), src, Operand(0u));
         else
            neqz = bld.sop2(aco_opcode::s_or_b64, bld.def(s2), bld.def(s1, scc), src, Operand(0u)).def(1).getTemp();
         /* SCC gets zero-extended to 64 bit */
         bld.sop2(aco_opcode::s_or_b64, Definition(dst), bld.def(s1, scc), neg, bld.scc(neqz));
      } else if (dst.regClass() == v1) {
         Temp tmp = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), src);
         Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
         bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(1u), tmp, gtz);
      } else if (dst.regClass() == v2) {
         Temp upper = emit_extract_vector(ctx, src, 1, v1);
         Temp neg = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), upper);
         Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
         Temp lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(1u), neg, gtz);
         upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), neg, gtz);
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
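   /* The scalar isign trick above, spelled out (illustrative): (x >> 31) is
    * -1 for negative x and 0 otherwise, while (x > 0) is 1 or 0, so their
    * sum is the sign: x = -5 -> -1 + 0 = -1; x = 0 -> 0 + 0 = 0;
    * x = 7 -> 0 + 1 = 1. */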
   case nir_op_imax: {
      if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_max_i32, dst, true);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_max_i32, dst, true);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
   case nir_op_umax: {
      if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_max_u32, dst, true);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_max_u32, dst, true);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
   case nir_op_imin: {
      if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_min_i32, dst, true);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_min_i32, dst, true);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
   case nir_op_umin: {
      if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_min_u32, dst, true);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_min_u32, dst, true);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
   case nir_op_ior: {
      if (instr->dest.dest.ssa.bit_size == 1) {
         emit_boolean_logic(ctx, instr, Builder::s_or, dst);
      } else if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_or_b32, dst, true);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_or_b32, dst, true);
      } else if (dst.regClass() == s2) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_or_b64, dst, true);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
   case nir_op_iand: {
      if (instr->dest.dest.ssa.bit_size == 1) {
         emit_boolean_logic(ctx, instr, Builder::s_and, dst);
      } else if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_and_b32, dst, true);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_and_b32, dst, true);
      } else if (dst.regClass() == s2) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_and_b64, dst, true);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
   case nir_op_ixor: {
      if (instr->dest.dest.ssa.bit_size == 1) {
         emit_boolean_logic(ctx, instr, Builder::s_xor, dst);
      } else if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_xor_b32, dst, true);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_xor_b32, dst, true);
      } else if (dst.regClass() == s2) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_xor_b64, dst, true);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
   case nir_op_ushr: {
      if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_lshrrev_b32, dst, false, true);
      } else if (dst.regClass() == v2 && ctx->program->chip_class >= GFX8) {
         bld.vop3(aco_opcode::v_lshrrev_b64, Definition(dst),
                  get_alu_src(ctx, instr->src[1]), get_alu_src(ctx, instr->src[0]));
      } else if (dst.regClass() == v2) {
         bld.vop3(aco_opcode::v_lshr_b64, Definition(dst),
                  get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
      } else if (dst.regClass() == s2) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_lshr_b64, dst, true);
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_lshr_b32, dst, true);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
   case nir_op_ishl: {
      if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_lshlrev_b32, dst, false, true);
      } else if (dst.regClass() == v2 && ctx->program->chip_class >= GFX8) {
         bld.vop3(aco_opcode::v_lshlrev_b64, Definition(dst),
                  get_alu_src(ctx, instr->src[1]), get_alu_src(ctx, instr->src[0]));
      } else if (dst.regClass() == v2) {
         bld.vop3(aco_opcode::v_lshl_b64, Definition(dst),
                  get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_lshl_b32, dst, true);
      } else if (dst.regClass() == s2) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_lshl_b64, dst, true);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
   case nir_op_ishr: {
      if (dst.regClass() == v1) {
         emit_vop2_instruction(ctx, instr, aco_opcode::v_ashrrev_i32, dst, false, true);
      } else if (dst.regClass() == v2 && ctx->program->chip_class >= GFX8) {
         bld.vop3(aco_opcode::v_ashrrev_i64, Definition(dst),
                  get_alu_src(ctx, instr->src[1]), get_alu_src(ctx, instr->src[0]));
      } else if (dst.regClass() == v2) {
         bld.vop3(aco_opcode::v_ashr_i64, Definition(dst),
                  get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
      } else if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_ashr_i32, dst, true);
      } else if (dst.regClass() == s2) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_ashr_i64, dst, true);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
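   /* Note on the *rev shift opcodes used above: they take the shift amount
    * in src0 instead of src1, which is why the NIR sources are passed in
    * swapped order relative to the non-rev v_lshr/v_lshl/v_ashr 64-bit forms
    * used on pre-GFX8 chips. */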
   case nir_op_find_lsb: {
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (src.regClass() == s1) {
         bld.sop1(aco_opcode::s_ff1_i32_b32, Definition(dst), src);
      } else if (src.regClass() == v1) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_ffbl_b32, dst);
      } else if (src.regClass() == s2) {
         bld.sop1(aco_opcode::s_ff1_i32_b64, Definition(dst), src);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      Temp src = get_alu_src(ctx, instr->src[0]);
      if (src.regClass() == s1 || src.regClass() == s2) {
         aco_opcode op = src.regClass() == s2 ?
                         (instr->op == nir_op_ufind_msb ? aco_opcode::s_flbit_i32_b64 : aco_opcode::s_flbit_i32_i64) :
                         (instr->op == nir_op_ufind_msb ? aco_opcode::s_flbit_i32_b32 : aco_opcode::s_flbit_i32);
         Temp msb_rev = bld.sop1(op, bld.def(s1), src);

         Builder::Result sub = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc),
                                        Operand(src.size() * 32u - 1u), msb_rev);
         Temp msb = sub.def(0).getTemp();
         Temp carry = sub.def(1).getTemp();

         bld.sop2(aco_opcode::s_cselect_b32, Definition(dst), Operand((uint32_t)-1), msb, bld.scc(carry));
      } else if (src.regClass() == v1) {
         aco_opcode op = instr->op == nir_op_ufind_msb ? aco_opcode::v_ffbh_u32 : aco_opcode::v_ffbh_i32;
         Temp msb_rev = bld.tmp(v1);
         emit_vop1_instruction(ctx, instr, op, msb_rev);
         Temp msb = bld.tmp(v1);
         Temp carry = bld.vsub32(Definition(msb), Operand(31u), Operand(msb_rev), true).def(1).getTemp();
         bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), msb, Operand((uint32_t)-1), carry);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
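   /* Mapping used above (sketch): the flbit/ffbh family counts down from the
    * top bit, so the MSB index is (size * 32 - 1) - count. When no bit is
    * set the count is ~0u, the subtraction underflows, and the resulting
    * carry selects the result of -1 that NIR expects. */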
   case nir_op_bitfield_reverse: {
      if (dst.regClass() == s1) {
         bld.sop1(aco_opcode::s_brev_b32, Definition(dst), get_alu_src(ctx, instr->src[0]));
      } else if (dst.regClass() == v1) {
         bld.vop1(aco_opcode::v_bfrev_b32, Definition(dst), get_alu_src(ctx, instr->src[0]));
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
   case nir_op_iadd: {
      if (dst.regClass() == s1) {
         emit_sop2_instruction(ctx, instr, aco_opcode::s_add_u32, dst, true);
         break;
      }

      Temp src0 = get_alu_src(ctx, instr->src[0]);
      Temp src1 = get_alu_src(ctx, instr->src[1]);
      if (dst.regClass() == v1) {
         bld.vadd32(Definition(dst), Operand(src0), Operand(src1));
         break;
      }

      assert(src0.size() == 2 && src1.size() == 2);
      Temp src00 = bld.tmp(src0.type(), 1);
      Temp src01 = bld.tmp(dst.type(), 1);
      bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
      Temp src10 = bld.tmp(src1.type(), 1);
      Temp src11 = bld.tmp(dst.type(), 1);
      bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);

      if (dst.regClass() == s2) {
         Temp carry = bld.tmp(s1);
         Temp dst0 = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), src00, src10);
         Temp dst1 = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.def(s1, scc), src01, src11, bld.scc(carry));
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
      } else if (dst.regClass() == v2) {
         Temp dst0 = bld.tmp(v1);
         Temp carry = bld.vadd32(Definition(dst0), src00, src10, true).def(1).getTemp();
         Temp dst1 = bld.vadd32(bld.def(v1), src01, src11, false, carry);
         bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
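   /* 64-bit adds (sketch): the low dwords are added with a carry-out
    * (s_add_u32 writing SCC, or vadd32 with carry_out set) and the high
    * dwords consume it via s_addc_u32 or a carry-in vadd32, i.e. a standard
    * add-with-carry chain. */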
   case nir_op_uadd_sat: {
      Temp src0 = get_alu_src(ctx, instr->src[0]);
      Temp src1 = get_alu_src(ctx, instr->src[1]);
      if (dst.regClass() == s1) {
         Temp tmp = bld.tmp(s1), carry = bld.tmp(s1);
         bld.sop2(aco_opcode::s_add_u32, Definition(tmp), bld.scc(Definition(carry)),
                  src0, src1);
         bld.sop2(aco_opcode::s_cselect_b32, Definition(dst), Operand((uint32_t) -1), tmp, bld.scc(carry));
      } else if (dst.regClass() == v1) {
         if (ctx->options->chip_class >= GFX9) {
            aco_ptr<VOP3A_instruction> add{create_instruction<VOP3A_instruction>(aco_opcode::v_add_u32, asVOP3(Format::VOP2), 2, 1)};
            add->operands[0] = Operand(src0);
            add->operands[1] = Operand(src1);
            add->definitions[0] = Definition(dst);
            add->clamp = 1;
            ctx->block->instructions.emplace_back(std::move(add));
         } else {
            if (src1.regClass() != v1)
               std::swap(src0, src1);
            assert(src1.regClass() == v1);
            Temp tmp = bld.tmp(v1);
            Temp carry = bld.vadd32(Definition(tmp), src0, src1, true).def(1).getTemp();
            bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), tmp, Operand((uint32_t) -1), carry);
         }
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
         fprintf(stderr, "\n");
      }
      break;
   }
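   /* On GFX9+ the VOP3 clamp bit turns v_add_u32 into a saturating add
    * directly; older chips emulate it by selecting 0xffffffff whenever the
    * carry-out fires. */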
1376 case nir_op_uadd_carry: {
1377 Temp src0 = get_alu_src(ctx, instr->src[0]);
1378 Temp src1 = get_alu_src(ctx, instr->src[1]);
1379 if (dst.regClass() == s1) {
1380 bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(dst)), src0, src1);
1381 break;
1382 }
1383 if (dst.regClass() == v1) {
1384 Temp carry = bld.vadd32(bld.def(v1), src0, src1, true).def(1).getTemp();
1385 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand(1u), carry);
1386 break;
1387 }
1388
1389 Temp src00 = bld.tmp(src0.type(), 1);
1390 Temp src01 = bld.tmp(dst.type(), 1);
1391 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
1392 Temp src10 = bld.tmp(src1.type(), 1);
1393 Temp src11 = bld.tmp(dst.type(), 1);
1394 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
1395 if (dst.regClass() == s2) {
1396 Temp carry = bld.tmp(s1);
1397 bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), src00, src10);
1398 carry = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.scc(bld.def(s1)), src01, src11, bld.scc(carry)).def(1).getTemp();
1399 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), carry, Operand(0u));
1400 } else if (dst.regClass() == v2) {
1401 Temp carry = bld.vadd32(bld.def(v1), src00, src10, true).def(1).getTemp();
1402 carry = bld.vadd32(bld.def(v1), src01, src11, true, carry).def(1).getTemp();
1403 carry = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), Operand(1u), carry);
1404 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), carry, Operand(0u));
1405 } else {
1406 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1407 nir_print_instr(&instr->instr, stderr);
1408 fprintf(stderr, "\n");
1409 }
1410 break;
1411 }
1412 case nir_op_isub: {
1413 if (dst.regClass() == s1) {
1414 emit_sop2_instruction(ctx, instr, aco_opcode::s_sub_i32, dst, true);
1415 break;
1416 }
1417
1418 Temp src0 = get_alu_src(ctx, instr->src[0]);
1419 Temp src1 = get_alu_src(ctx, instr->src[1]);
1420 if (dst.regClass() == v1) {
1421 bld.vsub32(Definition(dst), src0, src1);
1422 break;
1423 }
1424
1425 Temp src00 = bld.tmp(src0.type(), 1);
1426 Temp src01 = bld.tmp(dst.type(), 1);
1427 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
1428 Temp src10 = bld.tmp(src1.type(), 1);
1429 Temp src11 = bld.tmp(dst.type(), 1);
1430 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
1431 if (dst.regClass() == s2) {
1432 Temp carry = bld.tmp(s1);
1433 Temp dst0 = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(carry)), src00, src10);
1434 Temp dst1 = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.def(s1, scc), src01, src11, bld.scc(carry));
1435 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
1436 } else if (dst.regClass() == v2) {
1437 Temp lower = bld.tmp(v1);
1438 Temp borrow = bld.vsub32(Definition(lower), src00, src10, true).def(1).getTemp();
1439 Temp upper = bld.vsub32(bld.def(v1), src01, src11, false, borrow);
1440 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
1441 } else {
1442 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1443 nir_print_instr(&instr->instr, stderr);
1444 fprintf(stderr, "\n");
1445 }
1446 break;
1447 }
1448 case nir_op_usub_borrow: {
1449 Temp src0 = get_alu_src(ctx, instr->src[0]);
1450 Temp src1 = get_alu_src(ctx, instr->src[1]);
1451 if (dst.regClass() == s1) {
1452 bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(dst)), src0, src1);
1453 break;
1454 } else if (dst.regClass() == v1) {
1455 Temp borrow = bld.vsub32(bld.def(v1), src0, src1, true).def(1).getTemp();
1456 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand(1u), borrow);
1457 break;
1458 }
1459
1460 Temp src00 = bld.tmp(src0.type(), 1);
1461 Temp src01 = bld.tmp(dst.type(), 1);
1462 bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
1463 Temp src10 = bld.tmp(src1.type(), 1);
1464 Temp src11 = bld.tmp(dst.type(), 1);
1465 bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
1466 if (dst.regClass() == s2) {
1467 Temp borrow = bld.tmp(s1);
1468 bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(borrow)), src00, src10);
1469 borrow = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.scc(bld.def(s1)), src01, src11, bld.scc(borrow)).def(1).getTemp();
1470 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), borrow, Operand(0u));
1471 } else if (dst.regClass() == v2) {
1472 Temp borrow = bld.vsub32(bld.def(v1), src00, src10, true).def(1).getTemp();
1473 borrow = bld.vsub32(bld.def(v1), src01, src11, true, Operand(borrow)).def(1).getTemp();
1474 borrow = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), Operand(1u), borrow);
1475 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), borrow, Operand(0u));
1476 } else {
1477 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1478 nir_print_instr(&instr->instr, stderr);
1479 fprintf(stderr, "\n");
1480 }
1481 break;
1482 }
1483 case nir_op_imul: {
1484 if (dst.regClass() == v1) {
1485 bld.vop3(aco_opcode::v_mul_lo_u32, Definition(dst),
1486 get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1487 } else if (dst.regClass() == s1) {
1488 emit_sop2_instruction(ctx, instr, aco_opcode::s_mul_i32, dst, false);
1489 } else {
1490 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1491 nir_print_instr(&instr->instr, stderr);
1492 fprintf(stderr, "\n");
1493 }
1494 break;
1495 }
1496 case nir_op_umul_high: {
1497 if (dst.regClass() == v1) {
1498 bld.vop3(aco_opcode::v_mul_hi_u32, Definition(dst), get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1499 } else if (dst.regClass() == s1 && ctx->options->chip_class >= GFX9) {
1500 bld.sop2(aco_opcode::s_mul_hi_u32, Definition(dst), get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1501 } else if (dst.regClass() == s1) {
1502 Temp tmp = bld.vop3(aco_opcode::v_mul_hi_u32, bld.def(v1), get_alu_src(ctx, instr->src[0]),
1503 as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
1504 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
1505 } else {
1506 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1507 nir_print_instr(&instr->instr, stderr);
1508 fprintf(stderr, "\n");
1509 }
1510 break;
1511 }
1512 case nir_op_imul_high: {
1513 if (dst.regClass() == v1) {
1514 bld.vop3(aco_opcode::v_mul_hi_i32, Definition(dst), get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1515 } else if (dst.regClass() == s1 && ctx->options->chip_class >= GFX9) {
1516 bld.sop2(aco_opcode::s_mul_hi_i32, Definition(dst), get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1]));
1517 } else if (dst.regClass() == s1) {
1518 Temp tmp = bld.vop3(aco_opcode::v_mul_hi_i32, bld.def(v1), get_alu_src(ctx, instr->src[0]),
1519 as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
1520 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
1521 } else {
1522 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1523 nir_print_instr(&instr->instr, stderr);
1524 fprintf(stderr, "\n");
1525 }
1526 break;
1527 }
1528 case nir_op_fmul: {
1529 if (dst.size() == 1) {
1530 emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_f32, dst, true);
1531 } else if (dst.size() == 2) {
1532 bld.vop3(aco_opcode::v_mul_f64, Definition(dst), get_alu_src(ctx, instr->src[0]),
1533 as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
1534 } else {
1535 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1536 nir_print_instr(&instr->instr, stderr);
1537 fprintf(stderr, "\n");
1538 }
1539 break;
1540 }
1541 case nir_op_fadd: {
1542 if (dst.size() == 1) {
1543 emit_vop2_instruction(ctx, instr, aco_opcode::v_add_f32, dst, true);
1544 } else if (dst.size() == 2) {
1545 bld.vop3(aco_opcode::v_add_f64, Definition(dst), get_alu_src(ctx, instr->src[0]),
1546 as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
1547 } else {
1548 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1549 nir_print_instr(&instr->instr, stderr);
1550 fprintf(stderr, "\n");
1551 }
1552 break;
1553 }
1554 case nir_op_fsub: {
1555 Temp src0 = get_alu_src(ctx, instr->src[0]);
1556 Temp src1 = get_alu_src(ctx, instr->src[1]);
1557 if (dst.size() == 1) {
1558 if (src1.type() == RegType::vgpr || src0.type() != RegType::vgpr)
1559 emit_vop2_instruction(ctx, instr, aco_opcode::v_sub_f32, dst, false);
1560 else
1561 emit_vop2_instruction(ctx, instr, aco_opcode::v_subrev_f32, dst, true);
1562 } else if (dst.size() == 2) {
1563 Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst),
1564 get_alu_src(ctx, instr->src[0]),
1565 as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
1566 VOP3A_instruction* sub = static_cast<VOP3A_instruction*>(add);
1567 sub->neg[1] = true;
1568 } else {
1569 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1570 nir_print_instr(&instr->instr, stderr);
1571 fprintf(stderr, "\n");
1572 }
1573 break;
1574 }
1575 case nir_op_fmax: {
1576 if (dst.size() == 1) {
1577 emit_vop2_instruction(ctx, instr, aco_opcode::v_max_f32, dst, true, false, ctx->block->fp_mode.must_flush_denorms32);
1578 } else if (dst.size() == 2) {
1579 if (ctx->block->fp_mode.must_flush_denorms16_64 && ctx->program->chip_class < GFX9) {
1580 Temp tmp = bld.vop3(aco_opcode::v_max_f64, bld.def(v2),
1581 get_alu_src(ctx, instr->src[0]),
1582 as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
1583 bld.vop3(aco_opcode::v_mul_f64, Definition(dst), Operand(0x3FF0000000000000lu), tmp);
1584 } else {
1585 bld.vop3(aco_opcode::v_max_f64, Definition(dst),
1586 get_alu_src(ctx, instr->src[0]),
1587 as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
1588 }
1589 } else {
1590 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1591 nir_print_instr(&instr->instr, stderr);
1592 fprintf(stderr, "\n");
1593 }
1594 break;
1595 }
1596 case nir_op_fmin: {
1597 if (dst.size() == 1) {
1598 emit_vop2_instruction(ctx, instr, aco_opcode::v_min_f32, dst, true, false, ctx->block->fp_mode.must_flush_denorms32);
1599 } else if (dst.size() == 2) {
1600 if (ctx->block->fp_mode.must_flush_denorms16_64 && ctx->program->chip_class < GFX9) {
1601 Temp tmp = bld.vop3(aco_opcode::v_min_f64, bld.def(v2),
1602 get_alu_src(ctx, instr->src[0]),
1603 as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
1604 bld.vop3(aco_opcode::v_mul_f64, Definition(dst), Operand(0x3FF0000000000000lu), tmp);
1605 } else {
1606 bld.vop3(aco_opcode::v_min_f64, Definition(dst),
1607 get_alu_src(ctx, instr->src[0]),
1608 as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
1609 }
1610 } else {
1611 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1612 nir_print_instr(&instr->instr, stderr);
1613 fprintf(stderr, "\n");
1614 }
1615 break;
1616 }
1617 case nir_op_fmax3: {
1618 if (dst.size() == 1) {
1619 emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_f32, dst, ctx->block->fp_mode.must_flush_denorms32);
1620 } else {
1621 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1622 nir_print_instr(&instr->instr, stderr);
1623 fprintf(stderr, "\n");
1624 }
1625 break;
1626 }
1627 case nir_op_fmin3: {
1628 if (dst.size() == 1) {
1629 emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_f32, dst, ctx->block->fp_mode.must_flush_denorms32);
1630 } else {
1631 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1632 nir_print_instr(&instr->instr, stderr);
1633 fprintf(stderr, "\n");
1634 }
1635 break;
1636 }
1637 case nir_op_fmed3: {
1638 if (dst.size() == 1) {
1639 emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_f32, dst, ctx->block->fp_mode.must_flush_denorms32);
1640 } else {
1641 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1642 nir_print_instr(&instr->instr, stderr);
1643 fprintf(stderr, "\n");
1644 }
1645 break;
1646 }
1647 case nir_op_umax3: {
1648 if (dst.size() == 1) {
1649 emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_u32, dst);
1650 } else {
1651 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1652 nir_print_instr(&instr->instr, stderr);
1653 fprintf(stderr, "\n");
1654 }
1655 break;
1656 }
1657 case nir_op_umin3: {
1658 if (dst.size() == 1) {
1659 emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_u32, dst);
1660 } else {
1661 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1662 nir_print_instr(&instr->instr, stderr);
1663 fprintf(stderr, "\n");
1664 }
1665 break;
1666 }
1667 case nir_op_umed3: {
1668 if (dst.size() == 1) {
1669 emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_u32, dst);
1670 } else {
1671 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1672 nir_print_instr(&instr->instr, stderr);
1673 fprintf(stderr, "\n");
1674 }
1675 break;
1676 }
1677 case nir_op_imax3: {
1678 if (dst.size() == 1) {
1679 emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_i32, dst);
1680 } else {
1681 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1682 nir_print_instr(&instr->instr, stderr);
1683 fprintf(stderr, "\n");
1684 }
1685 break;
1686 }
1687 case nir_op_imin3: {
1688 if (dst.size() == 1) {
1689 emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_i32, dst);
1690 } else {
1691 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1692 nir_print_instr(&instr->instr, stderr);
1693 fprintf(stderr, "\n");
1694 }
1695 break;
1696 }
1697 case nir_op_imed3: {
1698 if (dst.size() == 1) {
1699 emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_i32, dst);
1700 } else {
1701 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1702 nir_print_instr(&instr->instr, stderr);
1703 fprintf(stderr, "\n");
1704 }
1705 break;
1706 }
1707 case nir_op_cube_face_coord: {
1708 Temp in = get_alu_src(ctx, instr->src[0], 3);
1709 Temp src[3] = { emit_extract_vector(ctx, in, 0, v1),
1710 emit_extract_vector(ctx, in, 1, v1),
1711 emit_extract_vector(ctx, in, 2, v1) };
1712 Temp ma = bld.vop3(aco_opcode::v_cubema_f32, bld.def(v1), src[0], src[1], src[2]);
1713 ma = bld.vop1(aco_opcode::v_rcp_f32, bld.def(v1), ma);
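/* Per the ISA docs, v_cubema_f32 returns 2.0 * the major axis, so the madak
 * below computes sc * rcp(ma) + 0.5, mapping the face coordinate into [0, 1]. */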
1714 Temp sc = bld.vop3(aco_opcode::v_cubesc_f32, bld.def(v1), src[0], src[1], src[2]);
1715 Temp tc = bld.vop3(aco_opcode::v_cubetc_f32, bld.def(v1), src[0], src[1], src[2]);
1716 sc = bld.vop2(aco_opcode::v_madak_f32, bld.def(v1), sc, ma, Operand(0x3f000000u/*0.5*/));
1717 tc = bld.vop2(aco_opcode::v_madak_f32, bld.def(v1), tc, ma, Operand(0x3f000000u/*0.5*/));
1718 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), sc, tc);
1719 break;
1720 }
1721 case nir_op_cube_face_index: {
1722 Temp in = get_alu_src(ctx, instr->src[0], 3);
1723 Temp src[3] = { emit_extract_vector(ctx, in, 0, v1),
1724 emit_extract_vector(ctx, in, 1, v1),
1725 emit_extract_vector(ctx, in, 2, v1) };
1726 bld.vop3(aco_opcode::v_cubeid_f32, Definition(dst), src[0], src[1], src[2]);
1727 break;
1728 }
1729 case nir_op_bcsel: {
1730 emit_bcsel(ctx, instr, dst);
1731 break;
1732 }
1733 case nir_op_frsq: {
1734 if (dst.size() == 1) {
1735 emit_rsq(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0]));
1736 } else if (dst.size() == 2) {
1737 emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f64, dst);
1738 } else {
1739 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1740 nir_print_instr(&instr->instr, stderr);
1741 fprintf(stderr, "\n");
1742 }
1743 break;
1744 }
1745 case nir_op_fneg: {
1746 Temp src = get_alu_src(ctx, instr->src[0]);
1747 if (dst.size() == 1) {
1748 if (ctx->block->fp_mode.must_flush_denorms32)
1749 src = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x3f800000u), as_vgpr(ctx, src));
1750 bld.vop2(aco_opcode::v_xor_b32, Definition(dst), Operand(0x80000000u), as_vgpr(ctx, src));
1751 } else if (dst.size() == 2) {
1752 if (ctx->block->fp_mode.must_flush_denorms16_64)
1753 src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand(0x3FF0000000000000lu), as_vgpr(ctx, src));
1754 Temp upper = bld.tmp(v1), lower = bld.tmp(v1);
1755 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
1756 upper = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), Operand(0x80000000u), upper);
1757 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
1758 } else {
1759 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1760 nir_print_instr(&instr->instr, stderr);
1761 fprintf(stderr, "\n");
1762 }
1763 break;
1764 }
1765 case nir_op_fabs: {
1766 Temp src = get_alu_src(ctx, instr->src[0]);
1767 if (dst.size() == 1) {
1768 if (ctx->block->fp_mode.must_flush_denorms32)
1769 src = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x3f800000u), as_vgpr(ctx, src));
1770 bld.vop2(aco_opcode::v_and_b32, Definition(dst), Operand(0x7FFFFFFFu), as_vgpr(ctx, src));
1771 } else if (dst.size() == 2) {
1772 if (ctx->block->fp_mode.must_flush_denorms16_64)
1773 src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand(0x3FF0000000000000lu), as_vgpr(ctx, src));
1774 Temp upper = bld.tmp(v1), lower = bld.tmp(v1);
1775 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
1776 upper = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7FFFFFFFu), upper);
1777 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
1778 } else {
1779 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1780 nir_print_instr(&instr->instr, stderr);
1781 fprintf(stderr, "\n");
1782 }
1783 break;
1784 }
1785 case nir_op_fsat: {
1786 Temp src = get_alu_src(ctx, instr->src[0]);
1787 if (dst.size() == 1) {
1788 bld.vop3(aco_opcode::v_med3_f32, Definition(dst), Operand(0u), Operand(0x3f800000u), src);
1789 /* apparently, it is not necessary to flush denorms if this instruction is used with these operands */
1790 // TODO: confirm that this holds under any circumstances
1791 } else if (dst.size() == 2) {
1792 Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), src, Operand(0u));
1793 VOP3A_instruction* vop3 = static_cast<VOP3A_instruction*>(add);
1794 vop3->clamp = true;
1795 } else {
1796 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1797 nir_print_instr(&instr->instr, stderr);
1798 fprintf(stderr, "\n");
1799 }
1800 break;
1801 }
1802 case nir_op_flog2: {
1803 if (dst.size() == 1) {
1804 emit_log2(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0]));
1805 } else {
1806 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1807 nir_print_instr(&instr->instr, stderr);
1808 fprintf(stderr, "\n");
1809 }
1810 break;
1811 }
1812 case nir_op_frcp: {
1813 if (dst.size() == 1) {
1814 emit_rcp(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0]));
1815 } else if (dst.size() == 2) {
1816 emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f64, dst);
1817 } else {
1818 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1819 nir_print_instr(&instr->instr, stderr);
1820 fprintf(stderr, "\n");
1821 }
1822 break;
1823 }
1824 case nir_op_fexp2: {
1825 if (dst.size() == 1) {
1826 emit_vop1_instruction(ctx, instr, aco_opcode::v_exp_f32, dst);
1827 } else {
1828 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1829 nir_print_instr(&instr->instr, stderr);
1830 fprintf(stderr, "\n");
1831 }
1832 break;
1833 }
1834 case nir_op_fsqrt: {
1835 if (dst.size() == 1) {
1836 emit_sqrt(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0]));
1837 } else if (dst.size() == 2) {
1838 emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f64, dst);
1839 } else {
1840 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1841 nir_print_instr(&instr->instr, stderr);
1842 fprintf(stderr, "\n");
1843 }
1844 break;
1845 }
1846 case nir_op_ffract: {
1847 if (dst.size() == 1) {
1848 emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f32, dst);
1849 } else if (dst.size() == 2) {
1850 emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f64, dst);
1851 } else {
1852 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1853 nir_print_instr(&instr->instr, stderr);
1854 fprintf(stderr, "\n");
1855 }
1856 break;
1857 }
1858 case nir_op_ffloor: {
1859 if (dst.size() == 1) {
1860 emit_vop1_instruction(ctx, instr, aco_opcode::v_floor_f32, dst);
1861 } else if (dst.size() == 2) {
1862 emit_floor_f64(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0]));
1863 } else {
1864 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1865 nir_print_instr(&instr->instr, stderr);
1866 fprintf(stderr, "\n");
1867 }
1868 break;
1869 }
1870 case nir_op_fceil: {
1871 if (dst.size() == 1) {
1872 emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f32, dst);
1873 } else if (dst.size() == 2) {
1874 if (ctx->options->chip_class >= GFX7) {
1875 emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f64, dst);
1876 } else {
1877 /* GFX6 doesn't support V_CEIL_F64, lower it. */
1878 Temp src0 = get_alu_src(ctx, instr->src[0]);
1879
1880 /* trunc = trunc(src0)
1881 * if (src0 > 0.0 && src0 != trunc)
1882 * trunc += 1.0
1883 */
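/* e.g. src0 = 1.25: trunc = 1.0 and both conditions hold, so 1.0 is added
 * and the result is 2.0; for src0 = -1.25, trunc = -1.0 is already the
 * ceiling and is returned unchanged. */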
1884 Temp trunc = emit_trunc_f64(ctx, bld, bld.def(v2), src0);
1885 Temp tmp0 = bld.vopc_e64(aco_opcode::v_cmp_gt_f64, bld.hint_vcc(bld.def(bld.lm)), src0, Operand(0u));
1886 Temp tmp1 = bld.vopc(aco_opcode::v_cmp_lg_f64, bld.hint_vcc(bld.def(bld.lm)), src0, trunc);
1887 Temp cond = bld.sop2(aco_opcode::s_and_b64, bld.hint_vcc(bld.def(s2)), bld.def(s1, scc), tmp0, tmp1);
1888 Temp add = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), bld.copy(bld.def(v1), Operand(0u)), bld.copy(bld.def(v1), Operand(0x3ff00000u)), cond);
1889 add = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), bld.copy(bld.def(v1), Operand(0u)), add);
1890 bld.vop3(aco_opcode::v_add_f64, Definition(dst), trunc, add);
1891 }
1892 } else {
1893 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1894 nir_print_instr(&instr->instr, stderr);
1895 fprintf(stderr, "\n");
1896 }
1897 break;
1898 }
1899 case nir_op_ftrunc: {
1900 if (dst.size() == 1) {
1901 emit_vop1_instruction(ctx, instr, aco_opcode::v_trunc_f32, dst);
1902 } else if (dst.size() == 2) {
1903 emit_trunc_f64(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0]));
1904 } else {
1905 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1906 nir_print_instr(&instr->instr, stderr);
1907 fprintf(stderr, "\n");
1908 }
1909 break;
1910 }
1911 case nir_op_fround_even: {
1912 if (dst.size() == 1) {
1913 emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f32, dst);
1914 } else if (dst.size() == 2) {
1915 if (ctx->options->chip_class >= GFX7) {
1916 emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f64, dst);
1917 } else {
1918 /* GFX6 doesn't support V_RNDNE_F64, lower it. */
1919 Temp src0 = get_alu_src(ctx, instr->src[0]);
1920
1921 Temp src0_lo = bld.tmp(v1), src0_hi = bld.tmp(v1);
1922 bld.pseudo(aco_opcode::p_split_vector, Definition(src0_lo), Definition(src0_hi), src0);
1923
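/* Round to nearest even by adding and then subtracting +/-2^52:
 * v_bfi copies src0's sign into 0x4330000000000000 (2^52), and the add/sub
 * pair makes the FPU's round-to-nearest-even discard the fraction bits.
 * Inputs with |src0| > 2^52 - 0.5 (the compare below) are already integral
 * and are selected unchanged. */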
1924 Temp bitmask = bld.sop1(aco_opcode::s_brev_b32, bld.def(s1), bld.copy(bld.def(s1), Operand(-2u)));
1925 Temp bfi = bld.vop3(aco_opcode::v_bfi_b32, bld.def(v1), bitmask, bld.copy(bld.def(v1), Operand(0x43300000u)), as_vgpr(ctx, src0_hi));
1926 Temp tmp = bld.vop3(aco_opcode::v_add_f64, bld.def(v2), src0, bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(0u), bfi));
1927 Instruction *sub = bld.vop3(aco_opcode::v_add_f64, bld.def(v2), tmp, bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(0u), bfi));
1928 static_cast<VOP3A_instruction*>(sub)->neg[1] = true;
1929 tmp = sub->definitions[0].getTemp();
1930
1931 Temp v = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(-1u), Operand(0x432fffffu));
1932 Instruction* vop3 = bld.vopc_e64(aco_opcode::v_cmp_gt_f64, bld.hint_vcc(bld.def(bld.lm)), src0, v);
1933 static_cast<VOP3A_instruction*>(vop3)->abs[0] = true;
1934 Temp cond = vop3->definitions[0].getTemp();
1935
1936 Temp tmp_lo = bld.tmp(v1), tmp_hi = bld.tmp(v1);
1937 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp_lo), Definition(tmp_hi), tmp);
1938 Temp dst0 = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp_lo, as_vgpr(ctx, src0_lo), cond);
1939 Temp dst1 = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp_hi, as_vgpr(ctx, src0_hi), cond);
1940
1941 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
1942 }
1943 } else {
1944 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1945 nir_print_instr(&instr->instr, stderr);
1946 fprintf(stderr, "\n");
1947 }
1948 break;
1949 }
1950 case nir_op_fsin:
1951 case nir_op_fcos: {
1952 Temp src = get_alu_src(ctx, instr->src[0]);
1954 if (dst.size() == 1) {
1955 Temp two_pi_inv = bld.copy(bld.def(s1), Operand(0x3e22f983u)); /* 1/(2*PI) */
1956 Temp tmp = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), two_pi_inv, as_vgpr(ctx, src));
1957
1958 /* before GFX9, v_sin_f32 and v_cos_f32 had a valid input domain of [-256, +256] */
1959 if (ctx->options->chip_class < GFX9)
1960 tmp = bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), tmp);
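/* fract() only drops whole periods, e.g. sin(3*PI): 1.5 -> 0.5 revolutions,
 * which evaluates to the same result. */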
1961
1962 aco_opcode opcode = instr->op == nir_op_fsin ? aco_opcode::v_sin_f32 : aco_opcode::v_cos_f32;
1963 bld.vop1(opcode, Definition(dst), tmp);
1964 } else {
1965 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1966 nir_print_instr(&instr->instr, stderr);
1967 fprintf(stderr, "\n");
1968 }
1969 break;
1970 }
1971 case nir_op_ldexp: {
1972 if (dst.size() == 1) {
1973 bld.vop3(aco_opcode::v_ldexp_f32, Definition(dst),
1974 as_vgpr(ctx, get_alu_src(ctx, instr->src[0])),
1975 get_alu_src(ctx, instr->src[1]));
1976 } else if (dst.size() == 2) {
1977 bld.vop3(aco_opcode::v_ldexp_f64, Definition(dst),
1978 as_vgpr(ctx, get_alu_src(ctx, instr->src[0])),
1979 get_alu_src(ctx, instr->src[1]));
1980 } else {
1981 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1982 nir_print_instr(&instr->instr, stderr);
1983 fprintf(stderr, "\n");
1984 }
1985 break;
1986 }
1987 case nir_op_frexp_sig: {
1988 if (dst.size() == 1) {
1989 bld.vop1(aco_opcode::v_frexp_mant_f32, Definition(dst),
1990 get_alu_src(ctx, instr->src[0]));
1991 } else if (dst.size() == 2) {
1992 bld.vop1(aco_opcode::v_frexp_mant_f64, Definition(dst),
1993 get_alu_src(ctx, instr->src[0]));
1994 } else {
1995 fprintf(stderr, "Unimplemented NIR instr bit size: ");
1996 nir_print_instr(&instr->instr, stderr);
1997 fprintf(stderr, "\n");
1998 }
1999 break;
2000 }
2001 case nir_op_frexp_exp: {
2002 if (instr->src[0].src.ssa->bit_size == 32) {
2003 bld.vop1(aco_opcode::v_frexp_exp_i32_f32, Definition(dst),
2004 get_alu_src(ctx, instr->src[0]));
2005 } else if (instr->src[0].src.ssa->bit_size == 64) {
2006 bld.vop1(aco_opcode::v_frexp_exp_i32_f64, Definition(dst),
2007 get_alu_src(ctx, instr->src[0]));
2008 } else {
2009 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2010 nir_print_instr(&instr->instr, stderr);
2011 fprintf(stderr, "\n");
2012 }
2013 break;
2014 }
2015 case nir_op_fsign: {
2016 Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0]));
2017 if (dst.size() == 1) {
2018 Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2019 src = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0x3f800000u), src, cond);
2020 cond = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2021 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0xbf800000u), src, cond);
2022 } else if (dst.size() == 2) {
2023 Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2024 Temp tmp = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0x3FF00000u));
2025 Temp upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, emit_extract_vector(ctx, src, 1, v1), cond);
2026
2027 cond = bld.vopc(aco_opcode::v_cmp_le_f64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
2028 tmp = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0xBFF00000u));
2029 upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, upper, cond);
2030
2031 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), Operand(0u), upper);
2032 } else {
2033 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2034 nir_print_instr(&instr->instr, stderr);
2035 fprintf(stderr, "\n");
2036 }
2037 break;
2038 }
2039 case nir_op_f2f16:
2040 case nir_op_f2f16_rtne: {
2041 Temp src = get_alu_src(ctx, instr->src[0]);
2042 if (instr->src[0].src.ssa->bit_size == 64)
2043 src = bld.vop1(aco_opcode::v_cvt_f32_f64, bld.def(v1), src);
2044 src = bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v1), src);
2045 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), src);
2046 break;
2047 }
2048 case nir_op_f2f16_rtz: {
2049 Temp src = get_alu_src(ctx, instr->src[0]);
2050 if (instr->src[0].src.ssa->bit_size == 64)
2051 src = bld.vop1(aco_opcode::v_cvt_f32_f64, bld.def(v1), src);
2052 src = bld.vop3(aco_opcode::v_cvt_pkrtz_f16_f32, bld.def(v1), src, Operand(0u));
2053 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), src);
2054 break;
2055 }
2056 case nir_op_f2f32: {
2057 if (instr->src[0].src.ssa->bit_size == 16) {
2058 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f16, dst);
2059 } else if (instr->src[0].src.ssa->bit_size == 64) {
2060 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f64, dst);
2061 } else {
2062 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2063 nir_print_instr(&instr->instr, stderr);
2064 fprintf(stderr, "\n");
2065 }
2066 break;
2067 }
2068 case nir_op_f2f64: {
2069 Temp src = get_alu_src(ctx, instr->src[0]);
2070 if (instr->src[0].src.ssa->bit_size == 16)
2071 src = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
2072 bld.vop1(aco_opcode::v_cvt_f64_f32, Definition(dst), src);
2073 break;
2074 }
2075 case nir_op_i2f32: {
2076 assert(dst.size() == 1);
2077 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_i32, dst);
2078 break;
2079 }
2080 case nir_op_i2f64: {
2081 if (instr->src[0].src.ssa->bit_size == 32) {
2082 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f64_i32, dst);
2083 } else if (instr->src[0].src.ssa->bit_size == 64) {
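/* Convert the two 32-bit halves separately: the low half as unsigned, the
 * high half as signed scaled by 2^32 via ldexp, then sum them. */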
2084 Temp src = get_alu_src(ctx, instr->src[0]);
2085 RegClass rc = RegClass(src.type(), 1);
2086 Temp lower = bld.tmp(rc), upper = bld.tmp(rc);
2087 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
2088 lower = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), lower);
2089 upper = bld.vop1(aco_opcode::v_cvt_f64_i32, bld.def(v2), upper);
2090 upper = bld.vop3(aco_opcode::v_ldexp_f64, bld.def(v2), upper, Operand(32u));
2091 bld.vop3(aco_opcode::v_add_f64, Definition(dst), lower, upper);
2092
2093 } else {
2094 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2095 nir_print_instr(&instr->instr, stderr);
2096 fprintf(stderr, "\n");
2097 }
2098 break;
2099 }
2100 case nir_op_u2f32: {
2101 assert(dst.size() == 1);
2102 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_u32, dst);
2103 break;
2104 }
2105 case nir_op_u2f64: {
2106 if (instr->src[0].src.ssa->bit_size == 32) {
2107 emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f64_u32, dst);
2108 } else if (instr->src[0].src.ssa->bit_size == 64) {
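/* Same split as for i2f64 above, except both halves convert as unsigned. */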
2109 Temp src = get_alu_src(ctx, instr->src[0]);
2110 RegClass rc = RegClass(src.type(), 1);
2111 Temp lower = bld.tmp(rc), upper = bld.tmp(rc);
2112 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src);
2113 lower = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), lower);
2114 upper = bld.vop1(aco_opcode::v_cvt_f64_u32, bld.def(v2), upper);
2115 upper = bld.vop3(aco_opcode::v_ldexp_f64, bld.def(v2), upper, Operand(32u));
2116 bld.vop3(aco_opcode::v_add_f64, Definition(dst), lower, upper);
2117 } else {
2118 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2119 nir_print_instr(&instr->instr, stderr);
2120 fprintf(stderr, "\n");
2121 }
2122 break;
2123 }
2124 case nir_op_f2i16: {
2125 Temp src = get_alu_src(ctx, instr->src[0]);
2126 if (instr->src[0].src.ssa->bit_size == 16)
2127 src = bld.vop1(aco_opcode::v_cvt_i16_f16, bld.def(v1), src);
2128 else if (instr->src[0].src.ssa->bit_size == 32)
2129 src = bld.vop1(aco_opcode::v_cvt_i32_f32, bld.def(v1), src);
2130 else
2131 src = bld.vop1(aco_opcode::v_cvt_i32_f64, bld.def(v1), src);
2132
2133 if (dst.type() == RegType::vgpr)
2134 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), src);
2135 else
2136 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), src);
2137 break;
2138 }
2139 case nir_op_f2u16: {
2140 Temp src = get_alu_src(ctx, instr->src[0]);
2141 if (instr->src[0].src.ssa->bit_size == 16)
2142 src = bld.vop1(aco_opcode::v_cvt_u16_f16, bld.def(v1), src);
2143 else if (instr->src[0].src.ssa->bit_size == 32)
2144 src = bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), src);
2145 else
2146 src = bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), src);
2147
2148 if (dst.type() == RegType::vgpr)
2149 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), src);
2150 else
2151 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), src);
2152 break;
2153 }
2154 case nir_op_f2i32: {
2155 Temp src = get_alu_src(ctx, instr->src[0]);
2156 if (instr->src[0].src.ssa->bit_size == 32) {
2157 if (dst.type() == RegType::vgpr)
2158 bld.vop1(aco_opcode::v_cvt_i32_f32, Definition(dst), src);
2159 else
2160 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
2161 bld.vop1(aco_opcode::v_cvt_i32_f32, bld.def(v1), src));
2162
2163 } else if (instr->src[0].src.ssa->bit_size == 64) {
2164 if (dst.type() == RegType::vgpr)
2165 bld.vop1(aco_opcode::v_cvt_i32_f64, Definition(dst), src);
2166 else
2167 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
2168 bld.vop1(aco_opcode::v_cvt_i32_f64, bld.def(v1), src));
2169
2170 } else {
2171 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2172 nir_print_instr(&instr->instr, stderr);
2173 fprintf(stderr, "\n");
2174 }
2175 break;
2176 }
2177 case nir_op_f2u32: {
2178 Temp src = get_alu_src(ctx, instr->src[0]);
2179 if (instr->src[0].src.ssa->bit_size == 32) {
2180 if (dst.type() == RegType::vgpr)
2181 bld.vop1(aco_opcode::v_cvt_u32_f32, Definition(dst), src);
2182 else
2183 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
2184 bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), src));
2185
2186 } else if (instr->src[0].src.ssa->bit_size == 64) {
2187 if (dst.type() == RegType::vgpr)
2188 bld.vop1(aco_opcode::v_cvt_u32_f64, Definition(dst), src);
2189 else
2190 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
2191 bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), src));
2192
2193 } else {
2194 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2195 nir_print_instr(&instr->instr, stderr);
2196 fprintf(stderr, "\n");
2197 }
2198 break;
2199 }
2200 case nir_op_f2i64: {
2201 Temp src = get_alu_src(ctx, instr->src[0]);
2202 if (instr->src[0].src.ssa->bit_size == 32 && dst.type() == RegType::vgpr) {
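/* Manual f32 -> i64: clamp the frexp exponent to [0, 64], place the 24-bit
 * significand at bits [62:39] of a 64-bit value, shift it right by
 * 63 - exponent, then negate via (x ^ sign) - sign. A borrow from
 * 63 - exponent means the exponent is out of range, so saturate. */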
2203 Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f32, bld.def(v1), src);
2204 exponent = bld.vop3(aco_opcode::v_med3_i32, bld.def(v1), Operand(0x0u), exponent, Operand(64u));
2205 Temp mantissa = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7fffffu), src);
2206 Temp sign = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), src);
2207 mantissa = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand(0x800000u), mantissa);
2208 mantissa = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(7u), mantissa);
2209 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(0u), mantissa);
2210 Temp new_exponent = bld.tmp(v1);
2211 Temp borrow = bld.vsub32(Definition(new_exponent), Operand(63u), exponent, true).def(1).getTemp();
2212 if (ctx->program->chip_class >= GFX8)
2213 mantissa = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), new_exponent, mantissa);
2214 else
2215 mantissa = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), mantissa, new_exponent);
2216 Temp saturate = bld.vop1(aco_opcode::v_bfrev_b32, bld.def(v1), Operand(0xfffffffeu));
2217 Temp lower = bld.tmp(v1), upper = bld.tmp(v1);
2218 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
2219 lower = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), lower, Operand(0xffffffffu), borrow);
2220 upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), upper, saturate, borrow);
2221 lower = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), sign, lower);
2222 upper = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), sign, upper);
2223 Temp new_lower = bld.tmp(v1);
2224 borrow = bld.vsub32(Definition(new_lower), lower, sign, true).def(1).getTemp();
2225 Temp new_upper = bld.vsub32(bld.def(v1), upper, sign, false, borrow);
2226 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), new_lower, new_upper);
2227
2228 } else if (instr->src[0].src.ssa->bit_size == 32 && dst.type() == RegType::sgpr) {
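/* The same algorithm on the SALU. e.g. src = 1.0f: the biased exponent
 * field 127 gives exponent = 1, the mantissa 0x800000 << 7 lands in the
 * high dword, and the shift by 63 - 1 = 62 yields 1. */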
2229 if (src.type() == RegType::vgpr)
2230 src = bld.as_uniform(src);
2231 Temp exponent = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), src, Operand(0x80017u));
2232 exponent = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc), exponent, Operand(126u));
2233 exponent = bld.sop2(aco_opcode::s_max_u32, bld.def(s1), bld.def(s1, scc), Operand(0u), exponent);
2234 exponent = bld.sop2(aco_opcode::s_min_u32, bld.def(s1), bld.def(s1, scc), Operand(64u), exponent);
2235 Temp mantissa = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0x7fffffu), src);
2236 Temp sign = bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), src, Operand(31u));
2237 mantissa = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), Operand(0x800000u), mantissa);
2238 mantissa = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), mantissa, Operand(7u));
2239 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(0u), mantissa);
2240 exponent = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc), Operand(63u), exponent);
2241 mantissa = bld.sop2(aco_opcode::s_lshr_b64, bld.def(s2), bld.def(s1, scc), mantissa, exponent);
2242 Temp cond = bld.sopc(aco_opcode::s_cmp_eq_u32, bld.def(s1, scc), exponent, Operand(0xffffffffu)); // exp >= 64
2243 Temp saturate = bld.sop1(aco_opcode::s_brev_b64, bld.def(s2), Operand(0xfffffffeu));
2244 mantissa = bld.sop2(aco_opcode::s_cselect_b64, bld.def(s2), saturate, mantissa, cond);
2245 Temp lower = bld.tmp(s1), upper = bld.tmp(s1);
2246 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
2247 lower = bld.sop2(aco_opcode::s_xor_b32, bld.def(s1), bld.def(s1, scc), sign, lower);
2248 upper = bld.sop2(aco_opcode::s_xor_b32, bld.def(s1), bld.def(s1, scc), sign, upper);
2249 Temp borrow = bld.tmp(s1);
2250 lower = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.scc(Definition(borrow)), lower, sign);
2251 upper = bld.sop2(aco_opcode::s_subb_u32, bld.def(s1), bld.def(s1, scc), upper, sign, borrow);
2252 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
2253
2254 } else if (instr->src[0].src.ssa->bit_size == 64) {
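/* Split the f64 into 32-bit halves: high = floor(trunc(src) * 2^-32)
 * (0x3df00000 is 2^-32), and low = trunc(src) - high * 2^32, computed as an
 * fma against -2^32 (0xc1f00000). */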
2255 Temp vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(0u), Operand(0x3df00000u));
2256 Temp trunc = emit_trunc_f64(ctx, bld, bld.def(v2), src);
2257 Temp mul = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), trunc, vec);
2258 vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(0u), Operand(0xc1f00000u));
2259 Temp floor = emit_floor_f64(ctx, bld, bld.def(v2), mul);
2260 Temp fma = bld.vop3(aco_opcode::v_fma_f64, bld.def(v2), floor, vec, trunc);
2261 Temp lower = bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), fma);
2262 Temp upper = bld.vop1(aco_opcode::v_cvt_i32_f64, bld.def(v1), floor);
2263 if (dst.type() == RegType::sgpr) {
2264 lower = bld.as_uniform(lower);
2265 upper = bld.as_uniform(upper);
2266 }
2267 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
2268
2269 } else {
2270 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2271 nir_print_instr(&instr->instr, stderr);
2272 fprintf(stderr, "\n");
2273 }
2274 break;
2275 }
2276 case nir_op_f2u64: {
2277 Temp src = get_alu_src(ctx, instr->src[0]);
2278 if (instr->src[0].src.ssa->bit_size == 32 && dst.type() == RegType::vgpr) {
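/* Manual f32 -> u64: exponents below 24 shift the 24-bit mantissa right,
 * larger ones shift its 64-bit copy left; exponents above 64 saturate the
 * result to UINT64_MAX. */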
2279 Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f32, bld.def(v1), src);
2280 Temp exponent_in_range = bld.vopc(aco_opcode::v_cmp_ge_i32, bld.hint_vcc(bld.def(bld.lm)), Operand(64u), exponent);
2281 exponent = bld.vop2(aco_opcode::v_max_i32, bld.def(v1), Operand(0x0u), exponent);
2282 Temp mantissa = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7fffffu), src);
2283 mantissa = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand(0x800000u), mantissa);
2284 Temp exponent_small = bld.vsub32(bld.def(v1), Operand(24u), exponent);
2285 Temp small = bld.vop2(aco_opcode::v_lshrrev_b32, bld.def(v1), exponent_small, mantissa);
2286 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), mantissa, Operand(0u));
2287 Temp new_exponent = bld.tmp(v1);
2288 Temp cond_small = bld.vsub32(Definition(new_exponent), exponent, Operand(24u), true).def(1).getTemp();
2289 if (ctx->program->chip_class >= GFX8)
2290 mantissa = bld.vop3(aco_opcode::v_lshlrev_b64, bld.def(v2), new_exponent, mantissa);
2291 else
2292 mantissa = bld.vop3(aco_opcode::v_lshl_b64, bld.def(v2), mantissa, new_exponent);
2293 Temp lower = bld.tmp(v1), upper = bld.tmp(v1);
2294 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
2295 lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), lower, small, cond_small);
2296 upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), upper, Operand(0u), cond_small);
2297 lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0xffffffffu), lower, exponent_in_range);
2298 upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0xffffffffu), upper, exponent_in_range);
2299 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
2300
2301 } else if (instr->src[0].src.ssa->bit_size == 32 && dst.type() == RegType::sgpr) {
2302 if (src.type() == RegType::vgpr)
2303 src = bld.as_uniform(src);
2304 Temp exponent = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), src, Operand(0x80017u));
2305 exponent = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc), exponent, Operand(126u));
2306 exponent = bld.sop2(aco_opcode::s_max_u32, bld.def(s1), bld.def(s1, scc), Operand(0u), exponent);
2307 Temp mantissa = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0x7fffffu), src);
2308 mantissa = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), Operand(0x800000u), mantissa);
2309 Temp exponent_small = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc), Operand(24u), exponent);
2310 Temp small = bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc), mantissa, exponent_small);
2311 mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), mantissa, Operand(0u));
2312 Temp exponent_large = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc), exponent, Operand(24u));
2313 mantissa = bld.sop2(aco_opcode::s_lshl_b64, bld.def(s2), bld.def(s1, scc), mantissa, exponent_large);
2314 Temp cond = bld.sopc(aco_opcode::s_cmp_ge_i32, bld.def(s1, scc), Operand(64u), exponent);
2315 mantissa = bld.sop2(aco_opcode::s_cselect_b64, bld.def(s2), mantissa, Operand(0xffffffffu), cond);
2316 Temp lower = bld.tmp(s1), upper = bld.tmp(s1);
2317 bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa);
2318 Temp cond_small = bld.sopc(aco_opcode::s_cmp_le_i32, bld.def(s1, scc), exponent, Operand(24u));
2319 lower = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), small, lower, cond_small);
2320 upper = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), Operand(0u), upper, cond_small);
2321 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
2322
2323 } else if (instr->src[0].src.ssa->bit_size == 64) {
2324 Temp vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(0u), Operand(0x3df00000u));
2325 Temp trunc = emit_trunc_f64(ctx, bld, bld.def(v2), src);
2326 Temp mul = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), trunc, vec);
2327 vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(0u), Operand(0xc1f00000u));
2328 Temp floor = emit_floor_f64(ctx, bld, bld.def(v2), mul);
2329 Temp fma = bld.vop3(aco_opcode::v_fma_f64, bld.def(v2), floor, vec, trunc);
2330 Temp lower = bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), fma);
2331 Temp upper = bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), floor);
2332 if (dst.type() == RegType::sgpr) {
2333 lower = bld.as_uniform(lower);
2334 upper = bld.as_uniform(upper);
2335 }
2336 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
2337
2338 } else {
2339 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2340 nir_print_instr(&instr->instr, stderr);
2341 fprintf(stderr, "\n");
2342 }
2343 break;
2344 }
2345 case nir_op_b2f32: {
2346 Temp src = get_alu_src(ctx, instr->src[0]);
2347 assert(src.regClass() == bld.lm);
2348
2349 if (dst.regClass() == s1) {
2350 src = bool_to_scalar_condition(ctx, src);
2351 bld.sop2(aco_opcode::s_mul_i32, Definition(dst), Operand(0x3f800000u), src);
2352 } else if (dst.regClass() == v1) {
2353 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand(0x3f800000u), src);
2354 } else {
2355 unreachable("Wrong destination register class for nir_op_b2f32.");
2356 }
2357 break;
2358 }
2359 case nir_op_b2f64: {
2360 Temp src = get_alu_src(ctx, instr->src[0]);
2361 assert(src.regClass() == bld.lm);
2362
2363 if (dst.regClass() == s2) {
2364 src = bool_to_scalar_condition(ctx, src);
2365 bld.sop2(aco_opcode::s_cselect_b64, Definition(dst), Operand(0x3f800000u), Operand(0u), bld.scc(src));
2366 } else if (dst.regClass() == v2) {
2367 Temp one = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0x3FF00000u));
2368 Temp upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), one, src);
2369 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), Operand(0u), upper);
2370 } else {
2371 unreachable("Wrong destination register class for nir_op_b2f64.");
2372 }
2373 break;
2374 }
2375 case nir_op_i2i8:
2376 case nir_op_u2u8: {
2377 Temp src = get_alu_src(ctx, instr->src[0]);
2378 /* we can actually just say dst = src */
2379 if (src.regClass() == s1)
2380 bld.copy(Definition(dst), src);
2381 else
2382 emit_extract_vector(ctx, src, 0, dst);
2383 break;
2384 }
2385 case nir_op_i2i16: {
2386 Temp src = get_alu_src(ctx, instr->src[0]);
2387 if (instr->src[0].src.ssa->bit_size == 8) {
2388 if (dst.regClass() == s1) {
2389 bld.sop1(aco_opcode::s_sext_i32_i8, Definition(dst), Operand(src));
2390 } else {
2391 assert(src.regClass() == v1b);
2392 aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
2393 sdwa->operands[0] = Operand(src);
2394 sdwa->definitions[0] = Definition(dst);
2395 sdwa->sel[0] = sdwa_sbyte;
2396 sdwa->dst_sel = sdwa_sword;
2397 ctx->block->instructions.emplace_back(std::move(sdwa));
2398 }
2399 } else {
2400 Temp src = get_alu_src(ctx, instr->src[0]);
2401 /* we can actually just say dst = src */
2402 if (src.regClass() == s1)
2403 bld.copy(Definition(dst), src);
2404 else
2405 emit_extract_vector(ctx, src, 0, dst);
2406 }
2407 break;
2408 }
2409 case nir_op_u2u16: {
2410 Temp src = get_alu_src(ctx, instr->src[0]);
2411 if (instr->src[0].src.ssa->bit_size == 8) {
2412 if (dst.regClass() == s1)
2413 bld.sop2(aco_opcode::s_and_b32, Definition(dst), bld.def(s1, scc), Operand(0xFFu), src);
2414 else {
2415 assert(src.regClass() == v1b);
2416 aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
2417 sdwa->operands[0] = Operand(src);
2418 sdwa->definitions[0] = Definition(dst);
2419 sdwa->sel[0] = sdwa_ubyte;
2420 sdwa->dst_sel = sdwa_uword;
2421 ctx->block->instructions.emplace_back(std::move(sdwa));
2422 }
2423 } else {
2424 Temp src = get_alu_src(ctx, instr->src[0]);
2425 /* we can actually just say dst = src */
2426 if (src.regClass() == s1)
2427 bld.copy(Definition(dst), src);
2428 else
2429 emit_extract_vector(ctx, src, 0, dst);
2430 }
2431 break;
2432 }
2433 case nir_op_i2i32: {
2434 Temp src = get_alu_src(ctx, instr->src[0]);
2435 if (instr->src[0].src.ssa->bit_size == 8) {
2436 if (dst.regClass() == s1) {
2437 bld.sop1(aco_opcode::s_sext_i32_i8, Definition(dst), Operand(src));
2438 } else {
2439 assert(src.regClass() == v1b);
2440 aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
2441 sdwa->operands[0] = Operand(src);
2442 sdwa->definitions[0] = Definition(dst);
2443 sdwa->sel[0] = sdwa_sbyte;
2444 sdwa->dst_sel = sdwa_sdword;
2445 ctx->block->instructions.emplace_back(std::move(sdwa));
2446 }
2447 } else if (instr->src[0].src.ssa->bit_size == 16) {
2448 if (dst.regClass() == s1) {
2449 bld.sop1(aco_opcode::s_sext_i32_i16, Definition(dst), Operand(src));
2450 } else {
2451 assert(src.regClass() == v2b);
2452 aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
2453 sdwa->operands[0] = Operand(src);
2454 sdwa->definitions[0] = Definition(dst);
2455 sdwa->sel[0] = sdwa_sword;
2456 sdwa->dst_sel = sdwa_udword;
2457 ctx->block->instructions.emplace_back(std::move(sdwa));
2458 }
2459 } else if (instr->src[0].src.ssa->bit_size == 64) {
2460 /* we can actually just say dst = src, as it would map the lower register */
2461 emit_extract_vector(ctx, src, 0, dst);
2462 } else {
2463 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2464 nir_print_instr(&instr->instr, stderr);
2465 fprintf(stderr, "\n");
2466 }
2467 break;
2468 }
2469 case nir_op_u2u32: {
2470 Temp src = get_alu_src(ctx, instr->src[0]);
2471 if (instr->src[0].src.ssa->bit_size == 8) {
2472 if (dst.regClass() == s1)
2473 bld.sop2(aco_opcode::s_and_b32, Definition(dst), bld.def(s1, scc), Operand(0xFFu), src);
2474 else {
2475 assert(src.regClass() == v1b);
2476 aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
2477 sdwa->operands[0] = Operand(src);
2478 sdwa->definitions[0] = Definition(dst);
2479 sdwa->sel[0] = sdwa_ubyte;
2480 sdwa->dst_sel = sdwa_udword;
2481 ctx->block->instructions.emplace_back(std::move(sdwa));
2482 }
2483 } else if (instr->src[0].src.ssa->bit_size == 16) {
2484 if (dst.regClass() == s1) {
2485 bld.sop2(aco_opcode::s_and_b32, Definition(dst), bld.def(s1, scc), Operand(0xFFFFu), src);
2486 } else {
2487 assert(src.regClass() == v2b);
2488 aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
2489 sdwa->operands[0] = Operand(src);
2490 sdwa->definitions[0] = Definition(dst);
2491 sdwa->sel[0] = sdwa_uword;
2492 sdwa->dst_sel = sdwa_udword;
2493 ctx->block->instructions.emplace_back(std::move(sdwa));
2494 }
2495 } else if (instr->src[0].src.ssa->bit_size == 64) {
2496 /* we can actually just say dst = src, as it would map the lower register */
2497 emit_extract_vector(ctx, src, 0, dst);
2498 } else {
2499 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2500 nir_print_instr(&instr->instr, stderr);
2501 fprintf(stderr, "\n");
2502 }
2503 break;
2504 }
2505 case nir_op_i2i64: {
2506 Temp src = get_alu_src(ctx, instr->src[0]);
2507 if (src.regClass() == s1) {
2508 Temp high = bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), src, Operand(31u));
2509 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src, high);
2510 } else if (src.regClass() == v1) {
2511 Temp high = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), src);
2512 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src, high);
2513 } else {
2514 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2515 nir_print_instr(&instr->instr, stderr);
2516 fprintf(stderr, "\n");
2517 }
2518 break;
2519 }
2520 case nir_op_u2u64: {
2521 Temp src = get_alu_src(ctx, instr->src[0]);
2522 if (instr->src[0].src.ssa->bit_size == 32) {
2523 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src, Operand(0u));
2524 } else {
2525 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2526 nir_print_instr(&instr->instr, stderr);
2527 fprintf(stderr, "\n");
2528 }
2529 break;
2530 }
2531 case nir_op_b2b32:
2532 case nir_op_b2i32: {
2533 Temp src = get_alu_src(ctx, instr->src[0]);
2534 assert(src.regClass() == bld.lm);
2535
2536 if (dst.regClass() == s1) {
2537 // TODO: in a post-RA optimization, we can check if src is in VCC, and directly use VCCNZ
2538 bool_to_scalar_condition(ctx, src, dst);
2539 } else if (dst.regClass() == v1) {
2540 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand(1u), src);
2541 } else {
2542 unreachable("Invalid register class for b2i32");
2543 }
2544 break;
2545 }
2546 case nir_op_b2b1:
2547 case nir_op_i2b1: {
2548 Temp src = get_alu_src(ctx, instr->src[0]);
2549 assert(dst.regClass() == bld.lm);
2550
2551 if (src.type() == RegType::vgpr) {
2552 assert(src.regClass() == v1 || src.regClass() == v2);
2553 assert(dst.regClass() == bld.lm);
2554 bld.vopc(src.size() == 2 ? aco_opcode::v_cmp_lg_u64 : aco_opcode::v_cmp_lg_u32,
2555 Definition(dst), Operand(0u), src).def(0).setHint(vcc);
2556 } else {
2557 assert(src.regClass() == s1 || src.regClass() == s2);
2558 Temp tmp;
2559 if (src.regClass() == s2 && ctx->program->chip_class <= GFX7) {
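/* GFX7 and earlier lack s_cmp_lg_u64, so use the SCC definition of
 * s_or_b64 instead. */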
2560 tmp = bld.sop2(aco_opcode::s_or_b64, bld.def(s2), bld.def(s1, scc), Operand(0u), src).def(1).getTemp();
2561 } else {
2562 tmp = bld.sopc(src.size() == 2 ? aco_opcode::s_cmp_lg_u64 : aco_opcode::s_cmp_lg_u32,
2563 bld.scc(bld.def(s1)), Operand(0u), src);
2564 }
2565 bool_to_vector_condition(ctx, tmp, dst);
2566 }
2567 break;
2568 }
2569 case nir_op_pack_64_2x32_split: {
2570 Temp src0 = get_alu_src(ctx, instr->src[0]);
2571 Temp src1 = get_alu_src(ctx, instr->src[1]);
2572
2573 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src0, src1);
2574 break;
2575 }
2576 case nir_op_unpack_64_2x32_split_x:
2577 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(dst.regClass()), get_alu_src(ctx, instr->src[0]));
2578 break;
2579 case nir_op_unpack_64_2x32_split_y:
2580 bld.pseudo(aco_opcode::p_split_vector, bld.def(dst.regClass()), Definition(dst), get_alu_src(ctx, instr->src[0]));
2581 break;
2582 case nir_op_unpack_32_2x16_split_x:
2583 if (dst.type() == RegType::vgpr) {
2584 bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(dst.regClass()), get_alu_src(ctx, instr->src[0]));
2585 } else {
2586 bld.copy(Definition(dst), get_alu_src(ctx, instr->src[0]));
2587 }
2588 break;
2589 case nir_op_unpack_32_2x16_split_y:
2590 if (dst.type() == RegType::vgpr) {
2591 bld.pseudo(aco_opcode::p_split_vector, bld.def(dst.regClass()), Definition(dst), get_alu_src(ctx, instr->src[0]));
2592 } else {
2593 bld.sop2(aco_opcode::s_bfe_u32, Definition(dst), bld.def(s1, scc), get_alu_src(ctx, instr->src[0]), Operand(uint32_t(16 << 16 | 16)));
2594 }
2595 break;
2596 case nir_op_pack_32_2x16_split: {
2597 Temp src0 = get_alu_src(ctx, instr->src[0]);
2598 Temp src1 = get_alu_src(ctx, instr->src[1]);
2599 if (dst.regClass() == v1) {
2600 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src0, src1);
2601 } else {
2602 src0 = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), src0, Operand(0xFFFFu));
2603 src1 = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), src1, Operand(16u));
2604 bld.sop2(aco_opcode::s_or_b32, Definition(dst), bld.def(s1, scc), src0, src1);
2605 }
2606 break;
2607 }
2608 case nir_op_pack_half_2x16: {
2609 Temp src = get_alu_src(ctx, instr->src[0], 2);
2610
2611 if (dst.regClass() == v1) {
2612 Temp src0 = bld.tmp(v1);
2613 Temp src1 = bld.tmp(v1);
2614 bld.pseudo(aco_opcode::p_split_vector, Definition(src0), Definition(src1), src);
2615 if (!ctx->block->fp_mode.care_about_round32 || ctx->block->fp_mode.round32 == fp_round_tz)
2616 bld.vop3(aco_opcode::v_cvt_pkrtz_f16_f32, Definition(dst), src0, src1);
2617 else
2618 bld.vop3(aco_opcode::v_cvt_pk_u16_u32, Definition(dst),
2619 bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v1), src0),
2620 bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v1), src1));
2621 } else {
2622 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2623 nir_print_instr(&instr->instr, stderr);
2624 fprintf(stderr, "\n");
2625 }
2626 break;
2627 }
2628 case nir_op_unpack_half_2x16_split_x: {
2629 if (dst.regClass() == v1) {
2631 bld.vop1(aco_opcode::v_cvt_f32_f16, Definition(dst), get_alu_src(ctx, instr->src[0]));
2632 } else {
2633 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2634 nir_print_instr(&instr->instr, stderr);
2635 fprintf(stderr, "\n");
2636 }
2637 break;
2638 }
2639 case nir_op_unpack_half_2x16_split_y: {
2640 if (dst.regClass() == v1) {
2642 /* TODO: use SDWA here */
2643 bld.vop1(aco_opcode::v_cvt_f32_f16, Definition(dst),
2644 bld.vop2(aco_opcode::v_lshrrev_b32, bld.def(v1), Operand(16u), as_vgpr(ctx, get_alu_src(ctx, instr->src[0]))));
2645 } else {
2646 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2647 nir_print_instr(&instr->instr, stderr);
2648 fprintf(stderr, "\n");
2649 }
2650 break;
2651 }
2652 case nir_op_fquantize2f16: {
2653 Temp src = get_alu_src(ctx, instr->src[0]);
2654 Temp f16 = bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v1), src);
2655 Temp f32, cmp_res;
2656
2657 if (ctx->program->chip_class >= GFX8) {
2658 Temp mask = bld.copy(bld.def(s1), Operand(0x36Fu)); /* value is NOT negative/positive denormal value */
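/* 0x36F sets every v_cmp_class bit except +/-denormal (bits 4 and 7), so
 * cmp_res is false exactly for denormal f16 results. */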
2659 cmp_res = bld.vopc_e64(aco_opcode::v_cmp_class_f16, bld.hint_vcc(bld.def(bld.lm)), f16, mask);
2660 f32 = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), f16);
2661 } else {
2662 /* 0x38800000 is smallest half float value (2^-14) in 32-bit float,
2663 * so compare the result and flush to 0 if it's smaller.
2664 */
2665 f32 = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), f16);
2666 Temp smallest = bld.copy(bld.def(s1), Operand(0x38800000u));
2667 Instruction* vop3 = bld.vopc_e64(aco_opcode::v_cmp_nlt_f32, bld.hint_vcc(bld.def(bld.lm)), f32, smallest);
2668 static_cast<VOP3A_instruction*>(vop3)->abs[0] = true;
2669 cmp_res = vop3->definitions[0].getTemp();
2670 }
2671
2672 if (ctx->block->fp_mode.preserve_signed_zero_inf_nan32 || ctx->program->chip_class < GFX8) {
2673 Temp copysign_0 = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0u), as_vgpr(ctx, src));
2674 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), copysign_0, f32, cmp_res);
2675 } else {
2676 bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), f32, cmp_res);
2677 }
2678 break;
2679 }
2680 case nir_op_bfm: {
2681 Temp bits = get_alu_src(ctx, instr->src[0]);
2682 Temp offset = get_alu_src(ctx, instr->src[1]);
2683
2684 if (dst.regClass() == s1) {
2685 bld.sop2(aco_opcode::s_bfm_b32, Definition(dst), bits, offset);
2686 } else if (dst.regClass() == v1) {
2687 bld.vop3(aco_opcode::v_bfm_b32, Definition(dst), bits, offset);
2688 } else {
2689 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2690 nir_print_instr(&instr->instr, stderr);
2691 fprintf(stderr, "\n");
2692 }
2693 break;
2694 }
2695 case nir_op_bitfield_select: {
2696 /* (mask & insert) | (~mask & base) */
2697 Temp bitmask = get_alu_src(ctx, instr->src[0]);
2698 Temp insert = get_alu_src(ctx, instr->src[1]);
2699 Temp base = get_alu_src(ctx, instr->src[2]);
2700
2701 /* dst = (insert & bitmask) | (base & ~bitmask) */
2702 if (dst.regClass() == s1) {
2703 aco_ptr<Instruction> sop2;
2704 nir_const_value* const_bitmask = nir_src_as_const_value(instr->src[0].src);
2705 nir_const_value* const_insert = nir_src_as_const_value(instr->src[1].src);
2706 Operand lhs;
2707 if (const_insert && const_bitmask) {
2708 lhs = Operand(const_insert->u32 & const_bitmask->u32);
2709 } else {
2710 insert = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), insert, bitmask);
2711 lhs = Operand(insert);
2712 }
2713
2714 Operand rhs;
2715 nir_const_value* const_base = nir_src_as_const_value(instr->src[2].src);
2716 if (const_base && const_bitmask) {
2717 rhs = Operand(const_base->u32 & ~const_bitmask->u32);
2718 } else {
2719 base = bld.sop2(aco_opcode::s_andn2_b32, bld.def(s1), bld.def(s1, scc), base, bitmask);
2720 rhs = Operand(base);
2721 }
2722
2723 bld.sop2(aco_opcode::s_or_b32, Definition(dst), bld.def(s1, scc), rhs, lhs);
2724
2725 } else if (dst.regClass() == v1) {
2726 if (base.type() == RegType::sgpr && (bitmask.type() == RegType::sgpr || (insert.type() == RegType::sgpr)))
2727 base = as_vgpr(ctx, base);
2728 if (insert.type() == RegType::sgpr && bitmask.type() == RegType::sgpr)
2729 insert = as_vgpr(ctx, insert);
2730
2731 bld.vop3(aco_opcode::v_bfi_b32, Definition(dst), bitmask, insert, base);
2732
2733 } else {
2734 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2735 nir_print_instr(&instr->instr, stderr);
2736 fprintf(stderr, "\n");
2737 }
2738 break;
2739 }
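/* Worked example of the equation above, dst = (insert & bitmask) | (base & ~bitmask),
 * with hypothetical values (this is also exactly what v_bfi_b32 computes):
 *   bitmask = 0x00ff0000, insert = 0x12345678, base = 0xaabbccdd
 *   dst     = 0x00340000 | 0xaa00ccdd = 0xaa34ccdd */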
2740 case nir_op_ubfe:
2741 case nir_op_ibfe: {
2742 Temp base = get_alu_src(ctx, instr->src[0]);
2743 Temp offset = get_alu_src(ctx, instr->src[1]);
2744 Temp bits = get_alu_src(ctx, instr->src[2]);
2745
2746 if (dst.type() == RegType::sgpr) {
2747 Operand extract;
2748 nir_const_value* const_offset = nir_src_as_const_value(instr->src[1].src);
2749 nir_const_value* const_bits = nir_src_as_const_value(instr->src[2].src);
2750 if (const_offset && const_bits) {
2751 uint32_t const_extract = (const_bits->u32 << 16) | const_offset->u32;
2752 extract = Operand(const_extract);
2753 } else {
2754 Operand width;
2755 if (const_bits) {
2756 width = Operand(const_bits->u32 << 16);
2757 } else {
2758 width = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), bits, Operand(16u));
2759 }
2760 extract = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), offset, width);
2761 }
2762
2763 aco_opcode opcode;
2764 if (dst.regClass() == s1) {
2765 if (instr->op == nir_op_ubfe)
2766 opcode = aco_opcode::s_bfe_u32;
2767 else
2768 opcode = aco_opcode::s_bfe_i32;
2769 } else if (dst.regClass() == s2) {
2770 if (instr->op == nir_op_ubfe)
2771 opcode = aco_opcode::s_bfe_u64;
2772 else
2773 opcode = aco_opcode::s_bfe_i64;
2774 } else {
2775 unreachable("Unsupported BFE bit size");
2776 }
2777
2778 bld.sop2(opcode, Definition(dst), bld.def(s1, scc), base, extract);
2779
2780 } else {
2781 aco_opcode opcode;
2782 if (dst.regClass() == v1) {
2783 if (instr->op == nir_op_ubfe)
2784 opcode = aco_opcode::v_bfe_u32;
2785 else
2786 opcode = aco_opcode::v_bfe_i32;
2787 } else {
2788 unreachable("Unsupported BFE bit size");
2789 }
2790
2791 emit_vop3a_instruction(ctx, instr, opcode, dst);
2792 }
2793 break;
2794 }
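/* The packed SGPR operand built above follows the s_bfe encoding: the field
 * offset lives in bits [4:0] ([5:0] for the 64-bit opcodes) and the field
 * width in bits [22:16]. e.g. extracting 8 bits starting at bit 4:
 *   extract = (8 << 16) | 4 = 0x00080004 */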
2795 case nir_op_bit_count: {
2796 Temp src = get_alu_src(ctx, instr->src[0]);
2797 if (src.regClass() == s1) {
2798 bld.sop1(aco_opcode::s_bcnt1_i32_b32, Definition(dst), bld.def(s1, scc), src);
2799 } else if (src.regClass() == v1) {
2800 bld.vop3(aco_opcode::v_bcnt_u32_b32, Definition(dst), src, Operand(0u));
2801 } else if (src.regClass() == v2) {
2802 bld.vop3(aco_opcode::v_bcnt_u32_b32, Definition(dst),
2803 emit_extract_vector(ctx, src, 1, v1),
2804 bld.vop3(aco_opcode::v_bcnt_u32_b32, bld.def(v1),
2805 emit_extract_vector(ctx, src, 0, v1), Operand(0u)));
2806 } else if (src.regClass() == s2) {
2807 bld.sop1(aco_opcode::s_bcnt1_i32_b64, Definition(dst), bld.def(s1, scc), src);
2808 } else {
2809 fprintf(stderr, "Unimplemented NIR instr bit size: ");
2810 nir_print_instr(&instr->instr, stderr);
2811 fprintf(stderr, "\n");
2812 }
2813 break;
2814 }
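/* For the v2 case above, the 64-bit popcount chains two v_bcnt_u32_b32,
 * which computes countbits(src0) + src1. e.g. with the hypothetical value
 * src = 0x0000000f00000003: bcnt(lo, 0) = 2, then bcnt(hi, 2) = 4 + 2 = 6. */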
2815 case nir_op_flt: {
2816 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_f32, aco_opcode::v_cmp_lt_f64);
2817 break;
2818 }
2819 case nir_op_fge: {
2820 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_f32, aco_opcode::v_cmp_ge_f64);
2821 break;
2822 }
2823 case nir_op_feq: {
2824 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_f32, aco_opcode::v_cmp_eq_f64);
2825 break;
2826 }
2827 case nir_op_fne: {
2828 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_neq_f32, aco_opcode::v_cmp_neq_f64);
2829 break;
2830 }
2831 case nir_op_ilt: {
2832 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_i32, aco_opcode::v_cmp_lt_i64, aco_opcode::s_cmp_lt_i32);
2833 break;
2834 }
2835 case nir_op_ige: {
2836 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_i32, aco_opcode::v_cmp_ge_i64, aco_opcode::s_cmp_ge_i32);
2837 break;
2838 }
2839 case nir_op_ieq: {
2840 if (instr->src[0].src.ssa->bit_size == 1)
2841 emit_boolean_logic(ctx, instr, Builder::s_xnor, dst);
2842 else
2843 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_i32, aco_opcode::v_cmp_eq_i64, aco_opcode::s_cmp_eq_i32,
2844 ctx->program->chip_class >= GFX8 ? aco_opcode::s_cmp_eq_u64 : aco_opcode::num_opcodes);
2845 break;
2846 }
2847 case nir_op_ine: {
2848 if (instr->src[0].src.ssa->bit_size == 1)
2849 emit_boolean_logic(ctx, instr, Builder::s_xor, dst);
2850 else
2851 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lg_i32, aco_opcode::v_cmp_lg_i64, aco_opcode::s_cmp_lg_i32,
2852 ctx->program->chip_class >= GFX8 ? aco_opcode::s_cmp_lg_u64 : aco_opcode::num_opcodes);
2853 break;
2854 }
2855 case nir_op_ult: {
2856 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_u32, aco_opcode::v_cmp_lt_u64, aco_opcode::s_cmp_lt_u32);
2857 break;
2858 }
2859 case nir_op_uge: {
2860 emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_u32, aco_opcode::v_cmp_ge_u64, aco_opcode::s_cmp_ge_u32);
2861 break;
2862 }
2863 case nir_op_fddx:
2864 case nir_op_fddy:
2865 case nir_op_fddx_fine:
2866 case nir_op_fddy_fine:
2867 case nir_op_fddx_coarse:
2868 case nir_op_fddy_coarse: {
2869 Temp src = get_alu_src(ctx, instr->src[0]);
2870 uint16_t dpp_ctrl1, dpp_ctrl2;
2871 if (instr->op == nir_op_fddx_fine) {
2872 dpp_ctrl1 = dpp_quad_perm(0, 0, 2, 2);
2873 dpp_ctrl2 = dpp_quad_perm(1, 1, 3, 3);
2874 } else if (instr->op == nir_op_fddy_fine) {
2875 dpp_ctrl1 = dpp_quad_perm(0, 1, 0, 1);
2876 dpp_ctrl2 = dpp_quad_perm(2, 3, 2, 3);
2877 } else {
2878 dpp_ctrl1 = dpp_quad_perm(0, 0, 0, 0);
2879 if (instr->op == nir_op_fddx || instr->op == nir_op_fddx_coarse)
2880 dpp_ctrl2 = dpp_quad_perm(1, 1, 1, 1);
2881 else
2882 dpp_ctrl2 = dpp_quad_perm(2, 2, 2, 2);
2883 }
2884
2885 Temp tmp;
2886 if (ctx->program->chip_class >= GFX8) {
2887 Temp tl = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl1);
2888 tmp = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), src, tl, dpp_ctrl2);
2889 } else {
2890 Temp tl = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl1);
2891 Temp tr = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl2);
2892 tmp = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), tr, tl);
2893 }
2894 emit_wqm(ctx, tmp, dst, true);
2895 break;
2896 }
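/* Derivatives work on 2x2 quads: dpp_quad_perm(a, b, c, d) makes the four
 * lanes of each quad read quad lanes a, b, c, d respectively, and the
 * subtraction forms neighbour - base. With hypothetical quad values
 *   v0 = 1.0 (TL), v1 = 3.0 (TR), v2 = 5.0 (BL), v3 = 7.0 (BR)
 * fddx_coarse broadcasts v1 - v0 = 2.0 to all four lanes and fddy_coarse
 * broadcasts v2 - v0 = 4.0, while the _fine variants compute per-row and
 * per-column differences. */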
2897 default:
2898 fprintf(stderr, "Unknown NIR ALU instr: ");
2899 nir_print_instr(&instr->instr, stderr);
2900 fprintf(stderr, "\n");
2901 }
2902 }
2903
2904 void visit_load_const(isel_context *ctx, nir_load_const_instr *instr)
2905 {
2906 Temp dst = get_ssa_temp(ctx, &instr->def);
2907
2908 // TODO: we really want to have the resulting type here, as it would allow for 64-bit
2909 // literals, which currently get truncated: the LSBs if double and the MSBs if int
2910 // for now, we only use s_mov_b64 with 64bit inline constants
2911 assert(instr->def.num_components == 1 && "Vector load_const should be lowered to scalar.");
2912 assert(dst.type() == RegType::sgpr);
2913
2914 Builder bld(ctx->program, ctx->block);
2915
2916 if (instr->def.bit_size == 1) {
2917 assert(dst.regClass() == bld.lm);
2918 int val = instr->value[0].b ? -1 : 0;
2919 Operand op = bld.lm.size() == 1 ? Operand((uint32_t) val) : Operand((uint64_t) val);
2920 bld.sop1(Builder::s_mov, Definition(dst), op);
2921 } else if (dst.size() == 1) {
2922 bld.copy(Definition(dst), Operand(instr->value[0].u32));
2923 } else {
2924 assert(dst.size() != 1);
2925 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
2926 if (instr->def.bit_size == 64)
2927 for (unsigned i = 0; i < dst.size(); i++)
2928 vec->operands[i] = Operand{(uint32_t)(instr->value[0].u64 >> i * 32)};
2929 else {
2930 for (unsigned i = 0; i < dst.size(); i++)
2931 vec->operands[i] = Operand{instr->value[i].u32};
2932 }
2933 vec->definitions[0] = Definition(dst);
2934 ctx->block->instructions.emplace_back(std::move(vec));
2935 }
2936 }
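/* e.g. a 64-bit constant 0x1122334455667788 with dst.size() == 2 becomes
 * p_create_vector(0x55667788, 0x11223344): operand i holds bits
 * [32*i+31 : 32*i] of the value. */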
2937
2938 uint32_t widen_mask(uint32_t mask, unsigned multiplier)
2939 {
2940 uint32_t new_mask = 0;
2941 for(unsigned i = 0; i < 32 && (1u << i) <= mask; ++i)
2942 if (mask & (1u << i))
2943 new_mask |= ((1u << multiplier) - 1u) << (i * multiplier);
2944 return new_mask;
2945 }
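/* e.g. widen_mask(0b0101, 2) == 0b00110011: every set bit of the writemask
 * is widened to `multiplier` adjacent bits, which is how 64-bit component
 * masks are remapped onto 32-bit components elsewhere in this file. */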
2946
2947 Operand load_lds_size_m0(isel_context *ctx)
2948 {
2949 /* TODO: m0 does not need to be initialized on GFX9+ */
2950 Builder bld(ctx->program, ctx->block);
2951 return bld.m0((Temp)bld.sopk(aco_opcode::s_movk_i32, bld.def(s1, m0), 0xffff));
2952 }
2953
2954 Temp load_lds(isel_context *ctx, unsigned elem_size_bytes, Temp dst,
2955 Temp address, unsigned base_offset, unsigned align)
2956 {
2957 assert(util_is_power_of_two_nonzero(align) && align >= 4);
2958
2959 Builder bld(ctx->program, ctx->block);
2960
2961 Operand m = load_lds_size_m0(ctx);
2962
2963 unsigned num_components = dst.size() * 4u / elem_size_bytes;
2964 unsigned bytes_read = 0;
2965 unsigned result_size = 0;
2966 unsigned total_bytes = num_components * elem_size_bytes;
2967 std::array<Temp, NIR_MAX_VEC_COMPONENTS> result;
2968 bool large_ds_read = ctx->options->chip_class >= GFX7;
2969 bool usable_read2 = ctx->options->chip_class >= GFX7;
2970
2971 while (bytes_read < total_bytes) {
2972 unsigned todo = total_bytes - bytes_read;
2973 bool aligned8 = bytes_read % 8 == 0 && align % 8 == 0;
2974 bool aligned16 = bytes_read % 16 == 0 && align % 16 == 0;
2975
2976 aco_opcode op = aco_opcode::last_opcode;
2977 bool read2 = false;
2978 if (todo >= 16 && aligned16 && large_ds_read) {
2979 op = aco_opcode::ds_read_b128;
2980 todo = 16;
2981 } else if (todo >= 16 && aligned8 && usable_read2) {
2982 op = aco_opcode::ds_read2_b64;
2983 read2 = true;
2984 todo = 16;
2985 } else if (todo >= 12 && aligned16 && large_ds_read) {
2986 op = aco_opcode::ds_read_b96;
2987 todo = 12;
2988 } else if (todo >= 8 && aligned8) {
2989 op = aco_opcode::ds_read_b64;
2990 todo = 8;
2991 } else if (todo >= 8 && usable_read2) {
2992 op = aco_opcode::ds_read2_b32;
2993 read2 = true;
2994 todo = 8;
2995 } else if (todo >= 4) {
2996 op = aco_opcode::ds_read_b32;
2997 todo = 4;
2998 } else {
2999 assert(false);
3000 }
3001 assert(todo % elem_size_bytes == 0);
3002 unsigned num_elements = todo / elem_size_bytes;
3003 unsigned offset = base_offset + bytes_read;
3004 unsigned max_offset = read2 ? 1019 : 65535;
3005
3006 Temp address_offset = address;
3007 if (offset > max_offset) {
3008 address_offset = bld.vadd32(bld.def(v1), Operand(base_offset), address_offset);
3009 offset = bytes_read;
3010 }
3011 assert(offset <= max_offset); /* bytes_read shouldn't be large enough for this to happen */
3012
3013 Temp res;
3014 if (num_components == 1 && dst.type() == RegType::vgpr)
3015 res = dst;
3016 else
3017 res = bld.tmp(RegClass(RegType::vgpr, todo / 4));
3018
3019 if (read2)
3020 res = bld.ds(op, Definition(res), address_offset, m, offset / (todo / 2), (offset / (todo / 2)) + 1);
3021 else
3022 res = bld.ds(op, Definition(res), address_offset, m, offset);
3023
3024 if (num_components == 1) {
3025 assert(todo == total_bytes);
3026 if (dst.type() == RegType::sgpr)
3027 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), res);
3028 return dst;
3029 }
3030
3031 if (dst.type() == RegType::sgpr) {
3032 Temp new_res = bld.tmp(RegType::sgpr, res.size());
3033 expand_vector(ctx, res, new_res, res.size(), (1 << res.size()) - 1);
3034 res = new_res;
3035 }
3036
3037 if (num_elements == 1) {
3038 result[result_size++] = res;
3039 } else {
3040 assert(res != dst && res.size() % num_elements == 0);
3041 aco_ptr<Pseudo_instruction> split{create_instruction<Pseudo_instruction>(aco_opcode::p_split_vector, Format::PSEUDO, 1, num_elements)};
3042 split->operands[0] = Operand(res);
3043 for (unsigned i = 0; i < num_elements; i++)
3044 split->definitions[i] = Definition(result[result_size++] = bld.tmp(res.type(), elem_size_bytes / 4));
3045 ctx->block->instructions.emplace_back(std::move(split));
3046 }
3047
3048 bytes_read += todo;
3049 }
3050
3051 assert(result_size == num_components && result_size > 1);
3052 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, result_size, 1)};
3053 for (unsigned i = 0; i < result_size; i++)
3054 vec->operands[i] = Operand(result[i]);
3055 vec->definitions[0] = Definition(dst);
3056 ctx->block->instructions.emplace_back(std::move(vec));
3057 ctx->allocated_vec.emplace(dst.id(), result);
3058
3059 return dst;
3060 }
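/* Example of the opcode selection above (assuming GFX7+): a 16-byte aligned
 * 24-byte read becomes ds_read_b128 + ds_read_b64, while the same read with
 * only 4-byte alignment becomes three ds_read2_b32; on GFX6, which has
 * neither the large reads nor read2, the 4-byte aligned case degrades to
 * six ds_read_b32. */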
3061
3062 Temp extract_subvector(isel_context *ctx, Temp data, unsigned start, unsigned size, RegType type)
3063 {
3064 if (start == 0 && size == data.size())
3065 return type == RegType::vgpr ? as_vgpr(ctx, data) : data;
3066
3067 unsigned size_hint = 1;
3068 auto it = ctx->allocated_vec.find(data.id());
3069 if (it != ctx->allocated_vec.end())
3070 size_hint = it->second[0].size();
3071 if (size % size_hint || start % size_hint)
3072 size_hint = 1;
3073
3074 start /= size_hint;
3075 size /= size_hint;
3076
3077 Temp elems[size];
3078 for (unsigned i = 0; i < size; i++)
3079 elems[i] = emit_extract_vector(ctx, data, start + i, RegClass(type, size_hint));
3080
3081 if (size == 1)
3082 return type == RegType::vgpr ? as_vgpr(ctx, elems[0]) : elems[0];
3083
3084 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, size, 1)};
3085 for (unsigned i = 0; i < size; i++)
3086 vec->operands[i] = Operand(elems[i]);
3087 Temp res = {ctx->program->allocateId(), RegClass(type, size * size_hint)};
3088 vec->definitions[0] = Definition(res);
3089 ctx->block->instructions.emplace_back(std::move(vec));
3090 return res;
3091 }
3092
3093 void ds_write_helper(isel_context *ctx, Operand m, Temp address, Temp data, unsigned data_start, unsigned total_size, unsigned offset0, unsigned offset1, unsigned align)
3094 {
3095 Builder bld(ctx->program, ctx->block);
3096 unsigned bytes_written = 0;
3097 bool large_ds_write = ctx->options->chip_class >= GFX7;
3098 bool usable_write2 = ctx->options->chip_class >= GFX7;
3099
3100 while (bytes_written < total_size * 4) {
3101 unsigned todo = total_size * 4 - bytes_written;
3102 bool aligned8 = bytes_written % 8 == 0 && align % 8 == 0;
3103 bool aligned16 = bytes_written % 16 == 0 && align % 16 == 0;
3104
3105 aco_opcode op = aco_opcode::last_opcode;
3106 bool write2 = false;
3107 unsigned size = 0;
3108 if (todo >= 16 && aligned16 && large_ds_write) {
3109 op = aco_opcode::ds_write_b128;
3110 size = 4;
3111 } else if (todo >= 16 && aligned8 && usable_write2) {
3112 op = aco_opcode::ds_write2_b64;
3113 write2 = true;
3114 size = 4;
3115 } else if (todo >= 12 && aligned16 && large_ds_write) {
3116 op = aco_opcode::ds_write_b96;
3117 size = 3;
3118 } else if (todo >= 8 && aligned8) {
3119 op = aco_opcode::ds_write_b64;
3120 size = 2;
3121 } else if (todo >= 8 && usable_write2) {
3122 op = aco_opcode::ds_write2_b32;
3123 write2 = true;
3124 size = 2;
3125 } else if (todo >= 4) {
3126 op = aco_opcode::ds_write_b32;
3127 size = 1;
3128 } else {
3129 assert(false);
3130 }
3131
3132 unsigned offset = offset0 + offset1 + bytes_written;
3133 unsigned max_offset = write2 ? 1020 : 65535;
3134 Temp address_offset = address;
3135 if (offset > max_offset) {
3136 address_offset = bld.vadd32(bld.def(v1), Operand(offset0), address_offset);
3137 offset = offset1 + bytes_written;
3138 }
3139 assert(offset <= max_offset); /* offset1 shouldn't be large enough for this to happen */
3140
3141 if (write2) {
3142 Temp val0 = extract_subvector(ctx, data, data_start + (bytes_written >> 2), size / 2, RegType::vgpr);
3143 Temp val1 = extract_subvector(ctx, data, data_start + (bytes_written >> 2) + 1, size / 2, RegType::vgpr);
3144 bld.ds(op, address_offset, val0, val1, m, offset / size / 2, (offset / size / 2) + 1);
3145 } else {
3146 Temp val = extract_subvector(ctx, data, data_start + (bytes_written >> 2), size, RegType::vgpr);
3147 bld.ds(op, address_offset, val, m, offset);
3148 }
3149
3150 bytes_written += size * 4;
3151 }
3152 }
3153
3154 void store_lds(isel_context *ctx, unsigned elem_size_bytes, Temp data, uint32_t wrmask,
3155 Temp address, unsigned base_offset, unsigned align)
3156 {
3157 assert(util_is_power_of_two_nonzero(align) && align >= 4);
3158 assert(elem_size_bytes == 4 || elem_size_bytes == 8);
3159
3160 Operand m = load_lds_size_m0(ctx);
3161
3162 /* we need at most two stores, assuming that the writemask is at most 4 bits wide */
3163 assert(wrmask <= 0x0f);
3164 int start[2], count[2];
3165 u_bit_scan_consecutive_range(&wrmask, &start[0], &count[0]);
3166 u_bit_scan_consecutive_range(&wrmask, &start[1], &count[1]);
3167 assert(wrmask == 0);
3168
3169 /* one combined store is sufficient */
3170 if (count[0] == count[1] && (align % elem_size_bytes) == 0 && (base_offset % elem_size_bytes) == 0) {
3171 Builder bld(ctx->program, ctx->block);
3172
3173 Temp address_offset = address;
3174 if ((base_offset / elem_size_bytes) + start[1] > 255) {
3175 address_offset = bld.vadd32(bld.def(v1), Operand(base_offset), address_offset);
3176 base_offset = 0;
3177 }
3178
3179 assert(count[0] == 1);
3180 RegClass xtract_rc(RegType::vgpr, elem_size_bytes / 4);
3181
3182 Temp val0 = emit_extract_vector(ctx, data, start[0], xtract_rc);
3183 Temp val1 = emit_extract_vector(ctx, data, start[1], xtract_rc);
3184 aco_opcode op = elem_size_bytes == 4 ? aco_opcode::ds_write2_b32 : aco_opcode::ds_write2_b64;
3185 base_offset = base_offset / elem_size_bytes;
3186 bld.ds(op, address_offset, val0, val1, m,
3187 base_offset + start[0], base_offset + start[1]);
3188 return;
3189 }
3190
3191 for (unsigned i = 0; i < 2; i++) {
3192 if (count[i] == 0)
3193 continue;
3194
3195 unsigned elem_size_words = elem_size_bytes / 4;
3196 ds_write_helper(ctx, m, address, data, start[i] * elem_size_words, count[i] * elem_size_words,
3197 base_offset, start[i] * elem_size_bytes, align);
3198 }
3199 return;
3200 }
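/* e.g. an aligned dword store with write_mask 0b0101 yields two one-element
 * ranges (start 0 and start 2), which the fast path above combines into a
 * single ds_write2_b32 at element offsets base+0 and base+2; other masks
 * fall through to ds_write_helper. */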
3201
3202 unsigned calculate_lds_alignment(isel_context *ctx, unsigned const_offset)
3203 {
3204 unsigned align = 16;
3205 if (const_offset)
3206 align = std::min(align, 1u << (ffs(const_offset) - 1));
3207
3208 return align;
3209 }
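/* i.e. the alignment is the largest power of two (capped at 16) dividing
 * const_offset: e.g. const_offset == 0x14 -> ffs() == 3 -> align == 4,
 * and const_offset == 0 keeps the full 16. */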
3210
3211
3212 Temp create_vec_from_array(isel_context *ctx, Temp arr[], unsigned cnt, RegType reg_type, unsigned elem_size_bytes,
3213 unsigned split_cnt = 0u, Temp dst = Temp())
3214 {
3215 Builder bld(ctx->program, ctx->block);
3216 unsigned dword_size = elem_size_bytes / 4;
3217
3218 if (!dst.id())
3219 dst = bld.tmp(RegClass(reg_type, cnt * dword_size));
3220
3221 std::array<Temp, NIR_MAX_VEC_COMPONENTS> allocated_vec;
3222 aco_ptr<Pseudo_instruction> instr {create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, cnt, 1)};
3223 instr->definitions[0] = Definition(dst);
3224
3225 for (unsigned i = 0; i < cnt; ++i) {
3226 if (arr[i].id()) {
3227 assert(arr[i].size() == dword_size);
3228 allocated_vec[i] = arr[i];
3229 instr->operands[i] = Operand(arr[i]);
3230 } else {
3231 Temp zero = bld.copy(bld.def(RegClass(reg_type, dword_size)), Operand(0u, dword_size == 2));
3232 allocated_vec[i] = zero;
3233 instr->operands[i] = Operand(zero);
3234 }
3235 }
3236
3237 bld.insert(std::move(instr));
3238
3239 if (split_cnt)
3240 emit_split_vector(ctx, dst, split_cnt);
3241 else
3242 ctx->allocated_vec.emplace(dst.id(), allocated_vec); /* emit_split_vector already does this */
3243
3244 return dst;
3245 }
3246
3247 inline unsigned resolve_excess_vmem_const_offset(Builder &bld, Temp &voffset, unsigned const_offset)
3248 {
3249 if (const_offset >= 4096) {
3250 unsigned excess_const_offset = const_offset / 4096u * 4096u;
3251 const_offset %= 4096u;
3252
3253 if (!voffset.id())
3254 voffset = bld.copy(bld.def(v1), Operand(excess_const_offset));
3255 else if (unlikely(voffset.regClass() == s1))
3256 voffset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), Operand(excess_const_offset), Operand(voffset));
3257 else if (likely(voffset.regClass() == v1))
3258 voffset = bld.vadd32(bld.def(v1), Operand(voffset), Operand(excess_const_offset));
3259 else
3260 unreachable("Unsupported register class of voffset");
3261 }
3262
3263 return const_offset;
3264 }
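/* MUBUF immediate offsets must stay below 4096, so e.g. const_offset == 5000
 * is split above into 4096 folded into voffset and 904 returned for the
 * instruction's immediate field. */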
3265
3266 void emit_single_mubuf_store(isel_context *ctx, Temp descriptor, Temp voffset, Temp soffset, Temp vdata,
3267 unsigned const_offset = 0u, bool allow_reorder = true, bool slc = false)
3268 {
3269 assert(vdata.id());
3270 assert(vdata.size() != 3 || ctx->program->chip_class != GFX6);
3271 assert(vdata.size() >= 1 && vdata.size() <= 4);
3272
3273 Builder bld(ctx->program, ctx->block);
3274 aco_opcode op = (aco_opcode) ((unsigned) aco_opcode::buffer_store_dword + vdata.size() - 1);
3275 const_offset = resolve_excess_vmem_const_offset(bld, voffset, const_offset);
3276
3277 Operand voffset_op = voffset.id() ? Operand(as_vgpr(ctx, voffset)) : Operand(v1);
3278 Operand soffset_op = soffset.id() ? Operand(soffset) : Operand(0u);
3279 Builder::Result r = bld.mubuf(op, Operand(descriptor), voffset_op, soffset_op, Operand(vdata), const_offset,
3280 /* offen */ !voffset_op.isUndefined(), /* idxen*/ false, /* addr64 */ false,
3281 /* disable_wqm */ false, /* glc */ true, /* dlc*/ false, /* slc */ slc);
3282
3283 static_cast<MUBUF_instruction *>(r.instr)->can_reorder = allow_reorder;
3284 }
3285
3286 void store_vmem_mubuf(isel_context *ctx, Temp src, Temp descriptor, Temp voffset, Temp soffset,
3287 unsigned base_const_offset, unsigned elem_size_bytes, unsigned write_mask,
3288 bool allow_combining = true, bool reorder = true, bool slc = false)
3289 {
3290 Builder bld(ctx->program, ctx->block);
3291 assert(elem_size_bytes == 4 || elem_size_bytes == 8);
3292 assert(write_mask);
3293
3294 if (elem_size_bytes == 8) {
3295 elem_size_bytes = 4;
3296 write_mask = widen_mask(write_mask, 2);
3297 }
3298
3299 while (write_mask) {
3300 int start = 0;
3301 int count = 0;
3302 u_bit_scan_consecutive_range(&write_mask, &start, &count);
3303 assert(count > 0);
3304 assert(start >= 0);
3305
3306 while (count > 0) {
3307 unsigned sub_count = allow_combining ? MIN2(count, 4) : 1;
3308 unsigned const_offset = (unsigned) start * elem_size_bytes + base_const_offset;
3309
3310 /* GFX6 doesn't have buffer_store_dwordx3, so make sure not to emit that here either. */
3311 if (unlikely(ctx->program->chip_class == GFX6 && sub_count == 3))
3312 sub_count = 2;
3313
3314 Temp elem = extract_subvector(ctx, src, start, sub_count, RegType::vgpr);
3315 emit_single_mubuf_store(ctx, descriptor, voffset, soffset, elem, const_offset, reorder, slc);
3316
3317 count -= sub_count;
3318 start += sub_count;
3319 }
3320
3321 assert(count == 0);
3322 }
3323 }
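/* e.g. a dvec2 store with write_mask 0b11 is widened to four dwords
 * (0b1111) and, with combining allowed, emitted as one buffer_store_dwordx4;
 * a sparse dword mask like 0b0101 instead produces two buffer_store_dword,
 * one per consecutive-bit range. */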
3324
3325 Temp emit_single_mubuf_load(isel_context *ctx, Temp descriptor, Temp voffset, Temp soffset,
3326 unsigned const_offset, unsigned size_dwords, bool allow_reorder = true)
3327 {
3328 assert(size_dwords != 3 || ctx->program->chip_class != GFX6);
3329 assert(size_dwords >= 1 && size_dwords <= 4);
3330
3331 Builder bld(ctx->program, ctx->block);
3332 Temp vdata = bld.tmp(RegClass(RegType::vgpr, size_dwords));
3333 aco_opcode op = (aco_opcode) ((unsigned) aco_opcode::buffer_load_dword + size_dwords - 1);
3334 const_offset = resolve_excess_vmem_const_offset(bld, voffset, const_offset);
3335
3336 Operand voffset_op = voffset.id() ? Operand(as_vgpr(ctx, voffset)) : Operand(v1);
3337 Operand soffset_op = soffset.id() ? Operand(soffset) : Operand(0u);
3338 Builder::Result r = bld.mubuf(op, Definition(vdata), Operand(descriptor), voffset_op, soffset_op, const_offset,
3339 /* offen */ !voffset_op.isUndefined(), /* idxen*/ false, /* addr64 */ false,
3340 /* disable_wqm */ false, /* glc */ true,
3341 /* dlc*/ ctx->program->chip_class >= GFX10, /* slc */ false);
3342
3343 static_cast<MUBUF_instruction *>(r.instr)->can_reorder = allow_reorder;
3344
3345 return vdata;
3346 }
3347
3348 void load_vmem_mubuf(isel_context *ctx, Temp dst, Temp descriptor, Temp voffset, Temp soffset,
3349 unsigned base_const_offset, unsigned elem_size_bytes, unsigned num_components,
3350 unsigned stride = 0u, bool allow_combining = true, bool allow_reorder = true)
3351 {
3352 assert(elem_size_bytes == 4 || elem_size_bytes == 8);
3353 assert((num_components * elem_size_bytes / 4) == dst.size());
3354 assert(!!stride != allow_combining);
3355
3356 Builder bld(ctx->program, ctx->block);
3357 unsigned split_cnt = num_components;
3358
3359 if (elem_size_bytes == 8) {
3360 elem_size_bytes = 4;
3361 num_components *= 2;
3362 }
3363
3364 if (!stride)
3365 stride = elem_size_bytes;
3366
3367 unsigned load_size = 1;
3368 if (allow_combining) {
3369 if ((num_components % 4) == 0)
3370 load_size = 4;
3371 else if ((num_components % 3) == 0 && ctx->program->chip_class != GFX6)
3372 load_size = 3;
3373 else if ((num_components % 2) == 0)
3374 load_size = 2;
3375 }
3376
3377 unsigned num_loads = num_components / load_size;
3378 std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
3379
3380 for (unsigned i = 0; i < num_loads; ++i) {
3381 unsigned const_offset = i * stride * load_size + base_const_offset;
3382 elems[i] = emit_single_mubuf_load(ctx, descriptor, voffset, soffset, const_offset, load_size, allow_reorder);
3383 }
3384
3385 create_vec_from_array(ctx, elems.data(), num_loads, RegType::vgpr, load_size * 4u, split_cnt, dst);
3386 }
3387
3388 std::pair<Temp, unsigned> offset_add_from_nir(isel_context *ctx, const std::pair<Temp, unsigned> &base_offset, nir_src *off_src, unsigned stride = 1u)
3389 {
3390 Builder bld(ctx->program, ctx->block);
3391 Temp offset = base_offset.first;
3392 unsigned const_offset = base_offset.second;
3393
3394 if (!nir_src_is_const(*off_src)) {
3395 Temp indirect_offset_arg = get_ssa_temp(ctx, off_src->ssa);
3396 Temp with_stride;
3397
3398 /* Calculate indirect offset with stride */
3399 if (likely(indirect_offset_arg.regClass() == v1))
3400 with_stride = bld.v_mul_imm(bld.def(v1), indirect_offset_arg, stride);
3401 else if (indirect_offset_arg.regClass() == s1)
3402 with_stride = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(stride), indirect_offset_arg);
3403 else
3404 unreachable("Unsupported register class of indirect offset");
3405
3406 /* Add to the supplied base offset */
3407 if (offset.id() == 0)
3408 offset = with_stride;
3409 else if (unlikely(offset.regClass() == s1 && with_stride.regClass() == s1))
3410 offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), with_stride, offset);
3411 else if (offset.size() == 1 && with_stride.size() == 1)
3412 offset = bld.vadd32(bld.def(v1), with_stride, offset);
3413 else
3414 unreachable("Unsupported register class of indirect offset");
3415 } else {
3416 unsigned const_offset_arg = nir_src_as_uint(*off_src);
3417 const_offset += const_offset_arg * stride;
3418 }
3419
3420 return std::make_pair(offset, const_offset);
3421 }
3422
3423 std::pair<Temp, unsigned> offset_add(isel_context *ctx, const std::pair<Temp, unsigned> &off1, const std::pair<Temp, unsigned> &off2)
3424 {
3425 Builder bld(ctx->program, ctx->block);
3426 Temp offset;
3427
3428 if (off1.first.id() && off2.first.id()) {
3429 if (unlikely(off1.first.regClass() == s1 && off2.first.regClass() == s1))
3430 offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), off1.first, off2.first);
3431 else if (off1.first.size() == 1 && off2.first.size() == 1)
3432 offset = bld.vadd32(bld.def(v1), off1.first, off2.first);
3433 else
3434 unreachable("Unsupported register class of indirect offset");
3435 } else {
3436 offset = off1.first.id() ? off1.first : off2.first;
3437 }
3438
3439 return std::make_pair(offset, off1.second + off2.second);
3440 }
3441
3442 std::pair<Temp, unsigned> offset_mul(isel_context *ctx, const std::pair<Temp, unsigned> &offs, unsigned multiplier)
3443 {
3444 Builder bld(ctx->program, ctx->block);
3445 unsigned const_offset = offs.second * multiplier;
3446
3447 if (!offs.first.id())
3448 return std::make_pair(offs.first, const_offset);
3449
3450 Temp offset = unlikely(offs.first.regClass() == s1)
3451 ? bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(multiplier), offs.first)
3452 : bld.v_mul_imm(bld.def(v1), offs.first, multiplier);
3453
3454 return std::make_pair(offset, const_offset);
3455 }
3456
3457 std::pair<Temp, unsigned> get_intrinsic_io_basic_offset(isel_context *ctx, nir_intrinsic_instr *instr, unsigned base_stride, unsigned component_stride)
3458 {
3459 Builder bld(ctx->program, ctx->block);
3460
3461 /* base is the driver_location, which is already multiplied by 4, so it is in dwords */
3462 unsigned const_offset = nir_intrinsic_base(instr) * base_stride;
3463 /* component is in bytes */
3464 const_offset += nir_intrinsic_component(instr) * component_stride;
3465
3466 /* offset should be interpreted in relation to the base, so the instruction effectively reads/writes another input/output when it has an offset */
3467 nir_src *off_src = nir_get_io_offset_src(instr);
3468 return offset_add_from_nir(ctx, std::make_pair(Temp(), const_offset), off_src, 4u * base_stride);
3469 }
3470
3471 std::pair<Temp, unsigned> get_intrinsic_io_basic_offset(isel_context *ctx, nir_intrinsic_instr *instr, unsigned stride = 1u)
3472 {
3473 return get_intrinsic_io_basic_offset(ctx, instr, stride, stride);
3474 }
3475
3476 Temp get_tess_rel_patch_id(isel_context *ctx)
3477 {
3478 Builder bld(ctx->program, ctx->block);
3479
3480 switch (ctx->shader->info.stage) {
3481 case MESA_SHADER_TESS_CTRL:
3482 return bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xffu),
3483 get_arg(ctx, ctx->args->ac.tcs_rel_ids));
3484 case MESA_SHADER_TESS_EVAL:
3485 return get_arg(ctx, ctx->args->tes_rel_patch_id);
3486 default:
3487 unreachable("Unsupported stage in get_tess_rel_patch_id");
3488 }
3489 }
3490
3491 std::pair<Temp, unsigned> get_tcs_per_vertex_input_lds_offset(isel_context *ctx, nir_intrinsic_instr *instr)
3492 {
3493 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
3494 Builder bld(ctx->program, ctx->block);
3495
3496 uint32_t tcs_in_patch_stride = ctx->args->options->key.tcs.input_vertices * ctx->tcs_num_inputs * 4;
3497 uint32_t tcs_in_vertex_stride = ctx->tcs_num_inputs * 4;
3498
3499 std::pair<Temp, unsigned> offs = get_intrinsic_io_basic_offset(ctx, instr);
3500
3501 nir_src *vertex_index_src = nir_get_io_vertex_index_src(instr);
3502 offs = offset_add_from_nir(ctx, offs, vertex_index_src, tcs_in_vertex_stride);
3503
3504 Temp rel_patch_id = get_tess_rel_patch_id(ctx);
3505 Temp tcs_in_current_patch_offset = bld.v_mul24_imm(bld.def(v1), rel_patch_id, tcs_in_patch_stride);
3506 offs = offset_add(ctx, offs, std::make_pair(tcs_in_current_patch_offset, 0));
3507
3508 return offset_mul(ctx, offs, 4u);
3509 }
3510
3511 std::pair<Temp, unsigned> get_tcs_output_lds_offset(isel_context *ctx, nir_intrinsic_instr *instr = nullptr, bool per_vertex = false)
3512 {
3513 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
3514 Builder bld(ctx->program, ctx->block);
3515
3516 uint32_t input_patch_size = ctx->args->options->key.tcs.input_vertices * ctx->tcs_num_inputs * 16;
3517 uint32_t num_tcs_outputs = util_last_bit64(ctx->args->shader_info->tcs.outputs_written);
3518 uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->args->shader_info->tcs.patch_outputs_written);
3519 uint32_t output_vertex_size = num_tcs_outputs * 16;
3520 uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
3521 uint32_t output_patch_stride = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
3522
3523 std::pair<Temp, unsigned> offs = instr
3524 ? get_intrinsic_io_basic_offset(ctx, instr, 4u)
3525 : std::make_pair(Temp(), 0u);
3526
3527 Temp rel_patch_id = get_tess_rel_patch_id(ctx);
3528 Temp patch_off = bld.v_mul24_imm(bld.def(v1), rel_patch_id, output_patch_stride);
3529
3530 if (per_vertex) {
3531 assert(instr);
3532
3533 nir_src *vertex_index_src = nir_get_io_vertex_index_src(instr);
3534 offs = offset_add_from_nir(ctx, offs, vertex_index_src, output_vertex_size);
3535
3536 uint32_t output_patch0_offset = (input_patch_size * ctx->tcs_num_patches);
3537 offs = offset_add(ctx, offs, std::make_pair(patch_off, output_patch0_offset));
3538 } else {
3539 uint32_t output_patch0_patch_data_offset = (input_patch_size * ctx->tcs_num_patches + pervertex_output_patch_size);
3540 offs = offset_add(ctx, offs, std::make_pair(patch_off, output_patch0_patch_data_offset));
3541 }
3542
3543 return offs;
3544 }
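/* The offsets above assume this LDS layout (all sizes in bytes):
 *
 *   [ all input patches ]                          input_patch_size * tcs_num_patches
 *   [ patch 0 outputs: per-vertex | per-patch ]    output_patch_stride
 *   [ patch 1 outputs: per-vertex | per-patch ]    ...
 *
 * so a patch's per-vertex outputs start at output_patch0_offset +
 * rel_patch_id * output_patch_stride, and its per-patch data follows
 * pervertex_output_patch_size bytes later. */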
3545
3546 std::pair<Temp, unsigned> get_tcs_per_vertex_output_vmem_offset(isel_context *ctx, nir_intrinsic_instr *instr)
3547 {
3548 Builder bld(ctx->program, ctx->block);
3549
3550 unsigned vertices_per_patch = ctx->shader->info.tess.tcs_vertices_out;
3551 unsigned attr_stride = vertices_per_patch * ctx->tcs_num_patches;
3552
3553 std::pair<Temp, unsigned> offs = get_intrinsic_io_basic_offset(ctx, instr, attr_stride * 4u, 4u);
3554
3555 Temp rel_patch_id = get_tess_rel_patch_id(ctx);
3556 Temp patch_off = bld.v_mul24_imm(bld.def(v1), rel_patch_id, vertices_per_patch * 16u);
3557 offs = offset_add(ctx, offs, std::make_pair(patch_off, 0u));
3558
3559 nir_src *vertex_index_src = nir_get_io_vertex_index_src(instr);
3560 offs = offset_add_from_nir(ctx, offs, vertex_index_src, 16u);
3561
3562 return offs;
3563 }
3564
3565 std::pair<Temp, unsigned> get_tcs_per_patch_output_vmem_offset(isel_context *ctx, nir_intrinsic_instr *instr = nullptr, unsigned const_base_offset = 0u)
3566 {
3567 Builder bld(ctx->program, ctx->block);
3568
3569 unsigned num_tcs_outputs = ctx->shader->info.stage == MESA_SHADER_TESS_CTRL
3570 ? util_last_bit64(ctx->args->shader_info->tcs.outputs_written)
3571 : ctx->args->options->key.tes.tcs_num_outputs;
3572
3573 unsigned output_vertex_size = num_tcs_outputs * 16;
3574 unsigned per_vertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
3575 unsigned per_patch_data_offset = per_vertex_output_patch_size * ctx->tcs_num_patches;
3576 unsigned attr_stride = ctx->tcs_num_patches;
3577
3578 std::pair<Temp, unsigned> offs = instr
3579 ? get_intrinsic_io_basic_offset(ctx, instr, attr_stride * 4u, 4u)
3580 : std::make_pair(Temp(), 0u);
3581
3582 if (const_base_offset)
3583 offs.second += const_base_offset * attr_stride;
3584
3585 Temp rel_patch_id = get_tess_rel_patch_id(ctx);
3586 Temp patch_off = bld.v_mul_imm(bld.def(v1), rel_patch_id, 16u);
3587 offs = offset_add(ctx, offs, std::make_pair(patch_off, per_patch_data_offset));
3588
3589 return offs;
3590 }
3591
3592 bool tcs_driver_location_matches_api_mask(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex, uint64_t mask, bool *indirect)
3593 {
3594 unsigned off = nir_intrinsic_base(instr) * 4u;
3595 nir_src *off_src = nir_get_io_offset_src(instr);
3596
3597 if (!nir_src_is_const(*off_src)) {
3598 *indirect = true;
3599 return false;
3600 }
3601
3602 *indirect = false;
3603 off += nir_src_as_uint(*off_src) * 16u;
3604
3605 while (mask) {
3606 unsigned slot = u_bit_scan64(&mask) + (per_vertex ? 0 : VARYING_SLOT_PATCH0);
3607 if (off == shader_io_get_unique_index((gl_varying_slot) slot) * 16u)
3608 return true;
3609 }
3610
3611 return false;
3612 }
3613
3614 bool store_output_to_temps(isel_context *ctx, nir_intrinsic_instr *instr)
3615 {
3616 unsigned write_mask = nir_intrinsic_write_mask(instr);
3617 unsigned component = nir_intrinsic_component(instr);
3618 unsigned idx = nir_intrinsic_base(instr) + component;
3619
3620 nir_instr *off_instr = instr->src[1].ssa->parent_instr;
3621 if (off_instr->type != nir_instr_type_load_const)
3622 return false;
3623
3624 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
3625 idx += nir_src_as_uint(instr->src[1]) * 4u;
3626
3627 if (instr->src[0].ssa->bit_size == 64)
3628 write_mask = widen_mask(write_mask, 2);
3629
3630 for (unsigned i = 0; i < 8; ++i) {
3631 if (write_mask & (1 << i)) {
3632 ctx->outputs.mask[idx / 4u] |= 1 << (idx % 4u);
3633 ctx->outputs.temps[idx] = emit_extract_vector(ctx, src, i, v1);
3634 }
3635 idx++;
3636 }
3637
3638 return true;
3639 }
3640
3641 bool load_input_from_temps(isel_context *ctx, nir_intrinsic_instr *instr, Temp dst)
3642 {
3643 /* Only TCS per-vertex inputs are supported by this function.
3644 * Per-vertex inputs only match between the VS and TCS invocation IDs when the number of invocations is the same.
3645 */
3646 if (ctx->shader->info.stage != MESA_SHADER_TESS_CTRL || !ctx->tcs_in_out_eq)
3647 return false;
3648
3649 nir_src *off_src = nir_get_io_offset_src(instr);
3650 nir_src *vertex_index_src = nir_get_io_vertex_index_src(instr);
3651 nir_instr *vertex_index_instr = vertex_index_src->ssa->parent_instr;
3652 bool can_use_temps = nir_src_is_const(*off_src) &&
3653 vertex_index_instr->type == nir_instr_type_intrinsic &&
3654 nir_instr_as_intrinsic(vertex_index_instr)->intrinsic == nir_intrinsic_load_invocation_id;
3655
3656 if (!can_use_temps)
3657 return false;
3658
3659 unsigned idx = nir_intrinsic_base(instr) + nir_intrinsic_component(instr) + 4 * nir_src_as_uint(*off_src);
3660 Temp *src = &ctx->inputs.temps[idx];
3661 Temp vec = create_vec_from_array(ctx, src, dst.size(), dst.regClass().type(), 4u);
3662 assert(vec.size() == dst.size());
3663
3664 Builder bld(ctx->program, ctx->block);
3665 bld.copy(Definition(dst), vec);
3666 return true;
3667 }
3668
3669 void visit_store_ls_or_es_output(isel_context *ctx, nir_intrinsic_instr *instr)
3670 {
3671 Builder bld(ctx->program, ctx->block);
3672
3673 std::pair<Temp, unsigned> offs = get_intrinsic_io_basic_offset(ctx, instr, 4u);
3674 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
3675 unsigned write_mask = nir_intrinsic_write_mask(instr);
3676 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8u;
3677
3678 if (ctx->tcs_in_out_eq && store_output_to_temps(ctx, instr)) {
3679 /* When the TCS only reads this output directly and for the same vertices as its invocation id, it is unnecessary to store the VS output to LDS. */
3680 bool indirect_write;
3681 bool temp_only_input = tcs_driver_location_matches_api_mask(ctx, instr, true, ctx->tcs_temp_only_inputs, &indirect_write);
3682 if (temp_only_input && !indirect_write)
3683 return;
3684 }
3685
3686 if (ctx->stage == vertex_es || ctx->stage == tess_eval_es) {
3687 /* GFX6-8: ES stage is not merged into GS, data is passed from ES to GS in VMEM. */
3688 Temp esgs_ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_ESGS_VS * 16u));
3689 Temp es2gs_offset = get_arg(ctx, ctx->args->es2gs_offset);
3690 store_vmem_mubuf(ctx, src, esgs_ring, offs.first, es2gs_offset, offs.second, elem_size_bytes, write_mask, false, true, true);
3691 } else {
3692 Temp lds_base;
3693
3694 if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs) {
3695 /* GFX9+: ES stage is merged into GS, data is passed between them using LDS. */
3696 unsigned itemsize = ctx->stage == vertex_geometry_gs
3697 ? ctx->program->info->vs.es_info.esgs_itemsize
3698 : ctx->program->info->tes.es_info.esgs_itemsize;
3699 Temp thread_id = emit_mbcnt(ctx, bld.def(v1));
3700 Temp wave_idx = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), get_arg(ctx, ctx->args->merged_wave_info), Operand(4u << 16 | 24));
3701 Temp vertex_idx = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), thread_id,
3702 bld.v_mul24_imm(bld.def(v1), as_vgpr(ctx, wave_idx), ctx->program->wave_size));
3703 lds_base = bld.v_mul24_imm(bld.def(v1), vertex_idx, itemsize);
3704 } else if (ctx->stage == vertex_ls || ctx->stage == vertex_tess_control_hs) {
3705 /* GFX6-8: VS runs on LS stage when tessellation is used, but LS shares LDS space with HS.
3706 * GFX9+: LS is merged into HS, but still uses the same LDS layout.
3707 */
3708 unsigned num_tcs_inputs = util_last_bit64(ctx->args->shader_info->vs.ls_outputs_written);
3709 Temp vertex_idx = get_arg(ctx, ctx->args->rel_auto_id);
3710 lds_base = bld.v_mul_imm(bld.def(v1), vertex_idx, num_tcs_inputs * 16u);
3711 } else {
3712 unreachable("Invalid LS or ES stage");
3713 }
3714
3715 offs = offset_add(ctx, offs, std::make_pair(lds_base, 0u));
3716 unsigned lds_align = calculate_lds_alignment(ctx, offs.second);
3717 store_lds(ctx, elem_size_bytes, src, write_mask, offs.first, offs.second, lds_align);
3718 }
3719 }
3720
3721 bool should_write_tcs_patch_output_to_vmem(isel_context *ctx, nir_intrinsic_instr *instr)
3722 {
3723 unsigned off = nir_intrinsic_base(instr) * 4u;
3724 return off != ctx->tcs_tess_lvl_out_loc &&
3725 off != ctx->tcs_tess_lvl_in_loc;
3726 }
3727
3728 bool should_write_tcs_output_to_lds(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex)
3729 {
3730 /* When none of the appropriate outputs are read, we are OK to never write to LDS */
3731 if (per_vertex ? ctx->shader->info.outputs_read == 0U : ctx->shader->info.patch_outputs_read == 0u)
3732 return false;
3733
3734 uint64_t mask = per_vertex
3735 ? ctx->shader->info.outputs_read
3736 : ctx->shader->info.patch_outputs_read;
3737 bool indirect_write;
3738 bool output_read = tcs_driver_location_matches_api_mask(ctx, instr, per_vertex, mask, &indirect_write);
3739 return indirect_write || output_read;
3740 }
3741
3742 void visit_store_tcs_output(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex)
3743 {
3744 assert(ctx->stage == tess_control_hs || ctx->stage == vertex_tess_control_hs);
3745 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
3746
3747 Builder bld(ctx->program, ctx->block);
3748
3749 Temp store_val = get_ssa_temp(ctx, instr->src[0].ssa);
3750 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
3751 unsigned write_mask = nir_intrinsic_write_mask(instr);
3752
3753 /* Only write to VMEM if the output is per-vertex, or it's a per-patch output that is not a tess factor */
3754 bool write_to_vmem = per_vertex || should_write_tcs_patch_output_to_vmem(ctx, instr);
3755 /* Only write to LDS if the output is read by the shader, or it's per-patch tess factor */
3756 bool write_to_lds = !write_to_vmem || should_write_tcs_output_to_lds(ctx, instr, per_vertex);
3757
3758 if (write_to_vmem) {
3759 std::pair<Temp, unsigned> vmem_offs = per_vertex
3760 ? get_tcs_per_vertex_output_vmem_offset(ctx, instr)
3761 : get_tcs_per_patch_output_vmem_offset(ctx, instr);
3762
3763 Temp hs_ring_tess_offchip = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_HS_TESS_OFFCHIP * 16u));
3764 Temp oc_lds = get_arg(ctx, ctx->args->oc_lds);
3765 store_vmem_mubuf(ctx, store_val, hs_ring_tess_offchip, vmem_offs.first, oc_lds, vmem_offs.second, elem_size_bytes, write_mask, true, false);
3766 }
3767
3768 if (write_to_lds) {
3769 std::pair<Temp, unsigned> lds_offs = get_tcs_output_lds_offset(ctx, instr, per_vertex);
3770 unsigned lds_align = calculate_lds_alignment(ctx, lds_offs.second);
3771 store_lds(ctx, elem_size_bytes, store_val, write_mask, lds_offs.first, lds_offs.second, lds_align);
3772 }
3773 }
3774
3775 void visit_load_tcs_output(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex)
3776 {
3777 assert(ctx->stage == tess_control_hs || ctx->stage == vertex_tess_control_hs);
3778 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
3779
3780 Builder bld(ctx->program, ctx->block);
3781
3782 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
3783 std::pair<Temp, unsigned> lds_offs = get_tcs_output_lds_offset(ctx, instr, per_vertex);
3784 unsigned lds_align = calculate_lds_alignment(ctx, lds_offs.second);
3785 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
3786
3787 load_lds(ctx, elem_size_bytes, dst, lds_offs.first, lds_offs.second, lds_align);
3788 }
3789
3790 void visit_store_output(isel_context *ctx, nir_intrinsic_instr *instr)
3791 {
3792 if (ctx->stage == vertex_vs ||
3793 ctx->stage == tess_eval_vs ||
3794 ctx->stage == fragment_fs ||
3795 ctx->shader->info.stage == MESA_SHADER_GEOMETRY) {
3796 bool stored_to_temps = store_output_to_temps(ctx, instr);
3797 if (!stored_to_temps) {
3798 fprintf(stderr, "Unimplemented output offset instruction:\n");
3799 nir_print_instr(instr->src[1].ssa->parent_instr, stderr);
3800 fprintf(stderr, "\n");
3801 abort();
3802 }
3803 } else if (ctx->stage == vertex_es ||
3804 ctx->stage == vertex_ls ||
3805 ctx->stage == tess_eval_es ||
3806 (ctx->stage == vertex_tess_control_hs && ctx->shader->info.stage == MESA_SHADER_VERTEX) ||
3807 (ctx->stage == vertex_geometry_gs && ctx->shader->info.stage == MESA_SHADER_VERTEX) ||
3808 (ctx->stage == tess_eval_geometry_gs && ctx->shader->info.stage == MESA_SHADER_TESS_EVAL)) {
3809 visit_store_ls_or_es_output(ctx, instr);
3810 } else if (ctx->shader->info.stage == MESA_SHADER_TESS_CTRL) {
3811 visit_store_tcs_output(ctx, instr, false);
3812 } else {
3813 unreachable("Shader stage not implemented");
3814 }
3815 }
3816
3817 void visit_load_output(isel_context *ctx, nir_intrinsic_instr *instr)
3818 {
3819 visit_load_tcs_output(ctx, instr, false);
3820 }
3821
3822 void emit_interp_instr(isel_context *ctx, unsigned idx, unsigned component, Temp src, Temp dst, Temp prim_mask)
3823 {
3824 Temp coord1 = emit_extract_vector(ctx, src, 0, v1);
3825 Temp coord2 = emit_extract_vector(ctx, src, 1, v1);
3826
3827 Builder bld(ctx->program, ctx->block);
3828 Builder::Result interp_p1 = bld.vintrp(aco_opcode::v_interp_p1_f32, bld.def(v1), coord1, bld.m0(prim_mask), idx, component);
3829 if (ctx->program->has_16bank_lds)
3830 interp_p1.instr->operands[0].setLateKill(true);
3831 bld.vintrp(aco_opcode::v_interp_p2_f32, Definition(dst), coord2, bld.m0(prim_mask), interp_p1, idx, component);
3832 }
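/* v_interp_p1/p2 evaluate the attribute in two steps, reading the
 * per-primitive P0/P10/P20 deltas from LDS via M0:
 *   p1: tmp = i * P10 + P0
 *   p2: dst = j * P20 + tmp   =>   dst = P0 + i * (P1 - P0) + j * (P2 - P0)
 * where (i, j) are the barycentric coordinates in src. */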
3833
3834 void emit_load_frag_coord(isel_context *ctx, Temp dst, unsigned num_components)
3835 {
3836 aco_ptr<Pseudo_instruction> vec(create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1));
3837 for (unsigned i = 0; i < num_components; i++)
3838 vec->operands[i] = Operand(get_arg(ctx, ctx->args->ac.frag_pos[i]));
3839 if (G_0286CC_POS_W_FLOAT_ENA(ctx->program->config->spi_ps_input_ena)) {
3840 assert(num_components == 4);
3841 Builder bld(ctx->program, ctx->block);
3842 vec->operands[3] = bld.vop1(aco_opcode::v_rcp_f32, bld.def(v1), get_arg(ctx, ctx->args->ac.frag_pos[3]));
3843 }
3844
3845 for (Operand& op : vec->operands)
3846 op = op.isUndefined() ? Operand(0u) : op;
3847
3848 vec->definitions[0] = Definition(dst);
3849 ctx->block->instructions.emplace_back(std::move(vec));
3850 emit_split_vector(ctx, dst, num_components);
3851 return;
3852 }
3853
3854 void visit_load_interpolated_input(isel_context *ctx, nir_intrinsic_instr *instr)
3855 {
3856 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
3857 Temp coords = get_ssa_temp(ctx, instr->src[0].ssa);
3858 unsigned idx = nir_intrinsic_base(instr);
3859 unsigned component = nir_intrinsic_component(instr);
3860 Temp prim_mask = get_arg(ctx, ctx->args->ac.prim_mask);
3861
3862 nir_const_value* offset = nir_src_as_const_value(instr->src[1]);
3863 if (offset) {
3864 assert(offset->u32 == 0);
3865 } else {
3866 /* the lower 15 bits of the prim_mask contain the offset into LDS,
3867 * while the upper bits contain the number of prims */
3868 Temp offset_src = get_ssa_temp(ctx, instr->src[1].ssa);
3869 assert(offset_src.regClass() == s1 && "TODO: divergent offsets...");
3870 Builder bld(ctx->program, ctx->block);
3871 Temp stride = bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc), prim_mask, Operand(16u));
3872 stride = bld.sop1(aco_opcode::s_bcnt1_i32_b32, bld.def(s1), bld.def(s1, scc), stride);
3873 stride = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), stride, Operand(48u));
3874 offset_src = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), stride, offset_src);
3875 prim_mask = bld.sop2(aco_opcode::s_add_i32, bld.def(s1, m0), bld.def(s1, scc), offset_src, prim_mask);
3876 }
3877
3878 if (instr->dest.ssa.num_components == 1) {
3879 emit_interp_instr(ctx, idx, component, coords, dst, prim_mask);
3880 } else {
3881 aco_ptr<Pseudo_instruction> vec(create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, instr->dest.ssa.num_components, 1));
3882 for (unsigned i = 0; i < instr->dest.ssa.num_components; i++)
3883 {
3884 Temp tmp = {ctx->program->allocateId(), v1};
3885 emit_interp_instr(ctx, idx, component+i, coords, tmp, prim_mask);
3886 vec->operands[i] = Operand(tmp);
3887 }
3888 vec->definitions[0] = Definition(dst);
3889 ctx->block->instructions.emplace_back(std::move(vec));
3890 }
3891 }
3892
3893 bool check_vertex_fetch_size(isel_context *ctx, const ac_data_format_info *vtx_info,
3894 unsigned offset, unsigned stride, unsigned channels)
3895 {
3896 unsigned vertex_byte_size = vtx_info->chan_byte_size * channels;
3897 if (vtx_info->chan_byte_size != 4 && channels == 3)
3898 return false;
3899 return (ctx->options->chip_class != GFX6 && ctx->options->chip_class != GFX10) ||
3900 (offset % vertex_byte_size == 0 && stride % vertex_byte_size == 0);
3901 }
3902
3903 uint8_t get_fetch_data_format(isel_context *ctx, const ac_data_format_info *vtx_info,
3904 unsigned offset, unsigned stride, unsigned *channels)
3905 {
3906 if (!vtx_info->chan_byte_size) {
3907 *channels = vtx_info->num_channels;
3908 return vtx_info->chan_format;
3909 }
3910
3911 unsigned num_channels = *channels;
3912 if (!check_vertex_fetch_size(ctx, vtx_info, offset, stride, *channels)) {
3913 unsigned new_channels = num_channels + 1;
3914 /* first, assume more loads is worse and try using a larger data format */
3915 while (new_channels <= 4 && !check_vertex_fetch_size(ctx, vtx_info, offset, stride, new_channels)) {
3916 new_channels++;
3917 /* don't make the attribute potentially out-of-bounds */
3918 if (offset + new_channels * vtx_info->chan_byte_size > stride)
3919 new_channels = 5;
3920 }
3921
3922 if (new_channels == 5) {
3923 /* then try decreasing load size (at the cost of more loads) */
3924 new_channels = *channels;
3925 while (new_channels > 1 && !check_vertex_fetch_size(ctx, vtx_info, offset, stride, new_channels))
3926 new_channels--;
3927 }
3928
3929 if (new_channels < *channels)
3930 *channels = new_channels;
3931 num_channels = new_channels;
3932 }
3933
3934 switch (vtx_info->chan_format) {
3935 case V_008F0C_BUF_DATA_FORMAT_8:
3936 return (uint8_t[]){V_008F0C_BUF_DATA_FORMAT_8, V_008F0C_BUF_DATA_FORMAT_8_8,
3937 V_008F0C_BUF_DATA_FORMAT_INVALID, V_008F0C_BUF_DATA_FORMAT_8_8_8_8}[num_channels - 1];
3938 case V_008F0C_BUF_DATA_FORMAT_16:
3939 return (uint8_t[]){V_008F0C_BUF_DATA_FORMAT_16, V_008F0C_BUF_DATA_FORMAT_16_16,
3940 V_008F0C_BUF_DATA_FORMAT_INVALID, V_008F0C_BUF_DATA_FORMAT_16_16_16_16}[num_channels - 1];
3941 case V_008F0C_BUF_DATA_FORMAT_32:
3942 return (uint8_t[]){V_008F0C_BUF_DATA_FORMAT_32, V_008F0C_BUF_DATA_FORMAT_32_32,
3943 V_008F0C_BUF_DATA_FORMAT_32_32_32, V_008F0C_BUF_DATA_FORMAT_32_32_32_32}[num_channels - 1];
3944 }
3945 unreachable("shouldn't reach here");
3946 return V_008F0C_BUF_DATA_FORMAT_INVALID;
3947 }
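/* e.g. a 3-channel 16-bit attribute has no 16_16_16 data format (the table
 * above returns INVALID for it), so the search first widens the fetch to
 * 16_16_16_16 when that stays inside the stride, and otherwise shrinks it
 * and lets the caller issue more, smaller loads. */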
3948
3949 /* For 2_10_10_10 formats the alpha is handled as unsigned by pre-Vega HW,
3950 * so we may need to fix it up. */
3951 Temp adjust_vertex_fetch_alpha(isel_context *ctx, unsigned adjustment, Temp alpha)
3952 {
3953 Builder bld(ctx->program, ctx->block);
3954
3955 if (adjustment == RADV_ALPHA_ADJUST_SSCALED)
3956 alpha = bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), alpha);
3957
3958 /* For the integer-like cases, do a natural sign extension.
3959 *
3960 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
3961 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
3962 * exponent.
3963 */
3964 alpha = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(adjustment == RADV_ALPHA_ADJUST_SNORM ? 7u : 30u), alpha);
3965 alpha = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(30u), alpha);
3966
3967 /* Convert back to the right type. */
3968 if (adjustment == RADV_ALPHA_ADJUST_SNORM) {
3969 alpha = bld.vop1(aco_opcode::v_cvt_f32_i32, bld.def(v1), alpha);
3970 Temp clamp = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0xbf800000u), alpha);
3971 alpha = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0xbf800000u), alpha, clamp);
3972 } else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) {
3973 alpha = bld.vop1(aco_opcode::v_cvt_f32_i32, bld.def(v1), alpha);
3974 }
3975
3976 return alpha;
3977 }
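/* Worked example of the sign-extension trick above: for the SINT-like cases
 * (shift 30), a 2-bit alpha of 0b11 becomes 0b11 << 30 = 0xc0000000, and
 * v_ashrrev_i32 by 30 yields 0xffffffff = -1, the natural sign extension.
 * For SNORM the two LSBs sit in the float exponent (bits [24:23]), so the
 * shift of 7 moves them into bits [31:30] before the same arithmetic shift. */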
3978
3979 void visit_load_input(isel_context *ctx, nir_intrinsic_instr *instr)
3980 {
3981 Builder bld(ctx->program, ctx->block);
3982 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
3983 if (ctx->shader->info.stage == MESA_SHADER_VERTEX) {
3984
3985 nir_instr *off_instr = instr->src[0].ssa->parent_instr;
3986 if (off_instr->type != nir_instr_type_load_const) {
3987 fprintf(stderr, "Unimplemented nir_intrinsic_load_input offset\n");
3988 nir_print_instr(off_instr, stderr);
3989 fprintf(stderr, "\n");
3990 }
3991 uint32_t offset = nir_instr_as_load_const(off_instr)->value[0].u32;
3992
3993 Temp vertex_buffers = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->vertex_buffers));
3994
3995 unsigned location = nir_intrinsic_base(instr) / 4 - VERT_ATTRIB_GENERIC0 + offset;
3996 unsigned component = nir_intrinsic_component(instr);
3997 unsigned attrib_binding = ctx->options->key.vs.vertex_attribute_bindings[location];
3998 uint32_t attrib_offset = ctx->options->key.vs.vertex_attribute_offsets[location];
3999 uint32_t attrib_stride = ctx->options->key.vs.vertex_attribute_strides[location];
4000 unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[location];
4001
4002 unsigned dfmt = attrib_format & 0xf;
4003 unsigned nfmt = (attrib_format >> 4) & 0x7;
4004 const struct ac_data_format_info *vtx_info = ac_get_data_format_info(dfmt);
4005
4006 unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa) << component;
4007 unsigned num_channels = MIN2(util_last_bit(mask), vtx_info->num_channels);
4008 unsigned alpha_adjust = (ctx->options->key.vs.alpha_adjust >> (location * 2)) & 3;
4009 bool post_shuffle = ctx->options->key.vs.post_shuffle & (1 << location);
4010 if (post_shuffle)
4011 num_channels = MAX2(num_channels, 3);
4012
4013 Operand off = bld.copy(bld.def(s1), Operand(attrib_binding * 16u));
4014 Temp list = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), vertex_buffers, off);
4015
4016 Temp index;
4017 if (ctx->options->key.vs.instance_rate_inputs & (1u << location)) {
4018 uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[location];
4019 Temp start_instance = get_arg(ctx, ctx->args->ac.start_instance);
4020 if (divisor) {
4021 Temp instance_id = get_arg(ctx, ctx->args->ac.instance_id);
4022 if (divisor != 1) {
4023 Temp divided = bld.tmp(v1);
4024 emit_v_div_u32(ctx, divided, as_vgpr(ctx, instance_id), divisor);
4025 index = bld.vadd32(bld.def(v1), start_instance, divided);
4026 } else {
4027 index = bld.vadd32(bld.def(v1), start_instance, instance_id);
4028 }
4029 } else {
4030 index = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), start_instance);
4031 }
4032 } else {
4033 index = bld.vadd32(bld.def(v1),
4034 get_arg(ctx, ctx->args->ac.base_vertex),
4035 get_arg(ctx, ctx->args->ac.vertex_id));
4036 }
4037
4038 Temp channels[num_channels];
4039 unsigned channel_start = 0;
4040 bool direct_fetch = false;
4041
4042 /* skip unused channels at the start */
4043 if (vtx_info->chan_byte_size && !post_shuffle) {
4044 channel_start = ffs(mask) - 1;
4045 for (unsigned i = 0; i < channel_start; i++)
4046 channels[i] = Temp(0, s1);
4047 } else if (vtx_info->chan_byte_size && post_shuffle && !(mask & 0x8)) {
4048 num_channels = 3 - (ffs(mask) - 1);
4049 }
4050
4051 /* load channels */
4052 while (channel_start < num_channels) {
4053 unsigned fetch_size = num_channels - channel_start;
4054 unsigned fetch_offset = attrib_offset + channel_start * vtx_info->chan_byte_size;
4055 bool expanded = false;
4056
4057 /* use MUBUF when possible to avoid alignment issues */
4058 /* TODO: we could use SDWA to unpack 8/16-bit attributes without extra instructions */
4059 bool use_mubuf = (nfmt == V_008F0C_BUF_NUM_FORMAT_FLOAT ||
4060 nfmt == V_008F0C_BUF_NUM_FORMAT_UINT ||
4061 nfmt == V_008F0C_BUF_NUM_FORMAT_SINT) &&
4062 vtx_info->chan_byte_size == 4;
4063 unsigned fetch_dfmt = V_008F0C_BUF_DATA_FORMAT_INVALID;
4064 if (!use_mubuf) {
4065 fetch_dfmt = get_fetch_data_format(ctx, vtx_info, fetch_offset, attrib_stride, &fetch_size);
4066 } else {
4067 if (fetch_size == 3 && ctx->options->chip_class == GFX6) {
4068 /* GFX6 only supports loading vec3 with MTBUF, expand to vec4. */
4069 fetch_size = 4;
4070 expanded = true;
4071 }
4072 }
4073
4074 Temp fetch_index = index;
4075 if (attrib_stride != 0 && fetch_offset > attrib_stride) {
4076 fetch_index = bld.vadd32(bld.def(v1), Operand(fetch_offset / attrib_stride), fetch_index);
4077 fetch_offset = fetch_offset % attrib_stride;
4078 }
4079
4080 Operand soffset(0u);
4081 if (fetch_offset >= 4096) {
4082 soffset = bld.copy(bld.def(s1), Operand(fetch_offset / 4096 * 4096));
4083 fetch_offset %= 4096;
4084 }
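/* The MUBUF/MTBUF immediate offset field is 12 bits (max 4095), so any
 * larger constant offset is moved into the scalar soffset operand in
 * multiples of 4096. */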
4085
4086 aco_opcode opcode;
4087 switch (fetch_size) {
4088 case 1:
4089 opcode = use_mubuf ? aco_opcode::buffer_load_dword : aco_opcode::tbuffer_load_format_x;
4090 break;
4091 case 2:
4092 opcode = use_mubuf ? aco_opcode::buffer_load_dwordx2 : aco_opcode::tbuffer_load_format_xy;
4093 break;
4094 case 3:
4095 assert(ctx->options->chip_class >= GFX7 ||
4096 (!use_mubuf && ctx->options->chip_class == GFX6));
4097 opcode = use_mubuf ? aco_opcode::buffer_load_dwordx3 : aco_opcode::tbuffer_load_format_xyz;
4098 break;
4099 case 4:
4100 opcode = use_mubuf ? aco_opcode::buffer_load_dwordx4 : aco_opcode::tbuffer_load_format_xyzw;
4101 break;
4102 default:
4103 unreachable("Unimplemented load_input vector size");
4104 }
4105
4106 Temp fetch_dst;
4107 if (channel_start == 0 && fetch_size == dst.size() && !post_shuffle &&
4108 !expanded && (alpha_adjust == RADV_ALPHA_ADJUST_NONE ||
4109 num_channels <= 3)) {
4110 direct_fetch = true;
4111 fetch_dst = dst;
4112 } else {
4113 fetch_dst = bld.tmp(RegType::vgpr, fetch_size);
4114 }
4115
4116 if (use_mubuf) {
4117 Instruction *mubuf = bld.mubuf(opcode,
4118 Definition(fetch_dst), list, fetch_index, soffset,
4119 fetch_offset, false, true).instr;
4120 static_cast<MUBUF_instruction*>(mubuf)->can_reorder = true;
4121 } else {
4122 Instruction *mtbuf = bld.mtbuf(opcode,
4123 Definition(fetch_dst), list, fetch_index, soffset,
4124 fetch_dfmt, nfmt, fetch_offset, false, true).instr;
4125 static_cast<MTBUF_instruction*>(mtbuf)->can_reorder = true;
4126 }
4127
4128 emit_split_vector(ctx, fetch_dst, fetch_dst.size());
4129
4130 if (fetch_size == 1) {
4131 channels[channel_start] = fetch_dst;
4132 } else {
4133 for (unsigned i = 0; i < MIN2(fetch_size, num_channels - channel_start); i++)
4134 channels[channel_start + i] = emit_extract_vector(ctx, fetch_dst, i, v1);
4135 }
4136
4137 channel_start += fetch_size;
4138 }
4139
4140 if (!direct_fetch) {
4141 bool is_float = nfmt != V_008F0C_BUF_NUM_FORMAT_UINT &&
4142 nfmt != V_008F0C_BUF_NUM_FORMAT_SINT;
4143
4144 static const unsigned swizzle_normal[4] = {0, 1, 2, 3};
4145 static const unsigned swizzle_post_shuffle[4] = {2, 1, 0, 3};
4146 const unsigned *swizzle = post_shuffle ? swizzle_post_shuffle : swizzle_normal;
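/* For post-shuffle formats, reading channel swizzle[i] = {2,1,0,3}[i]
 * swaps the first and third fetched channels, turning the BGRA data in
 * memory back into the RGBA order the shader expects. */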
4147
4148 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
4149 std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
4150 unsigned num_temp = 0;
4151 for (unsigned i = 0; i < dst.size(); i++) {
4152 unsigned idx = i + component;
4153 if (swizzle[idx] < num_channels && channels[swizzle[idx]].id()) {
4154 Temp channel = channels[swizzle[idx]];
4155 if (idx == 3 && alpha_adjust != RADV_ALPHA_ADJUST_NONE)
4156 channel = adjust_vertex_fetch_alpha(ctx, alpha_adjust, channel);
4157 vec->operands[i] = Operand(channel);
4158
4159 num_temp++;
4160 elems[i] = channel;
4161 } else if (is_float && idx == 3) {
4162 vec->operands[i] = Operand(0x3f800000u);
4163 } else if (!is_float && idx == 3) {
4164 vec->operands[i] = Operand(1u);
4165 } else {
4166 vec->operands[i] = Operand(0u);
4167 }
4168 }
4169 vec->definitions[0] = Definition(dst);
4170 ctx->block->instructions.emplace_back(std::move(vec));
4171 emit_split_vector(ctx, dst, dst.size());
4172
4173 if (num_temp == dst.size())
4174 ctx->allocated_vec.emplace(dst.id(), elems);
4175 }
4176 } else if (ctx->shader->info.stage == MESA_SHADER_FRAGMENT) {
4177 unsigned offset_idx = instr->intrinsic == nir_intrinsic_load_input ? 0 : 1;
4178 nir_instr *off_instr = instr->src[offset_idx].ssa->parent_instr;
4179 if (off_instr->type != nir_instr_type_load_const ||
4180 nir_instr_as_load_const(off_instr)->value[0].u32 != 0) {
4181 fprintf(stderr, "Unimplemented nir_intrinsic_load_input offset\n");
4182 nir_print_instr(off_instr, stderr);
4183 fprintf(stderr, "\n");
4184 }
4185
4186 Temp prim_mask = get_arg(ctx, ctx->args->ac.prim_mask);
4187 nir_const_value* offset = nir_src_as_const_value(instr->src[offset_idx]);
4188 if (offset) {
4189 assert(offset->u32 == 0);
4190 } else {
4191 /* the lower 15 bits of prim_mask contain the offset into LDS,
4192 * while the upper bits contain the number of prims */
4193 Temp offset_src = get_ssa_temp(ctx, instr->src[offset_idx].ssa);
4194 assert(offset_src.regClass() == s1 && "TODO: divergent offsets...");
4195 Builder bld(ctx->program, ctx->block);
4196 Temp stride = bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc), prim_mask, Operand(16u));
4197 stride = bld.sop1(aco_opcode::s_bcnt1_i32_b32, bld.def(s1), bld.def(s1, scc), stride);
4198 stride = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), stride, Operand(48u));
4199 offset_src = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), stride, offset_src);
4200 prim_mask = bld.sop2(aco_opcode::s_add_i32, bld.def(s1, m0), bld.def(s1, scc), offset_src, prim_mask);
4201 }
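/* Mechanically: popcount(prim_mask >> 16) is the number of primitives,
 * each of which presumably occupies 48 bytes of attribute storage
 * (3 vertices x vec4), so the dynamic attribute offset is scaled by
 * num_prims * 48 before being added to the base LDS offset held in the
 * lower bits of prim_mask. */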
4202
4203 unsigned idx = nir_intrinsic_base(instr);
4204 unsigned component = nir_intrinsic_component(instr);
4205 unsigned vertex_id = 2; /* P0 */
4206
4207 if (instr->intrinsic == nir_intrinsic_load_input_vertex) {
4208 nir_const_value* src0 = nir_src_as_const_value(instr->src[0]);
4209 switch (src0->u32) {
4210 case 0:
4211 vertex_id = 2; /* P0 */
4212 break;
4213 case 1:
4214 vertex_id = 0; /* P10 */
4215 break;
4216 case 2:
4217 vertex_id = 1; /* P20 */
4218 break;
4219 default:
4220 unreachable("invalid vertex index");
4221 }
4222 }
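/* v_interp_mov_f32 selects among the interpolation parameters by encoding
 * 0 = P10, 1 = P20 and 2 = P0, hence the remapping from the API-level
 * vertex index above. */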
4223
4224 if (dst.size() == 1) {
4225 bld.vintrp(aco_opcode::v_interp_mov_f32, Definition(dst), Operand(vertex_id), bld.m0(prim_mask), idx, component);
4226 } else {
4227 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
4228 for (unsigned i = 0; i < dst.size(); i++)
4229 vec->operands[i] = bld.vintrp(aco_opcode::v_interp_mov_f32, bld.def(v1), Operand(vertex_id), bld.m0(prim_mask), idx, component + i);
4230 vec->definitions[0] = Definition(dst);
4231 bld.insert(std::move(vec));
4232 }
4233
4234 } else if (ctx->shader->info.stage == MESA_SHADER_TESS_EVAL) {
4235 Temp ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_HS_TESS_OFFCHIP * 16u));
4236 Temp soffset = get_arg(ctx, ctx->args->oc_lds);
4237 std::pair<Temp, unsigned> offs = get_tcs_per_patch_output_vmem_offset(ctx, instr);
4238 unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8u;
4239
4240 load_vmem_mubuf(ctx, dst, ring, offs.first, soffset, offs.second, elem_size_bytes, instr->dest.ssa.num_components);
4241 } else {
4242 unreachable("Shader stage not implemented");
4243 }
4244 }
4245
4246 std::pair<Temp, unsigned> get_gs_per_vertex_input_offset(isel_context *ctx, nir_intrinsic_instr *instr, unsigned base_stride = 1u)
4247 {
4248 assert(ctx->shader->info.stage == MESA_SHADER_GEOMETRY);
4249
4250 Builder bld(ctx->program, ctx->block);
4251 nir_src *vertex_src = nir_get_io_vertex_index_src(instr);
4252 Temp vertex_offset;
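/* In the merged VS+GS / TES+GS stages, the ES-GS vertex offsets arrive
 * packed as two 16-bit values per argument, which is why the code below
 * indexes gs_vtx_offset[i / 2u * 2u] and extracts the high or low half. */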
4253
4254 if (!nir_src_is_const(*vertex_src)) {
4255 /* better code could be generated here, but this case is unlikely
4256 * in practice */
4257 Temp indirect_vertex = as_vgpr(ctx, get_ssa_temp(ctx, vertex_src->ssa));
4258 for (unsigned i = 0; i < ctx->shader->info.gs.vertices_in; i++) {
4259 Temp elem;
4260
4261 if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs) {
4262 elem = get_arg(ctx, ctx->args->gs_vtx_offset[i / 2u * 2u]);
4263 if (i % 2u)
4264 elem = bld.vop2(aco_opcode::v_lshrrev_b32, bld.def(v1), Operand(16u), elem);
4265 } else {
4266 elem = get_arg(ctx, ctx->args->gs_vtx_offset[i]);
4267 }
4268
4269 if (vertex_offset.id()) {
4270 Temp cond = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.hint_vcc(bld.def(bld.lm)),
4271 Operand(i), indirect_vertex);
4272 vertex_offset = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), vertex_offset, elem, cond);
4273 } else {
4274 vertex_offset = elem;
4275 }
4276 }
4277
4278 if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs)
4279 vertex_offset = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xffffu), vertex_offset);
4280 } else {
4281 unsigned vertex = nir_src_as_uint(*vertex_src);
4282 if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs)
4283 vertex_offset = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1),
4284 get_arg(ctx, ctx->args->gs_vtx_offset[vertex / 2u * 2u]),
4285 Operand((vertex % 2u) * 16u), Operand(16u));
4286 else
4287 vertex_offset = get_arg(ctx, ctx->args->gs_vtx_offset[vertex]);
4288 }
4289
4290 std::pair<Temp, unsigned> offs = get_intrinsic_io_basic_offset(ctx, instr, base_stride);
4291 offs = offset_add(ctx, offs, std::make_pair(vertex_offset, 0u));
4292 return offset_mul(ctx, offs, 4u);
4293 }
4294
4295 void visit_load_gs_per_vertex_input(isel_context *ctx, nir_intrinsic_instr *instr)
4296 {
4297 assert(ctx->shader->info.stage == MESA_SHADER_GEOMETRY);
4298
4299 Builder bld(ctx->program, ctx->block);
4300 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
4301 unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8;
4302
4303 if (ctx->stage == geometry_gs) {
4304 std::pair<Temp, unsigned> offs = get_gs_per_vertex_input_offset(ctx, instr, ctx->program->wave_size);
4305 Temp ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_ESGS_GS * 16u));
4306 load_vmem_mubuf(ctx, dst, ring, offs.first, Temp(), offs.second, elem_size_bytes, instr->dest.ssa.num_components, 4u * ctx->program->wave_size, false, true);
4307 } else if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs) {
4308 std::pair<Temp, unsigned> offs = get_gs_per_vertex_input_offset(ctx, instr);
4309 unsigned lds_align = calculate_lds_alignment(ctx, offs.second);
4310 load_lds(ctx, elem_size_bytes, dst, offs.first, offs.second, lds_align);
4311 } else {
4312 unreachable("Unsupported GS stage.");
4313 }
4314 }
4315
4316 void visit_load_tcs_per_vertex_input(isel_context *ctx, nir_intrinsic_instr *instr)
4317 {
4318 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
4319
4320 Builder bld(ctx->program, ctx->block);
4321 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
4322
4323 if (load_input_from_temps(ctx, instr, dst))
4324 return;
4325
4326 std::pair<Temp, unsigned> offs = get_tcs_per_vertex_input_lds_offset(ctx, instr);
4327 unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8;
4328 unsigned lds_align = calculate_lds_alignment(ctx, offs.second);
4329
4330 load_lds(ctx, elem_size_bytes, dst, offs.first, offs.second, lds_align);
4331 }
4332
4333 void visit_load_tes_per_vertex_input(isel_context *ctx, nir_intrinsic_instr *instr)
4334 {
4335 assert(ctx->shader->info.stage == MESA_SHADER_TESS_EVAL);
4336
4337 Builder bld(ctx->program, ctx->block);
4338
4339 Temp ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_HS_TESS_OFFCHIP * 16u));
4340 Temp oc_lds = get_arg(ctx, ctx->args->oc_lds);
4341 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
4342
4343 unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8;
4344 std::pair<Temp, unsigned> offs = get_tcs_per_vertex_output_vmem_offset(ctx, instr);
4345
4346 load_vmem_mubuf(ctx, dst, ring, offs.first, oc_lds, offs.second, elem_size_bytes, instr->dest.ssa.num_components, 0u, true, true);
4347 }
4348
4349 void visit_load_per_vertex_input(isel_context *ctx, nir_intrinsic_instr *instr)
4350 {
4351 switch (ctx->shader->info.stage) {
4352 case MESA_SHADER_GEOMETRY:
4353 visit_load_gs_per_vertex_input(ctx, instr);
4354 break;
4355 case MESA_SHADER_TESS_CTRL:
4356 visit_load_tcs_per_vertex_input(ctx, instr);
4357 break;
4358 case MESA_SHADER_TESS_EVAL:
4359 visit_load_tes_per_vertex_input(ctx, instr);
4360 break;
4361 default:
4362 unreachable("Unimplemented shader stage");
4363 }
4364 }
4365
4366 void visit_load_per_vertex_output(isel_context *ctx, nir_intrinsic_instr *instr)
4367 {
4368 visit_load_tcs_output(ctx, instr, true);
4369 }
4370
4371 void visit_store_per_vertex_output(isel_context *ctx, nir_intrinsic_instr *instr)
4372 {
4373 assert(ctx->stage == tess_control_hs || ctx->stage == vertex_tess_control_hs);
4374 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
4375
4376 visit_store_tcs_output(ctx, instr, true);
4377 }
4378
4379 void visit_load_tess_coord(isel_context *ctx, nir_intrinsic_instr *instr)
4380 {
4381 assert(ctx->shader->info.stage == MESA_SHADER_TESS_EVAL);
4382
4383 Builder bld(ctx->program, ctx->block);
4384 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
4385
4386 Operand tes_u(get_arg(ctx, ctx->args->tes_u));
4387 Operand tes_v(get_arg(ctx, ctx->args->tes_v));
4388 Operand tes_w(0u);
4389
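/* For triangles, the third barycentric coordinate is w = 1 - u - v. */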
4390 if (ctx->shader->info.tess.primitive_mode == GL_TRIANGLES) {
4391 Temp tmp = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), tes_u, tes_v);
4392 tmp = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), Operand(0x3f800000u /* 1.0f */), tmp);
4393 tes_w = Operand(tmp);
4394 }
4395
4396 Temp tess_coord = bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tes_u, tes_v, tes_w);
4397 emit_split_vector(ctx, tess_coord, 3);
4398 }
4399
4400 Temp load_desc_ptr(isel_context *ctx, unsigned desc_set)
4401 {
4402 if (ctx->program->info->need_indirect_descriptor_sets) {
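/* With indirect descriptor sets, descriptor_sets[0] points at an array of
 * 32-bit set addresses, so set i lives at byte offset i * 4 (hence
 * desc_set << 2) and is fetched with a scalar load. */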
4403 Builder bld(ctx->program, ctx->block);
4404 Temp ptr64 = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->descriptor_sets[0]));
4405 Operand off = bld.copy(bld.def(s1), Operand(desc_set << 2));
4406 return bld.smem(aco_opcode::s_load_dword, bld.def(s1), ptr64, off);
4407 }
4408
4409 return get_arg(ctx, ctx->args->descriptor_sets[desc_set]);
4410 }
4411
4412
4413 void visit_load_resource(isel_context *ctx, nir_intrinsic_instr *instr)
4414 {
4415 Builder bld(ctx->program, ctx->block);
4416 Temp index = get_ssa_temp(ctx, instr->src[0].ssa);
4417 if (!ctx->divergent_vals[instr->dest.ssa.index])
4418 index = bld.as_uniform(index);
4419 unsigned desc_set = nir_intrinsic_desc_set(instr);
4420 unsigned binding = nir_intrinsic_binding(instr);
4421
4422 Temp desc_ptr;
4423 radv_pipeline_layout *pipeline_layout = ctx->options->layout;
4424 radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
4425 unsigned offset = layout->binding[binding].offset;
4426 unsigned stride;
4427 if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
4428 layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
4429 unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start + layout->binding[binding].dynamic_offset_offset;
4430 desc_ptr = get_arg(ctx, ctx->args->ac.push_constants);
4431 offset = pipeline_layout->push_constant_size + 16 * idx;
4432 stride = 16;
4433 } else {
4434 desc_ptr = load_desc_ptr(ctx, desc_set);
4435 stride = layout->binding[binding].size;
4436 }
4437
4438 nir_const_value* nir_const_index = nir_src_as_const_value(instr->src[0]);
4439 unsigned const_index = nir_const_index ? nir_const_index->u32 : 0;
4440 if (stride != 1) {
4441 if (nir_const_index) {
4442 const_index = const_index * stride;
4443 } else if (index.type() == RegType::vgpr) {
4444 bool index24bit = layout->binding[binding].array_size <= 0x1000000;
4445 index = bld.v_mul_imm(bld.def(v1), index, stride, index24bit);
4446 } else {
4447 index = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(stride), Operand(index));
4448 }
4449 }
4450 if (offset) {
4451 if (nir_const_index) {
4452 const_index = const_index + offset;
4453 } else if (index.type() == RegType::vgpr) {
4454 index = bld.vadd32(bld.def(v1), Operand(offset), index);
4455 } else {
4456 index = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), Operand(offset), Operand(index));
4457 }
4458 }
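/* The pointer built below is effectively
 * desc_ptr + binding->offset + array_index * stride, with dynamic
 * (UNIFORM/STORAGE_BUFFER_DYNAMIC) descriptors redirected into the push
 * constant area instead. */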
4459
4460 if (nir_const_index && const_index == 0) {
4461 index = desc_ptr;
4462 } else if (index.type() == RegType::vgpr) {
4463 index = bld.vadd32(bld.def(v1),
4464 nir_const_index ? Operand(const_index) : Operand(index),
4465 Operand(desc_ptr));
4466 } else {
4467 index = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
4468 nir_const_index ? Operand(const_index) : Operand(index),
4469 Operand(desc_ptr));
4470 }
4471
4472 bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), index);
4473 }
4474
4475 void load_buffer(isel_context *ctx, unsigned num_components, unsigned component_size,
4476 Temp dst, Temp rsrc, Temp offset, int byte_align,
4477 bool glc=false, bool readonly=true)
4478 {
4479 Builder bld(ctx->program, ctx->block);
4480 bool dlc = glc && ctx->options->chip_class >= GFX10;
4481 unsigned num_bytes = num_components * component_size;
4482
4483 aco_opcode op;
4484 if (dst.type() == RegType::vgpr || ((ctx->options->chip_class < GFX8 || component_size < 4) && !readonly)) {
4485 Operand vaddr = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
4486 Operand soffset = offset.type() == RegType::sgpr ? Operand(offset) : Operand((uint32_t) 0);
4487 unsigned const_offset = 0;
4488
4489 /* for small bit sizes, fetch extra bytes so unaligned loads can be shifted into place */
4490 if (byte_align) {
4491 if (num_bytes > 2)
4492 num_bytes += byte_align == -1 ? 4 - component_size : byte_align;
4493 else
4494 byte_align = 0;
4495 }
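/* Example: a 2-byte load starting 1 byte into a dword fetches the whole
 * containing dword(s) and shifts the wanted bytes into place afterwards;
 * byte_align == -1 means the misalignment is only known at runtime and
 * the shift amount is taken from the offset register. */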
4496
4497 Temp lower = Temp();
4498 if (num_bytes > 16) {
4499 assert(num_components == 3 || num_components == 4);
4500 op = aco_opcode::buffer_load_dwordx4;
4501 lower = bld.tmp(v4);
4502 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
4503 mubuf->definitions[0] = Definition(lower);
4504 mubuf->operands[0] = Operand(rsrc);
4505 mubuf->operands[1] = vaddr;
4506 mubuf->operands[2] = soffset;
4507 mubuf->offen = (offset.type() == RegType::vgpr);
4508 mubuf->glc = glc;
4509 mubuf->dlc = dlc;
4510 mubuf->barrier = readonly ? barrier_none : barrier_buffer;
4511 mubuf->can_reorder = readonly;
4512 bld.insert(std::move(mubuf));
4513 emit_split_vector(ctx, lower, 2);
4514 num_bytes -= 16;
4515 const_offset = 16;
4516 } else if (num_bytes == 12 && ctx->options->chip_class == GFX6) {
4517 /* GFX6 doesn't support loading vec3, expand to vec4. */
4518 num_bytes = 16;
4519 }
4520
4521 switch (num_bytes) {
4522 case 1:
4523 op = aco_opcode::buffer_load_ubyte;
4524 break;
4525 case 2:
4526 op = aco_opcode::buffer_load_ushort;
4527 break;
4528 case 3:
4529 case 4:
4530 op = aco_opcode::buffer_load_dword;
4531 break;
4532 case 5:
4533 case 6:
4534 case 7:
4535 case 8:
4536 op = aco_opcode::buffer_load_dwordx2;
4537 break;
4538 case 10:
4539 case 12:
4540 assert(ctx->options->chip_class > GFX6);
4541 op = aco_opcode::buffer_load_dwordx3;
4542 break;
4543 case 16:
4544 op = aco_opcode::buffer_load_dwordx4;
4545 break;
4546 default:
4547 unreachable("Load SSBO not implemented for this size.");
4548 }
4549 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
4550 mubuf->operands[0] = Operand(rsrc);
4551 mubuf->operands[1] = vaddr;
4552 mubuf->operands[2] = soffset;
4553 mubuf->offen = (offset.type() == RegType::vgpr);
4554 mubuf->glc = glc;
4555 mubuf->dlc = dlc;
4556 mubuf->barrier = readonly ? barrier_none : barrier_buffer;
4557 mubuf->can_reorder = readonly;
4558 mubuf->offset = const_offset;
4559 aco_ptr<Instruction> instr = std::move(mubuf);
4560
4561 if (component_size < 4) {
4562 Temp vec = num_bytes <= 4 ? bld.tmp(v1) : num_bytes <= 8 ? bld.tmp(v2) : bld.tmp(v3);
4563 instr->definitions[0] = Definition(vec);
4564 bld.insert(std::move(instr));
4565
4566 if (byte_align == -1 || (byte_align && dst.type() == RegType::sgpr)) {
4567 Operand align = byte_align == -1 ? Operand(offset) : Operand((uint32_t)byte_align);
4568 Temp tmp[3] = {vec, vec, vec};
4569
4570 if (vec.size() == 3) {
4571 tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1);
4572 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), Definition(tmp[2]), vec);
4573 } else if (vec.size() == 2) {
4574 tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = tmp[1];
4575 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), vec);
4576 }
4577 for (unsigned i = 0; i < dst.size(); i++)
4578 tmp[i] = bld.vop3(aco_opcode::v_alignbyte_b32, bld.def(v1), tmp[i + 1], tmp[i], align);
4579
4580 vec = tmp[0];
4581 if (dst.size() == 2)
4582 vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), tmp[0], tmp[1]);
4583
4584 byte_align = 0;
4585 }
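/* v_alignbyte_b32 concatenates tmp[i+1]:tmp[i] into 64 bits and shifts
 * right by `align` bytes, extracting the dword that starts at the
 * unaligned byte offset. */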
4586
4587 if (dst.type() == RegType::vgpr && num_components == 1) {
4588 bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), vec, Operand(byte_align / component_size));
4589 } else {
4590 trim_subdword_vector(ctx, vec, dst, 4 * vec.size() / component_size, ((1 << num_components) - 1) << (byte_align / component_size));
4591 }
4592
4593 return;
4594
4595 } else if (dst.size() > 4) {
4596 assert(lower != Temp());
4597 Temp upper = bld.tmp(RegType::vgpr, dst.size() - lower.size());
4598 instr->definitions[0] = Definition(upper);
4599 bld.insert(std::move(instr));
4600 if (dst.size() == 8)
4601 emit_split_vector(ctx, upper, 2);
4602 instr.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, dst.size() / 2, 1));
4603 instr->operands[0] = Operand(emit_extract_vector(ctx, lower, 0, v2));
4604 instr->operands[1] = Operand(emit_extract_vector(ctx, lower, 1, v2));
4605 instr->operands[2] = Operand(emit_extract_vector(ctx, upper, 0, v2));
4606 if (dst.size() == 8)
4607 instr->operands[3] = Operand(emit_extract_vector(ctx, upper, 1, v2));
4608 } else if (dst.size() == 3 && ctx->options->chip_class == GFX6) {
4609 Temp vec = bld.tmp(v4);
4610 instr->definitions[0] = Definition(vec);
4611 bld.insert(std::move(instr));
4612 emit_split_vector(ctx, vec, 4);
4613
4614 instr.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, 3, 1));
4615 instr->operands[0] = Operand(emit_extract_vector(ctx, vec, 0, v1));
4616 instr->operands[1] = Operand(emit_extract_vector(ctx, vec, 1, v1));
4617 instr->operands[2] = Operand(emit_extract_vector(ctx, vec, 2, v1));
4618 }
4619
4620 if (dst.type() == RegType::sgpr) {
4621 Temp vec = bld.tmp(RegType::vgpr, dst.size());
4622 instr->definitions[0] = Definition(vec);
4623 bld.insert(std::move(instr));
4624 expand_vector(ctx, vec, dst, num_components, (1 << num_components) - 1);
4625 } else {
4626 instr->definitions[0] = Definition(dst);
4627 bld.insert(std::move(instr));
4628 emit_split_vector(ctx, dst, num_components);
4629 }
4630 } else {
4631 /* for small bit sizes, fetch extra bytes so unaligned loads can be shifted into place */
4632 if (byte_align)
4633 num_bytes += byte_align == -1 ? 4 - component_size : byte_align;
4634
4635 switch (num_bytes) {
4636 case 1:
4637 case 2:
4638 case 3:
4639 case 4:
4640 op = aco_opcode::s_buffer_load_dword;
4641 break;
4642 case 5:
4643 case 6:
4644 case 7:
4645 case 8:
4646 op = aco_opcode::s_buffer_load_dwordx2;
4647 break;
4648 case 10:
4649 case 12:
4650 case 16:
4651 op = aco_opcode::s_buffer_load_dwordx4;
4652 break;
4653 case 24:
4654 case 32:
4655 op = aco_opcode::s_buffer_load_dwordx8;
4656 break;
4657 default:
4658 unreachable("Load SSBO not implemented for this size.");
4659 }
4660 offset = bld.as_uniform(offset);
4661 aco_ptr<SMEM_instruction> load{create_instruction<SMEM_instruction>(op, Format::SMEM, 2, 1)};
4662 load->operands[0] = Operand(rsrc);
4663 load->operands[1] = Operand(offset);
4664 assert(load->operands[1].getTemp().type() == RegType::sgpr);
4665 load->definitions[0] = Definition(dst);
4666 load->glc = glc;
4667 load->dlc = dlc;
4668 load->barrier = readonly ? barrier_none : barrier_buffer;
4669 load->can_reorder = false; // FIXME: currently, it doesn't seem beneficial due to how our scheduler works
4670 assert(ctx->options->chip_class >= GFX8 || !glc);
4671
4672 /* adjust misaligned small bit size loads */
4673 if (byte_align) {
4674 Temp vec = num_bytes <= 4 ? bld.tmp(s1) : num_bytes <= 8 ? bld.tmp(s2) : bld.tmp(s4);
4675 load->definitions[0] = Definition(vec);
4676 bld.insert(std::move(load));
4677 Operand byte_offset = byte_align > 0 ? Operand(uint32_t(byte_align)) : Operand(offset);
4678 byte_align_scalar(ctx, vec, byte_offset, dst);
4679
4680 /* trim vector */
4681 } else if (dst.size() == 3) {
4682 Temp vec = bld.tmp(s4);
4683 load->definitions[0] = Definition(vec);
4684 bld.insert(std::move(load));
4685 emit_split_vector(ctx, vec, 4);
4686
4687 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
4688 emit_extract_vector(ctx, vec, 0, s1),
4689 emit_extract_vector(ctx, vec, 1, s1),
4690 emit_extract_vector(ctx, vec, 2, s1));
4691 } else if (dst.size() == 6) {
4692 Temp vec = bld.tmp(s8);
4693 load->definitions[0] = Definition(vec);
4694 bld.insert(std::move(load));
4695 emit_split_vector(ctx, vec, 4);
4696
4697 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
4698 emit_extract_vector(ctx, vec, 0, s2),
4699 emit_extract_vector(ctx, vec, 1, s2),
4700 emit_extract_vector(ctx, vec, 2, s2));
4701 } else {
4702 bld.insert(std::move(load));
4703 }
4704 emit_split_vector(ctx, dst, num_components);
4705 }
4706 }
4707
4708 void visit_load_ubo(isel_context *ctx, nir_intrinsic_instr *instr)
4709 {
4710 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
4711 Temp rsrc = get_ssa_temp(ctx, instr->src[0].ssa);
4712
4713 Builder bld(ctx->program, ctx->block);
4714
4715 nir_intrinsic_instr* idx_instr = nir_instr_as_intrinsic(instr->src[0].ssa->parent_instr);
4716 unsigned desc_set = nir_intrinsic_desc_set(idx_instr);
4717 unsigned binding = nir_intrinsic_binding(idx_instr);
4718 radv_descriptor_set_layout *layout = ctx->options->layout->set[desc_set].layout;
4719
4720 if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
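/* For inline uniform blocks, rsrc is a 32-bit pointer to the inline data
 * itself, so the remaining three descriptor dwords (base address high
 * bits, an effectively unbounded size and the format bits) are
 * synthesized here to form a complete buffer descriptor. */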
4721 uint32_t desc_type = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
4722 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
4723 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
4724 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
4725 if (ctx->options->chip_class >= GFX10) {
4726 desc_type |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
4727 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
4728 S_008F0C_RESOURCE_LEVEL(1);
4729 } else {
4730 desc_type |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
4731 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
4732 }
4733 Temp upper_dwords = bld.pseudo(aco_opcode::p_create_vector, bld.def(s3),
4734 Operand(S_008F04_BASE_ADDRESS_HI(ctx->options->address32_hi)),
4735 Operand(0xFFFFFFFFu),
4736 Operand(desc_type));
4737 rsrc = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4),
4738 rsrc, upper_dwords);
4739 } else {
4740 rsrc = convert_pointer_to_64_bit(ctx, rsrc);
4741 rsrc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), rsrc, Operand(0u));
4742 }
4743 unsigned size = instr->dest.ssa.bit_size / 8;
4744 int byte_align = 0;
4745 if (size < 4) {
4746 unsigned align_mul = nir_intrinsic_align_mul(instr);
4747 unsigned align_offset = nir_intrinsic_align_offset(instr);
4748 byte_align = align_mul % 4 == 0 ? align_offset : -1;
4749 }
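/* For sub-dword loads, byte_align is the known offset within a dword when
 * the alignment allows computing it, or -1 when it is only known at
 * runtime. */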
4750 load_buffer(ctx, instr->num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa), byte_align);
4751 }
4752
4753 void visit_load_push_constant(isel_context *ctx, nir_intrinsic_instr *instr)
4754 {
4755 Builder bld(ctx->program, ctx->block);
4756 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
4757 unsigned offset = nir_intrinsic_base(instr);
4758 unsigned count = instr->dest.ssa.num_components;
4759 nir_const_value *index_cv = nir_src_as_const_value(instr->src[0]);
4760
4761 if (index_cv && instr->dest.ssa.bit_size == 32) {
4762 unsigned start = (offset + index_cv->u32) / 4u;
4763 start -= ctx->args->ac.base_inline_push_consts;
4764 if (start + count <= ctx->args->ac.num_inline_push_consts) {
4765 std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
4766 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
4767 for (unsigned i = 0; i < count; ++i) {
4768 elems[i] = get_arg(ctx, ctx->args->ac.inline_push_consts[start + i]);
4769 vec->operands[i] = Operand{elems[i]};
4770 }
4771 vec->definitions[0] = Definition(dst);
4772 ctx->block->instructions.emplace_back(std::move(vec));
4773 ctx->allocated_vec.emplace(dst.id(), elems);
4774 return;
4775 }
4776 }
4777
4778 Temp index = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
4779 if (offset != 0) // TODO check if index != 0 as well
4780 index = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), Operand(offset), index);
4781 Temp ptr = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->ac.push_constants));
4782 Temp vec = dst;
4783 bool trim = false;
4784 bool aligned = true;
4785
4786 if (instr->dest.ssa.bit_size == 8) {
4787 aligned = index_cv && (offset + index_cv->u32) % 4 == 0;
4788 bool fits_in_dword = count == 1 || (index_cv && ((offset + index_cv->u32) % 4 + count) <= 4);
4789 if (!aligned)
4790 vec = fits_in_dword ? bld.tmp(s1) : bld.tmp(s2);
4791 } else if (instr->dest.ssa.bit_size == 16) {
4792 aligned = index_cv && (offset + index_cv->u32) % 4 == 0;
4793 if (!aligned)
4794 vec = count == 4 ? bld.tmp(s4) : count > 1 ? bld.tmp(s2) : bld.tmp(s1);
4795 }
4796
4797 aco_opcode op;
4798
4799 switch (vec.size()) {
4800 case 1:
4801 op = aco_opcode::s_load_dword;
4802 break;
4803 case 2:
4804 op = aco_opcode::s_load_dwordx2;
4805 break;
4806 case 3:
4807 vec = bld.tmp(s4);
4808 trim = true;
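/* fallthrough */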
4809 case 4:
4810 op = aco_opcode::s_load_dwordx4;
4811 break;
4812 case 6:
4813 vec = bld.tmp(s8);
4814 trim = true;
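/* fallthrough */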
4815 case 8:
4816 op = aco_opcode::s_load_dwordx8;
4817 break;
4818 default:
4819 unreachable("unimplemented or forbidden load_push_constant.");
4820 }
4821
4822 bld.smem(op, Definition(vec), ptr, index);
4823
4824 if (!aligned) {
4825 Operand byte_offset = index_cv ? Operand((offset + index_cv->u32) % 4) : Operand(index);
4826 byte_align_scalar(ctx, vec, byte_offset, dst);
4827 return;
4828 }
4829
4830 if (trim) {
4831 emit_split_vector(ctx, vec, 4);
4832 RegClass rc = dst.size() == 3 ? s1 : s2;
4833 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
4834 emit_extract_vector(ctx, vec, 0, rc),
4835 emit_extract_vector(ctx, vec, 1, rc),
4836 emit_extract_vector(ctx, vec, 2, rc));
4837
4838 }
4839 emit_split_vector(ctx, dst, instr->dest.ssa.num_components);
4840 }
4841
4842 void visit_load_constant(isel_context *ctx, nir_intrinsic_instr *instr)
4843 {
4844 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
4845
4846 Builder bld(ctx->program, ctx->block);
4847
4848 uint32_t desc_type = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
4849 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
4850 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
4851 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
4852 if (ctx->options->chip_class >= GFX10) {
4853 desc_type |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
4854 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
4855 S_008F0C_RESOURCE_LEVEL(1);
4856 } else {
4857 desc_type |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
4858 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
4859 }
4860
4861 unsigned base = nir_intrinsic_base(instr);
4862 unsigned range = nir_intrinsic_range(instr);
4863
4864 Temp offset = get_ssa_temp(ctx, instr->src[0].ssa);
4865 if (base && offset.type() == RegType::sgpr)
4866 offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset, Operand(base));
4867 else if (base && offset.type() == RegType::vgpr)
4868 offset = bld.vadd32(bld.def(v1), Operand(base), offset);
4869
4870 Temp rsrc = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4),
4871 bld.sop1(aco_opcode::p_constaddr, bld.def(s2), bld.def(s1, scc), Operand(ctx->constant_data_offset)),
4872 Operand(MIN2(base + range, ctx->shader->constant_data_size)),
4873 Operand(desc_type));
4874 unsigned size = instr->dest.ssa.bit_size / 8;
4875 // TODO: get alignment information for subdword constants
4876 unsigned byte_align = size < 4 ? -1 : 0;
4877 load_buffer(ctx, instr->num_components, size, dst, rsrc, offset, byte_align);
4878 }
4879
4880 void visit_discard_if(isel_context *ctx, nir_intrinsic_instr *instr)
4881 {
4882 if (ctx->cf_info.loop_nest_depth || ctx->cf_info.parent_if.is_divergent)
4883 ctx->cf_info.exec_potentially_empty_discard = true;
4884
4885 ctx->program->needs_exact = true;
4886
4887 // TODO: optimize uniform conditions
4888 Builder bld(ctx->program, ctx->block);
4889 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
4890 assert(src.regClass() == bld.lm);
4891 src = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
4892 bld.pseudo(aco_opcode::p_discard_if, src);
4893 ctx->block->kind |= block_kind_uses_discard_if;
4894 return;
4895 }
4896
4897 void visit_discard(isel_context* ctx, nir_intrinsic_instr *instr)
4898 {
4899 Builder bld(ctx->program, ctx->block);
4900
4901 if (ctx->cf_info.loop_nest_depth || ctx->cf_info.parent_if.is_divergent)
4902 ctx->cf_info.exec_potentially_empty_discard = true;
4903
4904 bool divergent = ctx->cf_info.parent_if.is_divergent ||
4905 ctx->cf_info.parent_loop.has_divergent_continue;
4906
4907 if (ctx->block->loop_nest_depth &&
4908 (nir_instr_is_last(&instr->instr) || divergent)) {
4909 /* we handle discards the same way as jump instructions */
4910 append_logical_end(ctx->block);
4911
4912 /* in loops, discard behaves like break */
4913 Block *linear_target = ctx->cf_info.parent_loop.exit;
4914 ctx->block->kind |= block_kind_discard;
4915
4916 if (!divergent) {
4917 /* uniform discard - loop ends here */
4918 assert(nir_instr_is_last(&instr->instr));
4919 ctx->block->kind |= block_kind_uniform;
4920 ctx->cf_info.has_branch = true;
4921 bld.branch(aco_opcode::p_branch);
4922 add_linear_edge(ctx->block->index, linear_target);
4923 return;
4924 }
4925
4926 /* we add a break right after the discard() instruction */
4927 ctx->block->kind |= block_kind_break;
4928 unsigned idx = ctx->block->index;
4929
4930 ctx->cf_info.parent_loop.has_divergent_branch = true;
4931 ctx->cf_info.nir_to_aco[instr->instr.block->index] = idx;
4932
4933 /* remove critical edges from linear CFG */
4934 bld.branch(aco_opcode::p_branch);
4935 Block* break_block = ctx->program->create_and_insert_block();
4936 break_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
4937 break_block->kind |= block_kind_uniform;
4938 add_linear_edge(idx, break_block);
4939 add_linear_edge(break_block->index, linear_target);
4940 bld.reset(break_block);
4941 bld.branch(aco_opcode::p_branch);
4942
4943 Block* continue_block = ctx->program->create_and_insert_block();
4944 continue_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
4945 add_linear_edge(idx, continue_block);
4946 append_logical_start(continue_block);
4947 ctx->block = continue_block;
4948
4949 return;
4950 }
4951
4952 /* it can currently happen that NIR doesn't remove the unreachable code */
4953 if (!nir_instr_is_last(&instr->instr)) {
4954 ctx->program->needs_exact = true;
4955 /* save exec somewhere temporarily so that it doesn't get
4956 * overwritten by the outer exec masks before the discard */
4957 Temp cond = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), Operand(0xFFFFFFFF), Operand(exec, bld.lm));
4958 bld.pseudo(aco_opcode::p_discard_if, cond);
4959 ctx->block->kind |= block_kind_uses_discard_if;
4960 return;
4961 }
4962
4963 /* This condition is incorrect for uniformly branched discards in a loop
4964 * predicated by a divergent condition, but the above code catches that case
4965 * and the discard would end up turning into a discard_if.
4966 * For example:
4967 * if (divergent) {
4968 * while (...) {
4969 * if (uniform) {
4970 * discard;
4971 * }
4972 * }
4973 * }
4974 */
4975 if (!ctx->cf_info.parent_if.is_divergent) {
4976 /* program just ends here */
4977 ctx->block->kind |= block_kind_uniform;
4978 bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
4979 0 /* enabled mask */, 9 /* dest */,
4980 false /* compressed */, true/* done */, true /* valid mask */);
4981 bld.sopp(aco_opcode::s_endpgm);
4982 // TODO: this may be followed by a dead branch that only exists to sanitize NIR phis
4983 } else {
4984 ctx->block->kind |= block_kind_discard;
4985 /* branch and linear edge is added by visit_if() */
4986 }
4987 }
4988
4989 enum aco_descriptor_type {
4990 ACO_DESC_IMAGE,
4991 ACO_DESC_FMASK,
4992 ACO_DESC_SAMPLER,
4993 ACO_DESC_BUFFER,
4994 ACO_DESC_PLANE_0,
4995 ACO_DESC_PLANE_1,
4996 ACO_DESC_PLANE_2,
4997 };
4998
4999 static bool
5000 should_declare_array(isel_context *ctx, enum glsl_sampler_dim sampler_dim, bool is_array) {
5001 if (sampler_dim == GLSL_SAMPLER_DIM_BUF)
5002 return false;
5003 ac_image_dim dim = ac_get_sampler_dim(ctx->options->chip_class, sampler_dim, is_array);
5004 return dim == ac_image_cube ||
5005 dim == ac_image_1darray ||
5006 dim == ac_image_2darray ||
5007 dim == ac_image_2darraymsaa;
5008 }
5009
5010 Temp get_sampler_desc(isel_context *ctx, nir_deref_instr *deref_instr,
5011 enum aco_descriptor_type desc_type,
5012 const nir_tex_instr *tex_instr, bool image, bool write)
5013 {
5014 /* FIXME: we should lower the deref with some new nir_intrinsic_load_desc
5015 std::unordered_map<uint64_t, Temp>::iterator it = ctx->tex_desc.find((uint64_t) desc_type << 32 | deref_instr->dest.ssa.index);
5016 if (it != ctx->tex_desc.end())
5017 return it->second;
5018 */
5019 Temp index = Temp();
5020 bool index_set = false;
5021 unsigned constant_index = 0;
5022 unsigned descriptor_set;
5023 unsigned base_index;
5024 Builder bld(ctx->program, ctx->block);
5025
5026 if (!deref_instr) {
5027 assert(tex_instr && !image);
5028 descriptor_set = 0;
5029 base_index = tex_instr->sampler_index;
5030 } else {
5031 while (deref_instr->deref_type != nir_deref_type_var) {
5032 unsigned array_size = glsl_get_aoa_size(deref_instr->type);
5033 if (!array_size)
5034 array_size = 1;
5035
5036 assert(deref_instr->deref_type == nir_deref_type_array);
5037 nir_const_value *const_value = nir_src_as_const_value(deref_instr->arr.index);
5038 if (const_value) {
5039 constant_index += array_size * const_value->u32;
5040 } else {
5041 Temp indirect = get_ssa_temp(ctx, deref_instr->arr.index.ssa);
5042 if (indirect.type() == RegType::vgpr)
5043 indirect = bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), indirect);
5044
5045 if (array_size != 1)
5046 indirect = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(array_size), indirect);
5047
5048 if (!index_set) {
5049 index = indirect;
5050 index_set = true;
5051 } else {
5052 index = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), index, indirect);
5053 }
5054 }
5055
5056 deref_instr = nir_src_as_deref(deref_instr->parent);
5057 }
5058 descriptor_set = deref_instr->var->data.descriptor_set;
5059 base_index = deref_instr->var->data.binding;
5060 }
5061
5062 Temp list = load_desc_ptr(ctx, descriptor_set);
5063 list = convert_pointer_to_64_bit(ctx, list);
5064
5065 struct radv_descriptor_set_layout *layout = ctx->options->layout->set[descriptor_set].layout;
5066 struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
5067 unsigned offset = binding->offset;
5068 unsigned stride = binding->size;
5069 aco_opcode opcode;
5070 RegClass type;
5071
5072 assert(base_index < layout->binding_count);
5073
5074 switch (desc_type) {
5075 case ACO_DESC_IMAGE:
5076 type = s8;
5077 opcode = aco_opcode::s_load_dwordx8;
5078 break;
5079 case ACO_DESC_FMASK:
5080 type = s8;
5081 opcode = aco_opcode::s_load_dwordx8;
5082 offset += 32;
5083 break;
5084 case ACO_DESC_SAMPLER:
5085 type = s4;
5086 opcode = aco_opcode::s_load_dwordx4;
5087 if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
5088 offset += radv_combined_image_descriptor_sampler_offset(binding);
5089 break;
5090 case ACO_DESC_BUFFER:
5091 type = s4;
5092 opcode = aco_opcode::s_load_dwordx4;
5093 break;
5094 case ACO_DESC_PLANE_0:
5095 case ACO_DESC_PLANE_1:
5096 type = s8;
5097 opcode = aco_opcode::s_load_dwordx8;
5098 offset += 32 * (desc_type - ACO_DESC_PLANE_0);
5099 break;
5100 case ACO_DESC_PLANE_2:
5101 type = s4;
5102 opcode = aco_opcode::s_load_dwordx4;
5103 offset += 64;
5104 break;
5105 default:
5106 unreachable("invalid desc_type\n");
5107 }
5108
5109 offset += constant_index * stride;
5110
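/* If the sampler is immutable (and either the index is constant or all
 * array elements are identical), the four descriptor dwords are known at
 * compile time and can be materialized as constants instead of loaded
 * from the descriptor set. */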
5111 if (desc_type == ACO_DESC_SAMPLER && binding->immutable_samplers_offset &&
5112 (!index_set || binding->immutable_samplers_equal)) {
5113 if (binding->immutable_samplers_equal)
5114 constant_index = 0;
5115
5116 const uint32_t *samplers = radv_immutable_samplers(layout, binding);
5117 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4),
5118 Operand(samplers[constant_index * 4 + 0]),
5119 Operand(samplers[constant_index * 4 + 1]),
5120 Operand(samplers[constant_index * 4 + 2]),
5121 Operand(samplers[constant_index * 4 + 3]));
5122 }
5123
5124 Operand off;
5125 if (!index_set) {
5126 off = bld.copy(bld.def(s1), Operand(offset));
5127 } else {
5128 off = Operand((Temp)bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), Operand(offset),
5129 bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(stride), index)));
5130 }
5131
5132 Temp res = bld.smem(opcode, bld.def(type), list, off);
5133
5134 if (desc_type == ACO_DESC_PLANE_2) {
5135 Temp components[8];
5136 for (unsigned i = 0; i < 8; i++)
5137 components[i] = bld.tmp(s1);
5138 bld.pseudo(aco_opcode::p_split_vector,
5139 Definition(components[0]),
5140 Definition(components[1]),
5141 Definition(components[2]),
5142 Definition(components[3]),
5143 res);
5144
5145 Temp desc2 = get_sampler_desc(ctx, deref_instr, ACO_DESC_PLANE_1, tex_instr, image, write);
5146 bld.pseudo(aco_opcode::p_split_vector,
5147 bld.def(s1), bld.def(s1), bld.def(s1), bld.def(s1),
5148 Definition(components[4]),
5149 Definition(components[5]),
5150 Definition(components[6]),
5151 Definition(components[7]),
5152 desc2);
5153
5154 res = bld.pseudo(aco_opcode::p_create_vector, bld.def(s8),
5155 components[0], components[1], components[2], components[3],
5156 components[4], components[5], components[6], components[7]);
5157 }
5158
5159 return res;
5160 }
5161
5162 static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
5163 {
5164 switch (dim) {
5165 case GLSL_SAMPLER_DIM_BUF:
5166 return 1;
5167 case GLSL_SAMPLER_DIM_1D:
5168 return array ? 2 : 1;
5169 case GLSL_SAMPLER_DIM_2D:
5170 return array ? 3 : 2;
5171 case GLSL_SAMPLER_DIM_MS:
5172 return array ? 4 : 3;
5173 case GLSL_SAMPLER_DIM_3D:
5174 case GLSL_SAMPLER_DIM_CUBE:
5175 return 3;
5176 case GLSL_SAMPLER_DIM_RECT:
5177 case GLSL_SAMPLER_DIM_SUBPASS:
5178 return 2;
5179 case GLSL_SAMPLER_DIM_SUBPASS_MS:
5180 return 3;
5181 default:
5182 break;
5183 }
5184 return 0;
5185 }
5186
5187
5188 /* Adjust the sample index according to FMASK.
5189 *
5190 * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
5191 * which is the identity mapping. Each nibble says which physical sample
5192 * should be fetched to get that sample.
5193 *
5194 * For example, 0x11111100 means there are only 2 samples stored and
5195 * the second sample covers 3/4 of the pixel. When reading samples 0
5196 * and 1, return physical sample 0 (determined by the first two 0s
5197 * in FMASK), otherwise return physical sample 1.
5198 *
5199 * The sample index should be adjusted as follows:
5200 * sample_index = (fmask >> (sample_index * 4)) & 0xF;
5201 */
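/* Worked example: with fmask = 0x11111100 and sample_index = 3,
 * (0x11111100 >> 12) & 0xF = 1, so physical sample 1 is fetched. */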
5202 static Temp adjust_sample_index_using_fmask(isel_context *ctx, bool da, std::vector<Temp>& coords, Operand sample_index, Temp fmask_desc_ptr)
5203 {
5204 Builder bld(ctx->program, ctx->block);
5205 Temp fmask = bld.tmp(v1);
5206 unsigned dim = ctx->options->chip_class >= GFX10
5207 ? ac_get_sampler_dim(ctx->options->chip_class, GLSL_SAMPLER_DIM_2D, da)
5208 : 0;
5209
5210 Temp coord = da ? bld.pseudo(aco_opcode::p_create_vector, bld.def(v3), coords[0], coords[1], coords[2]) :
5211 bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), coords[0], coords[1]);
5212 aco_ptr<MIMG_instruction> load{create_instruction<MIMG_instruction>(aco_opcode::image_load, Format::MIMG, 3, 1)};
5213 load->operands[0] = Operand(fmask_desc_ptr);
5214 load->operands[1] = Operand(s4); /* no sampler */
5215 load->operands[2] = Operand(coord);
5216 load->definitions[0] = Definition(fmask);
5217 load->glc = false;
5218 load->dlc = false;
5219 load->dmask = 0x1;
5220 load->unrm = true;
5221 load->da = da;
5222 load->dim = dim;
5223 load->can_reorder = true; /* fmask images shouldn't be modified */
5224 ctx->block->instructions.emplace_back(std::move(load));
5225
5226 Operand sample_index4;
5227 if (sample_index.isConstant() && sample_index.constantValue() < 16) {
5228 sample_index4 = Operand(sample_index.constantValue() << 2);
5229 } else if (sample_index.regClass() == s1) {
5230 sample_index4 = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), sample_index, Operand(2u));
5231 } else {
5232 assert(sample_index.regClass() == v1);
5233 sample_index4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), sample_index);
5234 }
5235
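/* Strength-reduce the common constant shifts: a shift of 0 only needs the
 * low nibble masked, a shift of 28 leaves nothing above the nibble, and
 * everything else uses a generic 4-bit bitfield extract. */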
5236 Temp final_sample;
5237 if (sample_index4.isConstant() && sample_index4.constantValue() == 0)
5238 final_sample = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(15u), fmask);
5239 else if (sample_index4.isConstant() && sample_index4.constantValue() == 28)
5240 final_sample = bld.vop2(aco_opcode::v_lshrrev_b32, bld.def(v1), Operand(28u), fmask);
5241 else
5242 final_sample = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), fmask, sample_index4, Operand(4u));
5243
5244 /* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
5245 * resource descriptor is 0 (invalid).
5246 */
5247 Temp compare = bld.tmp(bld.lm);
5248 bld.vopc_e64(aco_opcode::v_cmp_lg_u32, Definition(compare),
5249 Operand(0u), emit_extract_vector(ctx, fmask_desc_ptr, 1, s1)).def(0).setHint(vcc);
5250
5251 Temp sample_index_v = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), sample_index);
5252
5253 /* Replace the MSAA sample index. */
5254 return bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), sample_index_v, final_sample, compare);
5255 }
5256
5257 static Temp get_image_coords(isel_context *ctx, const nir_intrinsic_instr *instr, const struct glsl_type *type)
5258 {
5259
5260 Temp src0 = get_ssa_temp(ctx, instr->src[1].ssa);
5261 enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
5262 bool is_array = glsl_sampler_type_is_array(type);
5263 ASSERTED bool add_frag_pos = (dim == GLSL_SAMPLER_DIM_SUBPASS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
5264 assert(!add_frag_pos && "Input attachments should be lowered.");
5265 bool is_ms = (dim == GLSL_SAMPLER_DIM_MS || dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
5266 bool gfx9_1d = ctx->options->chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D;
5267 int count = image_type_to_components_count(dim, is_array);
5268 std::vector<Temp> coords(count);
5269 Builder bld(ctx->program, ctx->block);
5270
5271 if (is_ms) {
5272 count--;
5273 Temp src2 = get_ssa_temp(ctx, instr->src[2].ssa);
5274 /* get sample index */
5275 if (instr->intrinsic == nir_intrinsic_image_deref_load) {
5276 nir_const_value *sample_cv = nir_src_as_const_value(instr->src[2]);
5277 Operand sample_index = sample_cv ? Operand(sample_cv->u32) : Operand(emit_extract_vector(ctx, src2, 0, v1));
5278 std::vector<Temp> fmask_load_address;
5279 for (unsigned i = 0; i < (is_array ? 3 : 2); i++)
5280 fmask_load_address.emplace_back(emit_extract_vector(ctx, src0, i, v1));
5281
5282 Temp fmask_desc_ptr = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_FMASK, nullptr, false, false);
5283 coords[count] = adjust_sample_index_using_fmask(ctx, is_array, fmask_load_address, sample_index, fmask_desc_ptr);
5284 } else {
5285 coords[count] = emit_extract_vector(ctx, src2, 0, v1);
5286 }
5287 }
5288
5289 if (gfx9_1d) {
5290 coords[0] = emit_extract_vector(ctx, src0, 0, v1);
5291 coords.resize(coords.size() + 1);
5292 coords[1] = bld.copy(bld.def(v1), Operand(0u));
5293 if (is_array)
5294 coords[2] = emit_extract_vector(ctx, src0, 1, v1);
5295 } else {
5296 for (int i = 0; i < count; i++)
5297 coords[i] = emit_extract_vector(ctx, src0, i, v1);
5298 }
5299
5300 if (instr->intrinsic == nir_intrinsic_image_deref_load ||
5301 instr->intrinsic == nir_intrinsic_image_deref_store) {
5302 int lod_index = instr->intrinsic == nir_intrinsic_image_deref_load ? 3 : 4;
5303 bool level_zero = nir_src_is_const(instr->src[lod_index]) && nir_src_as_uint(instr->src[lod_index]) == 0;
5304
5305 if (!level_zero)
5306 coords.emplace_back(get_ssa_temp(ctx, instr->src[lod_index].ssa));
5307 }
5308
5309 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, coords.size(), 1)};
5310 for (unsigned i = 0; i < coords.size(); i++)
5311 vec->operands[i] = Operand(coords[i]);
5312 Temp res = {ctx->program->allocateId(), RegClass(RegType::vgpr, coords.size())};
5313 vec->definitions[0] = Definition(res);
5314 ctx->block->instructions.emplace_back(std::move(vec));
5315 return res;
5316 }
5317
5318
5319 void visit_image_load(isel_context *ctx, nir_intrinsic_instr *instr)
5320 {
5321 Builder bld(ctx->program, ctx->block);
5322 const nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
5323 const struct glsl_type *type = glsl_without_array(var->type);
5324 const enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
5325 bool is_array = glsl_sampler_type_is_array(type);
5326 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5327
5328 if (dim == GLSL_SAMPLER_DIM_BUF) {
5329 unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
5330 unsigned num_channels = util_last_bit(mask);
5331 Temp rsrc = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_BUFFER, nullptr, true, true);
5332 Temp vindex = emit_extract_vector(ctx, get_ssa_temp(ctx, instr->src[1].ssa), 0, v1);
5333
5334 aco_opcode opcode;
5335 switch (num_channels) {
5336 case 1:
5337 opcode = aco_opcode::buffer_load_format_x;
5338 break;
5339 case 2:
5340 opcode = aco_opcode::buffer_load_format_xy;
5341 break;
5342 case 3:
5343 opcode = aco_opcode::buffer_load_format_xyz;
5344 break;
5345 case 4:
5346 opcode = aco_opcode::buffer_load_format_xyzw;
5347 break;
5348 default:
5349 unreachable(">4 channel buffer image load");
5350 }
5351 aco_ptr<MUBUF_instruction> load{create_instruction<MUBUF_instruction>(opcode, Format::MUBUF, 3, 1)};
5352 load->operands[0] = Operand(rsrc);
5353 load->operands[1] = Operand(vindex);
5354 load->operands[2] = Operand((uint32_t) 0);
5355 Temp tmp;
5356 if (num_channels == instr->dest.ssa.num_components && dst.type() == RegType::vgpr)
5357 tmp = dst;
5358 else
5359 tmp = {ctx->program->allocateId(), RegClass(RegType::vgpr, num_channels)};
5360 load->definitions[0] = Definition(tmp);
5361 load->idxen = true;
5362 load->glc = var->data.access & (ACCESS_VOLATILE | ACCESS_COHERENT);
5363 load->dlc = load->glc && ctx->options->chip_class >= GFX10;
5364 load->barrier = barrier_image;
5365 ctx->block->instructions.emplace_back(std::move(load));
5366
5367 expand_vector(ctx, tmp, dst, instr->dest.ssa.num_components, (1 << num_channels) - 1);
5368 return;
5369 }
5370
5371 Temp coords = get_image_coords(ctx, instr, type);
5372 Temp resource = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_IMAGE, nullptr, true, true);
5373
5374 unsigned dmask = nir_ssa_def_components_read(&instr->dest.ssa);
5375 unsigned num_components = util_bitcount(dmask);
5376 Temp tmp;
5377 if (num_components == instr->dest.ssa.num_components && dst.type() == RegType::vgpr)
5378 tmp = dst;
5379 else
5380 tmp = {ctx->program->allocateId(), RegClass(RegType::vgpr, num_components)};
5381
5382 bool level_zero = nir_src_is_const(instr->src[3]) && nir_src_as_uint(instr->src[3]) == 0;
5383 aco_opcode opcode = level_zero ? aco_opcode::image_load : aco_opcode::image_load_mip;
5384
5385 aco_ptr<MIMG_instruction> load{create_instruction<MIMG_instruction>(opcode, Format::MIMG, 3, 1)};
5386 load->operands[0] = Operand(resource);
5387 load->operands[1] = Operand(s4); /* no sampler */
5388 load->operands[2] = Operand(coords);
5389 load->definitions[0] = Definition(tmp);
5390 load->glc = var->data.access & (ACCESS_VOLATILE | ACCESS_COHERENT) ? 1 : 0;
5391 load->dlc = load->glc && ctx->options->chip_class >= GFX10;
5392 load->dim = ac_get_image_dim(ctx->options->chip_class, dim, is_array);
5393 load->dmask = dmask;
5394 load->unrm = true;
5395 load->da = should_declare_array(ctx, dim, glsl_sampler_type_is_array(type));
5396 load->barrier = barrier_image;
5397 ctx->block->instructions.emplace_back(std::move(load));
5398
5399 expand_vector(ctx, tmp, dst, instr->dest.ssa.num_components, dmask);
5400 return;
5401 }
5402
5403 void visit_image_store(isel_context *ctx, nir_intrinsic_instr *instr)
5404 {
5405 const nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
5406 const struct glsl_type *type = glsl_without_array(var->type);
5407 const enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
5408 bool is_array = glsl_sampler_type_is_array(type);
5409 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[3].ssa));
5410
5411 bool glc = ctx->options->chip_class == GFX6 || var->data.access & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE) ? 1 : 0;
5412
5413 if (dim == GLSL_SAMPLER_DIM_BUF) {
5414 Temp rsrc = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_BUFFER, nullptr, true, true);
5415 Temp vindex = emit_extract_vector(ctx, get_ssa_temp(ctx, instr->src[1].ssa), 0, v1);
5416 aco_opcode opcode;
5417 switch (data.size()) {
5418 case 1:
5419 opcode = aco_opcode::buffer_store_format_x;
5420 break;
5421 case 2:
5422 opcode = aco_opcode::buffer_store_format_xy;
5423 break;
5424 case 3:
5425 opcode = aco_opcode::buffer_store_format_xyz;
5426 break;
5427 case 4:
5428 opcode = aco_opcode::buffer_store_format_xyzw;
5429 break;
5430 default:
5431 unreachable(">4 channel buffer image store");
5432 }
5433 aco_ptr<MUBUF_instruction> store{create_instruction<MUBUF_instruction>(opcode, Format::MUBUF, 4, 0)};
5434 store->operands[0] = Operand(rsrc);
5435 store->operands[1] = Operand(vindex);
5436 store->operands[2] = Operand((uint32_t) 0);
5437 store->operands[3] = Operand(data);
5438 store->idxen = true;
5439 store->glc = glc;
5440 store->dlc = false;
5441 store->disable_wqm = true;
5442 store->barrier = barrier_image;
5443 ctx->program->needs_exact = true;
5444 ctx->block->instructions.emplace_back(std::move(store));
5445 return;
5446 }
5447
5448 assert(data.type() == RegType::vgpr);
5449 Temp coords = get_image_coords(ctx, instr, type);
5450 Temp resource = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_IMAGE, nullptr, true, true);
5451
5452 bool level_zero = nir_src_is_const(instr->src[4]) && nir_src_as_uint(instr->src[4]) == 0;
5453 aco_opcode opcode = level_zero ? aco_opcode::image_store : aco_opcode::image_store_mip;
5454
5455 aco_ptr<MIMG_instruction> store{create_instruction<MIMG_instruction>(opcode, Format::MIMG, 3, 0)};
5456 store->operands[0] = Operand(resource);
5457 store->operands[1] = Operand(data);
5458 store->operands[2] = Operand(coords);
5459 store->glc = glc;
5460 store->dlc = false;
5461 store->dim = ac_get_image_dim(ctx->options->chip_class, dim, is_array);
5462 store->dmask = (1 << data.size()) - 1;
5463 store->unrm = true;
5464 store->da = should_declare_array(ctx, dim, glsl_sampler_type_is_array(type));
5465 store->disable_wqm = true;
5466 store->barrier = barrier_image;
5467 ctx->program->needs_exact = true;
5468 ctx->block->instructions.emplace_back(std::move(store));
5469 return;
5470 }
5471
5472 void visit_image_atomic(isel_context *ctx, nir_intrinsic_instr *instr)
5473 {
5474 /* return the previous value if dest is ever used */
5475 bool return_previous = false;
5476 nir_foreach_use_safe(use_src, &instr->dest.ssa) {
5477 return_previous = true;
5478 break;
5479 }
5480 nir_foreach_if_use_safe(use_src, &instr->dest.ssa) {
5481 return_previous = true;
5482 break;
5483 }
5484
5485 const nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
5486 const struct glsl_type *type = glsl_without_array(var->type);
5487 const enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
5488 bool is_array = glsl_sampler_type_is_array(type);
5489 Builder bld(ctx->program, ctx->block);
5490
5491 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[3].ssa));
5492 assert(data.size() == 1 && "64bit image atomics not yet implemented.");
5493
5494 if (instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap)
5495 data = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), get_ssa_temp(ctx, instr->src[4].ssa), data);
5496
5497 aco_opcode buf_op, image_op;
5498 switch (instr->intrinsic) {
5499 case nir_intrinsic_image_deref_atomic_add:
5500 buf_op = aco_opcode::buffer_atomic_add;
5501 image_op = aco_opcode::image_atomic_add;
5502 break;
5503 case nir_intrinsic_image_deref_atomic_umin:
5504 buf_op = aco_opcode::buffer_atomic_umin;
5505 image_op = aco_opcode::image_atomic_umin;
5506 break;
5507 case nir_intrinsic_image_deref_atomic_imin:
5508 buf_op = aco_opcode::buffer_atomic_smin;
5509 image_op = aco_opcode::image_atomic_smin;
5510 break;
5511 case nir_intrinsic_image_deref_atomic_umax:
5512 buf_op = aco_opcode::buffer_atomic_umax;
5513 image_op = aco_opcode::image_atomic_umax;
5514 break;
5515 case nir_intrinsic_image_deref_atomic_imax:
5516 buf_op = aco_opcode::buffer_atomic_smax;
5517 image_op = aco_opcode::image_atomic_smax;
5518 break;
5519 case nir_intrinsic_image_deref_atomic_and:
5520 buf_op = aco_opcode::buffer_atomic_and;
5521 image_op = aco_opcode::image_atomic_and;
5522 break;
5523 case nir_intrinsic_image_deref_atomic_or:
5524 buf_op = aco_opcode::buffer_atomic_or;
5525 image_op = aco_opcode::image_atomic_or;
5526 break;
5527 case nir_intrinsic_image_deref_atomic_xor:
5528 buf_op = aco_opcode::buffer_atomic_xor;
5529 image_op = aco_opcode::image_atomic_xor;
5530 break;
5531 case nir_intrinsic_image_deref_atomic_exchange:
5532 buf_op = aco_opcode::buffer_atomic_swap;
5533 image_op = aco_opcode::image_atomic_swap;
5534 break;
5535 case nir_intrinsic_image_deref_atomic_comp_swap:
5536 buf_op = aco_opcode::buffer_atomic_cmpswap;
5537 image_op = aco_opcode::image_atomic_cmpswap;
5538 break;
5539 default:
5540 unreachable("visit_image_atomic should only be called with nir_intrinsic_image_deref_atomic_* instructions.");
5541 }
5542
5543 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5544
5545 if (dim == GLSL_SAMPLER_DIM_BUF) {
5546 Temp vindex = emit_extract_vector(ctx, get_ssa_temp(ctx, instr->src[1].ssa), 0, v1);
5547 Temp resource = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_BUFFER, nullptr, true, true);
5548 //assert(ctx->options->chip_class < GFX9 && "GFX9 stride size workaround not yet implemented.");
5549 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(buf_op, Format::MUBUF, 4, return_previous ? 1 : 0)};
5550 mubuf->operands[0] = Operand(resource);
5551 mubuf->operands[1] = Operand(vindex);
5552 mubuf->operands[2] = Operand((uint32_t)0);
5553 mubuf->operands[3] = Operand(data);
5554 if (return_previous)
5555 mubuf->definitions[0] = Definition(dst);
5556 mubuf->offset = 0;
5557 mubuf->idxen = true;
5558 mubuf->glc = return_previous;
5559 mubuf->dlc = false; /* Not needed for atomics */
5560 mubuf->disable_wqm = true;
5561 mubuf->barrier = barrier_image;
5562 ctx->program->needs_exact = true;
5563 ctx->block->instructions.emplace_back(std::move(mubuf));
5564 return;
5565 }
5566
5567 Temp coords = get_image_coords(ctx, instr, type);
5568 Temp resource = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_IMAGE, nullptr, true, true);
5569 aco_ptr<MIMG_instruction> mimg{create_instruction<MIMG_instruction>(image_op, Format::MIMG, 3, return_previous ? 1 : 0)};
5570 mimg->operands[0] = Operand(resource);
5571 mimg->operands[1] = Operand(data);
5572 mimg->operands[2] = Operand(coords);
5573 if (return_previous)
5574 mimg->definitions[0] = Definition(dst);
5575 mimg->glc = return_previous;
5576 mimg->dlc = false; /* Not needed for atomics */
5577 mimg->dim = ac_get_image_dim(ctx->options->chip_class, dim, is_array);
5578 mimg->dmask = (1 << data.size()) - 1;
5579 mimg->unrm = true;
5580 mimg->da = should_declare_array(ctx, dim, glsl_sampler_type_is_array(type));
5581 mimg->disable_wqm = true;
5582 mimg->barrier = barrier_image;
5583 ctx->program->needs_exact = true;
5584 ctx->block->instructions.emplace_back(std::move(mimg));
5585 return;
5586 }
5587
5588 void get_buffer_size(isel_context *ctx, Temp desc, Temp dst, bool in_elements)
5589 {
5590 if (in_elements && ctx->options->chip_class == GFX8) {
5591 /* we only have to divide by 1, 2, 4, 8, 12 or 16 */
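      /* Sketch of the math: a power-of-two stride divides with a right-shift
       * by s_ff1(stride) == log2(stride). For stride == 12 we first compute
       * size / 3 via a multiply-high with 0xaaaaaaab == (2^33 + 1) / 3, i.e.
       * mul_hi(size, 0xaaaaaaab) >> 1 == size / 3 exactly, and the final
       * shift by s_ff1(12) == 2 divides by the remaining factor of 4. */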
5592 Builder bld(ctx->program, ctx->block);
5593
5594 Temp size = emit_extract_vector(ctx, desc, 2, s1);
5595
5596 Temp size_div3 = bld.vop3(aco_opcode::v_mul_hi_u32, bld.def(v1), bld.copy(bld.def(v1), Operand(0xaaaaaaabu)), size);
5597 size_div3 = bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.as_uniform(size_div3), Operand(1u));
5598
5599 Temp stride = emit_extract_vector(ctx, desc, 1, s1);
5600 stride = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), stride, Operand((5u << 16) | 16u));
5601
5602 Temp is12 = bld.sopc(aco_opcode::s_cmp_eq_i32, bld.def(s1, scc), stride, Operand(12u));
5603 size = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), size_div3, size, bld.scc(is12));
5604
5605 Temp shr_dst = dst.type() == RegType::vgpr ? bld.tmp(s1) : dst;
5606 bld.sop2(aco_opcode::s_lshr_b32, Definition(shr_dst), bld.def(s1, scc),
5607 size, bld.sop1(aco_opcode::s_ff1_i32_b32, bld.def(s1), stride));
5608 if (dst.type() == RegType::vgpr)
5609 bld.copy(Definition(dst), shr_dst);
5610
5611 /* TODO: we can probably calculate this faster with v_skip when stride != 12 */
5612 } else {
5613 emit_extract_vector(ctx, desc, 2, dst);
5614 }
5615 }
5616
5617 void visit_image_size(isel_context *ctx, nir_intrinsic_instr *instr)
5618 {
5619 const nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
5620 const struct glsl_type *type = glsl_without_array(var->type);
5621 const enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
5622 bool is_array = glsl_sampler_type_is_array(type);
5623 Builder bld(ctx->program, ctx->block);
5624
5625 if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_BUF) {
5626 Temp desc = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_BUFFER, NULL, true, false);
5627 return get_buffer_size(ctx, desc, get_ssa_temp(ctx, &instr->dest.ssa), true);
5628 }
5629
5630 /* LOD */
5631 Temp lod = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0u));
5632
5633 /* Resource */
5634 Temp resource = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_IMAGE, NULL, true, false);
5635
5636 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5637
5638 aco_ptr<MIMG_instruction> mimg{create_instruction<MIMG_instruction>(aco_opcode::image_get_resinfo, Format::MIMG, 3, 1)};
5639 mimg->operands[0] = Operand(resource);
5640 mimg->operands[1] = Operand(s4); /* no sampler */
5641 mimg->operands[2] = Operand(lod);
5642 uint8_t& dmask = mimg->dmask;
5643 mimg->dim = ac_get_image_dim(ctx->options->chip_class, dim, is_array);
5644 mimg->dmask = (1 << instr->dest.ssa.num_components) - 1;
5645 mimg->da = glsl_sampler_type_is_array(type);
5646 mimg->can_reorder = true;
5647 Definition& def = mimg->definitions[0];
5648 ctx->block->instructions.emplace_back(std::move(mimg));
5649
5650 if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_CUBE &&
5651 glsl_sampler_type_is_array(type)) {
5652
5653 assert(instr->dest.ssa.num_components == 3);
5654 Temp tmp = {ctx->program->allocateId(), v3};
5655 def = Definition(tmp);
5656 emit_split_vector(ctx, tmp, 3);
5657
5658 /* divide 3rd value by 6 by multiplying with magic number */
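      /* 0x2AAAAAAB == (2^32 + 2) / 6: a cube array size is a multiple of 6,
       * and for size == 6*k we get 6*k * 0x2AAAAAAB == k * 2^32 + 2*k, so
       * v_mul_hi_i32 yields exactly k with no correction step needed. */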
5659 Temp c = bld.copy(bld.def(s1), Operand((uint32_t) 0x2AAAAAAB));
5660 Temp by_6 = bld.vop3(aco_opcode::v_mul_hi_i32, bld.def(v1), emit_extract_vector(ctx, tmp, 2, v1), c);
5661
5662 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
5663 emit_extract_vector(ctx, tmp, 0, v1),
5664 emit_extract_vector(ctx, tmp, 1, v1),
5665 by_6);
5666
5667 } else if (ctx->options->chip_class == GFX9 &&
5668 glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_1D &&
5669 glsl_sampler_type_is_array(type)) {
5670 assert(instr->dest.ssa.num_components == 2);
5671 def = Definition(dst);
5672 dmask = 0x5;
5673 } else {
5674 def = Definition(dst);
5675 }
5676
5677 emit_split_vector(ctx, dst, instr->dest.ssa.num_components);
5678 }
5679
5680 void visit_load_ssbo(isel_context *ctx, nir_intrinsic_instr *instr)
5681 {
5682 Builder bld(ctx->program, ctx->block);
5683 unsigned num_components = instr->num_components;
5684
5685 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5686 Temp rsrc = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
5687 rsrc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), rsrc, Operand(0u));
5688
5689 bool glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT);
5690 unsigned size = instr->dest.ssa.bit_size / 8;
5691 int byte_align = 0;
5692 if (size < 4) {
5693 unsigned align_mul = nir_intrinsic_align_mul(instr);
5694 unsigned align_offset = nir_intrinsic_align_offset(instr);
5695 byte_align = align_mul % 4 == 0 ? align_offset : -1;
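      /* a byte_align of -1 signals to load_buffer that the sub-dword
       * alignment is only known at runtime (an assumption from how the
       * value is consumed there) */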
5696 }
5697 load_buffer(ctx, num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa), byte_align, glc, false);
5698 }
5699
5700 void visit_store_ssbo(isel_context *ctx, nir_intrinsic_instr *instr)
5701 {
5702 Builder bld(ctx->program, ctx->block);
5703 Temp data = get_ssa_temp(ctx, instr->src[0].ssa);
5704 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
5705 unsigned writemask = nir_intrinsic_write_mask(instr);
5706 Temp offset = get_ssa_temp(ctx, instr->src[2].ssa);
5707
5708 Temp rsrc = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
5709 rsrc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), rsrc, Operand(0u));
5710
5711 bool smem = !ctx->divergent_vals[instr->src[2].ssa->index] &&
5712 ctx->options->chip_class >= GFX8 &&
5713 elem_size_bytes >= 4;
5714 if (smem)
5715 offset = bld.as_uniform(offset);
5716 bool smem_nonfs = smem && ctx->stage != fragment_fs;
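   /* Scalar (SMEM) stores are only used with a wave-uniform offset; fragment
    * shaders additionally route them through p_fs_buffer_store_smem below,
    * presumably because SMEM ignores exec and helper lanes must not write. */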
5717
5718 while (writemask) {
5719 int start, count;
5720 u_bit_scan_consecutive_range(&writemask, &start, &count);
5721 if (count == 3 && (smem || ctx->options->chip_class == GFX6)) {
5722 /* GFX6 doesn't support storing vec3, split it. */
5723 writemask |= 1u << (start + 2);
5724 count = 2;
5725 }
5726 int num_bytes = count * elem_size_bytes;
5727
5728 /* dword or larger stores have to be dword-aligned */
5729 if (elem_size_bytes < 4 && num_bytes > 2) {
5730 // TODO: improve alignment check of sub-dword stores
5731 unsigned count_new = 2 / elem_size_bytes;
5732 writemask |= ((1 << (count - count_new)) - 1) << (start + count_new);
5733 count = count_new;
5734 num_bytes = 2;
5735 }
5736
5737 if (num_bytes > 16) {
5738 assert(elem_size_bytes == 8);
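         /* re-add the doubles that don't fit into this 16-byte store; for the
          * vec3/vec4 cases that reach this path, (((count - 2) << 1) - 1)
          * equals the usual ((1u << (count - 2)) - 1) */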
5739 writemask |= (((count - 2) << 1) - 1) << (start + 2);
5740 count = 2;
5741 num_bytes = 16;
5742 }
5743
5744 Temp write_data;
5745 if (elem_size_bytes < 4) {
5746 if (data.type() == RegType::sgpr) {
5747 data = as_vgpr(ctx, data);
5748 emit_split_vector(ctx, data, 4 * data.size() / elem_size_bytes);
5749 }
5750 RegClass rc = RegClass(RegType::vgpr, elem_size_bytes).as_subdword();
5751 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
5752 for (int i = 0; i < count; i++)
5753 vec->operands[i] = Operand(emit_extract_vector(ctx, data, start + i, rc));
5754 write_data = bld.tmp(RegClass(RegType::vgpr, num_bytes).as_subdword());
5755 vec->definitions[0] = Definition(write_data);
5756 bld.insert(std::move(vec));
5757 } else if (count != instr->num_components) {
5758 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
5759 for (int i = 0; i < count; i++) {
5760 Temp elem = emit_extract_vector(ctx, data, start + i, RegClass(data.type(), elem_size_bytes / 4));
5761 vec->operands[i] = Operand(smem_nonfs ? bld.as_uniform(elem) : elem);
5762 }
5763 write_data = bld.tmp(!smem ? RegType::vgpr : smem_nonfs ? RegType::sgpr : data.type(), count * elem_size_bytes / 4);
5764 vec->definitions[0] = Definition(write_data);
5765 ctx->block->instructions.emplace_back(std::move(vec));
5766 } else if (!smem && data.type() != RegType::vgpr) {
5767 assert(num_bytes % 4 == 0);
5768 write_data = bld.copy(bld.def(RegType::vgpr, num_bytes / 4), data);
5769 } else if (smem_nonfs && data.type() == RegType::vgpr) {
5770 assert(num_bytes % 4 == 0);
5771 write_data = bld.as_uniform(data);
5772 } else {
5773 write_data = data;
5774 }
5775
5776 aco_opcode vmem_op, smem_op = aco_opcode::last_opcode;
5777 switch (num_bytes) {
5778 case 1:
5779 vmem_op = aco_opcode::buffer_store_byte;
5780 break;
5781 case 2:
5782 vmem_op = aco_opcode::buffer_store_short;
5783 break;
5784 case 4:
5785 vmem_op = aco_opcode::buffer_store_dword;
5786 smem_op = aco_opcode::s_buffer_store_dword;
5787 break;
5788 case 8:
5789 vmem_op = aco_opcode::buffer_store_dwordx2;
5790 smem_op = aco_opcode::s_buffer_store_dwordx2;
5791 break;
5792 case 12:
5793 vmem_op = aco_opcode::buffer_store_dwordx3;
5794 assert(!smem && ctx->options->chip_class > GFX6);
5795 break;
5796 case 16:
5797 vmem_op = aco_opcode::buffer_store_dwordx4;
5798 smem_op = aco_opcode::s_buffer_store_dwordx4;
5799 break;
5800 default:
5801 unreachable("Store SSBO not implemented for this size.");
5802 }
5803 if (ctx->stage == fragment_fs)
5804 smem_op = aco_opcode::p_fs_buffer_store_smem;
5805
5806 if (smem) {
5807 aco_ptr<SMEM_instruction> store{create_instruction<SMEM_instruction>(smem_op, Format::SMEM, 3, 0)};
5808 store->operands[0] = Operand(rsrc);
5809 if (start) {
5810 Temp off = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
5811 offset, Operand(start * elem_size_bytes));
5812 store->operands[1] = Operand(off);
5813 } else {
5814 store->operands[1] = Operand(offset);
5815 }
5816 if (smem_op != aco_opcode::p_fs_buffer_store_smem)
5817 store->operands[1].setFixed(m0);
5818 store->operands[2] = Operand(write_data);
5819 store->glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
5820 store->dlc = false;
5821 store->disable_wqm = true;
5822 store->barrier = barrier_buffer;
5823 ctx->block->instructions.emplace_back(std::move(store));
5824 ctx->program->wb_smem_l1_on_end = true;
5825 if (smem_op == aco_opcode::p_fs_buffer_store_smem) {
5826 ctx->block->kind |= block_kind_needs_lowering;
5827 ctx->program->needs_exact = true;
5828 }
5829 } else {
5830 aco_ptr<MUBUF_instruction> store{create_instruction<MUBUF_instruction>(vmem_op, Format::MUBUF, 4, 0)};
5831 store->operands[0] = Operand(rsrc);
5832 store->operands[1] = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
5833 store->operands[2] = offset.type() == RegType::sgpr ? Operand(offset) : Operand((uint32_t) 0);
5834 store->operands[3] = Operand(write_data);
5835 store->offset = start * elem_size_bytes;
5836 store->offen = (offset.type() == RegType::vgpr);
5837 store->glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
5838 store->dlc = false;
5839 store->disable_wqm = true;
5840 store->barrier = barrier_buffer;
5841 ctx->program->needs_exact = true;
5842 ctx->block->instructions.emplace_back(std::move(store));
5843 }
5844 }
5845 }
5846
5847 void visit_atomic_ssbo(isel_context *ctx, nir_intrinsic_instr *instr)
5848 {
5849 /* return the previous value if dest is ever used */
5850 bool return_previous = false;
5851 nir_foreach_use_safe(use_src, &instr->dest.ssa) {
5852 return_previous = true;
5853 break;
5854 }
5855 nir_foreach_if_use_safe(use_src, &instr->dest.ssa) {
5856 return_previous = true;
5857 break;
5858 }
5859
5860 Builder bld(ctx->program, ctx->block);
5861 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[2].ssa));
5862
5863 if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap)
5864 data = bld.pseudo(aco_opcode::p_create_vector, bld.def(RegType::vgpr, data.size() * 2),
5865 get_ssa_temp(ctx, instr->src[3].ssa), data);
5866
5867 Temp offset = get_ssa_temp(ctx, instr->src[1].ssa);
5868 Temp rsrc = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
5869 rsrc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), rsrc, Operand(0u));
5870
5871 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5872
5873 aco_opcode op32, op64;
5874 switch (instr->intrinsic) {
5875 case nir_intrinsic_ssbo_atomic_add:
5876 op32 = aco_opcode::buffer_atomic_add;
5877 op64 = aco_opcode::buffer_atomic_add_x2;
5878 break;
5879 case nir_intrinsic_ssbo_atomic_imin:
5880 op32 = aco_opcode::buffer_atomic_smin;
5881 op64 = aco_opcode::buffer_atomic_smin_x2;
5882 break;
5883 case nir_intrinsic_ssbo_atomic_umin:
5884 op32 = aco_opcode::buffer_atomic_umin;
5885 op64 = aco_opcode::buffer_atomic_umin_x2;
5886 break;
5887 case nir_intrinsic_ssbo_atomic_imax:
5888 op32 = aco_opcode::buffer_atomic_smax;
5889 op64 = aco_opcode::buffer_atomic_smax_x2;
5890 break;
5891 case nir_intrinsic_ssbo_atomic_umax:
5892 op32 = aco_opcode::buffer_atomic_umax;
5893 op64 = aco_opcode::buffer_atomic_umax_x2;
5894 break;
5895 case nir_intrinsic_ssbo_atomic_and:
5896 op32 = aco_opcode::buffer_atomic_and;
5897 op64 = aco_opcode::buffer_atomic_and_x2;
5898 break;
5899 case nir_intrinsic_ssbo_atomic_or:
5900 op32 = aco_opcode::buffer_atomic_or;
5901 op64 = aco_opcode::buffer_atomic_or_x2;
5902 break;
5903 case nir_intrinsic_ssbo_atomic_xor:
5904 op32 = aco_opcode::buffer_atomic_xor;
5905 op64 = aco_opcode::buffer_atomic_xor_x2;
5906 break;
5907 case nir_intrinsic_ssbo_atomic_exchange:
5908 op32 = aco_opcode::buffer_atomic_swap;
5909 op64 = aco_opcode::buffer_atomic_swap_x2;
5910 break;
5911 case nir_intrinsic_ssbo_atomic_comp_swap:
5912 op32 = aco_opcode::buffer_atomic_cmpswap;
5913 op64 = aco_opcode::buffer_atomic_cmpswap_x2;
5914 break;
5915 default:
5916 unreachable("visit_atomic_ssbo should only be called with nir_intrinsic_ssbo_atomic_* instructions.");
5917 }
5918 aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
5919 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, return_previous ? 1 : 0)};
5920 mubuf->operands[0] = Operand(rsrc);
5921 mubuf->operands[1] = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
5922 mubuf->operands[2] = offset.type() == RegType::sgpr ? Operand(offset) : Operand((uint32_t) 0);
5923 mubuf->operands[3] = Operand(data);
5924 if (return_previous)
5925 mubuf->definitions[0] = Definition(dst);
5926 mubuf->offset = 0;
5927 mubuf->offen = (offset.type() == RegType::vgpr);
5928 mubuf->glc = return_previous;
5929 mubuf->dlc = false; /* Not needed for atomics */
5930 mubuf->disable_wqm = true;
5931 mubuf->barrier = barrier_buffer;
5932 ctx->program->needs_exact = true;
5933 ctx->block->instructions.emplace_back(std::move(mubuf));
5934 }
5935
5936 void visit_get_buffer_size(isel_context *ctx, nir_intrinsic_instr *instr)
5937 {
5938 Temp index = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
5939 Builder bld(ctx->program, ctx->block);
5940 Temp desc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), index, Operand(0u));
5941 get_buffer_size(ctx, desc, get_ssa_temp(ctx, &instr->dest.ssa), false);
5942 }
5943
5944 Temp get_gfx6_global_rsrc(Builder& bld, Temp addr)
5945 {
5946 uint32_t rsrc_conf = S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
5947 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
5948
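   /* build a descriptor covering the entire address range (num_records ==
    * -1); a divergent address goes into the vaddr instead and the base
    * stays zero */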
5949 if (addr.type() == RegType::vgpr)
5950 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), Operand(0u), Operand(0u), Operand(-1u), Operand(rsrc_conf));
5951 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), addr, Operand(-1u), Operand(rsrc_conf));
5952 }
5953
5954 void visit_load_global(isel_context *ctx, nir_intrinsic_instr *instr)
5955 {
5956 Builder bld(ctx->program, ctx->block);
5957 unsigned num_components = instr->num_components;
5958 unsigned num_bytes = num_components * instr->dest.ssa.bit_size / 8;
5959
5960 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
5961 Temp addr = get_ssa_temp(ctx, instr->src[0].ssa);
5962
5963 bool glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT);
5964 bool dlc = glc && ctx->options->chip_class >= GFX10;
5965 aco_opcode op;
5966 if (dst.type() == RegType::vgpr || (glc && ctx->options->chip_class < GFX8)) {
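      /* pre-GFX8 SMEM has no glc bit, so coherent loads take the VMEM/FLAT
       * path even when the address is uniform (see the assert in the SMEM
       * branch below) */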
5967 bool global = ctx->options->chip_class >= GFX9;
5968
5969 if (ctx->options->chip_class >= GFX7) {
5970 aco_opcode op;
5971 switch (num_bytes) {
5972 case 4:
5973 op = global ? aco_opcode::global_load_dword : aco_opcode::flat_load_dword;
5974 break;
5975 case 8:
5976 op = global ? aco_opcode::global_load_dwordx2 : aco_opcode::flat_load_dwordx2;
5977 break;
5978 case 12:
5979 op = global ? aco_opcode::global_load_dwordx3 : aco_opcode::flat_load_dwordx3;
5980 break;
5981 case 16:
5982 op = global ? aco_opcode::global_load_dwordx4 : aco_opcode::flat_load_dwordx4;
5983 break;
5984 default:
5985 unreachable("load_global not implemented for this size.");
5986 }
5987
5988 aco_ptr<FLAT_instruction> flat{create_instruction<FLAT_instruction>(op, global ? Format::GLOBAL : Format::FLAT, 2, 1)};
5989 flat->operands[0] = Operand(addr);
5990 flat->operands[1] = Operand(s1);
5991 flat->glc = glc;
5992 flat->dlc = dlc;
5993 flat->barrier = barrier_buffer;
5994
5995 if (dst.type() == RegType::sgpr) {
5996 Temp vec = bld.tmp(RegType::vgpr, dst.size());
5997 flat->definitions[0] = Definition(vec);
5998 ctx->block->instructions.emplace_back(std::move(flat));
5999 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec);
6000 } else {
6001 flat->definitions[0] = Definition(dst);
6002 ctx->block->instructions.emplace_back(std::move(flat));
6003 }
6004 emit_split_vector(ctx, dst, num_components);
6005 } else {
6006 assert(ctx->options->chip_class == GFX6);
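         /* GFX6 has no FLAT instructions: emulate them with MUBUF, where a
          * divergent address goes into the vaddr (addr64) and a uniform one
          * into the descriptor base */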
6007
6008 /* GFX6 doesn't support loading vec3, expand to vec4. */
6009 num_bytes = num_bytes == 12 ? 16 : num_bytes;
6010
6011 aco_opcode op;
6012 switch (num_bytes) {
6013 case 4:
6014 op = aco_opcode::buffer_load_dword;
6015 break;
6016 case 8:
6017 op = aco_opcode::buffer_load_dwordx2;
6018 break;
6019 case 16:
6020 op = aco_opcode::buffer_load_dwordx4;
6021 break;
6022 default:
6023 unreachable("load_global not implemented for this size.");
6024 }
6025
6026 Temp rsrc = get_gfx6_global_rsrc(bld, addr);
6027
6028 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
6029 mubuf->operands[0] = Operand(rsrc);
6030 mubuf->operands[1] = addr.type() == RegType::vgpr ? Operand(addr) : Operand(v1);
6031 mubuf->operands[2] = Operand(0u);
6032 mubuf->glc = glc;
6033 mubuf->dlc = false;
6034 mubuf->offset = 0;
6035 mubuf->addr64 = addr.type() == RegType::vgpr;
6036 mubuf->disable_wqm = false;
6037 mubuf->barrier = barrier_buffer;
6038 aco_ptr<Instruction> instr = std::move(mubuf);
6039
6040 /* expand vector */
6041 if (dst.size() == 3) {
6042 Temp vec = bld.tmp(v4);
6043 instr->definitions[0] = Definition(vec);
6044 bld.insert(std::move(instr));
6045 emit_split_vector(ctx, vec, 4);
6046
6047 instr.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, 3, 1));
6048 instr->operands[0] = Operand(emit_extract_vector(ctx, vec, 0, v1));
6049 instr->operands[1] = Operand(emit_extract_vector(ctx, vec, 1, v1));
6050 instr->operands[2] = Operand(emit_extract_vector(ctx, vec, 2, v1));
6051 }
6052
6053 if (dst.type() == RegType::sgpr) {
6054 Temp vec = bld.tmp(RegType::vgpr, dst.size());
6055 instr->definitions[0] = Definition(vec);
6056 bld.insert(std::move(instr));
6057 expand_vector(ctx, vec, dst, num_components, (1 << num_components) - 1);
6058 bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec);
6059 } else {
6060 instr->definitions[0] = Definition(dst);
6061 bld.insert(std::move(instr));
6062 emit_split_vector(ctx, dst, num_components);
6063 }
6064 }
6065 } else {
6066 switch (num_bytes) {
6067 case 4:
6068 op = aco_opcode::s_load_dword;
6069 break;
6070 case 8:
6071 op = aco_opcode::s_load_dwordx2;
6072 break;
6073 case 12:
6074 case 16:
6075 op = aco_opcode::s_load_dwordx4;
6076 break;
6077 default:
6078 unreachable("load_global not implemented for this size.");
6079 }
6080 aco_ptr<SMEM_instruction> load{create_instruction<SMEM_instruction>(op, Format::SMEM, 2, 1)};
6081 load->operands[0] = Operand(addr);
6082 load->operands[1] = Operand(0u);
6083 load->definitions[0] = Definition(dst);
6084 load->glc = glc;
6085 load->dlc = dlc;
6086 load->barrier = barrier_buffer;
6087 assert(ctx->options->chip_class >= GFX8 || !glc);
6088
6089 if (dst.size() == 3) {
6090 /* trim vector */
6091 Temp vec = bld.tmp(s4);
6092 load->definitions[0] = Definition(vec);
6093 ctx->block->instructions.emplace_back(std::move(load));
6094 emit_split_vector(ctx, vec, 4);
6095
6096 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
6097 emit_extract_vector(ctx, vec, 0, s1),
6098 emit_extract_vector(ctx, vec, 1, s1),
6099 emit_extract_vector(ctx, vec, 2, s1));
6100 } else {
6101 ctx->block->instructions.emplace_back(std::move(load));
6102 }
6103 }
6104 }
6105
6106 void visit_store_global(isel_context *ctx, nir_intrinsic_instr *instr)
6107 {
6108 Builder bld(ctx->program, ctx->block);
6109 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
6110
6111 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6112 Temp addr = get_ssa_temp(ctx, instr->src[1].ssa);
6113
6114 if (ctx->options->chip_class >= GFX7)
6115 addr = as_vgpr(ctx, addr);
6116
6117 unsigned writemask = nir_intrinsic_write_mask(instr);
6118 while (writemask) {
6119 int start, count;
6120 u_bit_scan_consecutive_range(&writemask, &start, &count);
6121 if (count == 3 && ctx->options->chip_class == GFX6) {
6122 /* GFX6 doesn't support storing vec3, split it. */
6123 writemask |= 1u << (start + 2);
6124 count = 2;
6125 }
6126 unsigned num_bytes = count * elem_size_bytes;
6127
6128 Temp write_data = data;
6129 if (count != instr->num_components) {
6130 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
6131 for (int i = 0; i < count; i++)
6132 vec->operands[i] = Operand(emit_extract_vector(ctx, data, start + i, v1));
6133 write_data = bld.tmp(RegType::vgpr, count);
6134 vec->definitions[0] = Definition(write_data);
6135 ctx->block->instructions.emplace_back(std::move(vec));
6136 }
6137
6138 bool glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
6139 unsigned offset = start * elem_size_bytes;
6140
6141 if (ctx->options->chip_class >= GFX7) {
6142 if (offset > 0 && ctx->options->chip_class < GFX9) {
6143 Temp addr0 = bld.tmp(v1), addr1 = bld.tmp(v1);
6144 Temp new_addr0 = bld.tmp(v1), new_addr1 = bld.tmp(v1);
6145 Temp carry = bld.tmp(bld.lm);
6146 bld.pseudo(aco_opcode::p_split_vector, Definition(addr0), Definition(addr1), addr);
6147
6148 bld.vop2(aco_opcode::v_add_co_u32, Definition(new_addr0), bld.hint_vcc(Definition(carry)),
6149 Operand(offset), addr0);
6150 bld.vop2(aco_opcode::v_addc_co_u32, Definition(new_addr1), bld.def(bld.lm),
6151 Operand(0u), addr1,
6152 carry).def(1).setHint(vcc);
6153
6154 addr = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), new_addr0, new_addr1);
6155
6156 offset = 0;
6157 }
6158
6159 bool global = ctx->options->chip_class >= GFX9;
6160 aco_opcode op;
6161 switch (num_bytes) {
6162 case 4:
6163 op = global ? aco_opcode::global_store_dword : aco_opcode::flat_store_dword;
6164 break;
6165 case 8:
6166 op = global ? aco_opcode::global_store_dwordx2 : aco_opcode::flat_store_dwordx2;
6167 break;
6168 case 12:
6169 op = global ? aco_opcode::global_store_dwordx3 : aco_opcode::flat_store_dwordx3;
6170 break;
6171 case 16:
6172 op = global ? aco_opcode::global_store_dwordx4 : aco_opcode::flat_store_dwordx4;
6173 break;
6174 default:
6175 unreachable("store_global not implemented for this size.");
6176 }
6177
6178 aco_ptr<FLAT_instruction> flat{create_instruction<FLAT_instruction>(op, global ? Format::GLOBAL : Format::FLAT, 3, 0)};
6179 flat->operands[0] = Operand(addr);
6180 flat->operands[1] = Operand(s1);
6181 flat->operands[2] = Operand(data);
6182 flat->glc = glc;
6183 flat->dlc = false;
6184 flat->offset = offset;
6185 flat->disable_wqm = true;
6186 flat->barrier = barrier_buffer;
6187 ctx->program->needs_exact = true;
6188 ctx->block->instructions.emplace_back(std::move(flat));
6189 } else {
6190 assert(ctx->options->chip_class == GFX6);
6191
6192 aco_opcode op;
6193 switch (num_bytes) {
6194 case 4:
6195 op = aco_opcode::buffer_store_dword;
6196 break;
6197 case 8:
6198 op = aco_opcode::buffer_store_dwordx2;
6199 break;
6200 case 16:
6201 op = aco_opcode::buffer_store_dwordx4;
6202 break;
6203 default:
6204 unreachable("store_global not implemented for this size.");
6205 }
6206
6207 Temp rsrc = get_gfx6_global_rsrc(bld, addr);
6208
6209 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, 0)};
6210 mubuf->operands[0] = Operand(rsrc);
6211 mubuf->operands[1] = addr.type() == RegType::vgpr ? Operand(addr) : Operand(v1);
6212 mubuf->operands[2] = Operand(0u);
6213 mubuf->operands[3] = Operand(write_data);
6214 mubuf->glc = glc;
6215 mubuf->dlc = false;
6216 mubuf->offset = offset;
6217 mubuf->addr64 = addr.type() == RegType::vgpr;
6218 mubuf->disable_wqm = true;
6219 mubuf->barrier = barrier_buffer;
6220 ctx->program->needs_exact = true;
6221 ctx->block->instructions.emplace_back(std::move(mubuf));
6222 }
6223 }
6224 }
6225
6226 void visit_global_atomic(isel_context *ctx, nir_intrinsic_instr *instr)
6227 {
6228 /* return the previous value if dest is ever used */
6229 bool return_previous = false;
6230 nir_foreach_use_safe(use_src, &instr->dest.ssa) {
6231 return_previous = true;
6232 break;
6233 }
6234 nir_foreach_if_use_safe(use_src, &instr->dest.ssa) {
6235 return_previous = true;
6236 break;
6237 }
6238
6239 Builder bld(ctx->program, ctx->block);
6240 Temp addr = get_ssa_temp(ctx, instr->src[0].ssa);
6241 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
6242
6243 if (ctx->options->chip_class >= GFX7)
6244 addr = as_vgpr(ctx, addr);
6245
6246 if (instr->intrinsic == nir_intrinsic_global_atomic_comp_swap)
6247 data = bld.pseudo(aco_opcode::p_create_vector, bld.def(RegType::vgpr, data.size() * 2),
6248 get_ssa_temp(ctx, instr->src[2].ssa), data);
6249
6250 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6251
6252 aco_opcode op32, op64;
6253
6254 if (ctx->options->chip_class >= GFX7) {
6255 bool global = ctx->options->chip_class >= GFX9;
6256 switch (instr->intrinsic) {
6257 case nir_intrinsic_global_atomic_add:
6258 op32 = global ? aco_opcode::global_atomic_add : aco_opcode::flat_atomic_add;
6259 op64 = global ? aco_opcode::global_atomic_add_x2 : aco_opcode::flat_atomic_add_x2;
6260 break;
6261 case nir_intrinsic_global_atomic_imin:
6262 op32 = global ? aco_opcode::global_atomic_smin : aco_opcode::flat_atomic_smin;
6263 op64 = global ? aco_opcode::global_atomic_smin_x2 : aco_opcode::flat_atomic_smin_x2;
6264 break;
6265 case nir_intrinsic_global_atomic_umin:
6266 op32 = global ? aco_opcode::global_atomic_umin : aco_opcode::flat_atomic_umin;
6267 op64 = global ? aco_opcode::global_atomic_umin_x2 : aco_opcode::flat_atomic_umin_x2;
6268 break;
6269 case nir_intrinsic_global_atomic_imax:
6270 op32 = global ? aco_opcode::global_atomic_smax : aco_opcode::flat_atomic_smax;
6271 op64 = global ? aco_opcode::global_atomic_smax_x2 : aco_opcode::flat_atomic_smax_x2;
6272 break;
6273 case nir_intrinsic_global_atomic_umax:
6274 op32 = global ? aco_opcode::global_atomic_umax : aco_opcode::flat_atomic_umax;
6275 op64 = global ? aco_opcode::global_atomic_umax_x2 : aco_opcode::flat_atomic_umax_x2;
6276 break;
6277 case nir_intrinsic_global_atomic_and:
6278 op32 = global ? aco_opcode::global_atomic_and : aco_opcode::flat_atomic_and;
6279 op64 = global ? aco_opcode::global_atomic_and_x2 : aco_opcode::flat_atomic_and_x2;
6280 break;
6281 case nir_intrinsic_global_atomic_or:
6282 op32 = global ? aco_opcode::global_atomic_or : aco_opcode::flat_atomic_or;
6283 op64 = global ? aco_opcode::global_atomic_or_x2 : aco_opcode::flat_atomic_or_x2;
6284 break;
6285 case nir_intrinsic_global_atomic_xor:
6286 op32 = global ? aco_opcode::global_atomic_xor : aco_opcode::flat_atomic_xor;
6287 op64 = global ? aco_opcode::global_atomic_xor_x2 : aco_opcode::flat_atomic_xor_x2;
6288 break;
6289 case nir_intrinsic_global_atomic_exchange:
6290 op32 = global ? aco_opcode::global_atomic_swap : aco_opcode::flat_atomic_swap;
6291 op64 = global ? aco_opcode::global_atomic_swap_x2 : aco_opcode::flat_atomic_swap_x2;
6292 break;
6293 case nir_intrinsic_global_atomic_comp_swap:
6294 op32 = global ? aco_opcode::global_atomic_cmpswap : aco_opcode::flat_atomic_cmpswap;
6295 op64 = global ? aco_opcode::global_atomic_cmpswap_x2 : aco_opcode::flat_atomic_cmpswap_x2;
6296 break;
6297 default:
6298 unreachable("visit_atomic_global should only be called with nir_intrinsic_global_atomic_* instructions.");
6299 }
6300
6301 aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
6302 aco_ptr<FLAT_instruction> flat{create_instruction<FLAT_instruction>(op, global ? Format::GLOBAL : Format::FLAT, 3, return_previous ? 1 : 0)};
6303 flat->operands[0] = Operand(addr);
6304 flat->operands[1] = Operand(s1);
6305 flat->operands[2] = Operand(data);
6306 if (return_previous)
6307 flat->definitions[0] = Definition(dst);
6308 flat->glc = return_previous;
6309 flat->dlc = false; /* Not needed for atomics */
6310 flat->offset = 0;
6311 flat->disable_wqm = true;
6312 flat->barrier = barrier_buffer;
6313 ctx->program->needs_exact = true;
6314 ctx->block->instructions.emplace_back(std::move(flat));
6315 } else {
6316 assert(ctx->options->chip_class == GFX6);
6317
6318 switch (instr->intrinsic) {
6319 case nir_intrinsic_global_atomic_add:
6320 op32 = aco_opcode::buffer_atomic_add;
6321 op64 = aco_opcode::buffer_atomic_add_x2;
6322 break;
6323 case nir_intrinsic_global_atomic_imin:
6324 op32 = aco_opcode::buffer_atomic_smin;
6325 op64 = aco_opcode::buffer_atomic_smin_x2;
6326 break;
6327 case nir_intrinsic_global_atomic_umin:
6328 op32 = aco_opcode::buffer_atomic_umin;
6329 op64 = aco_opcode::buffer_atomic_umin_x2;
6330 break;
6331 case nir_intrinsic_global_atomic_imax:
6332 op32 = aco_opcode::buffer_atomic_smax;
6333 op64 = aco_opcode::buffer_atomic_smax_x2;
6334 break;
6335 case nir_intrinsic_global_atomic_umax:
6336 op32 = aco_opcode::buffer_atomic_umax;
6337 op64 = aco_opcode::buffer_atomic_umax_x2;
6338 break;
6339 case nir_intrinsic_global_atomic_and:
6340 op32 = aco_opcode::buffer_atomic_and;
6341 op64 = aco_opcode::buffer_atomic_and_x2;
6342 break;
6343 case nir_intrinsic_global_atomic_or:
6344 op32 = aco_opcode::buffer_atomic_or;
6345 op64 = aco_opcode::buffer_atomic_or_x2;
6346 break;
6347 case nir_intrinsic_global_atomic_xor:
6348 op32 = aco_opcode::buffer_atomic_xor;
6349 op64 = aco_opcode::buffer_atomic_xor_x2;
6350 break;
6351 case nir_intrinsic_global_atomic_exchange:
6352 op32 = aco_opcode::buffer_atomic_swap;
6353 op64 = aco_opcode::buffer_atomic_swap_x2;
6354 break;
6355 case nir_intrinsic_global_atomic_comp_swap:
6356 op32 = aco_opcode::buffer_atomic_cmpswap;
6357 op64 = aco_opcode::buffer_atomic_cmpswap_x2;
6358 break;
6359 default:
6360 unreachable("visit_atomic_global should only be called with nir_intrinsic_global_atomic_* instructions.");
6361 }
6362
6363 Temp rsrc = get_gfx6_global_rsrc(bld, addr);
6364
6365 aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
6366
6367 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, return_previous ? 1 : 0)};
6368 mubuf->operands[0] = Operand(rsrc);
6369 mubuf->operands[1] = addr.type() == RegType::vgpr ? Operand(addr) : Operand(v1);
6370 mubuf->operands[2] = Operand(0u);
6371 mubuf->operands[3] = Operand(data);
6372 if (return_previous)
6373 mubuf->definitions[0] = Definition(dst);
6374 mubuf->glc = return_previous;
6375 mubuf->dlc = false;
6376 mubuf->offset = 0;
6377 mubuf->addr64 = addr.type() == RegType::vgpr;
6378 mubuf->disable_wqm = true;
6379 mubuf->barrier = barrier_buffer;
6380 ctx->program->needs_exact = true;
6381 ctx->block->instructions.emplace_back(std::move(mubuf));
6382 }
6383 }
6384
6385 void emit_memory_barrier(isel_context *ctx, nir_intrinsic_instr *instr) {
6386 Builder bld(ctx->program, ctx->block);
6387 switch(instr->intrinsic) {
6388 case nir_intrinsic_group_memory_barrier:
6389 case nir_intrinsic_memory_barrier:
6390 bld.barrier(aco_opcode::p_memory_barrier_common);
6391 break;
6392 case nir_intrinsic_memory_barrier_buffer:
6393 bld.barrier(aco_opcode::p_memory_barrier_buffer);
6394 break;
6395 case nir_intrinsic_memory_barrier_image:
6396 bld.barrier(aco_opcode::p_memory_barrier_image);
6397 break;
6398 case nir_intrinsic_memory_barrier_tcs_patch:
6399 case nir_intrinsic_memory_barrier_shared:
6400 bld.barrier(aco_opcode::p_memory_barrier_shared);
6401 break;
6402 default:
6403 unreachable("Unimplemented memory barrier intrinsic");
6404 break;
6405 }
6406 }
6407
6408 void visit_load_shared(isel_context *ctx, nir_intrinsic_instr *instr)
6409 {
6410 // TODO: implement sparse reads using ds_read2_b32 and nir_ssa_def_components_read()
6411 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6412 assert(instr->dest.ssa.bit_size >= 32 && "Bitsize not supported in load_shared.");
6413 Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6414 Builder bld(ctx->program, ctx->block);
6415
6416 unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8;
6417 unsigned align = nir_intrinsic_align_mul(instr) ? nir_intrinsic_align(instr) : elem_size_bytes;
6418 load_lds(ctx, elem_size_bytes, dst, address, nir_intrinsic_base(instr), align);
6419 }
6420
6421 void visit_store_shared(isel_context *ctx, nir_intrinsic_instr *instr)
6422 {
6423 unsigned writemask = nir_intrinsic_write_mask(instr);
6424 Temp data = get_ssa_temp(ctx, instr->src[0].ssa);
6425 Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
6426 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
6427 assert(elem_size_bytes >= 4 && "Only 32bit & 64bit store_shared currently supported.");
6428
6429 unsigned align = nir_intrinsic_align_mul(instr) ? nir_intrinsic_align(instr) : elem_size_bytes;
6430 store_lds(ctx, elem_size_bytes, data, writemask, address, nir_intrinsic_base(instr), align);
6431 }
6432
6433 void visit_shared_atomic(isel_context *ctx, nir_intrinsic_instr *instr)
6434 {
6435 unsigned offset = nir_intrinsic_base(instr);
6436 Operand m = load_lds_size_m0(ctx);
6437 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
6438 Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6439
6440 unsigned num_operands = 3;
6441 aco_opcode op32, op64, op32_rtn, op64_rtn;
6442 switch(instr->intrinsic) {
6443 case nir_intrinsic_shared_atomic_add:
6444 op32 = aco_opcode::ds_add_u32;
6445 op64 = aco_opcode::ds_add_u64;
6446 op32_rtn = aco_opcode::ds_add_rtn_u32;
6447 op64_rtn = aco_opcode::ds_add_rtn_u64;
6448 break;
6449 case nir_intrinsic_shared_atomic_imin:
6450 op32 = aco_opcode::ds_min_i32;
6451 op64 = aco_opcode::ds_min_i64;
6452 op32_rtn = aco_opcode::ds_min_rtn_i32;
6453 op64_rtn = aco_opcode::ds_min_rtn_i64;
6454 break;
6455 case nir_intrinsic_shared_atomic_umin:
6456 op32 = aco_opcode::ds_min_u32;
6457 op64 = aco_opcode::ds_min_u64;
6458 op32_rtn = aco_opcode::ds_min_rtn_u32;
6459 op64_rtn = aco_opcode::ds_min_rtn_u64;
6460 break;
6461 case nir_intrinsic_shared_atomic_imax:
6462 op32 = aco_opcode::ds_max_i32;
6463 op64 = aco_opcode::ds_max_i64;
6464 op32_rtn = aco_opcode::ds_max_rtn_i32;
6465 op64_rtn = aco_opcode::ds_max_rtn_i64;
6466 break;
6467 case nir_intrinsic_shared_atomic_umax:
6468 op32 = aco_opcode::ds_max_u32;
6469 op64 = aco_opcode::ds_max_u64;
6470 op32_rtn = aco_opcode::ds_max_rtn_u32;
6471 op64_rtn = aco_opcode::ds_max_rtn_u64;
6472 break;
6473 case nir_intrinsic_shared_atomic_and:
6474 op32 = aco_opcode::ds_and_b32;
6475 op64 = aco_opcode::ds_and_b64;
6476 op32_rtn = aco_opcode::ds_and_rtn_b32;
6477 op64_rtn = aco_opcode::ds_and_rtn_b64;
6478 break;
6479 case nir_intrinsic_shared_atomic_or:
6480 op32 = aco_opcode::ds_or_b32;
6481 op64 = aco_opcode::ds_or_b64;
6482 op32_rtn = aco_opcode::ds_or_rtn_b32;
6483 op64_rtn = aco_opcode::ds_or_rtn_b64;
6484 break;
6485 case nir_intrinsic_shared_atomic_xor:
6486 op32 = aco_opcode::ds_xor_b32;
6487 op64 = aco_opcode::ds_xor_b64;
6488 op32_rtn = aco_opcode::ds_xor_rtn_b32;
6489 op64_rtn = aco_opcode::ds_xor_rtn_b64;
6490 break;
6491 case nir_intrinsic_shared_atomic_exchange:
6492 op32 = aco_opcode::ds_write_b32;
6493 op64 = aco_opcode::ds_write_b64;
6494 op32_rtn = aco_opcode::ds_wrxchg_rtn_b32;
6495 op64_rtn = aco_opcode::ds_wrxchg_rtn_b64; /* not ds_wrxchg2_rtn_b64: that variant exchanges two separate dwords */
6496 break;
6497 case nir_intrinsic_shared_atomic_comp_swap:
6498 op32 = aco_opcode::ds_cmpst_b32;
6499 op64 = aco_opcode::ds_cmpst_b64;
6500 op32_rtn = aco_opcode::ds_cmpst_rtn_b32;
6501 op64_rtn = aco_opcode::ds_cmpst_rtn_b64;
6502 num_operands = 4;
6503 break;
6504 default:
6505 unreachable("Unhandled shared atomic intrinsic");
6506 }
6507
6508 /* return the previous value if dest is ever used */
6509 bool return_previous = false;
6510 nir_foreach_use_safe(use_src, &instr->dest.ssa) {
6511 return_previous = true;
6512 break;
6513 }
6514 nir_foreach_if_use_safe(use_src, &instr->dest.ssa) {
6515 return_previous = true;
6516 break;
6517 }
6518
6519 aco_opcode op;
6520 if (data.size() == 1) {
6521 assert(instr->dest.ssa.bit_size == 32);
6522 op = return_previous ? op32_rtn : op32;
6523 } else {
6524 assert(instr->dest.ssa.bit_size == 64);
6525 op = return_previous ? op64_rtn : op64;
6526 }
6527
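   /* the DS offset0 field is a 16-bit immediate; fold larger constant
    * offsets into the address */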
6528 if (offset > 65535) {
6529 Builder bld(ctx->program, ctx->block);
6530 address = bld.vadd32(bld.def(v1), Operand(offset), address);
6531 offset = 0;
6532 }
6533
6534 aco_ptr<DS_instruction> ds;
6535 ds.reset(create_instruction<DS_instruction>(op, Format::DS, num_operands, return_previous ? 1 : 0));
6536 ds->operands[0] = Operand(address);
6537 ds->operands[1] = Operand(data);
6538 if (num_operands == 4)
6539 ds->operands[2] = Operand(get_ssa_temp(ctx, instr->src[2].ssa));
6540 ds->operands[num_operands - 1] = m;
6541 ds->offset0 = offset;
6542 if (return_previous)
6543 ds->definitions[0] = Definition(get_ssa_temp(ctx, &instr->dest.ssa));
6544 ctx->block->instructions.emplace_back(std::move(ds));
6545 }
6546
6547 Temp get_scratch_resource(isel_context *ctx)
6548 {
6549 Builder bld(ctx->program, ctx->block);
6550 Temp scratch_addr = ctx->program->private_segment_buffer;
6551 if (ctx->stage != compute_cs)
6552 scratch_addr = bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), scratch_addr, Operand(0u));
6553
6554 uint32_t rsrc_conf = S_008F0C_ADD_TID_ENABLE(1) |
6555 S_008F0C_INDEX_STRIDE(ctx->program->wave_size == 64 ? 3 : 2);
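   /* ADD_TID_ENABLE makes the hardware add the lane id to the buffer index,
    * and INDEX_STRIDE 3/2 selects a 64/32-element stride to match the wave
    * size, so per-lane scratch slots interleave across the whole wave */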
6556
6557 if (ctx->program->chip_class >= GFX10) {
6558 rsrc_conf |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
6559 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
6560 S_008F0C_RESOURCE_LEVEL(1);
6561 } else if (ctx->program->chip_class <= GFX7) { /* dfmt modifies stride on GFX8/GFX9 when ADD_TID_EN=1 */
6562 rsrc_conf |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
6563 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
6564 }
6565
6566 /* older generations need element size = 16 bytes. element size removed in GFX9 */
6567 if (ctx->program->chip_class <= GFX8)
6568 rsrc_conf |= S_008F0C_ELEMENT_SIZE(3);
6569
6570 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), scratch_addr, Operand(-1u), Operand(rsrc_conf));
6571 }
6572
6573 void visit_load_scratch(isel_context *ctx, nir_intrinsic_instr *instr) {
6574 assert(instr->dest.ssa.bit_size == 32 || instr->dest.ssa.bit_size == 64);
6575 Builder bld(ctx->program, ctx->block);
6576 Temp rsrc = get_scratch_resource(ctx);
6577 Temp offset = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6578 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6579
6580 aco_opcode op;
6581 switch (dst.size()) {
6582 case 1:
6583 op = aco_opcode::buffer_load_dword;
6584 break;
6585 case 2:
6586 op = aco_opcode::buffer_load_dwordx2;
6587 break;
6588 case 3:
6589 op = aco_opcode::buffer_load_dwordx3;
6590 break;
6591 case 4:
6592 op = aco_opcode::buffer_load_dwordx4;
6593 break;
6594 case 6:
6595 case 8: {
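      /* buffer_load_dwordx4 is the widest MUBUF load, so dword6/dword8
       * results are stitched together from two loads 16 bytes apart */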
6596 std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
6597 Temp lower = bld.mubuf(aco_opcode::buffer_load_dwordx4,
6598 bld.def(v4), rsrc, offset,
6599 ctx->program->scratch_offset, 0, true);
6600 Temp upper = bld.mubuf(dst.size() == 6 ? aco_opcode::buffer_load_dwordx2 :
6601 aco_opcode::buffer_load_dwordx4,
6602 dst.size() == 6 ? bld.def(v2) : bld.def(v4),
6603 rsrc, offset, ctx->program->scratch_offset, 16, true);
6604 emit_split_vector(ctx, lower, 2);
6605 elems[0] = emit_extract_vector(ctx, lower, 0, v2);
6606 elems[1] = emit_extract_vector(ctx, lower, 1, v2);
6607 if (dst.size() == 8) {
6608 emit_split_vector(ctx, upper, 2);
6609 elems[2] = emit_extract_vector(ctx, upper, 0, v2);
6610 elems[3] = emit_extract_vector(ctx, upper, 1, v2);
6611 } else {
6612 elems[2] = upper;
6613 }
6614
6615 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector,
6616 Format::PSEUDO, dst.size() / 2, 1)};
6617 for (unsigned i = 0; i < dst.size() / 2; i++)
6618 vec->operands[i] = Operand(elems[i]);
6619 vec->definitions[0] = Definition(dst);
6620 bld.insert(std::move(vec));
6621 ctx->allocated_vec.emplace(dst.id(), elems);
6622 return;
6623 }
6624 default:
6625 unreachable("Wrong dst size for nir_intrinsic_load_scratch");
6626 }
6627
6628 bld.mubuf(op, Definition(dst), rsrc, offset, ctx->program->scratch_offset, 0, true);
6629 emit_split_vector(ctx, dst, instr->num_components);
6630 }
6631
6632 void visit_store_scratch(isel_context *ctx, nir_intrinsic_instr *instr) {
6633 assert(instr->src[0].ssa->bit_size == 32 || instr->src[0].ssa->bit_size == 64);
6634 Builder bld(ctx->program, ctx->block);
6635 Temp rsrc = get_scratch_resource(ctx);
6636 Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6637 Temp offset = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
6638
6639 unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
6640 unsigned writemask = nir_intrinsic_write_mask(instr);
6641
6642 while (writemask) {
6643 int start, count;
6644 u_bit_scan_consecutive_range(&writemask, &start, &count);
6645 int num_bytes = count * elem_size_bytes;
6646
6647 if (num_bytes > 16) {
6648 assert(elem_size_bytes == 8);
6649 writemask |= (((count - 2) << 1) - 1) << (start + 2);
6650 count = 2;
6651 num_bytes = 16;
6652 }
6653
6654 // TODO: check alignment of sub-dword stores
6655 // TODO: split 3 bytes. there is no store instruction for that
6656
6657 Temp write_data;
6658 if (count != instr->num_components) {
6659 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
6660 for (int i = 0; i < count; i++) {
6661 Temp elem = emit_extract_vector(ctx, data, start + i, RegClass(RegType::vgpr, elem_size_bytes / 4));
6662 vec->operands[i] = Operand(elem);
6663 }
6664 write_data = bld.tmp(RegClass(RegType::vgpr, count * elem_size_bytes / 4));
6665 vec->definitions[0] = Definition(write_data);
6666 ctx->block->instructions.emplace_back(std::move(vec));
6667 } else {
6668 write_data = data;
6669 }
6670
6671 aco_opcode op;
6672 switch (num_bytes) {
6673 case 4:
6674 op = aco_opcode::buffer_store_dword;
6675 break;
6676 case 8:
6677 op = aco_opcode::buffer_store_dwordx2;
6678 break;
6679 case 12:
6680 op = aco_opcode::buffer_store_dwordx3;
6681 break;
6682 case 16:
6683 op = aco_opcode::buffer_store_dwordx4;
6684 break;
6685 default:
6686 unreachable("Invalid data size for nir_intrinsic_store_scratch.");
6687 }
6688
6689 bld.mubuf(op, rsrc, offset, ctx->program->scratch_offset, write_data, start * elem_size_bytes, true);
6690 }
6691 }
6692
6693 void visit_load_sample_mask_in(isel_context *ctx, nir_intrinsic_instr *instr) {
6694 uint8_t log2_ps_iter_samples;
6695 if (ctx->program->info->ps.force_persample) {
6696 log2_ps_iter_samples =
6697 util_logbase2(ctx->options->key.fs.num_samples);
6698 } else {
6699 log2_ps_iter_samples = ctx->options->key.fs.log2_ps_iter_samples;
6700 }
6701
6702 /* The bit pattern matches that used by fixed function fragment
6703 * processing. */
6704 static const unsigned ps_iter_masks[] = {
6705 0xffff, /* not used */
6706 0x5555,
6707 0x1111,
6708 0x0101,
6709 0x0001,
6710 };
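   /* e.g. with log2_ps_iter_samples == 2, 0x1111 has every 4th bit set;
    * shifted left by the sample id and ANDed with the coverage, each
    * invocation keeps only the coverage bits belonging to its own sample */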
6711 assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));
6712
6713 Builder bld(ctx->program, ctx->block);
6714
6715 Temp sample_id = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1),
6716 get_arg(ctx, ctx->args->ac.ancillary), Operand(8u), Operand(4u));
6717 Temp ps_iter_mask = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(ps_iter_masks[log2_ps_iter_samples]));
6718 Temp mask = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), sample_id, ps_iter_mask);
6719 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
6720 bld.vop2(aco_opcode::v_and_b32, Definition(dst), mask, get_arg(ctx, ctx->args->ac.sample_coverage));
6721 }
6722
6723 void visit_emit_vertex_with_counter(isel_context *ctx, nir_intrinsic_instr *instr) {
6724 Builder bld(ctx->program, ctx->block);
6725
6726 unsigned stream = nir_intrinsic_stream_id(instr);
6727 Temp next_vertex = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
6728 next_vertex = bld.v_mul_imm(bld.def(v1), next_vertex, 4u);
6729 nir_const_value *next_vertex_cv = nir_src_as_const_value(instr->src[0]);
6730
6731 /* get GSVS ring */
6732 Temp gsvs_ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_GSVS_GS * 16u));
6733
6734 unsigned num_components =
6735 ctx->program->info->gs.num_stream_output_components[stream];
6736 assert(num_components);
6737
6738 unsigned stride = 4u * num_components * ctx->shader->info.gs.vertices_out;
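   /* the GSVS ring appears to be component-major: each output component owns
    * vertices_out consecutive dword slots, hence the
    * "offset += ctx->shader->info.gs.vertices_out" stepping below */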
6739 unsigned stream_offset = 0;
6740 for (unsigned i = 0; i < stream; i++) {
6741 unsigned prev_stride = 4u * ctx->program->info->gs.num_stream_output_components[i] * ctx->shader->info.gs.vertices_out;
6742 stream_offset += prev_stride * ctx->program->wave_size;
6743 }
6744
6745 /* Limit on the stride field for <= GFX7. */
6746 assert(stride < (1 << 14));
6747
6748 Temp gsvs_dwords[4];
6749 for (unsigned i = 0; i < 4; i++)
6750 gsvs_dwords[i] = bld.tmp(s1);
6751 bld.pseudo(aco_opcode::p_split_vector,
6752 Definition(gsvs_dwords[0]),
6753 Definition(gsvs_dwords[1]),
6754 Definition(gsvs_dwords[2]),
6755 Definition(gsvs_dwords[3]),
6756 gsvs_ring);
6757
6758 if (stream_offset) {
6759 Temp stream_offset_tmp = bld.copy(bld.def(s1), Operand(stream_offset));
6760
6761 Temp carry = bld.tmp(s1);
6762 gsvs_dwords[0] = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), gsvs_dwords[0], stream_offset_tmp);
6763 gsvs_dwords[1] = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.def(s1, scc), gsvs_dwords[1], Operand(0u), bld.scc(carry));
6764 }
6765
6766 gsvs_dwords[1] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), gsvs_dwords[1], Operand(S_008F04_STRIDE(stride)));
6767 gsvs_dwords[2] = bld.copy(bld.def(s1), Operand((uint32_t)ctx->program->wave_size));
6768
6769 gsvs_ring = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4),
6770 gsvs_dwords[0], gsvs_dwords[1], gsvs_dwords[2], gsvs_dwords[3]);
6771
6772 unsigned offset = 0;
6773 for (unsigned i = 0; i <= VARYING_SLOT_VAR31; i++) {
6774 if (ctx->program->info->gs.output_streams[i] != stream)
6775 continue;
6776
6777 for (unsigned j = 0; j < 4; j++) {
6778 if (!(ctx->program->info->gs.output_usage_mask[i] & (1 << j)))
6779 continue;
6780
6781 if (ctx->outputs.mask[i] & (1 << j)) {
6782 Operand vaddr_offset = next_vertex_cv ? Operand(v1) : Operand(next_vertex);
6783 unsigned const_offset = (offset + (next_vertex_cv ? next_vertex_cv->u32 : 0u)) * 4u;
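               /* the MTBUF offset field is a 12-bit immediate (< 4096); move
                * the overflowing part into the vaddr */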
6784 if (const_offset >= 4096u) {
6785 if (vaddr_offset.isUndefined())
6786 vaddr_offset = bld.copy(bld.def(v1), Operand(const_offset / 4096u * 4096u));
6787 else
6788 vaddr_offset = bld.vadd32(bld.def(v1), Operand(const_offset / 4096u * 4096u), vaddr_offset);
6789 const_offset %= 4096u;
6790 }
6791
6792 aco_ptr<MTBUF_instruction> mtbuf{create_instruction<MTBUF_instruction>(aco_opcode::tbuffer_store_format_x, Format::MTBUF, 4, 0)};
6793 mtbuf->operands[0] = Operand(gsvs_ring);
6794 mtbuf->operands[1] = vaddr_offset;
6795 mtbuf->operands[2] = Operand(get_arg(ctx, ctx->args->gs2vs_offset));
6796 mtbuf->operands[3] = Operand(ctx->outputs.temps[i * 4u + j]);
6797 mtbuf->offen = !vaddr_offset.isUndefined();
6798 mtbuf->dfmt = V_008F0C_BUF_DATA_FORMAT_32;
6799 mtbuf->nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
6800 mtbuf->offset = const_offset;
6801 mtbuf->glc = true;
6802 mtbuf->slc = true;
6803 mtbuf->barrier = barrier_gs_data;
6804 mtbuf->can_reorder = true;
6805 bld.insert(std::move(mtbuf));
6806 }
6807
6808 offset += ctx->shader->info.gs.vertices_out;
6809 }
6810
6811 /* outputs for the next vertex are undefined and keeping them around can
6812 * create invalid IR with control flow */
6813 ctx->outputs.mask[i] = 0;
6814 }
6815
6816 bld.sopp(aco_opcode::s_sendmsg, bld.m0(ctx->gs_wave_id), -1, sendmsg_gs(false, true, stream));
6817 }
6818
6819 Temp emit_boolean_reduce(isel_context *ctx, nir_op op, unsigned cluster_size, Temp src)
6820 {
6821 Builder bld(ctx->program, ctx->block);
6822
6823 if (cluster_size == 1) {
6824 return src;
6825 } else if (op == nir_op_iand && cluster_size == 4) {
6826 //subgroupClusteredAnd(val, 4) -> ~wqm(exec & ~val)
6827 Temp tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src);
6828 return bld.sop1(Builder::s_not, bld.def(bld.lm), bld.def(s1, scc),
6829 bld.sop1(Builder::s_wqm, bld.def(bld.lm), bld.def(s1, scc), tmp));
6830 } else if (op == nir_op_ior && cluster_size == 4) {
6831 //subgroupClusteredOr(val, 4) -> wqm(val & exec)
6832 return bld.sop1(Builder::s_wqm, bld.def(bld.lm), bld.def(s1, scc),
6833 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm)));
6834 } else if (op == nir_op_iand && cluster_size == ctx->program->wave_size) {
6835 //subgroupAnd(val) -> (exec & ~val) == 0
6836 Temp tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src).def(1).getTemp();
6837 Temp cond = bool_to_vector_condition(ctx, emit_wqm(ctx, tmp));
6838 return bld.sop1(Builder::s_not, bld.def(bld.lm), bld.def(s1, scc), cond);
6839 } else if (op == nir_op_ior && cluster_size == ctx->program->wave_size) {
6840 //subgroupOr(val) -> (val & exec) != 0
6841 Temp tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm)).def(1).getTemp();
6842 return bool_to_vector_condition(ctx, tmp);
6843 } else if (op == nir_op_ixor && cluster_size == ctx->program->wave_size) {
6844 //subgroupXor(val) -> s_bcnt1_i32_b64(val & exec) & 1
6845 Temp tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
6846 tmp = bld.sop1(Builder::s_bcnt1_i32, bld.def(s1), bld.def(s1, scc), tmp);
6847 tmp = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), tmp, Operand(1u)).def(1).getTemp();
6848 return bool_to_vector_condition(ctx, tmp);
6849 } else {
6850 //subgroupClustered{And,Or,Xor}(val, n) ->
6851 //lane_id = v_mbcnt_hi_u32_b32(-1, v_mbcnt_lo_u32_b32(-1, 0)) ; just v_mbcnt_lo_u32_b32 on wave32
6852 //cluster_offset = ~(n - 1) & lane_id
6853 //cluster_mask = ((1 << n) - 1)
6854 //subgroupClusteredAnd():
6855 // return ((val | ~exec) >> cluster_offset) & cluster_mask == cluster_mask
6856 //subgroupClusteredOr():
6857 // return ((val & exec) >> cluster_offset) & cluster_mask != 0
6858 //subgroupClusteredXor():
6859 // return v_bcnt_u32_b32(((val & exec) >> cluster_offset) & cluster_mask, 0) & 1 != 0
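      //e.g. for cluster_size == 8 and lane 13: cluster_offset = ~7 & 13 = 8,
      //cluster_mask = 0xff, so the reduction inspects ballot bits 8..15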
6860 Temp lane_id = emit_mbcnt(ctx, bld.def(v1));
6861 Temp cluster_offset = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(~uint32_t(cluster_size - 1)), lane_id);
6862
6863 Temp tmp;
6864 if (op == nir_op_iand)
6865 tmp = bld.sop2(Builder::s_orn2, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
6866 else
6867 tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
6868
6869 uint32_t cluster_mask = cluster_size == 32 ? -1 : (1u << cluster_size) - 1u;
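      /* special-case cluster_size == 32: "1u << 32" would be undefined */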
6870
6871 if (ctx->program->chip_class <= GFX7)
6872 tmp = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), tmp, cluster_offset);
6873 else if (ctx->program->wave_size == 64)
6874 tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), cluster_offset, tmp);
6875 else
6876 tmp = bld.vop2_e64(aco_opcode::v_lshrrev_b32, bld.def(v1), cluster_offset, tmp);
6877 tmp = emit_extract_vector(ctx, tmp, 0, v1);
6878 if (cluster_mask != 0xffffffff)
6879 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(cluster_mask), tmp);
6880
6881 Definition cmp_def = Definition();
6882 if (op == nir_op_iand) {
6883 cmp_def = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(bld.lm), Operand(cluster_mask), tmp).def(0);
6884 } else if (op == nir_op_ior) {
6885 cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), tmp).def(0);
6886 } else if (op == nir_op_ixor) {
6887 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(1u),
6888 bld.vop3(aco_opcode::v_bcnt_u32_b32, bld.def(v1), tmp, Operand(0u)));
6889 cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), tmp).def(0);
6890 }
6891 cmp_def.setHint(vcc);
6892 return cmp_def.getTemp();
6893 }
6894 }
6895
6896 Temp emit_boolean_exclusive_scan(isel_context *ctx, nir_op op, Temp src)
6897 {
6898 Builder bld(ctx->program, ctx->block);
6899
6900 //subgroupExclusiveAnd(val) -> mbcnt(exec & ~val) == 0
6901 //subgroupExclusiveOr(val) -> mbcnt(val & exec) != 0
6902 //subgroupExclusiveXor(val) -> mbcnt(val & exec) & 1 != 0
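// mbcnt(mask) gives each lane the number of set bits of mask in lanes strictly
// below it, so comparing it with 0 (or taking bit 0 for xor) directly yields
// the exclusive boolean scan.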
6903 Temp tmp;
6904 if (op == nir_op_iand)
6905 tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src);
6906 else
6907 tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
6908
6909 Builder::Result lohi = bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), bld.def(s1), tmp);
6910 Temp lo = lohi.def(0).getTemp();
6911 Temp hi = lohi.def(1).getTemp();
6912 Temp mbcnt = emit_mbcnt(ctx, bld.def(v1), Operand(lo), Operand(hi));
6913
6914 Definition cmp_def = Definition();
6915 if (op == nir_op_iand)
6916 cmp_def = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(bld.lm), Operand(0u), mbcnt).def(0);
6917 else if (op == nir_op_ior)
6918 cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), mbcnt).def(0);
6919 else if (op == nir_op_ixor)
6920 cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u),
6921 bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(1u), mbcnt)).def(0);
6922 cmp_def.setHint(vcc);
6923 return cmp_def.getTemp();
6924 }
6925
6926 Temp emit_boolean_inclusive_scan(isel_context *ctx, nir_op op, Temp src)
6927 {
6928 Builder bld(ctx->program, ctx->block);
6929
6930 //subgroupInclusiveAnd(val) -> subgroupExclusiveAnd(val) && val
6931 //subgroupInclusiveOr(val) -> subgroupExclusiveOr(val) || val
6932 //subgroupInclusiveXor(val) -> subgroupExclusiveXor(val) ^^ val
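// i.e. the inclusive scan is the exclusive scan combined once more with the
// lane's own value, so we can reuse emit_boolean_exclusive_scan below.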
6933 Temp tmp = emit_boolean_exclusive_scan(ctx, op, src);
6934 if (op == nir_op_iand)
6935 return bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), tmp, src);
6936 else if (op == nir_op_ior)
6937 return bld.sop2(Builder::s_or, bld.def(bld.lm), bld.def(s1, scc), tmp, src);
6938 else if (op == nir_op_ixor)
6939 return bld.sop2(Builder::s_xor, bld.def(bld.lm), bld.def(s1, scc), tmp, src);
6940
6941 assert(false);
6942 return Temp();
6943 }
6944
6945 void emit_uniform_subgroup(isel_context *ctx, nir_intrinsic_instr *instr, Temp src)
6946 {
6947 Builder bld(ctx->program, ctx->block);
6948 Definition dst(get_ssa_temp(ctx, &instr->dest.ssa));
6949 if (src.regClass().type() == RegType::vgpr) {
6950 bld.pseudo(aco_opcode::p_as_uniform, dst, src);
6951 } else if (src.regClass() == s1) {
6952 bld.sop1(aco_opcode::s_mov_b32, dst, src);
6953 } else if (src.regClass() == s2) {
6954 bld.sop1(aco_opcode::s_mov_b64, dst, src);
6955 } else {
6956 fprintf(stderr, "Unimplemented NIR instr bit size: ");
6957 nir_print_instr(&instr->instr, stderr);
6958 fprintf(stderr, "\n");
6959 }
6960 }
6961
6962 void emit_interp_center(isel_context *ctx, Temp dst, Temp pos1, Temp pos2)
6963 {
6964 Builder bld(ctx->program, ctx->block);
6965 Temp persp_center = get_arg(ctx, ctx->args->ac.persp_center);
6966 Temp p1 = emit_extract_vector(ctx, persp_center, 0, v1);
6967 Temp p2 = emit_extract_vector(ctx, persp_center, 1, v1);
6968
6969 Temp ddx_1, ddx_2, ddy_1, ddy_2;
6970 uint32_t dpp_ctrl0 = dpp_quad_perm(0, 0, 0, 0);
6971 uint32_t dpp_ctrl1 = dpp_quad_perm(1, 1, 1, 1);
6972 uint32_t dpp_ctrl2 = dpp_quad_perm(2, 2, 2, 2);
6973
6974 /* Build DD X/Y */
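/* Within each 2x2 quad (lanes 0..3 = TL, TR, BL, BR), broadcast the top-left
 * lane with dpp_quad_perm(0, 0, 0, 0) and subtract it from the lanes selected
 * by dpp_ctrl1/dpp_ctrl2 to approximate d/dx and d/dy of the barycentrics. */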
6975 if (ctx->program->chip_class >= GFX8) {
6976 Temp tl_1 = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), p1, dpp_ctrl0);
6977 ddx_1 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p1, tl_1, dpp_ctrl1);
6978 ddy_1 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p1, tl_1, dpp_ctrl2);
6979 Temp tl_2 = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), p2, dpp_ctrl0);
6980 ddx_2 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p2, tl_2, dpp_ctrl1);
6981 ddy_2 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p2, tl_2, dpp_ctrl2);
6982 } else {
6983 Temp tl_1 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p1, (1 << 15) | dpp_ctrl0);
6984 ddx_1 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p1, (1 << 15) | dpp_ctrl1);
6985 ddx_1 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddx_1, tl_1);
6986 ddx_2 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p1, (1 << 15) | dpp_ctrl2);
6987 ddx_2 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddx_2, tl_1);
6988 Temp tl_2 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p2, (1 << 15) | dpp_ctrl0);
6989 ddy_1 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p2, (1 << 15) | dpp_ctrl1);
6990 ddy_1 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddy_1, tl_2);
6991 ddy_2 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p2, (1 << 15) | dpp_ctrl2);
6992 ddy_2 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddy_2, tl_2);
6993 }
6994
6995 /* res_k = p_k + ddx_k * pos1 + ddy_k * pos2 */
6996 Temp tmp1 = bld.vop3(aco_opcode::v_mad_f32, bld.def(v1), ddx_1, pos1, p1);
6997 Temp tmp2 = bld.vop3(aco_opcode::v_mad_f32, bld.def(v1), ddx_2, pos1, p2);
6998 tmp1 = bld.vop3(aco_opcode::v_mad_f32, bld.def(v1), ddy_1, pos2, tmp1);
6999 tmp2 = bld.vop3(aco_opcode::v_mad_f32, bld.def(v1), ddy_2, pos2, tmp2);
7000 Temp wqm1 = bld.tmp(v1);
7001 emit_wqm(ctx, tmp1, wqm1, true);
7002 Temp wqm2 = bld.tmp(v1);
7003 emit_wqm(ctx, tmp2, wqm2, true);
7004 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), wqm1, wqm2);
7005 return;
7006 }
7007
7008 void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr)
7009 {
7010 Builder bld(ctx->program, ctx->block);
7011 switch(instr->intrinsic) {
7012 case nir_intrinsic_load_barycentric_sample:
7013 case nir_intrinsic_load_barycentric_pixel:
7014 case nir_intrinsic_load_barycentric_centroid: {
7015 glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(instr);
7016 Temp bary = Temp(0, s2);
7017 switch (mode) {
7018 case INTERP_MODE_SMOOTH:
7019 case INTERP_MODE_NONE:
7020 if (instr->intrinsic == nir_intrinsic_load_barycentric_pixel)
7021 bary = get_arg(ctx, ctx->args->ac.persp_center);
7022 else if (instr->intrinsic == nir_intrinsic_load_barycentric_centroid)
7023 bary = ctx->persp_centroid;
7024 else if (instr->intrinsic == nir_intrinsic_load_barycentric_sample)
7025 bary = get_arg(ctx, ctx->args->ac.persp_sample);
7026 break;
7027 case INTERP_MODE_NOPERSPECTIVE:
7028 if (instr->intrinsic == nir_intrinsic_load_barycentric_pixel)
7029 bary = get_arg(ctx, ctx->args->ac.linear_center);
7030 else if (instr->intrinsic == nir_intrinsic_load_barycentric_centroid)
7031 bary = ctx->linear_centroid;
7032 else if (instr->intrinsic == nir_intrinsic_load_barycentric_sample)
7033 bary = get_arg(ctx, ctx->args->ac.linear_sample);
7034 break;
7035 default:
7036 break;
7037 }
7038 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7039 Temp p1 = emit_extract_vector(ctx, bary, 0, v1);
7040 Temp p2 = emit_extract_vector(ctx, bary, 1, v1);
7041 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
7042 Operand(p1), Operand(p2));
7043 emit_split_vector(ctx, dst, 2);
7044 break;
7045 }
7046 case nir_intrinsic_load_barycentric_model: {
7047 Temp model = get_arg(ctx, ctx->args->ac.pull_model);
7048
7049 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7050 Temp p1 = emit_extract_vector(ctx, model, 0, v1);
7051 Temp p2 = emit_extract_vector(ctx, model, 1, v1);
7052 Temp p3 = emit_extract_vector(ctx, model, 2, v1);
7053 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
7054 Operand(p1), Operand(p2), Operand(p3));
7055 emit_split_vector(ctx, dst, 3);
7056 break;
7057 }
7058 case nir_intrinsic_load_barycentric_at_sample: {
7059 uint32_t sample_pos_offset = RING_PS_SAMPLE_POSITIONS * 16;
7060 switch (ctx->options->key.fs.num_samples) {
7061 case 2: sample_pos_offset += 1 << 3; break;
7062 case 4: sample_pos_offset += 3 << 3; break;
7063 case 8: sample_pos_offset += 7 << 3; break;
7064 default: break;
7065 }
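/* The 1x/2x/4x/8x sample position tables are stored back to back in the ring
 * (8 bytes per position), so the base is advanced by 0/1/3/7 entries above
 * before the sample index is added below. */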
7066 Temp sample_pos;
7067 Temp addr = get_ssa_temp(ctx, instr->src[0].ssa);
7068 nir_const_value* const_addr = nir_src_as_const_value(instr->src[0]);
7069 Temp private_segment_buffer = ctx->program->private_segment_buffer;
7070 if (addr.type() == RegType::sgpr) {
7071 Operand offset;
7072 if (const_addr) {
7073 sample_pos_offset += const_addr->u32 << 3;
7074 offset = Operand(sample_pos_offset);
7075 } else if (ctx->options->chip_class >= GFX9) {
7076 offset = bld.sop2(aco_opcode::s_lshl3_add_u32, bld.def(s1), bld.def(s1, scc), addr, Operand(sample_pos_offset));
7077 } else {
7078 offset = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), addr, Operand(3u));
7079 offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset, Operand(sample_pos_offset));
7080 }
7081
7082 Operand off = bld.copy(bld.def(s1), Operand(offset));
7083 sample_pos = bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), private_segment_buffer, off);
7084
7085 } else if (ctx->options->chip_class >= GFX9) {
7086 addr = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(3u), addr);
7087 sample_pos = bld.global(aco_opcode::global_load_dwordx2, bld.def(v2), addr, private_segment_buffer, sample_pos_offset);
7088 } else if (ctx->options->chip_class >= GFX7) {
7089 /* addr += private_segment_buffer + sample_pos_offset */
7090 Temp tmp0 = bld.tmp(s1);
7091 Temp tmp1 = bld.tmp(s1);
7092 bld.pseudo(aco_opcode::p_split_vector, Definition(tmp0), Definition(tmp1), private_segment_buffer);
7093 Definition scc_tmp = bld.def(s1, scc);
7094 tmp0 = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), scc_tmp, tmp0, Operand(sample_pos_offset));
7095 tmp1 = bld.sop2(aco_opcode::s_addc_u32, bld.def(s1), bld.def(s1, scc), tmp1, Operand(0u), bld.scc(scc_tmp.getTemp()));
7096 addr = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(3u), addr);
7097 Temp pck0 = bld.tmp(v1);
7098 Temp carry = bld.vadd32(Definition(pck0), tmp0, addr, true).def(1).getTemp();
7099 tmp1 = as_vgpr(ctx, tmp1);
7100 Temp pck1 = bld.vop2_e64(aco_opcode::v_addc_co_u32, bld.def(v1), bld.hint_vcc(bld.def(bld.lm)), tmp1, Operand(0u), carry);
7101 addr = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), pck0, pck1);
7102
7103 /* sample_pos = flat_load_dwordx2 addr */
7104 sample_pos = bld.flat(aco_opcode::flat_load_dwordx2, bld.def(v2), addr, Operand(s1));
7105 } else {
7106 assert(ctx->options->chip_class == GFX6);
7107
7108 uint32_t rsrc_conf = S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
7109 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
7110 Temp rsrc = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), private_segment_buffer, Operand(0u), Operand(rsrc_conf));
7111
7112 addr = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(3u), addr);
7113 addr = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), addr, Operand(0u));
7114
7115 sample_pos = bld.tmp(v2);
7116
7117 aco_ptr<MUBUF_instruction> load{create_instruction<MUBUF_instruction>(aco_opcode::buffer_load_dwordx2, Format::MUBUF, 3, 1)};
7118 load->definitions[0] = Definition(sample_pos);
7119 load->operands[0] = Operand(rsrc);
7120 load->operands[1] = Operand(addr);
7121 load->operands[2] = Operand(0u);
7122 load->offset = sample_pos_offset;
7123 load->offen = 0;
7124 load->addr64 = true;
7125 load->glc = false;
7126 load->dlc = false;
7127 load->disable_wqm = false;
7128 load->barrier = barrier_none;
7129 load->can_reorder = true;
7130 ctx->block->instructions.emplace_back(std::move(load));
7131 }
7132
7133 /* sample_pos -= 0.5 */
7134 Temp pos1 = bld.tmp(RegClass(sample_pos.type(), 1));
7135 Temp pos2 = bld.tmp(RegClass(sample_pos.type(), 1));
7136 bld.pseudo(aco_opcode::p_split_vector, Definition(pos1), Definition(pos2), sample_pos);
7137 pos1 = bld.vop2_e64(aco_opcode::v_sub_f32, bld.def(v1), pos1, Operand(0x3f000000u));
7138 pos2 = bld.vop2_e64(aco_opcode::v_sub_f32, bld.def(v1), pos2, Operand(0x3f000000u));
7139
7140 emit_interp_center(ctx, get_ssa_temp(ctx, &instr->dest.ssa), pos1, pos2);
7141 break;
7142 }
7143 case nir_intrinsic_load_barycentric_at_offset: {
7144 Temp offset = get_ssa_temp(ctx, instr->src[0].ssa);
7145 RegClass rc = RegClass(offset.type(), 1);
7146 Temp pos1 = bld.tmp(rc), pos2 = bld.tmp(rc);
7147 bld.pseudo(aco_opcode::p_split_vector, Definition(pos1), Definition(pos2), offset);
7148 emit_interp_center(ctx, get_ssa_temp(ctx, &instr->dest.ssa), pos1, pos2);
7149 break;
7150 }
7151 case nir_intrinsic_load_front_face: {
7152 bld.vopc(aco_opcode::v_cmp_lg_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
7153 Operand(0u), get_arg(ctx, ctx->args->ac.front_face)).def(0).setHint(vcc);
7154 break;
7155 }
7156 case nir_intrinsic_load_view_index: {
7157 if (ctx->stage & (sw_vs | sw_gs | sw_tcs | sw_tes)) {
7158 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7159 bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.view_index)));
7160 break;
7161 }
7162
7163 /* fallthrough */
7164 }
7165 case nir_intrinsic_load_layer_id: {
7166 unsigned idx = nir_intrinsic_base(instr);
7167 bld.vintrp(aco_opcode::v_interp_mov_f32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
7168 Operand(2u), bld.m0(get_arg(ctx, ctx->args->ac.prim_mask)), idx, 0);
7169 break;
7170 }
7171 case nir_intrinsic_load_frag_coord: {
7172 emit_load_frag_coord(ctx, get_ssa_temp(ctx, &instr->dest.ssa), 4);
7173 break;
7174 }
7175 case nir_intrinsic_load_sample_pos: {
7176 Temp posx = get_arg(ctx, ctx->args->ac.frag_pos[0]);
7177 Temp posy = get_arg(ctx, ctx->args->ac.frag_pos[1]);
7178 bld.pseudo(aco_opcode::p_create_vector, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
7179 posx.id() ? bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posx) : Operand(0u),
7180 posy.id() ? bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posy) : Operand(0u));
7181 break;
7182 }
7183 case nir_intrinsic_load_tess_coord:
7184 visit_load_tess_coord(ctx, instr);
7185 break;
7186 case nir_intrinsic_load_interpolated_input:
7187 visit_load_interpolated_input(ctx, instr);
7188 break;
7189 case nir_intrinsic_store_output:
7190 visit_store_output(ctx, instr);
7191 break;
7192 case nir_intrinsic_load_input:
7193 case nir_intrinsic_load_input_vertex:
7194 visit_load_input(ctx, instr);
7195 break;
7196 case nir_intrinsic_load_output:
7197 visit_load_output(ctx, instr);
7198 break;
7199 case nir_intrinsic_load_per_vertex_input:
7200 visit_load_per_vertex_input(ctx, instr);
7201 break;
7202 case nir_intrinsic_load_per_vertex_output:
7203 visit_load_per_vertex_output(ctx, instr);
7204 break;
7205 case nir_intrinsic_store_per_vertex_output:
7206 visit_store_per_vertex_output(ctx, instr);
7207 break;
7208 case nir_intrinsic_load_ubo:
7209 visit_load_ubo(ctx, instr);
7210 break;
7211 case nir_intrinsic_load_push_constant:
7212 visit_load_push_constant(ctx, instr);
7213 break;
7214 case nir_intrinsic_load_constant:
7215 visit_load_constant(ctx, instr);
7216 break;
7217 case nir_intrinsic_vulkan_resource_index:
7218 visit_load_resource(ctx, instr);
7219 break;
7220 case nir_intrinsic_discard:
7221 visit_discard(ctx, instr);
7222 break;
7223 case nir_intrinsic_discard_if:
7224 visit_discard_if(ctx, instr);
7225 break;
7226 case nir_intrinsic_load_shared:
7227 visit_load_shared(ctx, instr);
7228 break;
7229 case nir_intrinsic_store_shared:
7230 visit_store_shared(ctx, instr);
7231 break;
7232 case nir_intrinsic_shared_atomic_add:
7233 case nir_intrinsic_shared_atomic_imin:
7234 case nir_intrinsic_shared_atomic_umin:
7235 case nir_intrinsic_shared_atomic_imax:
7236 case nir_intrinsic_shared_atomic_umax:
7237 case nir_intrinsic_shared_atomic_and:
7238 case nir_intrinsic_shared_atomic_or:
7239 case nir_intrinsic_shared_atomic_xor:
7240 case nir_intrinsic_shared_atomic_exchange:
7241 case nir_intrinsic_shared_atomic_comp_swap:
7242 visit_shared_atomic(ctx, instr);
7243 break;
7244 case nir_intrinsic_image_deref_load:
7245 visit_image_load(ctx, instr);
7246 break;
7247 case nir_intrinsic_image_deref_store:
7248 visit_image_store(ctx, instr);
7249 break;
7250 case nir_intrinsic_image_deref_atomic_add:
7251 case nir_intrinsic_image_deref_atomic_umin:
7252 case nir_intrinsic_image_deref_atomic_imin:
7253 case nir_intrinsic_image_deref_atomic_umax:
7254 case nir_intrinsic_image_deref_atomic_imax:
7255 case nir_intrinsic_image_deref_atomic_and:
7256 case nir_intrinsic_image_deref_atomic_or:
7257 case nir_intrinsic_image_deref_atomic_xor:
7258 case nir_intrinsic_image_deref_atomic_exchange:
7259 case nir_intrinsic_image_deref_atomic_comp_swap:
7260 visit_image_atomic(ctx, instr);
7261 break;
7262 case nir_intrinsic_image_deref_size:
7263 visit_image_size(ctx, instr);
7264 break;
7265 case nir_intrinsic_load_ssbo:
7266 visit_load_ssbo(ctx, instr);
7267 break;
7268 case nir_intrinsic_store_ssbo:
7269 visit_store_ssbo(ctx, instr);
7270 break;
7271 case nir_intrinsic_load_global:
7272 visit_load_global(ctx, instr);
7273 break;
7274 case nir_intrinsic_store_global:
7275 visit_store_global(ctx, instr);
7276 break;
7277 case nir_intrinsic_global_atomic_add:
7278 case nir_intrinsic_global_atomic_imin:
7279 case nir_intrinsic_global_atomic_umin:
7280 case nir_intrinsic_global_atomic_imax:
7281 case nir_intrinsic_global_atomic_umax:
7282 case nir_intrinsic_global_atomic_and:
7283 case nir_intrinsic_global_atomic_or:
7284 case nir_intrinsic_global_atomic_xor:
7285 case nir_intrinsic_global_atomic_exchange:
7286 case nir_intrinsic_global_atomic_comp_swap:
7287 visit_global_atomic(ctx, instr);
7288 break;
7289 case nir_intrinsic_ssbo_atomic_add:
7290 case nir_intrinsic_ssbo_atomic_imin:
7291 case nir_intrinsic_ssbo_atomic_umin:
7292 case nir_intrinsic_ssbo_atomic_imax:
7293 case nir_intrinsic_ssbo_atomic_umax:
7294 case nir_intrinsic_ssbo_atomic_and:
7295 case nir_intrinsic_ssbo_atomic_or:
7296 case nir_intrinsic_ssbo_atomic_xor:
7297 case nir_intrinsic_ssbo_atomic_exchange:
7298 case nir_intrinsic_ssbo_atomic_comp_swap:
7299 visit_atomic_ssbo(ctx, instr);
7300 break;
7301 case nir_intrinsic_load_scratch:
7302 visit_load_scratch(ctx, instr);
7303 break;
7304 case nir_intrinsic_store_scratch:
7305 visit_store_scratch(ctx, instr);
7306 break;
7307 case nir_intrinsic_get_buffer_size:
7308 visit_get_buffer_size(ctx, instr);
7309 break;
7310 case nir_intrinsic_control_barrier: {
7311 if (ctx->program->chip_class == GFX6 && ctx->shader->info.stage == MESA_SHADER_TESS_CTRL) {
7312 /* GFX6 only (thanks to a hw bug workaround):
7313 * The real barrier instruction isn't needed, because an entire patch
7314 * always fits into a single wave.
7315 */
7316 break;
7317 }
7318
7319 if (ctx->program->workgroup_size > ctx->program->wave_size)
7320 bld.sopp(aco_opcode::s_barrier);
7321
7322 break;
7323 }
7324 case nir_intrinsic_memory_barrier_tcs_patch:
7325 case nir_intrinsic_group_memory_barrier:
7326 case nir_intrinsic_memory_barrier:
7327 case nir_intrinsic_memory_barrier_buffer:
7328 case nir_intrinsic_memory_barrier_image:
7329 case nir_intrinsic_memory_barrier_shared:
7330 emit_memory_barrier(ctx, instr);
7331 break;
7332 case nir_intrinsic_load_num_work_groups: {
7333 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7334 bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.num_work_groups)));
7335 emit_split_vector(ctx, dst, 3);
7336 break;
7337 }
7338 case nir_intrinsic_load_local_invocation_id: {
7339 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7340 bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.local_invocation_ids)));
7341 emit_split_vector(ctx, dst, 3);
7342 break;
7343 }
7344 case nir_intrinsic_load_work_group_id: {
7345 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7346 struct ac_arg *args = ctx->args->ac.workgroup_ids;
7347 bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
7348 args[0].used ? Operand(get_arg(ctx, args[0])) : Operand(0u),
7349 args[1].used ? Operand(get_arg(ctx, args[1])) : Operand(0u),
7350 args[2].used ? Operand(get_arg(ctx, args[2])) : Operand(0u));
7351 emit_split_vector(ctx, dst, 3);
7352 break;
7353 }
7354 case nir_intrinsic_load_local_invocation_index: {
7355 Temp id = emit_mbcnt(ctx, bld.def(v1));
7356
7357 /* The tg_size bits [6:11] contain the subgroup id,
7358 * so multiply it by the wave size and then OR in the thread id.
7359 */
7360 if (ctx->program->wave_size == 64) {
7361 /* After the s_and the bits are already multiplied by 64 (left shifted by 6), so we can feed them straight to v_or */
7362 Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0xfc0u),
7363 get_arg(ctx, ctx->args->ac.tg_size));
7364 bld.vop2(aco_opcode::v_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), tg_num, id);
7365 } else {
7366 /* Extract the bit field and multiply the result by 32 (left shift by 5), then do the OR */
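/* 0x6 | (0x6 << 16) is s_bfe's {offset = 6, width = 6} operand pair, i.e. it
 * extracts the subgroup id from tg_size bits [6:11]. */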
7367 Temp tg_num = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
7368 get_arg(ctx, ctx->args->ac.tg_size), Operand(0x6u | (0x6u << 16)));
7369 bld.vop3(aco_opcode::v_lshl_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), tg_num, Operand(0x5u), id);
7370 }
7371 break;
7372 }
7373 case nir_intrinsic_load_subgroup_id: {
7374 if (ctx->stage == compute_cs) {
7375 bld.sop2(aco_opcode::s_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), bld.def(s1, scc),
7376 get_arg(ctx, ctx->args->ac.tg_size), Operand(0x6u | (0x6u << 16)));
7377 } else {
7378 bld.sop1(aco_opcode::s_mov_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), Operand(0x0u));
7379 }
7380 break;
7381 }
7382 case nir_intrinsic_load_subgroup_invocation: {
7383 emit_mbcnt(ctx, Definition(get_ssa_temp(ctx, &instr->dest.ssa)));
7384 break;
7385 }
7386 case nir_intrinsic_load_num_subgroups: {
7387 if (ctx->stage == compute_cs)
7388 bld.sop2(aco_opcode::s_and_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), bld.def(s1, scc), Operand(0x3fu),
7389 get_arg(ctx, ctx->args->ac.tg_size));
7390 else
7391 bld.sop1(aco_opcode::s_mov_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), Operand(0x1u));
7392 break;
7393 }
7394 case nir_intrinsic_ballot: {
7395 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7396 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7397 Definition tmp = bld.def(dst.regClass());
7398 Definition lanemask_tmp = dst.size() == bld.lm.size() ? tmp : bld.def(src.regClass());
7399 if (instr->src[0].ssa->bit_size == 1) {
7400 assert(src.regClass() == bld.lm);
7401 bld.sop2(Builder::s_and, lanemask_tmp, bld.def(s1, scc), Operand(exec, bld.lm), src);
7402 } else if (instr->src[0].ssa->bit_size == 32 && src.regClass() == v1) {
7403 bld.vopc(aco_opcode::v_cmp_lg_u32, lanemask_tmp, Operand(0u), src);
7404 } else if (instr->src[0].ssa->bit_size == 64 && src.regClass() == v2) {
7405 bld.vopc(aco_opcode::v_cmp_lg_u64, lanemask_tmp, Operand(0u), src);
7406 } else {
7407 fprintf(stderr, "Unimplemented NIR instr bit size: ");
7408 nir_print_instr(&instr->instr, stderr);
7409 fprintf(stderr, "\n");
7410 }
7411 if (dst.size() != bld.lm.size()) {
7412 /* Wave32 with ballot size set to 64 */
7413 bld.pseudo(aco_opcode::p_create_vector, Definition(tmp), lanemask_tmp.getTemp(), Operand(0u));
7414 }
7415 emit_wqm(ctx, tmp.getTemp(), dst);
7416 break;
7417 }
7418 case nir_intrinsic_shuffle:
7419 case nir_intrinsic_read_invocation: {
7420 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7421 if (!ctx->divergent_vals[instr->src[0].ssa->index]) {
7422 emit_uniform_subgroup(ctx, instr, src);
7423 } else {
7424 Temp tid = get_ssa_temp(ctx, instr->src[1].ssa);
7425 if (instr->intrinsic == nir_intrinsic_read_invocation || !ctx->divergent_vals[instr->src[1].ssa->index])
7426 tid = bld.as_uniform(tid);
7427 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7428 if (src.regClass() == v1) {
7429 emit_wqm(ctx, emit_bpermute(ctx, bld, tid, src), dst);
7430 } else if (src.regClass() == v2) {
7431 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
7432 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
7433 lo = emit_wqm(ctx, emit_bpermute(ctx, bld, tid, lo));
7434 hi = emit_wqm(ctx, emit_bpermute(ctx, bld, tid, hi));
7435 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
7436 emit_split_vector(ctx, dst, 2);
7437 } else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == s1) {
7438 assert(src.regClass() == bld.lm);
7439 Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src, tid);
7440 bool_to_vector_condition(ctx, emit_wqm(ctx, tmp), dst);
7441 } else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == v1) {
7442 assert(src.regClass() == bld.lm);
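// With a divergent tid we shift the whole bool mask right by each lane's tid
// in the VALU and keep bit 0, so every lane reads lane tid's value.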
7443 Temp tmp;
7444 if (ctx->program->chip_class <= GFX7)
7445 tmp = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), src, tid);
7446 else if (ctx->program->wave_size == 64)
7447 tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), tid, src);
7448 else
7449 tmp = bld.vop2_e64(aco_opcode::v_lshrrev_b32, bld.def(v1), tid, src);
7450 tmp = emit_extract_vector(ctx, tmp, 0, v1);
7451 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(1u), tmp);
7452 emit_wqm(ctx, bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), tmp), dst);
7453 } else {
7454 fprintf(stderr, "Unimplemented NIR instr bit size: ");
7455 nir_print_instr(&instr->instr, stderr);
7456 fprintf(stderr, "\n");
7457 }
7458 }
7459 break;
7460 }
7461 case nir_intrinsic_load_sample_id: {
7462 bld.vop3(aco_opcode::v_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
7463 get_arg(ctx, ctx->args->ac.ancillary), Operand(8u), Operand(4u));
7464 break;
7465 }
7466 case nir_intrinsic_load_sample_mask_in: {
7467 visit_load_sample_mask_in(ctx, instr);
7468 break;
7469 }
7470 case nir_intrinsic_read_first_invocation: {
7471 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7472 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7473 if (src.regClass() == v1) {
7474 emit_wqm(ctx,
7475 bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), src),
7476 dst);
7477 } else if (src.regClass() == v2) {
7478 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
7479 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
7480 lo = emit_wqm(ctx, bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), lo));
7481 hi = emit_wqm(ctx, bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), hi));
7482 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
7483 emit_split_vector(ctx, dst, 2);
7484 } else if (instr->dest.ssa.bit_size == 1) {
7485 assert(src.regClass() == bld.lm);
7486 Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src,
7487 bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm)));
7488 bool_to_vector_condition(ctx, emit_wqm(ctx, tmp), dst);
7489 } else if (src.regClass() == s1) {
7490 bld.sop1(aco_opcode::s_mov_b32, Definition(dst), src);
7491 } else if (src.regClass() == s2) {
7492 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src);
7493 } else {
7494 fprintf(stderr, "Unimplemented NIR instr bit size: ");
7495 nir_print_instr(&instr->instr, stderr);
7496 fprintf(stderr, "\n");
7497 }
7498 break;
7499 }
7500 case nir_intrinsic_vote_all: {
7501 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7502 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7503 assert(src.regClass() == bld.lm);
7504 assert(dst.regClass() == bld.lm);
7505
7506 Temp tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src).def(1).getTemp();
7507 Temp cond = bool_to_vector_condition(ctx, emit_wqm(ctx, tmp));
7508 bld.sop1(Builder::s_not, Definition(dst), bld.def(s1, scc), cond);
7509 break;
7510 }
7511 case nir_intrinsic_vote_any: {
7512 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7513 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7514 assert(src.regClass() == bld.lm);
7515 assert(dst.regClass() == bld.lm);
7516
7517 Temp tmp = bool_to_scalar_condition(ctx, src);
7518 bool_to_vector_condition(ctx, emit_wqm(ctx, tmp), dst);
7519 break;
7520 }
7521 case nir_intrinsic_reduce:
7522 case nir_intrinsic_inclusive_scan:
7523 case nir_intrinsic_exclusive_scan: {
7524 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7525 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7526 nir_op op = (nir_op) nir_intrinsic_reduction_op(instr);
7527 unsigned cluster_size = instr->intrinsic == nir_intrinsic_reduce ?
7528 nir_intrinsic_cluster_size(instr) : 0;
7529 cluster_size = util_next_power_of_two(MIN2(cluster_size ? cluster_size : ctx->program->wave_size, ctx->program->wave_size));
7530
7531 if (!ctx->divergent_vals[instr->src[0].ssa->index] && (op == nir_op_ior || op == nir_op_iand)) {
7532 emit_uniform_subgroup(ctx, instr, src);
7533 } else if (instr->dest.ssa.bit_size == 1) {
7534 if (op == nir_op_imul || op == nir_op_umin || op == nir_op_imin)
7535 op = nir_op_iand;
7536 else if (op == nir_op_iadd)
7537 op = nir_op_ixor;
7538 else if (op == nir_op_umax || op == nir_op_imax)
7539 op = nir_op_ior;
7540 assert(op == nir_op_iand || op == nir_op_ior || op == nir_op_ixor);
7541
7542 switch (instr->intrinsic) {
7543 case nir_intrinsic_reduce:
7544 emit_wqm(ctx, emit_boolean_reduce(ctx, op, cluster_size, src), dst);
7545 break;
7546 case nir_intrinsic_exclusive_scan:
7547 emit_wqm(ctx, emit_boolean_exclusive_scan(ctx, op, src), dst);
7548 break;
7549 case nir_intrinsic_inclusive_scan:
7550 emit_wqm(ctx, emit_boolean_inclusive_scan(ctx, op, src), dst);
7551 break;
7552 default:
7553 assert(false);
7554 }
7555 } else if (cluster_size == 1) {
7556 bld.copy(Definition(dst), src);
7557 } else {
7558 src = as_vgpr(ctx, src);
7559
7560 ReduceOp reduce_op;
7561 switch (op) {
7562 #define CASE(name) case nir_op_##name: reduce_op = (src.regClass() == v1) ? name##32 : name##64; break;
7563 CASE(iadd)
7564 CASE(imul)
7565 CASE(fadd)
7566 CASE(fmul)
7567 CASE(imin)
7568 CASE(umin)
7569 CASE(fmin)
7570 CASE(imax)
7571 CASE(umax)
7572 CASE(fmax)
7573 CASE(iand)
7574 CASE(ior)
7575 CASE(ixor)
7576 default:
7577 unreachable("unknown reduction op");
7578 #undef CASE
7579 }
7580
7581 aco_opcode aco_op;
7582 switch (instr->intrinsic) {
7583 case nir_intrinsic_reduce: aco_op = aco_opcode::p_reduce; break;
7584 case nir_intrinsic_inclusive_scan: aco_op = aco_opcode::p_inclusive_scan; break;
7585 case nir_intrinsic_exclusive_scan: aco_op = aco_opcode::p_exclusive_scan; break;
7586 default:
7587 unreachable("unknown reduce intrinsic");
7588 }
7589
7590 aco_ptr<Pseudo_reduction_instruction> reduce{create_instruction<Pseudo_reduction_instruction>(aco_op, Format::PSEUDO_REDUCTION, 3, 5)};
7591 reduce->operands[0] = Operand(src);
7592 // filled in by aco_reduce_assign.cpp, used internally as part of the
7593 // reduce sequence
7594 assert(dst.size() == 1 || dst.size() == 2);
7595 reduce->operands[1] = Operand(RegClass(RegType::vgpr, dst.size()).as_linear());
7596 reduce->operands[2] = Operand(v1.as_linear());
7597
7598 Temp tmp_dst = bld.tmp(dst.regClass());
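// The remaining definitions are scratch for the lowered reduce sequence (a
// lane-mask temp, clobbered scc and, depending on the op/GPU, a linear VGPR
// and vcc); aco_reduce_assign.cpp decides which of them are really needed.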
7599 reduce->definitions[0] = Definition(tmp_dst);
7600 reduce->definitions[1] = bld.def(ctx->program->lane_mask); // used internally
7601 reduce->definitions[2] = Definition();
7602 reduce->definitions[3] = Definition(scc, s1);
7603 reduce->definitions[4] = Definition();
7604 reduce->reduce_op = reduce_op;
7605 reduce->cluster_size = cluster_size;
7606 ctx->block->instructions.emplace_back(std::move(reduce));
7607
7608 emit_wqm(ctx, tmp_dst, dst);
7609 }
7610 break;
7611 }
7612 case nir_intrinsic_quad_broadcast: {
7613 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7614 if (!ctx->divergent_vals[instr->dest.ssa.index]) {
7615 emit_uniform_subgroup(ctx, instr, src);
7616 } else {
7617 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7618 unsigned lane = nir_src_as_const_value(instr->src[1])->u32;
7619 uint32_t dpp_ctrl = dpp_quad_perm(lane, lane, lane, lane);
7620
7621 if (instr->dest.ssa.bit_size == 1) {
7622 assert(src.regClass() == bld.lm);
7623 assert(dst.regClass() == bld.lm);
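// 0x11111111 selects one lane per 4-lane quad; shifted by `lane` it picks that
// lane in every quad, and s_wqm then replicates the selected bit across each quad.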
7624 uint32_t half_mask = 0x11111111u << lane;
7625 Temp mask_tmp = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(half_mask), Operand(half_mask));
7626 Temp tmp = bld.tmp(bld.lm);
7627 bld.sop1(Builder::s_wqm, Definition(tmp),
7628 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), mask_tmp,
7629 bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm))));
7630 emit_wqm(ctx, tmp, dst);
7631 } else if (instr->dest.ssa.bit_size == 32) {
7632 if (ctx->program->chip_class >= GFX8)
7633 emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl), dst);
7634 else
7635 emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl), dst);
7636 } else if (instr->dest.ssa.bit_size == 64) {
7637 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
7638 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
7639 if (ctx->program->chip_class >= GFX8) {
7640 lo = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), lo, dpp_ctrl));
7641 hi = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), hi, dpp_ctrl));
7642 } else {
7643 lo = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), lo, (1 << 15) | dpp_ctrl));
7644 hi = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), hi, (1 << 15) | dpp_ctrl));
7645 }
7646 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
7647 emit_split_vector(ctx, dst, 2);
7648 } else {
7649 fprintf(stderr, "Unimplemented NIR instr bit size: ");
7650 nir_print_instr(&instr->instr, stderr);
7651 fprintf(stderr, "\n");
7652 }
7653 }
7654 break;
7655 }
7656 case nir_intrinsic_quad_swap_horizontal:
7657 case nir_intrinsic_quad_swap_vertical:
7658 case nir_intrinsic_quad_swap_diagonal:
7659 case nir_intrinsic_quad_swizzle_amd: {
7660 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7661 if (!ctx->divergent_vals[instr->dest.ssa.index]) {
7662 emit_uniform_subgroup(ctx, instr, src);
7663 break;
7664 }
7665 uint16_t dpp_ctrl = 0;
7666 switch (instr->intrinsic) {
7667 case nir_intrinsic_quad_swap_horizontal:
7668 dpp_ctrl = dpp_quad_perm(1, 0, 3, 2);
7669 break;
7670 case nir_intrinsic_quad_swap_vertical:
7671 dpp_ctrl = dpp_quad_perm(2, 3, 0, 1);
7672 break;
7673 case nir_intrinsic_quad_swap_diagonal:
7674 dpp_ctrl = dpp_quad_perm(3, 2, 1, 0);
7675 break;
7676 case nir_intrinsic_quad_swizzle_amd:
7677 dpp_ctrl = nir_intrinsic_swizzle_mask(instr);
7678 break;
7679 default:
7680 break;
7681 }
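// GFX6/7 have no DPP; setting bit 15 of the ds_swizzle offset enables its
// quad-perm mode, which consumes the same 8-bit quad permutation encoding.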
7682 if (ctx->program->chip_class < GFX8)
7683 dpp_ctrl |= (1 << 15);
7684
7685 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7686 if (instr->dest.ssa.bit_size == 1) {
7687 assert(src.regClass() == bld.lm);
7688 src = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), Operand((uint32_t)-1), src);
7689 if (ctx->program->chip_class >= GFX8)
7690 src = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl);
7691 else
7692 src = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, dpp_ctrl);
7693 Temp tmp = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), src);
7694 emit_wqm(ctx, tmp, dst);
7695 } else if (instr->dest.ssa.bit_size == 32) {
7696 Temp tmp;
7697 if (ctx->program->chip_class >= GFX8)
7698 tmp = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl);
7699 else
7700 tmp = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, dpp_ctrl);
7701 emit_wqm(ctx, tmp, dst);
7702 } else if (instr->dest.ssa.bit_size == 64) {
7703 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
7704 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
7705 if (ctx->program->chip_class >= GFX8) {
7706 lo = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), lo, dpp_ctrl));
7707 hi = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), hi, dpp_ctrl));
7708 } else {
7709 lo = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), lo, dpp_ctrl));
7710 hi = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), hi, dpp_ctrl));
7711 }
7712 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
7713 emit_split_vector(ctx, dst, 2);
7714 } else {
7715 fprintf(stderr, "Unimplemented NIR instr bit size: ");
7716 nir_print_instr(&instr->instr, stderr);
7717 fprintf(stderr, "\n");
7718 }
7719 break;
7720 }
7721 case nir_intrinsic_masked_swizzle_amd: {
7722 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7723 if (!ctx->divergent_vals[instr->dest.ssa.index]) {
7724 emit_uniform_subgroup(ctx, instr, src);
7725 break;
7726 }
7727 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7728 uint32_t mask = nir_intrinsic_swizzle_mask(instr);
7729 if (dst.regClass() == v1) {
7730 emit_wqm(ctx,
7731 bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, mask, 0, false),
7732 dst);
7733 } else if (dst.regClass() == v2) {
7734 Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
7735 bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
7736 lo = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), lo, mask, 0, false));
7737 hi = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), hi, mask, 0, false));
7738 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
7739 emit_split_vector(ctx, dst, 2);
7740 } else {
7741 fprintf(stderr, "Unimplemented NIR instr bit size: ");
7742 nir_print_instr(&instr->instr, stderr);
7743 fprintf(stderr, "\n");
7744 }
7745 break;
7746 }
7747 case nir_intrinsic_write_invocation_amd: {
7748 Temp src = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
7749 Temp val = bld.as_uniform(get_ssa_temp(ctx, instr->src[1].ssa));
7750 Temp lane = bld.as_uniform(get_ssa_temp(ctx, instr->src[2].ssa));
7751 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7752 if (dst.regClass() == v1) {
7753 /* src2 is ignored for writelane. RA assigns the same reg for dst */
7754 emit_wqm(ctx, bld.writelane(bld.def(v1), val, lane, src), dst);
7755 } else if (dst.regClass() == v2) {
7756 Temp src_lo = bld.tmp(v1), src_hi = bld.tmp(v1);
7757 Temp val_lo = bld.tmp(s1), val_hi = bld.tmp(s1);
7758 bld.pseudo(aco_opcode::p_split_vector, Definition(src_lo), Definition(src_hi), src);
7759 bld.pseudo(aco_opcode::p_split_vector, Definition(val_lo), Definition(val_hi), val);
7760 Temp lo = emit_wqm(ctx, bld.writelane(bld.def(v1), val_lo, lane, src_lo));
7761 Temp hi = emit_wqm(ctx, bld.writelane(bld.def(v1), val_hi, lane, src_hi));
7762 bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
7763 emit_split_vector(ctx, dst, 2);
7764 } else {
7765 fprintf(stderr, "Unimplemented NIR instr bit size: ");
7766 nir_print_instr(&instr->instr, stderr);
7767 fprintf(stderr, "\n");
7768 }
7769 break;
7770 }
7771 case nir_intrinsic_mbcnt_amd: {
7772 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7773 RegClass rc = RegClass(src.type(), 1);
7774 Temp mask_lo = bld.tmp(rc), mask_hi = bld.tmp(rc);
7775 bld.pseudo(aco_opcode::p_split_vector, Definition(mask_lo), Definition(mask_hi), src);
7776 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7777 Temp wqm_tmp = emit_mbcnt(ctx, bld.def(v1), Operand(mask_lo), Operand(mask_hi));
7778 emit_wqm(ctx, wqm_tmp, dst);
7779 break;
7780 }
7781 case nir_intrinsic_load_helper_invocation: {
7782 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7783 bld.pseudo(aco_opcode::p_load_helper, Definition(dst));
7784 ctx->block->kind |= block_kind_needs_lowering;
7785 ctx->program->needs_exact = true;
7786 break;
7787 }
7788 case nir_intrinsic_is_helper_invocation: {
7789 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7790 bld.pseudo(aco_opcode::p_is_helper, Definition(dst));
7791 ctx->block->kind |= block_kind_needs_lowering;
7792 ctx->program->needs_exact = true;
7793 break;
7794 }
7795 case nir_intrinsic_demote:
7796 bld.pseudo(aco_opcode::p_demote_to_helper, Operand(-1u));
7797
7798 if (ctx->cf_info.loop_nest_depth || ctx->cf_info.parent_if.is_divergent)
7799 ctx->cf_info.exec_potentially_empty_discard = true;
7800 ctx->block->kind |= block_kind_uses_demote;
7801 ctx->program->needs_exact = true;
7802 break;
7803 case nir_intrinsic_demote_if: {
7804 Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
7805 assert(src.regClass() == bld.lm);
7806 Temp cond = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
7807 bld.pseudo(aco_opcode::p_demote_to_helper, cond);
7808
7809 if (ctx->cf_info.loop_nest_depth || ctx->cf_info.parent_if.is_divergent)
7810 ctx->cf_info.exec_potentially_empty_discard = true;
7811 ctx->block->kind |= block_kind_uses_demote;
7812 ctx->program->needs_exact = true;
7813 break;
7814 }
7815 case nir_intrinsic_first_invocation: {
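// s_ff1_i32 on exec returns the index of the lowest set bit: the first active lane.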
7816 emit_wqm(ctx, bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm)),
7817 get_ssa_temp(ctx, &instr->dest.ssa));
7818 break;
7819 }
7820 case nir_intrinsic_shader_clock:
7821 bld.smem(aco_opcode::s_memtime, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), false);
7822 emit_split_vector(ctx, get_ssa_temp(ctx, &instr->dest.ssa), 2);
7823 break;
7824 case nir_intrinsic_load_vertex_id_zero_base: {
7825 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7826 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.vertex_id));
7827 break;
7828 }
7829 case nir_intrinsic_load_first_vertex: {
7830 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7831 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.base_vertex));
7832 break;
7833 }
7834 case nir_intrinsic_load_base_instance: {
7835 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7836 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.start_instance));
7837 break;
7838 }
7839 case nir_intrinsic_load_instance_id: {
7840 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7841 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.instance_id));
7842 break;
7843 }
7844 case nir_intrinsic_load_draw_id: {
7845 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7846 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.draw_id));
7847 break;
7848 }
7849 case nir_intrinsic_load_invocation_id: {
7850 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7851
7852 if (ctx->shader->info.stage == MESA_SHADER_GEOMETRY) {
7853 if (ctx->options->chip_class >= GFX10)
7854 bld.vop2_e64(aco_opcode::v_and_b32, Definition(dst), Operand(127u), get_arg(ctx, ctx->args->ac.gs_invocation_id));
7855 else
7856 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.gs_invocation_id));
7857 } else if (ctx->shader->info.stage == MESA_SHADER_TESS_CTRL) {
7858 bld.vop3(aco_opcode::v_bfe_u32, Definition(dst),
7859 get_arg(ctx, ctx->args->ac.tcs_rel_ids), Operand(8u), Operand(5u));
7860 } else {
7861 unreachable("Unsupported stage for load_invocation_id");
7862 }
7863
7864 break;
7865 }
7866 case nir_intrinsic_load_primitive_id: {
7867 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7868
7869 switch (ctx->shader->info.stage) {
7870 case MESA_SHADER_GEOMETRY:
7871 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.gs_prim_id));
7872 break;
7873 case MESA_SHADER_TESS_CTRL:
7874 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.tcs_patch_id));
7875 break;
7876 case MESA_SHADER_TESS_EVAL:
7877 bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.tes_patch_id));
7878 break;
7879 default:
7880 unreachable("Unimplemented shader stage for nir_intrinsic_load_primitive_id");
7881 }
7882
7883 break;
7884 }
7885 case nir_intrinsic_load_patch_vertices_in: {
7886 assert(ctx->shader->info.stage == MESA_SHADER_TESS_CTRL ||
7887 ctx->shader->info.stage == MESA_SHADER_TESS_EVAL);
7888
7889 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
7890 bld.copy(Definition(dst), Operand(ctx->args->options->key.tcs.input_vertices));
7891 break;
7892 }
7893 case nir_intrinsic_emit_vertex_with_counter: {
7894 visit_emit_vertex_with_counter(ctx, instr);
7895 break;
7896 }
7897 case nir_intrinsic_end_primitive_with_counter: {
7898 unsigned stream = nir_intrinsic_stream_id(instr);
7899 bld.sopp(aco_opcode::s_sendmsg, bld.m0(ctx->gs_wave_id), -1, sendmsg_gs(true, false, stream));
7900 break;
7901 }
7902 case nir_intrinsic_set_vertex_count: {
7903 /* unused, the HW keeps track of this for us */
7904 break;
7905 }
7906 default:
7907 fprintf(stderr, "Unimplemented intrinsic instr: ");
7908 nir_print_instr(&instr->instr, stderr);
7909 fprintf(stderr, "\n");
7910 abort();
7911
7912 break;
7913 }
7914 }
7915
7916
7917 void tex_fetch_ptrs(isel_context *ctx, nir_tex_instr *instr,
7918 Temp *res_ptr, Temp *samp_ptr, Temp *fmask_ptr,
7919 enum glsl_base_type *stype)
7920 {
7921 nir_deref_instr *texture_deref_instr = NULL;
7922 nir_deref_instr *sampler_deref_instr = NULL;
7923 int plane = -1;
7924
7925 for (unsigned i = 0; i < instr->num_srcs; i++) {
7926 switch (instr->src[i].src_type) {
7927 case nir_tex_src_texture_deref:
7928 texture_deref_instr = nir_src_as_deref(instr->src[i].src);
7929 break;
7930 case nir_tex_src_sampler_deref:
7931 sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
7932 break;
7933 case nir_tex_src_plane:
7934 plane = nir_src_as_int(instr->src[i].src);
7935 break;
7936 default:
7937 break;
7938 }
7939 }
7940
7941 *stype = glsl_get_sampler_result_type(texture_deref_instr->type);
7942
7943 if (!sampler_deref_instr)
7944 sampler_deref_instr = texture_deref_instr;
7945
7946 if (plane >= 0) {
7947 assert(instr->op != nir_texop_txf_ms &&
7948 instr->op != nir_texop_samples_identical);
7949 assert(instr->sampler_dim != GLSL_SAMPLER_DIM_BUF);
7950 *res_ptr = get_sampler_desc(ctx, texture_deref_instr, (aco_descriptor_type)(ACO_DESC_PLANE_0 + plane), instr, false, false);
7951 } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
7952 *res_ptr = get_sampler_desc(ctx, texture_deref_instr, ACO_DESC_BUFFER, instr, false, false);
7953 } else if (instr->op == nir_texop_fragment_mask_fetch) {
7954 *res_ptr = get_sampler_desc(ctx, texture_deref_instr, ACO_DESC_FMASK, instr, false, false);
7955 } else {
7956 *res_ptr = get_sampler_desc(ctx, texture_deref_instr, ACO_DESC_IMAGE, instr, false, false);
7957 }
7958 if (samp_ptr) {
7959 *samp_ptr = get_sampler_desc(ctx, sampler_deref_instr, ACO_DESC_SAMPLER, instr, false, false);
7960
7961 if (instr->sampler_dim < GLSL_SAMPLER_DIM_RECT && ctx->options->chip_class < GFX8) {
7962 /* fix sampler aniso on SI/CI: samp[0] = samp[0] & img[7] */
7963 Builder bld(ctx->program, ctx->block);
7964
7965 /* to avoid unnecessary moves, we split and recombine sampler and image */
7966 Temp img[8] = {bld.tmp(s1), bld.tmp(s1), bld.tmp(s1), bld.tmp(s1),
7967 bld.tmp(s1), bld.tmp(s1), bld.tmp(s1), bld.tmp(s1)};
7968 Temp samp[4] = {bld.tmp(s1), bld.tmp(s1), bld.tmp(s1), bld.tmp(s1)};
7969 bld.pseudo(aco_opcode::p_split_vector, Definition(img[0]), Definition(img[1]),
7970 Definition(img[2]), Definition(img[3]), Definition(img[4]),
7971 Definition(img[5]), Definition(img[6]), Definition(img[7]), *res_ptr);
7972 bld.pseudo(aco_opcode::p_split_vector, Definition(samp[0]), Definition(samp[1]),
7973 Definition(samp[2]), Definition(samp[3]), *samp_ptr);
7974
7975 samp[0] = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), samp[0], img[7]);
7976 *res_ptr = bld.pseudo(aco_opcode::p_create_vector, bld.def(s8),
7977 img[0], img[1], img[2], img[3],
7978 img[4], img[5], img[6], img[7]);
7979 *samp_ptr = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4),
7980 samp[0], samp[1], samp[2], samp[3]);
7981 }
7982 }
7983 if (fmask_ptr && (instr->op == nir_texop_txf_ms ||
7984 instr->op == nir_texop_samples_identical))
7985 *fmask_ptr = get_sampler_desc(ctx, texture_deref_instr, ACO_DESC_FMASK, instr, false, false);
7986 }
7987
7988 void build_cube_select(isel_context *ctx, Temp ma, Temp id, Temp deriv,
7989 Temp *out_ma, Temp *out_sc, Temp *out_tc)
7990 {
7991 Builder bld(ctx->program, ctx->block);
7992
7993 Temp deriv_x = emit_extract_vector(ctx, deriv, 0, v1);
7994 Temp deriv_y = emit_extract_vector(ctx, deriv, 1, v1);
7995 Temp deriv_z = emit_extract_vector(ctx, deriv, 2, v1);
7996
7997 Operand neg_one(0xbf800000u);
7998 Operand one(0x3f800000u);
7999 Operand two(0x40000000u);
8000 Operand four(0x40800000u);
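// v_cubeid_f32 yields the face index as a float (0/1 = +/-X, 2/3 = +/-Y,
// 4/5 = +/-Z), so comparing id against 2.0 and 4.0 classifies the major axis.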
8001
8002 Temp is_ma_positive = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), ma);
8003 Temp sgn_ma = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), neg_one, one, is_ma_positive);
8004 Temp neg_sgn_ma = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), Operand(0u), sgn_ma);
8005
8006 Temp is_ma_z = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), four, id);
8007 Temp is_ma_y = bld.vopc(aco_opcode::v_cmp_le_f32, bld.def(bld.lm), two, id);
8008 is_ma_y = bld.sop2(Builder::s_andn2, bld.hint_vcc(bld.def(bld.lm)), is_ma_y, is_ma_z);
8009 Temp is_not_ma_x = bld.sop2(Builder::s_or, bld.hint_vcc(bld.def(bld.lm)), bld.def(s1, scc), is_ma_z, is_ma_y);
8010
8011 // select sc
8012 Temp tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), deriv_z, deriv_x, is_not_ma_x);
8013 Temp sgn = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1),
8014 bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), neg_sgn_ma, sgn_ma, is_ma_z),
8015 one, is_ma_y);
8016 *out_sc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), tmp, sgn);
8017
8018 // select tc
8019 tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), deriv_y, deriv_z, is_ma_y);
8020 sgn = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), neg_one, sgn_ma, is_ma_y);
8021 *out_tc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), tmp, sgn);
8022
8023 // select ma
8024 tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
8025 bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), deriv_x, deriv_y, is_ma_y),
8026 deriv_z, is_ma_z);
8027 tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7fffffffu), tmp);
8028 *out_ma = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), two, tmp);
8029 }
8030
8031 void prepare_cube_coords(isel_context *ctx, std::vector<Temp>& coords, Temp* ddx, Temp* ddy, bool is_deriv, bool is_array)
8032 {
8033 Builder bld(ctx->program, ctx->block);
8034 Temp ma, tc, sc, id;
8035
8036 if (is_array) {
8037 coords[3] = bld.vop1(aco_opcode::v_rndne_f32, bld.def(v1), coords[3]);
8038
8039 // see comment in ac_prepare_cube_coords()
8040 if (ctx->options->chip_class <= GFX8)
8041 coords[3] = bld.vop2(aco_opcode::v_max_f32, bld.def(v1), Operand(0u), coords[3]);
8042 }
8043
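// v_cubesc/v_cubetc/v_cubema/v_cubeid project the direction onto a cube face:
// s/t get scaled by 1/|ma| (note the abs modifier on the v_rcp below) and
// biased by 1.5; for arrays the face index is merged as layer * 8 + face.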
8044 ma = bld.vop3(aco_opcode::v_cubema_f32, bld.def(v1), coords[0], coords[1], coords[2]);
8045
8046 aco_ptr<VOP3A_instruction> vop3a{create_instruction<VOP3A_instruction>(aco_opcode::v_rcp_f32, asVOP3(Format::VOP1), 1, 1)};
8047 vop3a->operands[0] = Operand(ma);
8048 vop3a->abs[0] = true;
8049 Temp invma = bld.tmp(v1);
8050 vop3a->definitions[0] = Definition(invma);
8051 ctx->block->instructions.emplace_back(std::move(vop3a));
8052
8053 sc = bld.vop3(aco_opcode::v_cubesc_f32, bld.def(v1), coords[0], coords[1], coords[2]);
8054 if (!is_deriv)
8055 sc = bld.vop2(aco_opcode::v_madak_f32, bld.def(v1), sc, invma, Operand(0x3fc00000u/*1.5*/));
8056
8057 tc = bld.vop3(aco_opcode::v_cubetc_f32, bld.def(v1), coords[0], coords[1], coords[2]);
8058 if (!is_deriv)
8059 tc = bld.vop2(aco_opcode::v_madak_f32, bld.def(v1), tc, invma, Operand(0x3fc00000u/*1.5*/));
8060
8061 id = bld.vop3(aco_opcode::v_cubeid_f32, bld.def(v1), coords[0], coords[1], coords[2]);
8062
8063 if (is_deriv) {
8064 sc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), sc, invma);
8065 tc = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), tc, invma);
8066
8067 for (unsigned i = 0; i < 2; i++) {
8068 // see comment in ac_prepare_cube_coords()
8069 Temp deriv_ma;
8070 Temp deriv_sc, deriv_tc;
8071 build_cube_select(ctx, ma, id, i ? *ddy : *ddx,
8072 &deriv_ma, &deriv_sc, &deriv_tc);
8073
8074 deriv_ma = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_ma, invma);
8075
8076 Temp x = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1),
8077 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_sc, invma),
8078 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_ma, sc));
8079 Temp y = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1),
8080 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_tc, invma),
8081 bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), deriv_ma, tc));
8082 *(i ? ddy : ddx) = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), x, y);
8083 }
8084
8085 sc = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), Operand(0x3fc00000u/*1.5*/), sc);
8086 tc = bld.vop2(aco_opcode::v_add_f32, bld.def(v1), Operand(0x3fc00000u/*1.5*/), tc);
8087 }
8088
8089 if (is_array)
8090 id = bld.vop2(aco_opcode::v_madmk_f32, bld.def(v1), coords[3], id, Operand(0x41000000u/*8.0*/));
8091 coords.resize(3);
8092 coords[0] = sc;
8093 coords[1] = tc;
8094 coords[2] = id;
8095 }
8096
8097 void get_const_vec(nir_ssa_def *vec, nir_const_value *cv[4])
8098 {
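/* If vec comes from a nir vecN ALU, collect the per-component constant
 * sources (NULL where not constant) so callers can fold them. */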
8099 if (vec->parent_instr->type != nir_instr_type_alu)
8100 return;
8101 nir_alu_instr *vec_instr = nir_instr_as_alu(vec->parent_instr);
8102 if (vec_instr->op != nir_op_vec(vec->num_components))
8103 return;
8104
8105 for (unsigned i = 0; i < vec->num_components; i++) {
8106 cv[i] = vec_instr->src[i].swizzle[0] == 0 ?
8107 nir_src_as_const_value(vec_instr->src[i].src) : NULL;
8108 }
8109 }
8110
8111 void visit_tex(isel_context *ctx, nir_tex_instr *instr)
8112 {
8113 Builder bld(ctx->program, ctx->block);
8114 bool has_bias = false, has_lod = false, level_zero = false, has_compare = false,
8115 has_offset = false, has_ddx = false, has_ddy = false, has_derivs = false, has_sample_index = false;
8116 Temp resource, sampler, fmask_ptr, bias = Temp(), compare = Temp(), sample_index = Temp(),
8117 lod = Temp(), offset = Temp(), ddx = Temp(), ddy = Temp();
8118 std::vector<Temp> coords;
8119 std::vector<Temp> derivs;
8120 nir_const_value *sample_index_cv = NULL;
8121 nir_const_value *const_offset[4] = {NULL, NULL, NULL, NULL};
8122 enum glsl_base_type stype;
8123 tex_fetch_ptrs(ctx, instr, &resource, &sampler, &fmask_ptr, &stype);
8124
8125 bool tg4_integer_workarounds = ctx->options->chip_class <= GFX8 && instr->op == nir_texop_tg4 &&
8126 (stype == GLSL_TYPE_UINT || stype == GLSL_TYPE_INT);
8127 bool tg4_integer_cube_workaround = tg4_integer_workarounds &&
8128 instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE;
8129
8130 for (unsigned i = 0; i < instr->num_srcs; i++) {
8131 switch (instr->src[i].src_type) {
8132 case nir_tex_src_coord: {
8133 Temp coord = get_ssa_temp(ctx, instr->src[i].src.ssa);
8134 for (unsigned i = 0; i < coord.size(); i++)
8135 coords.emplace_back(emit_extract_vector(ctx, coord, i, v1));
8136 break;
8137 }
8138 case nir_tex_src_bias:
8139 if (instr->op == nir_texop_txb) {
8140 bias = get_ssa_temp(ctx, instr->src[i].src.ssa);
8141 has_bias = true;
8142 }
8143 break;
8144 case nir_tex_src_lod: {
8145 nir_const_value *val = nir_src_as_const_value(instr->src[i].src);
8146
8147 if (val && val->f32 <= 0.0) {
8148 level_zero = true;
8149 } else {
8150 lod = get_ssa_temp(ctx, instr->src[i].src.ssa);
8151 has_lod = true;
8152 }
8153 break;
8154 }
8155 case nir_tex_src_comparator:
8156 if (instr->is_shadow) {
8157 compare = get_ssa_temp(ctx, instr->src[i].src.ssa);
8158 has_compare = true;
8159 }
8160 break;
8161 case nir_tex_src_offset:
8162 offset = get_ssa_temp(ctx, instr->src[i].src.ssa);
8163 get_const_vec(instr->src[i].src.ssa, const_offset);
8164 has_offset = true;
8165 break;
8166 case nir_tex_src_ddx:
8167 ddx = get_ssa_temp(ctx, instr->src[i].src.ssa);
8168 has_ddx = true;
8169 break;
8170 case nir_tex_src_ddy:
8171 ddy = get_ssa_temp(ctx, instr->src[i].src.ssa);
8172 has_ddy = true;
8173 break;
8174 case nir_tex_src_ms_index:
8175 sample_index = get_ssa_temp(ctx, instr->src[i].src.ssa);
8176 sample_index_cv = nir_src_as_const_value(instr->src[i].src);
8177 has_sample_index = true;
8178 break;
8179 case nir_tex_src_texture_offset:
8180 case nir_tex_src_sampler_offset:
8181 default:
8182 break;
8183 }
8184 }
8185
8186 if (instr->op == nir_texop_txs && instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
8187 return get_buffer_size(ctx, resource, get_ssa_temp(ctx, &instr->dest.ssa), true);
8188
8189 if (instr->op == nir_texop_texture_samples) {
8190 Temp dword3 = emit_extract_vector(ctx, resource, 3, s1);
8191
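/* Resource dword 3 stores log2(samples) in bits [19:16] and the image type in
 * bits [31:28]; types 14/15 are the 2D MSAA kinds, otherwise report 1 sample. */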
8192 Temp samples_log2 = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), dword3, Operand(16u | 4u<<16));
8193 Temp samples = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), Operand(1u), samples_log2);
8194 Temp type = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), dword3, Operand(28u | 4u<<16 /* offset=28, width=4 */));
8195 Temp is_msaa = bld.sopc(aco_opcode::s_cmp_ge_u32, bld.def(s1, scc), type, Operand(14u));
8196
8197 bld.sop2(aco_opcode::s_cselect_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
8198 samples, Operand(1u), bld.scc(is_msaa));
8199 return;
8200 }
8201
8202 if (has_offset && instr->op != nir_texop_txf && instr->op != nir_texop_txf_ms) {
8203 aco_ptr<Instruction> tmp_instr;
8204 Temp acc, pack = Temp();
8205
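/* MIMG instructions take the texel offset packed into a single dword, 6 bits
 * per component at bytes 0/1/2; constant components are folded into pack_const
 * and variable ones are masked and shifted into pack. */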
8206 uint32_t pack_const = 0;
8207 for (unsigned i = 0; i < offset.size(); i++) {
8208 if (!const_offset[i])
8209 continue;
8210 pack_const |= (const_offset[i]->u32 & 0x3Fu) << (8u * i);
8211 }
8212
8213 if (offset.type() == RegType::sgpr) {
8214 for (unsigned i = 0; i < offset.size(); i++) {
8215 if (const_offset[i])
8216 continue;
8217
8218 acc = emit_extract_vector(ctx, offset, i, s1);
8219 acc = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), acc, Operand(0x3Fu));
8220
8221 if (i) {
8222 acc = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), acc, Operand(8u * i));
8223 }
8224
8225 if (pack == Temp()) {
8226 pack = acc;
8227 } else {
8228 pack = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), pack, acc);
8229 }
8230 }
8231
8232 if (pack_const && pack != Temp())
8233 pack = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), Operand(pack_const), pack);
8234 } else {
8235 for (unsigned i = 0; i < offset.size(); i++) {
8236 if (const_offset[i])
8237 continue;
8238
8239 acc = emit_extract_vector(ctx, offset, i, v1);
8240 acc = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x3Fu), acc);
8241
8242 if (i) {
8243 acc = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(8u * i), acc);
8244 }
8245
8246 if (pack == Temp()) {
8247 pack = acc;
8248 } else {
8249 pack = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), pack, acc);
8250 }
8251 }
8252
8253 if (pack_const && pack != Temp())
8254 pack = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand(pack_const), pack);
8255 }
8256 if (pack_const && pack == Temp())
8257 offset = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(pack_const));
8258 else if (pack == Temp())
8259 has_offset = false;
8260 else
8261 offset = pack;
8262 }
8263
8264 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && instr->coord_components)
8265 prepare_cube_coords(ctx, coords, &ddx, &ddy, instr->op == nir_texop_txd, instr->is_array && instr->op != nir_texop_lod);
8266
8267 /* pack derivatives */
8268 if (has_ddx || has_ddy) {
8269 if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D && ctx->options->chip_class == GFX9) {
8270 assert(has_ddx && has_ddy && ddx.size() == 1 && ddy.size() == 1);
8271 Temp zero = bld.copy(bld.def(v1), Operand(0u));
8272 derivs = {ddx, zero, ddy, zero};
8273 } else {
8274 for (unsigned i = 0; has_ddx && i < ddx.size(); i++)
8275 derivs.emplace_back(emit_extract_vector(ctx, ddx, i, v1));
8276 for (unsigned i = 0; has_ddy && i < ddy.size(); i++)
8277 derivs.emplace_back(emit_extract_vector(ctx, ddy, i, v1));
8278 }
8279 has_derivs = true;
8280 }
8281
8282 if (instr->coord_components > 1 &&
8283 instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
8284 instr->is_array &&
8285 instr->op != nir_texop_txf)
8286 coords[1] = bld.vop1(aco_opcode::v_rndne_f32, bld.def(v1), coords[1]);
8287
8288 if (instr->coord_components > 2 &&
8289 (instr->sampler_dim == GLSL_SAMPLER_DIM_2D ||
8290 instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
8291 instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS ||
8292 instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS) &&
8293 instr->is_array &&
8294 instr->op != nir_texop_txf &&
8295 instr->op != nir_texop_txf_ms &&
8296 instr->op != nir_texop_fragment_fetch &&
8297 instr->op != nir_texop_fragment_mask_fetch)
8298 coords[2] = bld.vop1(aco_opcode::v_rndne_f32, bld.def(v1), coords[2]);
8299
8300 if (ctx->options->chip_class == GFX9 &&
8301 instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
8302 instr->op != nir_texop_lod && instr->coord_components) {
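/* GFX9 has no true 1D images; they are addressed as 2D internally, so a
 * second coordinate must be supplied. Use y = 0.5 (the center of the
 * single row) for normalized sampling and y = 0 for txf's integer
 * coordinates. */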
8303 assert(coords.size() > 0 && coords.size() < 3);
8304
8305 coords.insert(std::next(coords.begin()), bld.copy(bld.def(v1), instr->op == nir_texop_txf ?
8306 Operand((uint32_t) 0) :
8307 Operand((uint32_t) 0x3f000000)));
8308 }
8309
8310 bool da = should_declare_array(ctx, instr->sampler_dim, instr->is_array);
8311
8312 if (instr->op == nir_texop_samples_identical)
8313 resource = fmask_ptr;
8314
8315 else if ((instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
8316 instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS) &&
8317 instr->op != nir_texop_txs &&
8318 instr->op != nir_texop_fragment_fetch &&
8319 instr->op != nir_texop_fragment_mask_fetch) {
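/* For compressed MSAA surfaces, the FMASK stores a per-pixel mapping from
 * logical sample index to the physical sample slice that actually holds
 * the data, so the sample index has to be remapped through it before
 * loading. */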
8320 assert(has_sample_index);
8321 Operand op(sample_index);
8322 if (sample_index_cv)
8323 op = Operand(sample_index_cv->u32);
8324 sample_index = adjust_sample_index_using_fmask(ctx, da, coords, op, fmask_ptr);
8325 }
8326
8327 if (has_offset && (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)) {
8328 for (unsigned i = 0; i < std::min(offset.size(), instr->coord_components); i++) {
8329 Temp off = emit_extract_vector(ctx, offset, i, v1);
8330 coords[i] = bld.vadd32(bld.def(v1), coords[i], off);
8331 }
8332 has_offset = false;
8333 }
8334
8335 /* Build tex instruction */
8336 unsigned dmask = nir_ssa_def_components_read(&instr->dest.ssa);
8337 unsigned dim = ctx->options->chip_class >= GFX10 && instr->sampler_dim != GLSL_SAMPLER_DIM_BUF
8338 ? ac_get_sampler_dim(ctx->options->chip_class, instr->sampler_dim, instr->is_array)
8339 : 0;
8340 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8341 Temp tmp_dst = dst;
8342
8343 /* gather4 selects the component by dmask and always returns vec4 */
8344 if (instr->op == nir_texop_tg4) {
8345 assert(instr->dest.ssa.num_components == 4);
8346 if (instr->is_shadow)
8347 dmask = 1;
8348 else
8349 dmask = 1 << instr->component;
8350 if (tg4_integer_cube_workaround || dst.type() == RegType::sgpr)
8351 tmp_dst = bld.tmp(v4);
8352 } else if (instr->op == nir_texop_samples_identical) {
8353 tmp_dst = bld.tmp(v1);
8354 } else if (util_bitcount(dmask) != instr->dest.ssa.num_components || dst.type() == RegType::sgpr) {
8355 tmp_dst = bld.tmp(RegClass(RegType::vgpr, util_bitcount(dmask)));
8356 }
8357
8358 aco_ptr<MIMG_instruction> tex;
8359 if (instr->op == nir_texop_txs || instr->op == nir_texop_query_levels) {
8360 if (!has_lod)
8361 lod = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0u));
8362
8363 bool div_by_6 = instr->op == nir_texop_txs &&
8364 instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
8365 instr->is_array &&
8366 (dmask & (1 << 2));
8367 if (tmp_dst.id() == dst.id() && div_by_6)
8368 tmp_dst = bld.tmp(tmp_dst.regClass());
8369
8370 tex.reset(create_instruction<MIMG_instruction>(aco_opcode::image_get_resinfo, Format::MIMG, 3, 1));
8371 tex->operands[0] = Operand(resource);
8372 tex->operands[1] = Operand(s4); /* no sampler */
8373 tex->operands[2] = Operand(as_vgpr(ctx,lod));
8374 if (ctx->options->chip_class == GFX9 &&
8375 instr->op == nir_texop_txs &&
8376 instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
8377 instr->is_array) {
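/* GFX9 stores 1D arrays as 2D: image_get_resinfo returns the layer count
 * in the third component, so move the dmask bit that queries the layer
 * count from component 1 to component 2. */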
8378 tex->dmask = (dmask & 0x1) | ((dmask & 0x2) << 1);
8379 } else if (instr->op == nir_texop_query_levels) {
8380 tex->dmask = 1 << 3;
8381 } else {
8382 tex->dmask = dmask;
8383 }
8384 tex->da = da;
8385 tex->definitions[0] = Definition(tmp_dst);
8386 tex->dim = dim;
8387 tex->can_reorder = true;
8388 ctx->block->instructions.emplace_back(std::move(tex));
8389
8390 if (div_by_6) {
8391 /* divide 3rd value by 6 by multiplying with magic number */
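/* 0x2AAAAAAB == ceil(2^32 / 6), so v_mul_hi_i32(x, 0x2AAAAAAB) computes
 * floor(x / 6) for the value ranges involved; e.g. x = 12 (a 2-layer
 * cube array reported as 12 faces) yields 2. */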
8392 emit_split_vector(ctx, tmp_dst, tmp_dst.size());
8393 Temp c = bld.copy(bld.def(s1), Operand((uint32_t) 0x2AAAAAAB));
8394 Temp by_6 = bld.vop3(aco_opcode::v_mul_hi_i32, bld.def(v1), emit_extract_vector(ctx, tmp_dst, 2, v1), c);
8395 assert(instr->dest.ssa.num_components == 3);
8396 Temp tmp = dst.type() == RegType::vgpr ? dst : bld.tmp(v3);
8397 tmp_dst = bld.pseudo(aco_opcode::p_create_vector, Definition(tmp),
8398 emit_extract_vector(ctx, tmp_dst, 0, v1),
8399 emit_extract_vector(ctx, tmp_dst, 1, v1),
8400 by_6);
8401
8402 }
8403
8404 expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, dmask);
8405 return;
8406 }
8407
8408 Temp tg4_compare_cube_wa64 = Temp();
8409
8410 if (tg4_integer_workarounds) {
8411 tex.reset(create_instruction<MIMG_instruction>(aco_opcode::image_get_resinfo, Format::MIMG, 3, 1));
8412 tex->operands[0] = Operand(resource);
8413 tex->operands[1] = Operand(s4); /* no sampler */
8414 tex->operands[2] = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0u));
8415 tex->dim = dim;
8416 tex->dmask = 0x3;
8417 tex->da = da;
8418 Temp size = bld.tmp(v2);
8419 tex->definitions[0] = Definition(size);
8420 tex->can_reorder = true;
8421 ctx->block->instructions.emplace_back(std::move(tex));
8422 emit_split_vector(ctx, size, size.size());
8423
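/* Compute half_texel[i] = -0.5 / size[i]. Adding this to the coordinates
 * shifts the gather position by half a texel towards the origin,
 * compensating for the incorrect rounding the fixed-function hardware
 * applies to integer-format gathers on GFX8 and earlier. */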
8424 Temp half_texel[2];
8425 for (unsigned i = 0; i < 2; i++) {
8426 half_texel[i] = emit_extract_vector(ctx, size, i, v1);
8427 half_texel[i] = bld.vop1(aco_opcode::v_cvt_f32_i32, bld.def(v1), half_texel[i]);
8428 half_texel[i] = bld.vop1(aco_opcode::v_rcp_iflag_f32, bld.def(v1), half_texel[i]);
8429 half_texel[i] = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0xbf000000/*-0.5*/), half_texel[i]);
8430 }
8431
8432 Temp new_coords[2] = {
8433 bld.vop2(aco_opcode::v_add_f32, bld.def(v1), coords[0], half_texel[0]),
8434 bld.vop2(aco_opcode::v_add_f32, bld.def(v1), coords[1], half_texel[1])
8435 };
8436
8437 if (tg4_integer_cube_workaround) {
8438 // see comment in ac_nir_to_llvm.c's lower_gather4_integer()
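// In short: for cube maps whose data format is 8_8_8_8, the descriptor's
// NUM_FORMAT is rewritten from UINT/SINT to USCALED/SSCALED, the original
// (unshifted) coordinates are selected, and the gathered values are
// converted back to integers near the end of this function; other integer
// formats keep the half-texel-shifted coordinates from above.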
8439 Temp desc[resource.size()];
8440 aco_ptr<Instruction> split{create_instruction<Pseudo_instruction>(aco_opcode::p_split_vector,
8441 Format::PSEUDO, 1, resource.size())};
8442 split->operands[0] = Operand(resource);
8443 for (unsigned i = 0; i < resource.size(); i++) {
8444 desc[i] = bld.tmp(s1);
8445 split->definitions[i] = Definition(desc[i]);
8446 }
8447 ctx->block->instructions.emplace_back(std::move(split));
8448
8449 Temp dfmt = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), desc[1], Operand(20u | (6u << 16)));
8450 Temp compare_cube_wa = bld.sopc(aco_opcode::s_cmp_eq_u32, bld.def(s1, scc), dfmt,
8451 Operand((uint32_t)V_008F14_IMG_DATA_FORMAT_8_8_8_8));
8452
8453 Temp nfmt;
8454 if (stype == GLSL_TYPE_UINT) {
8455 nfmt = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1),
8456 Operand((uint32_t)V_008F14_IMG_NUM_FORMAT_USCALED),
8457 Operand((uint32_t)V_008F14_IMG_NUM_FORMAT_UINT),
8458 bld.scc(compare_cube_wa));
8459 } else {
8460 nfmt = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1),
8461 Operand((uint32_t)V_008F14_IMG_NUM_FORMAT_SSCALED),
8462 Operand((uint32_t)V_008F14_IMG_NUM_FORMAT_SINT),
8463 bld.scc(compare_cube_wa));
8464 }
8465 tg4_compare_cube_wa64 = bld.tmp(bld.lm);
8466 bool_to_vector_condition(ctx, compare_cube_wa, tg4_compare_cube_wa64);
8467
8468 nfmt = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), nfmt, Operand(26u));
8469
8470 desc[1] = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), desc[1],
8471 Operand((uint32_t)C_008F14_NUM_FORMAT));
8472 desc[1] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), desc[1], nfmt);
8473
8474 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector,
8475 Format::PSEUDO, resource.size(), 1)};
8476 for (unsigned i = 0; i < resource.size(); i++)
8477 vec->operands[i] = Operand(desc[i]);
8478 resource = bld.tmp(resource.regClass());
8479 vec->definitions[0] = Definition(resource);
8480 ctx->block->instructions.emplace_back(std::move(vec));
8481
8482 new_coords[0] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
8483 new_coords[0], coords[0], tg4_compare_cube_wa64);
8484 new_coords[1] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
8485 new_coords[1], coords[1], tg4_compare_cube_wa64);
8486 }
8487 coords[0] = new_coords[0];
8488 coords[1] = new_coords[1];
8489 }
8490
8491 if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
8492 //FIXME: if (ctx->abi->gfx9_stride_size_workaround) return ac_build_buffer_load_format_gfx9_safe()
8493
8494 assert(coords.size() == 1);
8495 unsigned last_bit = util_last_bit(nir_ssa_def_components_read(&instr->dest.ssa));
8496 aco_opcode op;
8497 switch (last_bit) {
8498 case 1:
8499 op = aco_opcode::buffer_load_format_x; break;
8500 case 2:
8501 op = aco_opcode::buffer_load_format_xy; break;
8502 case 3:
8503 op = aco_opcode::buffer_load_format_xyz; break;
8504 case 4:
8505 op = aco_opcode::buffer_load_format_xyzw; break;
8506 default:
8507 unreachable("Tex instruction loads more than 4 components.");
8508 }
8509
8510 /* if the instruction return value matches exactly the nir dest ssa, we can use it directly */
8511 if (last_bit == instr->dest.ssa.num_components && dst.type() == RegType::vgpr)
8512 tmp_dst = dst;
8513 else
8514 tmp_dst = bld.tmp(RegType::vgpr, last_bit);
8515
8516 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
8517 mubuf->operands[0] = Operand(resource);
8518 mubuf->operands[1] = Operand(coords[0]);
8519 mubuf->operands[2] = Operand((uint32_t) 0);
8520 mubuf->definitions[0] = Definition(tmp_dst);
8521 mubuf->idxen = true;
8522 mubuf->can_reorder = true;
8523 ctx->block->instructions.emplace_back(std::move(mubuf));
8524
8525 expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, (1 << last_bit) - 1);
8526 return;
8527 }
8528
8529 /* gather MIMG address components */
8530 std::vector<Temp> args;
8531 if (has_offset)
8532 args.emplace_back(offset);
8533 if (has_bias)
8534 args.emplace_back(bias);
8535 if (has_compare)
8536 args.emplace_back(compare);
8537 if (has_derivs)
8538 args.insert(args.end(), derivs.begin(), derivs.end());
8539
8540 args.insert(args.end(), coords.begin(), coords.end());
8541 if (has_sample_index)
8542 args.emplace_back(sample_index);
8543 if (has_lod)
8544 args.emplace_back(lod);
8545
8546 Temp arg = bld.tmp(RegClass(RegType::vgpr, args.size()));
8547 aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, args.size(), 1)};
8548 vec->definitions[0] = Definition(arg);
8549 for (unsigned i = 0; i < args.size(); i++)
8550 vec->operands[i] = Operand(args[i]);
8551 ctx->block->instructions.emplace_back(std::move(vec));
8552
8553
8554 if (instr->op == nir_texop_txf ||
8555 instr->op == nir_texop_txf_ms ||
8556 instr->op == nir_texop_samples_identical ||
8557 instr->op == nir_texop_fragment_fetch ||
8558 instr->op == nir_texop_fragment_mask_fetch) {
8559 aco_opcode op = level_zero || instr->sampler_dim == GLSL_SAMPLER_DIM_MS || instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS ? aco_opcode::image_load : aco_opcode::image_load_mip;
8560 tex.reset(create_instruction<MIMG_instruction>(op, Format::MIMG, 3, 1));
8561 tex->operands[0] = Operand(resource);
8562 tex->operands[1] = Operand(s4); /* no sampler */
8563 tex->operands[2] = Operand(arg);
8564 tex->dim = dim;
8565 tex->dmask = dmask;
8566 tex->unrm = true;
8567 tex->da = da;
8568 tex->definitions[0] = Definition(tmp_dst);
8569 tex->can_reorder = true;
8570 ctx->block->instructions.emplace_back(std::move(tex));
8571
8572 if (instr->op == nir_texop_samples_identical) {
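/* An FMASK value of zero means every sample of the pixel maps to
 * physical slice 0, i.e. all samples are identical. Compare the fetched
 * FMASK word against zero and broadcast the result as a boolean. */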
8573 assert(dmask == 1 && dst.regClass() == v1);
8574 assert(dst.id() != tmp_dst.id());
8575
8576 Temp tmp = bld.tmp(bld.lm);
8577 bld.vopc(aco_opcode::v_cmp_eq_u32, Definition(tmp), Operand(0u), tmp_dst).def(0).setHint(vcc);
8578 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand((uint32_t)-1), tmp);
8579
8580 } else {
8581 expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, dmask);
8582 }
8583 return;
8584 }
8585
8586 // TODO: would be better to do this by adding offsets, but needs the opcodes ordered.
8587 aco_opcode opcode = aco_opcode::image_sample;
8588 if (has_offset) { /* image_sample_*_o */
8589 if (has_compare) {
8590 opcode = aco_opcode::image_sample_c_o;
8591 if (has_derivs)
8592 opcode = aco_opcode::image_sample_c_d_o;
8593 if (has_bias)
8594 opcode = aco_opcode::image_sample_c_b_o;
8595 if (level_zero)
8596 opcode = aco_opcode::image_sample_c_lz_o;
8597 if (has_lod)
8598 opcode = aco_opcode::image_sample_c_l_o;
8599 } else {
8600 opcode = aco_opcode::image_sample_o;
8601 if (has_derivs)
8602 opcode = aco_opcode::image_sample_d_o;
8603 if (has_bias)
8604 opcode = aco_opcode::image_sample_b_o;
8605 if (level_zero)
8606 opcode = aco_opcode::image_sample_lz_o;
8607 if (has_lod)
8608 opcode = aco_opcode::image_sample_l_o;
8609 }
8610 } else { /* no offset */
8611 if (has_compare) {
8612 opcode = aco_opcode::image_sample_c;
8613 if (has_derivs)
8614 opcode = aco_opcode::image_sample_c_d;
8615 if (has_bias)
8616 opcode = aco_opcode::image_sample_c_b;
8617 if (level_zero)
8618 opcode = aco_opcode::image_sample_c_lz;
8619 if (has_lod)
8620 opcode = aco_opcode::image_sample_c_l;
8621 } else {
8622 opcode = aco_opcode::image_sample;
8623 if (has_derivs)
8624 opcode = aco_opcode::image_sample_d;
8625 if (has_bias)
8626 opcode = aco_opcode::image_sample_b;
8627 if (level_zero)
8628 opcode = aco_opcode::image_sample_lz;
8629 if (has_lod)
8630 opcode = aco_opcode::image_sample_l;
8631 }
8632 }
8633
8634 if (instr->op == nir_texop_tg4) {
8635 if (has_offset) {
8636 opcode = aco_opcode::image_gather4_lz_o;
8637 if (has_compare)
8638 opcode = aco_opcode::image_gather4_c_lz_o;
8639 } else {
8640 opcode = aco_opcode::image_gather4_lz;
8641 if (has_compare)
8642 opcode = aco_opcode::image_gather4_c_lz;
8643 }
8644 } else if (instr->op == nir_texop_lod) {
8645 opcode = aco_opcode::image_get_lod;
8646 }
8647
8648 /* we don't need the bias, sample index, compare value or offset to be
8649 * computed in WQM but if the p_create_vector copies the coordinates, then it
8650 * needs to be in WQM */
8651 if (ctx->stage == fragment_fs &&
8652 !has_derivs && !has_lod && !level_zero &&
8653 instr->sampler_dim != GLSL_SAMPLER_DIM_MS &&
8654 instr->sampler_dim != GLSL_SAMPLER_DIM_SUBPASS_MS)
8655 arg = emit_wqm(ctx, arg, bld.tmp(arg.regClass()), true);
8656
8657 tex.reset(create_instruction<MIMG_instruction>(opcode, Format::MIMG, 3, 1));
8658 tex->operands[0] = Operand(resource);
8659 tex->operands[1] = Operand(sampler);
8660 tex->operands[2] = Operand(arg);
8661 tex->dim = dim;
8662 tex->dmask = dmask;
8663 tex->da = da;
8664 tex->definitions[0] = Definition(tmp_dst);
8665 tex->can_reorder = true;
8666 ctx->block->instructions.emplace_back(std::move(tex));
8667
8668 if (tg4_integer_cube_workaround) {
8669 assert(tmp_dst.id() != dst.id());
8670 assert(tmp_dst.size() == dst.size() && dst.size() == 4);
8671
8672 emit_split_vector(ctx, tmp_dst, tmp_dst.size());
8673 Temp val[4];
8674 for (unsigned i = 0; i < dst.size(); i++) {
8675 val[i] = emit_extract_vector(ctx, tmp_dst, i, v1);
8676 Temp cvt_val;
8677 if (stype == GLSL_TYPE_UINT)
8678 cvt_val = bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), val[i]);
8679 else
8680 cvt_val = bld.vop1(aco_opcode::v_cvt_i32_f32, bld.def(v1), val[i]);
8681 val[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), val[i], cvt_val, tg4_compare_cube_wa64);
8682 }
8683 Temp tmp = dst.regClass() == v4 ? dst : bld.tmp(v4);
8684 tmp_dst = bld.pseudo(aco_opcode::p_create_vector, Definition(tmp),
8685 val[0], val[1], val[2], val[3]);
8686 }
8687 unsigned mask = instr->op == nir_texop_tg4 ? 0xF : dmask;
8688 expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, mask);
8689
8690 }
8691
8692
8693 Operand get_phi_operand(isel_context *ctx, nir_ssa_def *ssa)
8694 {
8695 Temp tmp = get_ssa_temp(ctx, ssa);
8696 if (ssa->parent_instr->type == nir_instr_type_ssa_undef)
8697 return Operand(tmp.regClass());
8698 else
8699 return Operand(tmp);
8700 }
8701
8702 void visit_phi(isel_context *ctx, nir_phi_instr *instr)
8703 {
8704 aco_ptr<Pseudo_instruction> phi;
8705 Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
8706 assert(instr->dest.ssa.bit_size != 1 || dst.regClass() == ctx->program->lane_mask);
8707
8708 bool logical = !dst.is_linear() || ctx->divergent_vals[instr->dest.ssa.index];
8709 logical |= ctx->block->kind & block_kind_merge;
8710 aco_opcode opcode = logical ? aco_opcode::p_phi : aco_opcode::p_linear_phi;
8711
8712 /* we want a sorted list of sources, since the predecessor list is also sorted */
8713 std::map<unsigned, nir_ssa_def*> phi_src;
8714 nir_foreach_phi_src(src, instr)
8715 phi_src[src->pred->index] = src->src.ssa;
8716
8717 std::vector<unsigned>& preds = logical ? ctx->block->logical_preds : ctx->block->linear_preds;
8718 unsigned num_operands = 0;
8719 Operand operands[std::max(exec_list_length(&instr->srcs), (unsigned)preds.size()) + 1];
8720 unsigned num_defined = 0;
8721 unsigned cur_pred_idx = 0;
8722 for (std::pair<unsigned, nir_ssa_def *> src : phi_src) {
8723 if (cur_pred_idx < preds.size()) {
8724 /* handle missing preds (IF merges with discard/break) and extra preds (loop exit with discard) */
8725 unsigned block = ctx->cf_info.nir_to_aco[src.first];
8726 unsigned skipped = 0;
8727 while (cur_pred_idx + skipped < preds.size() && preds[cur_pred_idx + skipped] != block)
8728 skipped++;
8729 if (cur_pred_idx + skipped < preds.size()) {
8730 for (unsigned i = 0; i < skipped; i++)
8731 operands[num_operands++] = Operand(dst.regClass());
8732 cur_pred_idx += skipped;
8733 } else {
8734 continue;
8735 }
8736 }
8737 /* Handle missing predecessors at the end. This shouldn't happen with loop
8738 * headers and we can't ignore these sources for loop header phis. */
8739 if (!(ctx->block->kind & block_kind_loop_header) && cur_pred_idx >= preds.size())
8740 continue;
8741 cur_pred_idx++;
8742 Operand op = get_phi_operand(ctx, src.second);
8743 operands[num_operands++] = op;
8744 num_defined += !op.isUndefined();
8745 }
8746 /* handle block_kind_continue_or_break at loop exit blocks */
8747 while (cur_pred_idx++ < preds.size())
8748 operands[num_operands++] = Operand(dst.regClass());
8749
8750 /* If the loop ends with a break, still add a linear continue edge in case
8751 * that break is divergent or continue_or_break is used. We'll either remove
8752 * this operand later in visit_loop() if it's not necessary or replace the
8753 * undef with something correct. */
8754 if (!logical && ctx->block->kind & block_kind_loop_header) {
8755 nir_loop *loop = nir_cf_node_as_loop(instr->instr.block->cf_node.parent);
8756 nir_block *last = nir_loop_last_block(loop);
8757 if (last->successors[0] != instr->instr.block)
8758 operands[num_operands++] = Operand(RegClass());
8759 }
8760
8761 if (num_defined == 0) {
8762 Builder bld(ctx->program, ctx->block);
8763 if (dst.regClass() == s1) {
8764 bld.sop1(aco_opcode::s_mov_b32, Definition(dst), Operand(0u));
8765 } else if (dst.regClass() == v1) {
8766 bld.vop1(aco_opcode::v_mov_b32, Definition(dst), Operand(0u));
8767 } else {
8768 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
8769 for (unsigned i = 0; i < dst.size(); i++)
8770 vec->operands[i] = Operand(0u);
8771 vec->definitions[0] = Definition(dst);
8772 ctx->block->instructions.emplace_back(std::move(vec));
8773 }
8774 return;
8775 }
8776
8777 /* we can use a linear phi in some cases if one src is undef */
8778 if (dst.is_linear() && ctx->block->kind & block_kind_merge && num_defined == 1) {
8779 phi.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, num_operands, 1));
8780
8781 Block *linear_else = &ctx->program->blocks[ctx->block->linear_preds[1]];
8782 Block *invert = &ctx->program->blocks[linear_else->linear_preds[0]];
8783 assert(invert->kind & block_kind_invert);
8784
8785 unsigned then_block = invert->linear_preds[0];
8786
8787 Block* insert_block = NULL;
8788 for (unsigned i = 0; i < num_operands; i++) {
8789 Operand op = operands[i];
8790 if (op.isUndefined())
8791 continue;
8792 insert_block = ctx->block->logical_preds[i] == then_block ? invert : ctx->block;
8793 phi->operands[0] = op;
8794 break;
8795 }
8796 assert(insert_block); /* should be handled by the "num_defined == 0" case above */
8797 phi->operands[1] = Operand(dst.regClass());
8798 phi->definitions[0] = Definition(dst);
8799 insert_block->instructions.emplace(insert_block->instructions.begin(), std::move(phi));
8800 return;
8801 }
8802
8803 /* try to scalarize vector phis */
8804 if (instr->dest.ssa.bit_size != 1 && dst.size() > 1) {
8805 // TODO: scalarize linear phis on divergent ifs
8806 bool can_scalarize = (opcode == aco_opcode::p_phi || !(ctx->block->kind & block_kind_merge));
8807 std::array<Temp, NIR_MAX_VEC_COMPONENTS> new_vec;
8808 for (unsigned i = 0; can_scalarize && (i < num_operands); i++) {
8809 Operand src = operands[i];
8810 if (src.isTemp() && ctx->allocated_vec.find(src.tempId()) == ctx->allocated_vec.end())
8811 can_scalarize = false;
8812 }
8813 if (can_scalarize) {
8814 unsigned num_components = instr->dest.ssa.num_components;
8815 assert(dst.size() % num_components == 0);
8816 RegClass rc = RegClass(dst.type(), dst.size() / num_components);
8817
8818 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
8819 for (unsigned k = 0; k < num_components; k++) {
8820 phi.reset(create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, num_operands, 1));
8821 for (unsigned i = 0; i < num_operands; i++) {
8822 Operand src = operands[i];
8823 phi->operands[i] = src.isTemp() ? Operand(ctx->allocated_vec[src.tempId()][k]) : Operand(rc);
8824 }
8825 Temp phi_dst = {ctx->program->allocateId(), rc};
8826 phi->definitions[0] = Definition(phi_dst);
8827 ctx->block->instructions.emplace(ctx->block->instructions.begin(), std::move(phi));
8828 new_vec[k] = phi_dst;
8829 vec->operands[k] = Operand(phi_dst);
8830 }
8831 vec->definitions[0] = Definition(dst);
8832 ctx->block->instructions.emplace_back(std::move(vec));
8833 ctx->allocated_vec.emplace(dst.id(), new_vec);
8834 return;
8835 }
8836 }
8837
8838 phi.reset(create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, num_operands, 1));
8839 for (unsigned i = 0; i < num_operands; i++)
8840 phi->operands[i] = operands[i];
8841 phi->definitions[0] = Definition(dst);
8842 ctx->block->instructions.emplace(ctx->block->instructions.begin(), std::move(phi));
8843 }
8844
8845
8846 void visit_undef(isel_context *ctx, nir_ssa_undef_instr *instr)
8847 {
8848 Temp dst = get_ssa_temp(ctx, &instr->def);
8849
8850 assert(dst.type() == RegType::sgpr);
8851
8852 if (dst.size() == 1) {
8853 Builder(ctx->program, ctx->block).copy(Definition(dst), Operand(0u));
8854 } else {
8855 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, dst.size(), 1)};
8856 for (unsigned i = 0; i < dst.size(); i++)
8857 vec->operands[i] = Operand(0u);
8858 vec->definitions[0] = Definition(dst);
8859 ctx->block->instructions.emplace_back(std::move(vec));
8860 }
8861 }
8862
8863 void visit_jump(isel_context *ctx, nir_jump_instr *instr)
8864 {
8865 Builder bld(ctx->program, ctx->block);
8866 Block *logical_target;
8867 append_logical_end(ctx->block);
8868 unsigned idx = ctx->block->index;
8869
8870 switch (instr->type) {
8871 case nir_jump_break:
8872 logical_target = ctx->cf_info.parent_loop.exit;
8873 add_logical_edge(idx, logical_target);
8874 ctx->block->kind |= block_kind_break;
8875
8876 if (!ctx->cf_info.parent_if.is_divergent &&
8877 !ctx->cf_info.parent_loop.has_divergent_continue) {
8878 /* uniform break - directly jump out of the loop */
8879 ctx->block->kind |= block_kind_uniform;
8880 ctx->cf_info.has_branch = true;
8881 bld.branch(aco_opcode::p_branch);
8882 add_linear_edge(idx, logical_target);
8883 return;
8884 }
8885 ctx->cf_info.parent_loop.has_divergent_branch = true;
8886 ctx->cf_info.nir_to_aco[instr->instr.block->index] = ctx->block->index;
8887 break;
8888 case nir_jump_continue:
8889 logical_target = &ctx->program->blocks[ctx->cf_info.parent_loop.header_idx];
8890 add_logical_edge(idx, logical_target);
8891 ctx->block->kind |= block_kind_continue;
8892
8893 if (ctx->cf_info.parent_if.is_divergent) {
8894 /* for potential uniform breaks after this continue,
8895 we must ensure that they are handled correctly */
8896 ctx->cf_info.parent_loop.has_divergent_continue = true;
8897 ctx->cf_info.parent_loop.has_divergent_branch = true;
8898 ctx->cf_info.nir_to_aco[instr->instr.block->index] = ctx->block->index;
8899 } else {
8900 /* uniform continue - directly jump to the loop header */
8901 ctx->block->kind |= block_kind_uniform;
8902 ctx->cf_info.has_branch = true;
8903 bld.branch(aco_opcode::p_branch);
8904 add_linear_edge(idx, logical_target);
8905 return;
8906 }
8907 break;
8908 default:
8909 fprintf(stderr, "Unknown NIR jump instr: ");
8910 nir_print_instr(&instr->instr, stderr);
8911 fprintf(stderr, "\n");
8912 abort();
8913 }
8914
8915 if (ctx->cf_info.parent_if.is_divergent && !ctx->cf_info.exec_potentially_empty_break) {
8916 ctx->cf_info.exec_potentially_empty_break = true;
8917 ctx->cf_info.exec_potentially_empty_break_depth = ctx->cf_info.loop_nest_depth;
8918 }
8919
8920 /* remove critical edges from linear CFG */
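/* The jump block gains two linear successors (the jump target and the
 * fall-through path) while the target already has multiple predecessors,
 * so route the jump through a fresh empty break_block to keep every
 * linear edge non-critical. */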
8921 bld.branch(aco_opcode::p_branch);
8922 Block* break_block = ctx->program->create_and_insert_block();
8923 break_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
8924 break_block->kind |= block_kind_uniform;
8925 add_linear_edge(idx, break_block);
8926 /* the loop_header pointer might be invalidated by this point */
8927 if (instr->type == nir_jump_continue)
8928 logical_target = &ctx->program->blocks[ctx->cf_info.parent_loop.header_idx];
8929 add_linear_edge(break_block->index, logical_target);
8930 bld.reset(break_block);
8931 bld.branch(aco_opcode::p_branch);
8932
8933 Block* continue_block = ctx->program->create_and_insert_block();
8934 continue_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
8935 add_linear_edge(idx, continue_block);
8936 append_logical_start(continue_block);
8937 ctx->block = continue_block;
8938 return;
8939 }
8940
8941 void visit_block(isel_context *ctx, nir_block *block)
8942 {
8943 nir_foreach_instr(instr, block) {
8944 switch (instr->type) {
8945 case nir_instr_type_alu:
8946 visit_alu_instr(ctx, nir_instr_as_alu(instr));
8947 break;
8948 case nir_instr_type_load_const:
8949 visit_load_const(ctx, nir_instr_as_load_const(instr));
8950 break;
8951 case nir_instr_type_intrinsic:
8952 visit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
8953 break;
8954 case nir_instr_type_tex:
8955 visit_tex(ctx, nir_instr_as_tex(instr));
8956 break;
8957 case nir_instr_type_phi:
8958 visit_phi(ctx, nir_instr_as_phi(instr));
8959 break;
8960 case nir_instr_type_ssa_undef:
8961 visit_undef(ctx, nir_instr_as_ssa_undef(instr));
8962 break;
8963 case nir_instr_type_deref:
8964 break;
8965 case nir_instr_type_jump:
8966 visit_jump(ctx, nir_instr_as_jump(instr));
8967 break;
8968 default:
8969 fprintf(stderr, "Unknown NIR instr type: ");
8970 nir_print_instr(instr, stderr);
8971 fprintf(stderr, "\n");
8972 //abort();
8973 }
8974 }
8975
8976 if (!ctx->cf_info.parent_loop.has_divergent_branch)
8977 ctx->cf_info.nir_to_aco[block->index] = ctx->block->index;
8978 }
8979
8980
8981
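/* Compute, for each block in [first, last], the value a loop-header
 * linear phi would carry on that block's continue path: continue blocks
 * take the phi's matching operand, blocks at a different nest depth
 * inherit the previous value, and merge points whose predecessors
 * disagree get a new p_linear_phi. The value reaching `last` is what the
 * header phi should use for its final (continue) operand. */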
8982 static Operand create_continue_phis(isel_context *ctx, unsigned first, unsigned last,
8983 aco_ptr<Instruction>& header_phi, Operand *vals)
8984 {
8985 vals[0] = Operand(header_phi->definitions[0].getTemp());
8986 RegClass rc = vals[0].regClass();
8987
8988 unsigned loop_nest_depth = ctx->program->blocks[first].loop_nest_depth;
8989
8990 unsigned next_pred = 1;
8991
8992 for (unsigned idx = first + 1; idx <= last; idx++) {
8993 Block& block = ctx->program->blocks[idx];
8994 if (block.loop_nest_depth != loop_nest_depth) {
8995 vals[idx - first] = vals[idx - 1 - first];
8996 continue;
8997 }
8998
8999 if (block.kind & block_kind_continue) {
9000 vals[idx - first] = header_phi->operands[next_pred];
9001 next_pred++;
9002 continue;
9003 }
9004
9005 bool all_same = true;
9006 for (unsigned i = 1; all_same && (i < block.linear_preds.size()); i++)
9007 all_same = vals[block.linear_preds[i] - first] == vals[block.linear_preds[0] - first];
9008
9009 Operand val;
9010 if (all_same) {
9011 val = vals[block.linear_preds[0] - first];
9012 } else {
9013 aco_ptr<Instruction> phi(create_instruction<Pseudo_instruction>(
9014 aco_opcode::p_linear_phi, Format::PSEUDO, block.linear_preds.size(), 1));
9015 for (unsigned i = 0; i < block.linear_preds.size(); i++)
9016 phi->operands[i] = vals[block.linear_preds[i] - first];
9017 val = Operand(Temp(ctx->program->allocateId(), rc));
9018 phi->definitions[0] = Definition(val.getTemp());
9019 block.instructions.emplace(block.instructions.begin(), std::move(phi));
9020 }
9021 vals[idx - first] = val;
9022 }
9023
9024 return vals[last - first];
9025 }
9026
9027 static void visit_loop(isel_context *ctx, nir_loop *loop)
9028 {
9029 //TODO: we might want to wrap the loop around a branch if exec_potentially_empty=true
9030 append_logical_end(ctx->block);
9031 ctx->block->kind |= block_kind_loop_preheader | block_kind_uniform;
9032 Builder bld(ctx->program, ctx->block);
9033 bld.branch(aco_opcode::p_branch);
9034 unsigned loop_preheader_idx = ctx->block->index;
9035
9036 Block loop_exit = Block();
9037 loop_exit.loop_nest_depth = ctx->cf_info.loop_nest_depth;
9038 loop_exit.kind |= (block_kind_loop_exit | (ctx->block->kind & block_kind_top_level));
9039
9040 Block* loop_header = ctx->program->create_and_insert_block();
9041 loop_header->loop_nest_depth = ctx->cf_info.loop_nest_depth + 1;
9042 loop_header->kind |= block_kind_loop_header;
9043 add_edge(loop_preheader_idx, loop_header);
9044 ctx->block = loop_header;
9045
9046 /* emit loop body */
9047 unsigned loop_header_idx = loop_header->index;
9048 loop_info_RAII loop_raii(ctx, loop_header_idx, &loop_exit);
9049 append_logical_start(ctx->block);
9050 bool unreachable = visit_cf_list(ctx, &loop->body);
9051
9052 //TODO: what if a loop ends with an unconditional or uniformly branched continue and this branch is never taken?
9053 if (!ctx->cf_info.has_branch) {
9054 append_logical_end(ctx->block);
9055 if (ctx->cf_info.exec_potentially_empty_discard || ctx->cf_info.exec_potentially_empty_break) {
9056 /* Discards can result in code running with an empty exec mask.
9057 * This would result in divergent breaks not ever being taken. As a
9058 * workaround, break the loop when the loop mask is empty instead of
9059 * always continuing. */
9060 ctx->block->kind |= (block_kind_continue_or_break | block_kind_uniform);
9061 unsigned block_idx = ctx->block->index;
9062
9063 /* create helper blocks to avoid critical edges */
9064 Block *break_block = ctx->program->create_and_insert_block();
9065 break_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9066 break_block->kind = block_kind_uniform;
9067 bld.reset(break_block);
9068 bld.branch(aco_opcode::p_branch);
9069 add_linear_edge(block_idx, break_block);
9070 add_linear_edge(break_block->index, &loop_exit);
9071
9072 Block *continue_block = ctx->program->create_and_insert_block();
9073 continue_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9074 continue_block->kind = block_kind_uniform;
9075 bld.reset(continue_block);
9076 bld.branch(aco_opcode::p_branch);
9077 add_linear_edge(block_idx, continue_block);
9078 add_linear_edge(continue_block->index, &ctx->program->blocks[loop_header_idx]);
9079
9080 if (!ctx->cf_info.parent_loop.has_divergent_branch)
9081 add_logical_edge(block_idx, &ctx->program->blocks[loop_header_idx]);
9082 ctx->block = &ctx->program->blocks[block_idx];
9083 } else {
9084 ctx->block->kind |= (block_kind_continue | block_kind_uniform);
9085 if (!ctx->cf_info.parent_loop.has_divergent_branch)
9086 add_edge(ctx->block->index, &ctx->program->blocks[loop_header_idx]);
9087 else
9088 add_linear_edge(ctx->block->index, &ctx->program->blocks[loop_header_idx]);
9089 }
9090
9091 bld.reset(ctx->block);
9092 bld.branch(aco_opcode::p_branch);
9093 }
9094
9095 /* Fixup phis in loop header from unreachable blocks.
9096 * has_branch/has_divergent_branch also indicates if the loop ends with a
9097 * break/continue instruction, but we don't emit those if unreachable=true */
9098 if (unreachable) {
9099 assert(ctx->cf_info.has_branch || ctx->cf_info.parent_loop.has_divergent_branch);
9100 bool linear = ctx->cf_info.has_branch;
9101 bool logical = ctx->cf_info.has_branch || ctx->cf_info.parent_loop.has_divergent_branch;
9102 for (aco_ptr<Instruction>& instr : ctx->program->blocks[loop_header_idx].instructions) {
9103 if ((logical && instr->opcode == aco_opcode::p_phi) ||
9104 (linear && instr->opcode == aco_opcode::p_linear_phi)) {
9105 /* the last operand should be the one that needs to be removed */
9106 instr->operands.pop_back();
9107 } else if (!is_phi(instr)) {
9108 break;
9109 }
9110 }
9111 }
9112
9113 /* Fixup linear phis in loop header from expecting a continue. This fixup
9114 * and the previous one shouldn't both happen at once, because a break in the
9115 * merge block would get CSE'd. */
9116 if (nir_loop_last_block(loop)->successors[0] != nir_loop_first_block(loop)) {
9117 unsigned num_vals = ctx->cf_info.has_branch ? 1 : (ctx->block->index - loop_header_idx + 1);
9118 Operand vals[num_vals];
9119 for (aco_ptr<Instruction>& instr : ctx->program->blocks[loop_header_idx].instructions) {
9120 if (instr->opcode == aco_opcode::p_linear_phi) {
9121 if (ctx->cf_info.has_branch)
9122 instr->operands.pop_back();
9123 else
9124 instr->operands.back() = create_continue_phis(ctx, loop_header_idx, ctx->block->index, instr, vals);
9125 } else if (!is_phi(instr)) {
9126 break;
9127 }
9128 }
9129 }
9130
9131 ctx->cf_info.has_branch = false;
9132
9133 // TODO: if the loop does not have a single exit, we must add one
9134 /* emit loop successor block */
9135 ctx->block = ctx->program->insert_block(std::move(loop_exit));
9136 append_logical_start(ctx->block);
9137
9138 #if 0
9139 // TODO: check if it is beneficial to not branch on continues
9140 /* trim linear phis in loop header */
9141 for (auto&& instr : loop_entry->instructions) {
9142 if (instr->opcode == aco_opcode::p_linear_phi) {
9143 aco_ptr<Pseudo_instruction> new_phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, loop_entry->linear_predecessors.size(), 1)};
9144 new_phi->definitions[0] = instr->definitions[0];
9145 for (unsigned i = 0; i < new_phi->operands.size(); i++)
9146 new_phi->operands[i] = instr->operands[i];
9147 /* check that the remaining operands are all the same */
9148 for (unsigned i = new_phi->operands.size(); i < instr->operands.size(); i++)
9149 assert(instr->operands[i].tempId() == instr->operands.back().tempId());
9150 instr.swap(new_phi);
9151 } else if (instr->opcode == aco_opcode::p_phi) {
9152 continue;
9153 } else {
9154 break;
9155 }
9156 }
9157 #endif
9158 }
9159
9160 static void begin_divergent_if_then(isel_context *ctx, if_context *ic, Temp cond)
9161 {
9162 ic->cond = cond;
9163
9164 append_logical_end(ctx->block);
9165 ctx->block->kind |= block_kind_branch;
9166
9167 /* branch to linear then block */
9168 assert(cond.regClass() == ctx->program->lane_mask);
9169 aco_ptr<Pseudo_branch_instruction> branch;
9170 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_cbranch_z, Format::PSEUDO_BRANCH, 1, 0));
9171 branch->operands[0] = Operand(cond);
9172 ctx->block->instructions.push_back(std::move(branch));
9173
9174 ic->BB_if_idx = ctx->block->index;
9175 ic->BB_invert = Block();
9176 ic->BB_invert.loop_nest_depth = ctx->cf_info.loop_nest_depth;
9177 /* Invert blocks are intentionally not marked as top level because they
9178 * are not part of the logical cfg. */
9179 ic->BB_invert.kind |= block_kind_invert;
9180 ic->BB_endif = Block();
9181 ic->BB_endif.loop_nest_depth = ctx->cf_info.loop_nest_depth;
9182 ic->BB_endif.kind |= (block_kind_merge | (ctx->block->kind & block_kind_top_level));
9183
9184 ic->exec_potentially_empty_discard_old = ctx->cf_info.exec_potentially_empty_discard;
9185 ic->exec_potentially_empty_break_old = ctx->cf_info.exec_potentially_empty_break;
9186 ic->exec_potentially_empty_break_depth_old = ctx->cf_info.exec_potentially_empty_break_depth;
9187 ic->divergent_old = ctx->cf_info.parent_if.is_divergent;
9188 ctx->cf_info.parent_if.is_divergent = true;
9189
9190 /* divergent branches use cbranch_execz */
9191 ctx->cf_info.exec_potentially_empty_discard = false;
9192 ctx->cf_info.exec_potentially_empty_break = false;
9193 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
9194
9195 /** emit logical then block */
9196 Block* BB_then_logical = ctx->program->create_and_insert_block();
9197 BB_then_logical->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9198 add_edge(ic->BB_if_idx, BB_then_logical);
9199 ctx->block = BB_then_logical;
9200 append_logical_start(BB_then_logical);
9201 }
9202
9203 static void begin_divergent_if_else(isel_context *ctx, if_context *ic)
9204 {
9205 Block *BB_then_logical = ctx->block;
9206 append_logical_end(BB_then_logical);
9207 /* branch from logical then block to invert block */
9208 aco_ptr<Pseudo_branch_instruction> branch;
9209 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
9210 BB_then_logical->instructions.emplace_back(std::move(branch));
9211 add_linear_edge(BB_then_logical->index, &ic->BB_invert);
9212 if (!ctx->cf_info.parent_loop.has_divergent_branch)
9213 add_logical_edge(BB_then_logical->index, &ic->BB_endif);
9214 BB_then_logical->kind |= block_kind_uniform;
9215 assert(!ctx->cf_info.has_branch);
9216 ic->then_branch_divergent = ctx->cf_info.parent_loop.has_divergent_branch;
9217 ctx->cf_info.parent_loop.has_divergent_branch = false;
9218
9219 /** emit linear then block */
9220 Block* BB_then_linear = ctx->program->create_and_insert_block();
9221 BB_then_linear->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9222 BB_then_linear->kind |= block_kind_uniform;
9223 add_linear_edge(ic->BB_if_idx, BB_then_linear);
9224 /* branch from linear then block to invert block */
9225 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
9226 BB_then_linear->instructions.emplace_back(std::move(branch));
9227 add_linear_edge(BB_then_linear->index, &ic->BB_invert);
9228
9229 /** emit invert merge block */
9230 ctx->block = ctx->program->insert_block(std::move(ic->BB_invert));
9231 ic->invert_idx = ctx->block->index;
9232
9233 /* branch to linear else block (skip else) */
9234 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_cbranch_nz, Format::PSEUDO_BRANCH, 1, 0));
9235 branch->operands[0] = Operand(ic->cond);
9236 ctx->block->instructions.push_back(std::move(branch));
9237
9238 ic->exec_potentially_empty_discard_old |= ctx->cf_info.exec_potentially_empty_discard;
9239 ic->exec_potentially_empty_break_old |= ctx->cf_info.exec_potentially_empty_break;
9240 ic->exec_potentially_empty_break_depth_old =
9241 std::min(ic->exec_potentially_empty_break_depth_old, ctx->cf_info.exec_potentially_empty_break_depth);
9242 /* divergent branches use cbranch_execz */
9243 ctx->cf_info.exec_potentially_empty_discard = false;
9244 ctx->cf_info.exec_potentially_empty_break = false;
9245 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
9246
9247 /** emit logical else block */
9248 Block* BB_else_logical = ctx->program->create_and_insert_block();
9249 BB_else_logical->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9250 add_logical_edge(ic->BB_if_idx, BB_else_logical);
9251 add_linear_edge(ic->invert_idx, BB_else_logical);
9252 ctx->block = BB_else_logical;
9253 append_logical_start(BB_else_logical);
9254 }
9255
9256 static void end_divergent_if(isel_context *ctx, if_context *ic)
9257 {
9258 Block *BB_else_logical = ctx->block;
9259 append_logical_end(BB_else_logical);
9260
9261 /* branch from logical else block to endif block */
9262 aco_ptr<Pseudo_branch_instruction> branch;
9263 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
9264 BB_else_logical->instructions.emplace_back(std::move(branch));
9265 add_linear_edge(BB_else_logical->index, &ic->BB_endif);
9266 if (!ctx->cf_info.parent_loop.has_divergent_branch)
9267 add_logical_edge(BB_else_logical->index, &ic->BB_endif);
9268 BB_else_logical->kind |= block_kind_uniform;
9269
9270 assert(!ctx->cf_info.has_branch);
9271 ctx->cf_info.parent_loop.has_divergent_branch &= ic->then_branch_divergent;
9272
9273
9274 /** emit linear else block */
9275 Block* BB_else_linear = ctx->program->create_and_insert_block();
9276 BB_else_linear->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9277 BB_else_linear->kind |= block_kind_uniform;
9278 add_linear_edge(ic->invert_idx, BB_else_linear);
9279
9280 /* branch from linear else block to endif block */
9281 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
9282 BB_else_linear->instructions.emplace_back(std::move(branch));
9283 add_linear_edge(BB_else_linear->index, &ic->BB_endif);
9284
9285
9286 /** emit endif merge block */
9287 ctx->block = ctx->program->insert_block(std::move(ic->BB_endif));
9288 append_logical_start(ctx->block);
9289
9290
9291 ctx->cf_info.parent_if.is_divergent = ic->divergent_old;
9292 ctx->cf_info.exec_potentially_empty_discard |= ic->exec_potentially_empty_discard_old;
9293 ctx->cf_info.exec_potentially_empty_break |= ic->exec_potentially_empty_break_old;
9294 ctx->cf_info.exec_potentially_empty_break_depth =
9295 std::min(ic->exec_potentially_empty_break_depth_old, ctx->cf_info.exec_potentially_empty_break_depth);
9296 if (ctx->cf_info.loop_nest_depth == ctx->cf_info.exec_potentially_empty_break_depth &&
9297 !ctx->cf_info.parent_if.is_divergent) {
9298 ctx->cf_info.exec_potentially_empty_break = false;
9299 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
9300 }
9301 /* uniform control flow never has an empty exec-mask */
9302 if (!ctx->cf_info.loop_nest_depth && !ctx->cf_info.parent_if.is_divergent) {
9303 ctx->cf_info.exec_potentially_empty_discard = false;
9304 ctx->cf_info.exec_potentially_empty_break = false;
9305 ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
9306 }
9307 }
9308
9309 static void begin_uniform_if_then(isel_context *ctx, if_context *ic, Temp cond)
9310 {
9311 assert(cond.regClass() == s1);
9312
9313 append_logical_end(ctx->block);
9314 ctx->block->kind |= block_kind_uniform;
9315
9316 aco_ptr<Pseudo_branch_instruction> branch;
9317 aco_opcode branch_opcode = aco_opcode::p_cbranch_z;
9318 branch.reset(create_instruction<Pseudo_branch_instruction>(branch_opcode, Format::PSEUDO_BRANCH, 1, 0));
9319 branch->operands[0] = Operand(cond);
9320 branch->operands[0].setFixed(scc);
9321 ctx->block->instructions.emplace_back(std::move(branch));
9322
9323 ic->BB_if_idx = ctx->block->index;
9324 ic->BB_endif = Block();
9325 ic->BB_endif.loop_nest_depth = ctx->cf_info.loop_nest_depth;
9326 ic->BB_endif.kind |= ctx->block->kind & block_kind_top_level;
9327
9328 ctx->cf_info.has_branch = false;
9329 ctx->cf_info.parent_loop.has_divergent_branch = false;
9330
9331 /** emit then block */
9332 Block* BB_then = ctx->program->create_and_insert_block();
9333 BB_then->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9334 add_edge(ic->BB_if_idx, BB_then);
9335 append_logical_start(BB_then);
9336 ctx->block = BB_then;
9337 }
9338
9339 static void begin_uniform_if_else(isel_context *ctx, if_context *ic)
9340 {
9341 Block *BB_then = ctx->block;
9342
9343 ic->uniform_has_then_branch = ctx->cf_info.has_branch;
9344 ic->then_branch_divergent = ctx->cf_info.parent_loop.has_divergent_branch;
9345
9346 if (!ic->uniform_has_then_branch) {
9347 append_logical_end(BB_then);
9348 /* branch from then block to endif block */
9349 aco_ptr<Pseudo_branch_instruction> branch;
9350 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
9351 BB_then->instructions.emplace_back(std::move(branch));
9352 add_linear_edge(BB_then->index, &ic->BB_endif);
9353 if (!ic->then_branch_divergent)
9354 add_logical_edge(BB_then->index, &ic->BB_endif);
9355 BB_then->kind |= block_kind_uniform;
9356 }
9357
9358 ctx->cf_info.has_branch = false;
9359 ctx->cf_info.parent_loop.has_divergent_branch = false;
9360
9361 /** emit else block */
9362 Block* BB_else = ctx->program->create_and_insert_block();
9363 BB_else->loop_nest_depth = ctx->cf_info.loop_nest_depth;
9364 add_edge(ic->BB_if_idx, BB_else);
9365 append_logical_start(BB_else);
9366 ctx->block = BB_else;
9367 }
9368
9369 static void end_uniform_if(isel_context *ctx, if_context *ic)
9370 {
9371 Block *BB_else = ctx->block;
9372
9373 if (!ctx->cf_info.has_branch) {
9374 append_logical_end(BB_else);
9375 /* branch from else block to endif block */
9376 aco_ptr<Pseudo_branch_instruction> branch;
9377 branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
9378 BB_else->instructions.emplace_back(std::move(branch));
9379 add_linear_edge(BB_else->index, &ic->BB_endif);
9380 if (!ctx->cf_info.parent_loop.has_divergent_branch)
9381 add_logical_edge(BB_else->index, &ic->BB_endif);
9382 BB_else->kind |= block_kind_uniform;
9383 }
9384
9385 ctx->cf_info.has_branch &= ic->uniform_has_then_branch;
9386 ctx->cf_info.parent_loop.has_divergent_branch &= ic->then_branch_divergent;
9387
9388 /** emit endif merge block */
9389 if (!ctx->cf_info.has_branch) {
9390 ctx->block = ctx->program->insert_block(std::move(ic->BB_endif));
9391 append_logical_start(ctx->block);
9392 }
9393 }
9394
9395 static bool visit_if(isel_context *ctx, nir_if *if_stmt)
9396 {
9397 Temp cond = get_ssa_temp(ctx, if_stmt->condition.ssa);
9398 Builder bld(ctx->program, ctx->block);
9399 aco_ptr<Pseudo_branch_instruction> branch;
9400 if_context ic;
9401
9402 if (!ctx->divergent_vals[if_stmt->condition.ssa->index]) { /* uniform condition */
9403 /**
9404 * Uniform conditionals are represented in the following way*) :
9405 *
9406 * The linear and logical CFG:
9407 * BB_IF
9408 * / \
9409 * BB_THEN (logical) BB_ELSE (logical)
9410 * \ /
9411 * BB_ENDIF
9412 *
9413 * *) Exceptions may be due to break and continue statements within loops.
9414 * If a break/continue happens within uniform control flow, it branches
9415 * to the loop exit/entry block. Otherwise, it branches to the next
9416 * merge block.
9417 **/
9418
9419 // TODO: in a post-RA optimizer, we could check if the condition is in VCC and omit this instruction
9420 assert(cond.regClass() == ctx->program->lane_mask);
9421 cond = bool_to_scalar_condition(ctx, cond);
9422
9423 begin_uniform_if_then(ctx, &ic, cond);
9424 visit_cf_list(ctx, &if_stmt->then_list);
9425
9426 begin_uniform_if_else(ctx, &ic);
9427 visit_cf_list(ctx, &if_stmt->else_list);
9428
9429 end_uniform_if(ctx, &ic);
9430
9431 return !ctx->cf_info.has_branch;
9432 } else { /* non-uniform condition */
9433 /**
9434 * To maintain a logical and linear CFG without critical edges,
9435 * non-uniform conditionals are represented in the following way*) :
9436 *
9437 * The linear CFG:
9438 * BB_IF
9439 * / \
9440 * BB_THEN (logical) BB_THEN (linear)
9441 * \ /
9442 * BB_INVERT (linear)
9443 * / \
9444 * BB_ELSE (logical) BB_ELSE (linear)
9445 * \ /
9446 * BB_ENDIF
9447 *
9448 * The logical CFG:
9449 * BB_IF
9450 * / \
9451 * BB_THEN (logical) BB_ELSE (logical)
9452 * \ /
9453 * BB_ENDIF
9454 *
9455 * *) Exceptions may be due to break and continue statements within loops
9456 **/
9457
9458 begin_divergent_if_then(ctx, &ic, cond);
9459 visit_cf_list(ctx, &if_stmt->then_list);
9460
9461 begin_divergent_if_else(ctx, &ic);
9462 visit_cf_list(ctx, &if_stmt->else_list);
9463
9464 end_divergent_if(ctx, &ic);
9465
9466 return true;
9467 }
9468 }
9469
9470 static bool visit_cf_list(isel_context *ctx,
9471 struct exec_list *list)
9472 {
9473 foreach_list_typed(nir_cf_node, node, node, list) {
9474 switch (node->type) {
9475 case nir_cf_node_block:
9476 visit_block(ctx, nir_cf_node_as_block(node));
9477 break;
9478 case nir_cf_node_if:
9479 if (!visit_if(ctx, nir_cf_node_as_if(node)))
9480 return true;
9481 break;
9482 case nir_cf_node_loop:
9483 visit_loop(ctx, nir_cf_node_as_loop(node));
9484 break;
9485 default:
9486 unreachable("unimplemented cf list type");
9487 }
9488 }
9489 return false;
9490 }
9491
9492 static void create_null_export(isel_context *ctx)
9493 {
9494 /* Some shader stages always need to have exports.
9495 * So when there are none, we need to add a null export.
9496 */
9497
9498 unsigned dest = (ctx->program->stage & hw_fs) ? 9 /* NULL */ : V_008DFC_SQ_EXP_POS;
9499 bool vm = (ctx->program->stage & hw_fs) || ctx->program->chip_class >= GFX10;
9500 Builder bld(ctx->program, ctx->block);
9501 bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
9502 /* enabled_mask */ 0, dest, /* compr */ false, /* done */ true, vm);
9503 }
9504
9505 static bool export_vs_varying(isel_context *ctx, int slot, bool is_pos, int *next_pos)
9506 {
9507 assert(ctx->stage == vertex_vs ||
9508 ctx->stage == tess_eval_vs ||
9509 ctx->stage == gs_copy_vs);
9510
9511 int offset = ctx->stage == tess_eval_vs
9512 ? ctx->program->info->tes.outinfo.vs_output_param_offset[slot]
9513 : ctx->program->info->vs.outinfo.vs_output_param_offset[slot];
9514 uint64_t mask = ctx->outputs.mask[slot];
9515 if (!is_pos && !mask)
9516 return false;
9517 if (!is_pos && offset == AC_EXP_PARAM_UNDEFINED)
9518 return false;
9519 aco_ptr<Export_instruction> exp{create_instruction<Export_instruction>(aco_opcode::exp, Format::EXP, 4, 0)};
9520 exp->enabled_mask = mask;
9521 for (unsigned i = 0; i < 4; ++i) {
9522 if (mask & (1 << i))
9523 exp->operands[i] = Operand(ctx->outputs.temps[slot * 4u + i]);
9524 else
9525 exp->operands[i] = Operand(v1);
9526 }
9527 /* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
9528 * Setting valid_mask=1 prevents it and has no other effect.
9529 */
9530 exp->valid_mask = ctx->options->chip_class >= GFX10 && is_pos && *next_pos == 0;
9531 exp->done = false;
9532 exp->compressed = false;
9533 if (is_pos)
9534 exp->dest = V_008DFC_SQ_EXP_POS + (*next_pos)++;
9535 else
9536 exp->dest = V_008DFC_SQ_EXP_PARAM + offset;
9537 ctx->block->instructions.emplace_back(std::move(exp));
9538
9539 return true;
9540 }
9541
9542 static void export_vs_psiz_layer_viewport(isel_context *ctx, int *next_pos)
9543 {
9544 aco_ptr<Export_instruction> exp{create_instruction<Export_instruction>(aco_opcode::exp, Format::EXP, 4, 0)};
9545 exp->enabled_mask = 0;
9546 for (unsigned i = 0; i < 4; ++i)
9547 exp->operands[i] = Operand(v1);
9548 if (ctx->outputs.mask[VARYING_SLOT_PSIZ]) {
9549 exp->operands[0] = Operand(ctx->outputs.temps[VARYING_SLOT_PSIZ * 4u]);
9550 exp->enabled_mask |= 0x1;
9551 }
9552 if (ctx->outputs.mask[VARYING_SLOT_LAYER]) {
9553 exp->operands[2] = Operand(ctx->outputs.temps[VARYING_SLOT_LAYER * 4u]);
9554 exp->enabled_mask |= 0x4;
9555 }
9556 if (ctx->outputs.mask[VARYING_SLOT_VIEWPORT]) {
9557 if (ctx->options->chip_class < GFX9) {
9558 exp->operands[3] = Operand(ctx->outputs.temps[VARYING_SLOT_VIEWPORT * 4u]);
9559 exp->enabled_mask |= 0x8;
9560 } else {
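/* GFX9+ has no dedicated viewport export channel: the viewport index is
 * packed into bits [19:16] of the layer component instead. */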
9561 Builder bld(ctx->program, ctx->block);
9562
9563 Temp out = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(16u),
9564 Operand(ctx->outputs.temps[VARYING_SLOT_VIEWPORT * 4u]));
9565 if (exp->operands[2].isTemp())
9566 out = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand(out), exp->operands[2]);
9567
9568 exp->operands[2] = Operand(out);
9569 exp->enabled_mask |= 0x4;
9570 }
9571 }
9572 exp->valid_mask = ctx->options->chip_class >= GFX10 && *next_pos == 0;
9573 exp->done = false;
9574 exp->compressed = false;
9575 exp->dest = V_008DFC_SQ_EXP_POS + (*next_pos)++;
9576 ctx->block->instructions.emplace_back(std::move(exp));
9577 }
9578
9579 static void create_vs_exports(isel_context *ctx)
9580 {
9581 assert(ctx->stage == vertex_vs ||
9582 ctx->stage == tess_eval_vs ||
9583 ctx->stage == gs_copy_vs);
9584
9585 radv_vs_output_info *outinfo = ctx->stage == tess_eval_vs
9586 ? &ctx->program->info->tes.outinfo
9587 : &ctx->program->info->vs.outinfo;
9588
9589 if (outinfo->export_prim_id) {
9590 ctx->outputs.mask[VARYING_SLOT_PRIMITIVE_ID] |= 0x1;
9591 ctx->outputs.temps[VARYING_SLOT_PRIMITIVE_ID * 4u] = get_arg(ctx, ctx->args->vs_prim_id);
9592 }
9593
9594 if (ctx->options->key.has_multiview_view_index) {
9595 ctx->outputs.mask[VARYING_SLOT_LAYER] |= 0x1;
9596 ctx->outputs.temps[VARYING_SLOT_LAYER * 4u] = as_vgpr(ctx, get_arg(ctx, ctx->args->ac.view_index));
9597 }
9598
9599 /* the order in which these position exports are created is important */
9600 int next_pos = 0;
9601 bool exported_pos = export_vs_varying(ctx, VARYING_SLOT_POS, true, &next_pos);
9602 if (outinfo->writes_pointsize || outinfo->writes_layer || outinfo->writes_viewport_index) {
9603 export_vs_psiz_layer_viewport(ctx, &next_pos);
9604 exported_pos = true;
9605 }
9606 if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
9607 exported_pos |= export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST0, true, &next_pos);
9608 if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
9609 exported_pos |= export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST1, true, &next_pos);
9610
9611 if (ctx->export_clip_dists) {
9612 if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
9613 export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST0, false, &next_pos);
9614 if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
9615 export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST1, false, &next_pos);
9616 }
9617
9618 for (unsigned i = 0; i <= VARYING_SLOT_VAR31; ++i) {
9619 if (i < VARYING_SLOT_VAR0 && i != VARYING_SLOT_LAYER &&
9620 i != VARYING_SLOT_PRIMITIVE_ID)
9621 continue;
9622
9623 export_vs_varying(ctx, i, false, NULL);
9624 }
9625
9626 if (!exported_pos)
9627 create_null_export(ctx);
9628 }
9629
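/* Export depth/stencil/sample mask to the MRTZ target. When depth itself is
 * not written, stencil and sample mask fit into 16 bits each and a
 * compressed (COMPR) export is used; otherwise each value occupies its own
 * 32-bit channel (X = depth, Y = stencil, Z = sample mask).
 */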
9630 static bool export_fs_mrt_z(isel_context *ctx)
9631 {
9632 Builder bld(ctx->program, ctx->block);
9633 unsigned enabled_channels = 0;
9634 bool compr = false;
9635 Operand values[4];
9636
9637 for (unsigned i = 0; i < 4; ++i) {
9638 values[i] = Operand(v1);
9639 }
9640
9641    /* Both stencil and sample mask only need 16 bits. */
9642 if (!ctx->program->info->ps.writes_z &&
9643 (ctx->program->info->ps.writes_stencil ||
9644 ctx->program->info->ps.writes_sample_mask)) {
9645 compr = true; /* COMPR flag */
9646
9647 if (ctx->program->info->ps.writes_stencil) {
9648 /* Stencil should be in X[23:16]. */
9649 values[0] = Operand(ctx->outputs.temps[FRAG_RESULT_STENCIL * 4u]);
9650 values[0] = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(16u), values[0]);
9651 enabled_channels |= 0x3;
9652 }
9653
9654 if (ctx->program->info->ps.writes_sample_mask) {
9655 /* SampleMask should be in Y[15:0]. */
9656 values[1] = Operand(ctx->outputs.temps[FRAG_RESULT_SAMPLE_MASK * 4u]);
9657 enabled_channels |= 0xc;
9658 }
9659 } else {
9660 if (ctx->program->info->ps.writes_z) {
9661 values[0] = Operand(ctx->outputs.temps[FRAG_RESULT_DEPTH * 4u]);
9662 enabled_channels |= 0x1;
9663 }
9664
9665 if (ctx->program->info->ps.writes_stencil) {
9666 values[1] = Operand(ctx->outputs.temps[FRAG_RESULT_STENCIL * 4u]);
9667 enabled_channels |= 0x2;
9668 }
9669
9670 if (ctx->program->info->ps.writes_sample_mask) {
9671 values[2] = Operand(ctx->outputs.temps[FRAG_RESULT_SAMPLE_MASK * 4u]);
9672 enabled_channels |= 0x4;
9673 }
9674 }
9675
9676    /* GFX6 (except OLAND and HAINAN) has a bug where it only looks at the X
9677     * writemask component.
9678     */
9679 if (ctx->options->chip_class == GFX6 &&
9680 ctx->options->family != CHIP_OLAND &&
9681 ctx->options->family != CHIP_HAINAN) {
9682 enabled_channels |= 0x1;
9683 }
9684
9685 bld.exp(aco_opcode::exp, values[0], values[1], values[2], values[3],
9686 enabled_channels, V_008DFC_SQ_EXP_MRTZ, compr);
9687
9688 return true;
9689 }
9690
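/* Export one color attachment. The per-MRT SPI color format from the
 * pipeline key determines how many channels are written and whether pairs
 * of values are packed into 16-bit halves with a v_cvt_pk* opcode (integer
 * formats are clamped to their storage range first). Returns false for
 * SPI_SHADER_ZERO, i.e. when the export is omitted entirely.
 */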
9691 static bool export_fs_mrt_color(isel_context *ctx, int slot)
9692 {
9693 Builder bld(ctx->program, ctx->block);
9694 unsigned write_mask = ctx->outputs.mask[slot];
9695 Operand values[4];
9696
9697 for (unsigned i = 0; i < 4; ++i) {
9698 if (write_mask & (1 << i)) {
9699 values[i] = Operand(ctx->outputs.temps[slot * 4u + i]);
9700 } else {
9701 values[i] = Operand(v1);
9702 }
9703 }
9704
9705 unsigned target, col_format;
9706 unsigned enabled_channels = 0;
9707 aco_opcode compr_op = (aco_opcode)0;
9708
9709 slot -= FRAG_RESULT_DATA0;
9710 target = V_008DFC_SQ_EXP_MRT + slot;
9711 col_format = (ctx->options->key.fs.col_format >> (4 * slot)) & 0xf;
9712
9713 bool is_int8 = (ctx->options->key.fs.is_int8 >> slot) & 1;
9714 bool is_int10 = (ctx->options->key.fs.is_int10 >> slot) & 1;
9715
9716 switch (col_format)
9717 {
9718 case V_028714_SPI_SHADER_ZERO:
9719 enabled_channels = 0; /* writemask */
9720 target = V_008DFC_SQ_EXP_NULL;
9721 break;
9722
9723 case V_028714_SPI_SHADER_32_R:
9724 enabled_channels = 1;
9725 break;
9726
9727 case V_028714_SPI_SHADER_32_GR:
9728 enabled_channels = 0x3;
9729 break;
9730
9731 case V_028714_SPI_SHADER_32_AR:
9732 if (ctx->options->chip_class >= GFX10) {
9733 /* Special case: on GFX10, the outputs are different for 32_AR */
9734 enabled_channels = 0x3;
9735 values[1] = values[3];
9736 values[3] = Operand(v1);
9737 } else {
9738 enabled_channels = 0x9;
9739 }
9740 break;
9741
9742 case V_028714_SPI_SHADER_FP16_ABGR:
9743 enabled_channels = 0x5;
9744 compr_op = aco_opcode::v_cvt_pkrtz_f16_f32;
9745 break;
9746
9747 case V_028714_SPI_SHADER_UNORM16_ABGR:
9748 enabled_channels = 0x5;
9749 compr_op = aco_opcode::v_cvt_pknorm_u16_f32;
9750 break;
9751
9752 case V_028714_SPI_SHADER_SNORM16_ABGR:
9753 enabled_channels = 0x5;
9754 compr_op = aco_opcode::v_cvt_pknorm_i16_f32;
9755 break;
9756
9757 case V_028714_SPI_SHADER_UINT16_ABGR: {
9758 enabled_channels = 0x5;
9759 compr_op = aco_opcode::v_cvt_pk_u16_u32;
9760 if (is_int8 || is_int10) {
9761 /* clamp */
9762 uint32_t max_rgb = is_int8 ? 255 : is_int10 ? 1023 : 0;
9763 Temp max_rgb_val = bld.copy(bld.def(s1), Operand(max_rgb));
9764
9765 for (unsigned i = 0; i < 4; i++) {
9766 if ((write_mask >> i) & 1) {
9767 values[i] = bld.vop2(aco_opcode::v_min_u32, bld.def(v1),
9768 i == 3 && is_int10 ? Operand(3u) : Operand(max_rgb_val),
9769 values[i]);
9770 }
9771 }
9772 }
9773 break;
9774 }
9775
9776 case V_028714_SPI_SHADER_SINT16_ABGR:
9777 enabled_channels = 0x5;
9778 compr_op = aco_opcode::v_cvt_pk_i16_i32;
9779 if (is_int8 || is_int10) {
9780 /* clamp */
9781 uint32_t max_rgb = is_int8 ? 127 : is_int10 ? 511 : 0;
9782          uint32_t min_rgb = is_int8 ? -128 : is_int10 ? -512 : 0;
9783 Temp max_rgb_val = bld.copy(bld.def(s1), Operand(max_rgb));
9784 Temp min_rgb_val = bld.copy(bld.def(s1), Operand(min_rgb));
9785
9786 for (unsigned i = 0; i < 4; i++) {
9787 if ((write_mask >> i) & 1) {
9788 values[i] = bld.vop2(aco_opcode::v_min_i32, bld.def(v1),
9789 i == 3 && is_int10 ? Operand(1u) : Operand(max_rgb_val),
9790 values[i]);
9791 values[i] = bld.vop2(aco_opcode::v_max_i32, bld.def(v1),
9792 i == 3 && is_int10 ? Operand(-2u) : Operand(min_rgb_val),
9793 values[i]);
9794 }
9795 }
9796 }
9797 break;
9798
9799 case V_028714_SPI_SHADER_32_ABGR:
9800 enabled_channels = 0xF;
9801 break;
9802
9803 default:
9804 break;
9805 }
9806
9807 if (target == V_008DFC_SQ_EXP_NULL)
9808 return false;
9809
9810 if ((bool) compr_op) {
9811 for (int i = 0; i < 2; i++) {
9812 /* check if at least one of the values to be compressed is enabled */
9813 unsigned enabled = (write_mask >> (i*2) | write_mask >> (i*2+1)) & 0x1;
9814 if (enabled) {
9815 enabled_channels |= enabled << (i*2);
9816 values[i] = bld.vop3(compr_op, bld.def(v1),
9817 values[i*2].isUndefined() ? Operand(0u) : values[i*2],
9818 values[i*2+1].isUndefined() ? Operand(0u): values[i*2+1]);
9819 } else {
9820 values[i] = Operand(v1);
9821 }
9822 }
9823 values[2] = Operand(v1);
9824 values[3] = Operand(v1);
9825 } else {
9826 for (int i = 0; i < 4; i++)
9827 values[i] = enabled_channels & (1 << i) ? values[i] : Operand(v1);
9828 }
9829
9830 bld.exp(aco_opcode::exp, values[0], values[1], values[2], values[3],
9831 enabled_channels, target, (bool) compr_op);
9832 return true;
9833 }
9834
9835 static void create_fs_exports(isel_context *ctx)
9836 {
9837 bool exported = false;
9838
9839 /* Export depth, stencil and sample mask. */
9840 if (ctx->outputs.mask[FRAG_RESULT_DEPTH] ||
9841 ctx->outputs.mask[FRAG_RESULT_STENCIL] ||
9842 ctx->outputs.mask[FRAG_RESULT_SAMPLE_MASK])
9843 exported |= export_fs_mrt_z(ctx);
9844
9845 /* Export all color render targets. */
9846 for (unsigned i = FRAG_RESULT_DATA0; i < FRAG_RESULT_DATA7 + 1; ++i)
9847 if (ctx->outputs.mask[i])
9848 exported |= export_fs_mrt_color(ctx, i);
9849
9850 if (!exported)
9851 create_null_export(ctx);
9852 }
9853
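/* After the TCS invocations have written their outputs to LDS, invocation 0
 * of each patch reads the tess levels back and stores them to the tess
 * factor ring. On GFX8 and earlier, the thread for relative patch 0 also
 * stores the dynamic HS control word in front of the factors. If TES reads
 * the tess levels, they are additionally copied to the off-chip ring.
 */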
9854 static void write_tcs_tess_factors(isel_context *ctx)
9855 {
9856 unsigned outer_comps;
9857 unsigned inner_comps;
9858
9859 switch (ctx->args->options->key.tcs.primitive_mode) {
9860 case GL_ISOLINES:
9861 outer_comps = 2;
9862 inner_comps = 0;
9863 break;
9864 case GL_TRIANGLES:
9865 outer_comps = 3;
9866 inner_comps = 1;
9867 break;
9868 case GL_QUADS:
9869 outer_comps = 4;
9870 inner_comps = 2;
9871 break;
9872 default:
9873 return;
9874 }
9875
9876 Builder bld(ctx->program, ctx->block);
9877
9878 bld.barrier(aco_opcode::p_memory_barrier_shared);
9879 if (unlikely(ctx->program->chip_class != GFX6 && ctx->program->workgroup_size > ctx->program->wave_size))
9880 bld.sopp(aco_opcode::s_barrier);
9881
9882 Temp tcs_rel_ids = get_arg(ctx, ctx->args->ac.tcs_rel_ids);
9883 Temp invocation_id = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), tcs_rel_ids, Operand(8u), Operand(5u));
9884
9885 Temp invocation_id_is_zero = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), invocation_id);
9886 if_context ic_invocation_id_is_zero;
9887 begin_divergent_if_then(ctx, &ic_invocation_id_is_zero, invocation_id_is_zero);
9888 bld.reset(ctx->block);
9889
9890 Temp hs_ring_tess_factor = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_HS_TESS_FACTOR * 16u));
9891
9892 std::pair<Temp, unsigned> lds_base = get_tcs_output_lds_offset(ctx);
9893 unsigned stride = inner_comps + outer_comps;
9894 unsigned lds_align = calculate_lds_alignment(ctx, lds_base.second);
9895 Temp tf_inner_vec;
9896 Temp tf_outer_vec;
9897 Temp out[6];
9898 assert(stride <= (sizeof(out) / sizeof(Temp)));
9899
9900 if (ctx->args->options->key.tcs.primitive_mode == GL_ISOLINES) {
9901       // LINES reversal: the two outer tess factors are emitted in swapped order
9902 tf_outer_vec = load_lds(ctx, 4, bld.tmp(v2), lds_base.first, lds_base.second + ctx->tcs_tess_lvl_out_loc, lds_align);
9903 out[1] = emit_extract_vector(ctx, tf_outer_vec, 0, v1);
9904 out[0] = emit_extract_vector(ctx, tf_outer_vec, 1, v1);
9905 } else {
9906 tf_outer_vec = load_lds(ctx, 4, bld.tmp(RegClass(RegType::vgpr, outer_comps)), lds_base.first, lds_base.second + ctx->tcs_tess_lvl_out_loc, lds_align);
9907 tf_inner_vec = load_lds(ctx, 4, bld.tmp(RegClass(RegType::vgpr, inner_comps)), lds_base.first, lds_base.second + ctx->tcs_tess_lvl_in_loc, lds_align);
9908
9909 for (unsigned i = 0; i < outer_comps; ++i)
9910 out[i] = emit_extract_vector(ctx, tf_outer_vec, i, v1);
9911 for (unsigned i = 0; i < inner_comps; ++i)
9912 out[outer_comps + i] = emit_extract_vector(ctx, tf_inner_vec, i, v1);
9913 }
9914
9915 Temp rel_patch_id = get_tess_rel_patch_id(ctx);
9916 Temp tf_base = get_arg(ctx, ctx->args->tess_factor_offset);
9917 Temp byte_offset = bld.v_mul_imm(bld.def(v1), rel_patch_id, stride * 4u);
9918 unsigned tf_const_offset = 0;
9919
9920 if (ctx->program->chip_class <= GFX8) {
9921 Temp rel_patch_id_is_zero = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), rel_patch_id);
9922 if_context ic_rel_patch_id_is_zero;
9923 begin_divergent_if_then(ctx, &ic_rel_patch_id_is_zero, rel_patch_id_is_zero);
9924 bld.reset(ctx->block);
9925
9926 /* Store the dynamic HS control word. */
9927 Temp control_word = bld.copy(bld.def(v1), Operand(0x80000000u));
9928 bld.mubuf(aco_opcode::buffer_store_dword,
9929 /* SRSRC */ hs_ring_tess_factor, /* VADDR */ Operand(v1), /* SOFFSET */ tf_base, /* VDATA */ control_word,
9930                /* immediate OFFSET */ 0, /* OFFEN */ false, /* idxen */ false, /* addr64 */ false,
9931 /* disable_wqm */ false, /* glc */ true);
9932 tf_const_offset += 4;
9933
9934 begin_divergent_if_else(ctx, &ic_rel_patch_id_is_zero);
9935 end_divergent_if(ctx, &ic_rel_patch_id_is_zero);
9936 bld.reset(ctx->block);
9937 }
9938
9939 assert(stride == 2 || stride == 4 || stride == 6);
9940 Temp tf_vec = create_vec_from_array(ctx, out, stride, RegType::vgpr, 4u);
9941 store_vmem_mubuf(ctx, tf_vec, hs_ring_tess_factor, byte_offset, tf_base, tf_const_offset, 4, (1 << stride) - 1, true, false);
9942
9943 /* Store to offchip for TES to read - only if TES reads them */
9944 if (ctx->args->options->key.tcs.tes_reads_tess_factors) {
9945 Temp hs_ring_tess_offchip = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_HS_TESS_OFFCHIP * 16u));
9946 Temp oc_lds = get_arg(ctx, ctx->args->oc_lds);
9947
9948 std::pair<Temp, unsigned> vmem_offs_outer = get_tcs_per_patch_output_vmem_offset(ctx, nullptr, ctx->tcs_tess_lvl_out_loc);
9949 store_vmem_mubuf(ctx, tf_outer_vec, hs_ring_tess_offchip, vmem_offs_outer.first, oc_lds, vmem_offs_outer.second, 4, (1 << outer_comps) - 1, true, false);
9950
9951 if (likely(inner_comps)) {
9952 std::pair<Temp, unsigned> vmem_offs_inner = get_tcs_per_patch_output_vmem_offset(ctx, nullptr, ctx->tcs_tess_lvl_in_loc);
9953 store_vmem_mubuf(ctx, tf_inner_vec, hs_ring_tess_offchip, vmem_offs_inner.first, oc_lds, vmem_offs_inner.second, 4, (1 << inner_comps) - 1, true, false);
9954 }
9955 }
9956
9957 begin_divergent_if_else(ctx, &ic_invocation_id_is_zero);
9958 end_divergent_if(ctx, &ic_invocation_id_is_zero);
9959 }
9960
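/* Store one streamout output to its buffer. Consecutive written components
 * are grouped into a single buffer_store_dwordx* (vec3 stores are split on
 * GFX6, which lacks buffer_store_dwordx3); components inside a group that
 * were never written are stored as zero.
 */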
9961 static void emit_stream_output(isel_context *ctx,
9962 Temp const *so_buffers,
9963 Temp const *so_write_offset,
9964 const struct radv_stream_output *output)
9965 {
9966 unsigned num_comps = util_bitcount(output->component_mask);
9967 unsigned writemask = (1 << num_comps) - 1;
9968 unsigned loc = output->location;
9969 unsigned buf = output->buffer;
9970
9971 assert(num_comps && num_comps <= 4);
9972 if (!num_comps || num_comps > 4)
9973 return;
9974
9975 unsigned start = ffs(output->component_mask) - 1;
9976
9977 Temp out[4];
9978 bool all_undef = true;
9979 assert(ctx->stage == vertex_vs || ctx->stage == gs_copy_vs);
9980 for (unsigned i = 0; i < num_comps; i++) {
9981 out[i] = ctx->outputs.temps[loc * 4 + start + i];
9982 all_undef = all_undef && !out[i].id();
9983 }
9984 if (all_undef)
9985 return;
9986
9987 while (writemask) {
9988 int start, count;
9989 u_bit_scan_consecutive_range(&writemask, &start, &count);
9990 if (count == 3 && ctx->options->chip_class == GFX6) {
9991 /* GFX6 doesn't support storing vec3, split it. */
9992 writemask |= 1u << (start + 2);
9993 count = 2;
9994 }
9995
9996 unsigned offset = output->offset + start * 4;
9997
9998 Temp write_data = {ctx->program->allocateId(), RegClass(RegType::vgpr, count)};
9999 aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
10000 for (int i = 0; i < count; ++i)
10001 vec->operands[i] = (ctx->outputs.mask[loc] & 1 << (start + i)) ? Operand(out[start + i]) : Operand(0u);
10002 vec->definitions[0] = Definition(write_data);
10003 ctx->block->instructions.emplace_back(std::move(vec));
10004
10005 aco_opcode opcode;
10006 switch (count) {
10007 case 1:
10008 opcode = aco_opcode::buffer_store_dword;
10009 break;
10010 case 2:
10011 opcode = aco_opcode::buffer_store_dwordx2;
10012 break;
10013 case 3:
10014 opcode = aco_opcode::buffer_store_dwordx3;
10015 break;
10016 case 4:
10017 opcode = aco_opcode::buffer_store_dwordx4;
10018 break;
10019 default:
10020 unreachable("Unsupported dword count.");
10021 }
10022
10023 aco_ptr<MUBUF_instruction> store{create_instruction<MUBUF_instruction>(opcode, Format::MUBUF, 4, 0)};
10024 store->operands[0] = Operand(so_buffers[buf]);
10025 store->operands[1] = Operand(so_write_offset[buf]);
10026 store->operands[2] = Operand((uint32_t) 0);
10027 store->operands[3] = Operand(write_data);
10028 if (offset > 4095) {
10029          /* This shouldn't be possible in RADV, but maybe in GL? It's easy to handle anyway. */
10030 Builder bld(ctx->program, ctx->block);
10031          store->operands[1] = bld.vadd32(bld.def(v1), Operand(offset), Operand(so_write_offset[buf]));
10032 } else {
10033 store->offset = offset;
10034 }
10035 store->offen = true;
10036 store->glc = true;
10037 store->dlc = false;
10038 store->slc = true;
10039 store->can_reorder = true;
10040 ctx->block->instructions.emplace_back(std::move(store));
10041 }
10042 }
10043
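/* Emit all streamout stores for one vertex stream: load the four buffer
 * descriptors, extract the number of vertices that still fit into the
 * buffers from the streamout config, and wrap the stores in a divergent if
 * so that only threads whose vertex index is below that count store
 * anything.
 */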
10044 static void emit_streamout(isel_context *ctx, unsigned stream)
10045 {
10046 Builder bld(ctx->program, ctx->block);
10047
10048 Temp so_buffers[4];
10049 Temp buf_ptr = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->streamout_buffers));
10050 for (unsigned i = 0; i < 4; i++) {
10051 unsigned stride = ctx->program->info->so.strides[i];
10052 if (!stride)
10053 continue;
10054
10055 Operand off = bld.copy(bld.def(s1), Operand(i * 16u));
10056 so_buffers[i] = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), buf_ptr, off);
10057 }
10058
10059 Temp so_vtx_count = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
10060 get_arg(ctx, ctx->args->streamout_config), Operand(0x70010u));
10061
10062 Temp tid = emit_mbcnt(ctx, bld.def(v1));
10063
10064 Temp can_emit = bld.vopc(aco_opcode::v_cmp_gt_i32, bld.def(bld.lm), so_vtx_count, tid);
10065
10066 if_context ic;
10067 begin_divergent_if_then(ctx, &ic, can_emit);
10068
10069 bld.reset(ctx->block);
10070
10071 Temp so_write_index = bld.vadd32(bld.def(v1), get_arg(ctx, ctx->args->streamout_write_idx), tid);
10072
10073 Temp so_write_offset[4];
10074
10075 for (unsigned i = 0; i < 4; i++) {
10076 unsigned stride = ctx->program->info->so.strides[i];
10077 if (!stride)
10078 continue;
10079
10080 if (stride == 1) {
10081 Temp offset = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
10082 get_arg(ctx, ctx->args->streamout_write_idx),
10083 get_arg(ctx, ctx->args->streamout_offset[i]));
10084 Temp new_offset = bld.vadd32(bld.def(v1), offset, tid);
10085
10086 so_write_offset[i] = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), new_offset);
10087 } else {
10088 Temp offset = bld.v_mul_imm(bld.def(v1), so_write_index, stride * 4u);
10089 Temp offset2 = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(4u),
10090 get_arg(ctx, ctx->args->streamout_offset[i]));
10091 so_write_offset[i] = bld.vadd32(bld.def(v1), offset, offset2);
10092 }
10093 }
10094
10095 for (unsigned i = 0; i < ctx->program->info->so.num_outputs; i++) {
10096 struct radv_stream_output *output =
10097 &ctx->program->info->so.outputs[i];
10098 if (stream != output->stream)
10099 continue;
10100
10101 emit_stream_output(ctx, so_buffers, so_write_offset, output);
10102 }
10103
10104 begin_divergent_if_else(ctx, &ic);
10105 end_divergent_if(ctx, &ic);
10106 }
10107
10108 } /* end namespace */
10109
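/* Hardware workaround: on chips with the LS VGPR init bug, a merged LS+HS
 * threadgroup with zero HS threads makes the SPI load the LS input VGPRs
 * into the slots where the HS VGPRs would normally go. Select between the
 * two layouts at runtime based on the HS thread count in merged_wave_info.
 */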
10110 void fix_ls_vgpr_init_bug(isel_context *ctx, Pseudo_instruction *startpgm)
10111 {
10112 assert(ctx->shader->info.stage == MESA_SHADER_VERTEX);
10113 Builder bld(ctx->program, ctx->block);
10114 constexpr unsigned hs_idx = 1u;
10115 Builder::Result hs_thread_count = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
10116 get_arg(ctx, ctx->args->merged_wave_info),
10117 Operand((8u << 16) | (hs_idx * 8u)));
10118 Temp ls_has_nonzero_hs_threads = bool_to_vector_condition(ctx, hs_thread_count.def(1).getTemp());
10119
10120 /* If there are no HS threads, SPI mistakenly loads the LS VGPRs starting at VGPR 0. */
10121
10122 Temp instance_id = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
10123 get_arg(ctx, ctx->args->rel_auto_id),
10124 get_arg(ctx, ctx->args->ac.instance_id),
10125 ls_has_nonzero_hs_threads);
10126 Temp rel_auto_id = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
10127 get_arg(ctx, ctx->args->ac.tcs_rel_ids),
10128 get_arg(ctx, ctx->args->rel_auto_id),
10129 ls_has_nonzero_hs_threads);
10130 Temp vertex_id = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
10131 get_arg(ctx, ctx->args->ac.tcs_patch_id),
10132 get_arg(ctx, ctx->args->ac.vertex_id),
10133 ls_has_nonzero_hs_threads);
10134
10135 ctx->arg_temps[ctx->args->ac.instance_id.arg_index] = instance_id;
10136 ctx->arg_temps[ctx->args->rel_auto_id.arg_index] = rel_auto_id;
10137 ctx->arg_temps[ctx->args->ac.vertex_id.arg_index] = vertex_id;
10138 }
10139
10140 void split_arguments(isel_context *ctx, Pseudo_instruction *startpgm)
10141 {
10142 /* Split all arguments except for the first (ring_offsets) and the last
10143 * (exec) so that the dead channels don't stay live throughout the program.
10144 */
10145 for (int i = 1; i < startpgm->definitions.size() - 1; i++) {
10146 if (startpgm->definitions[i].regClass().size() > 1) {
10147 emit_split_vector(ctx, startpgm->definitions[i].getTemp(),
10148 startpgm->definitions[i].regClass().size());
10149 }
10150 }
10151 }
10152
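/* With BC_OPTIMIZE enabled, the hardware can substitute center barycentrics
 * for the centroid ones and flags this in the sign bit of prim_mask.
 * Re-select per component so that ctx->persp_centroid/ctx->linear_centroid
 * always hold usable coordinates.
 */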
10153 void handle_bc_optimize(isel_context *ctx)
10154 {
10155 /* needed when SPI_PS_IN_CONTROL.BC_OPTIMIZE_DISABLE is set to 0 */
10156 Builder bld(ctx->program, ctx->block);
10157 uint32_t spi_ps_input_ena = ctx->program->config->spi_ps_input_ena;
10158 bool uses_center = G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) || G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena);
10159 bool uses_centroid = G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) || G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena);
10160 ctx->persp_centroid = get_arg(ctx, ctx->args->ac.persp_centroid);
10161 ctx->linear_centroid = get_arg(ctx, ctx->args->ac.linear_centroid);
10162 if (uses_center && uses_centroid) {
10163 Temp sel = bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.hint_vcc(bld.def(bld.lm)),
10164 get_arg(ctx, ctx->args->ac.prim_mask), Operand(0u));
10165
10166 if (G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena)) {
10167 Temp new_coord[2];
10168 for (unsigned i = 0; i < 2; i++) {
10169 Temp persp_centroid = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.persp_centroid), i, v1);
10170 Temp persp_center = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.persp_center), i, v1);
10171 new_coord[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
10172 persp_centroid, persp_center, sel);
10173 }
10174 ctx->persp_centroid = bld.tmp(v2);
10175 bld.pseudo(aco_opcode::p_create_vector, Definition(ctx->persp_centroid),
10176 Operand(new_coord[0]), Operand(new_coord[1]));
10177 emit_split_vector(ctx, ctx->persp_centroid, 2);
10178 }
10179
10180 if (G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena)) {
10181 Temp new_coord[2];
10182 for (unsigned i = 0; i < 2; i++) {
10183 Temp linear_centroid = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.linear_centroid), i, v1);
10184 Temp linear_center = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.linear_center), i, v1);
10185 new_coord[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1),
10186 linear_centroid, linear_center, sel);
10187 }
10188 ctx->linear_centroid = bld.tmp(v2);
10189 bld.pseudo(aco_opcode::p_create_vector, Definition(ctx->linear_centroid),
10190 Operand(new_coord[0]), Operand(new_coord[1]));
10191 emit_split_vector(ctx, ctx->linear_centroid, 2);
10192 }
10193 }
10194 }
10195
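/* Derive the program's initial floating-point mode (denormal flushing and
 * rounding behaviour for fp32 and fp16/fp64) from the shader's NIR
 * float_controls execution mode.
 */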
10196 void setup_fp_mode(isel_context *ctx, nir_shader *shader)
10197 {
10198 Program *program = ctx->program;
10199
10200 unsigned float_controls = shader->info.float_controls_execution_mode;
10201
10202 program->next_fp_mode.preserve_signed_zero_inf_nan32 =
10203 float_controls & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32;
10204 program->next_fp_mode.preserve_signed_zero_inf_nan16_64 =
10205 float_controls & (FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16 |
10206 FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64);
10207
10208 program->next_fp_mode.must_flush_denorms32 =
10209 float_controls & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32;
10210 program->next_fp_mode.must_flush_denorms16_64 =
10211 float_controls & (FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16 |
10212 FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64);
10213
10214 program->next_fp_mode.care_about_round32 =
10215 float_controls & (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32 | FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32);
10216
10217 program->next_fp_mode.care_about_round16_64 =
10218 float_controls & (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64 |
10219 FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64);
10220
10221 /* default to preserving fp16 and fp64 denorms, since it's free */
10222 if (program->next_fp_mode.must_flush_denorms16_64)
10223 program->next_fp_mode.denorm16_64 = 0;
10224 else
10225 program->next_fp_mode.denorm16_64 = fp_denorm_keep;
10226
10227 /* preserving fp32 denorms is expensive, so only do it if asked */
10228 if (float_controls & FLOAT_CONTROLS_DENORM_PRESERVE_FP32)
10229 program->next_fp_mode.denorm32 = fp_denorm_keep;
10230 else
10231 program->next_fp_mode.denorm32 = 0;
10232
10233 if (float_controls & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32)
10234 program->next_fp_mode.round32 = fp_round_tz;
10235 else
10236 program->next_fp_mode.round32 = fp_round_ne;
10237
10238 if (float_controls & (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64))
10239 program->next_fp_mode.round16_64 = fp_round_tz;
10240 else
10241 program->next_fp_mode.round16_64 = fp_round_ne;
10242
10243 ctx->block->fp_mode = program->next_fp_mode;
10244 }
10245
10246 void cleanup_cfg(Program *program)
10247 {
10248 /* create linear_succs/logical_succs */
10249 for (Block& BB : program->blocks) {
10250 for (unsigned idx : BB.linear_preds)
10251 program->blocks[idx].linear_succs.emplace_back(BB.index);
10252 for (unsigned idx : BB.logical_preds)
10253 program->blocks[idx].logical_succs.emplace_back(BB.index);
10254 }
10255 }
10256
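/* Top-level entry point of instruction selection: iterate over the
 * (possibly merged) shader stages, guard each stage's body with a divergent
 * if on merged_wave_info where needed so that only the waves belonging to
 * that stage execute it, and emit the per-stage epilogue (VS exports, GS
 * done message, TCS tess factors or FS exports).
 */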
10257 void select_program(Program *program,
10258 unsigned shader_count,
10259 struct nir_shader *const *shaders,
10260 ac_shader_config* config,
10261 struct radv_shader_args *args)
10262 {
10263 isel_context ctx = setup_isel_context(program, shader_count, shaders, config, args, false);
10264 if_context ic_merged_wave_info;
10265
10266 for (unsigned i = 0; i < shader_count; i++) {
10267 nir_shader *nir = shaders[i];
10268 init_context(&ctx, nir);
10269
10270 setup_fp_mode(&ctx, nir);
10271
10272 if (!i) {
10273 /* needs to be after init_context() for FS */
10274 Pseudo_instruction *startpgm = add_startpgm(&ctx);
10275 append_logical_start(ctx.block);
10276
10277 if (unlikely(args->options->has_ls_vgpr_init_bug && ctx.stage == vertex_tess_control_hs))
10278 fix_ls_vgpr_init_bug(&ctx, startpgm);
10279
10280 split_arguments(&ctx, startpgm);
10281 }
10282
10283       /* In merged shaders (VS+TCS, VS+GS, TES+GS), the implementation of the first stage can be completely empty. */
10284 nir_function_impl *func = nir_shader_get_entrypoint(nir);
10285 bool empty_shader = nir_cf_list_is_empty_block(&func->body) &&
10286 ((nir->info.stage == MESA_SHADER_VERTEX &&
10287 (ctx.stage == vertex_tess_control_hs || ctx.stage == vertex_geometry_gs)) ||
10288 (nir->info.stage == MESA_SHADER_TESS_EVAL &&
10289 ctx.stage == tess_eval_geometry_gs));
10290
10291 bool check_merged_wave_info = ctx.tcs_in_out_eq ? i == 0 : (shader_count >= 2 && !empty_shader);
10292 bool endif_merged_wave_info = ctx.tcs_in_out_eq ? i == 1 : check_merged_wave_info;
10293 if (check_merged_wave_info) {
10294 Builder bld(ctx.program, ctx.block);
10295
10296          /* The s_bfm only cares about s0.u[5:0], so we don't need an s_bfe or s_and here */
10297 Temp count = i == 0 ? get_arg(&ctx, args->merged_wave_info)
10298 : bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc),
10299 get_arg(&ctx, args->merged_wave_info), Operand(i * 8u));
10300
10301 Temp mask = bld.sop2(aco_opcode::s_bfm_b64, bld.def(s2), count, Operand(0u));
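         /* s_bfm_b64 computes ((1ull << count) - 1) << offset with a 6-bit
          * count, so with offset 0 this yields a mask of the first `count`
          * lanes; a count of 64 wraps to 0, which the wave64 special case
          * below handles. */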
10302 Temp cond;
10303
10304 if (ctx.program->wave_size == 64) {
10305 /* Special case for 64 active invocations, because 64 doesn't work with s_bfm */
10306 Temp active_64 = bld.sopc(aco_opcode::s_bitcmp1_b32, bld.def(s1, scc), count, Operand(6u /* log2(64) */));
10307 cond = bld.sop2(Builder::s_cselect, bld.def(bld.lm), Operand(-1u), mask, bld.scc(active_64));
10308 } else {
10309             /* We use s_bfm_b64 (not _b32), which handles a count of 32 correctly, but we need to extract the lower half of the 64-bit register */
10310 cond = emit_extract_vector(&ctx, mask, 0, bld.lm);
10311 }
10312
10313 begin_divergent_if_then(&ctx, &ic_merged_wave_info, cond);
10314 }
10315
10316 if (i) {
10317 Builder bld(ctx.program, ctx.block);
10318
10319 bld.barrier(aco_opcode::p_memory_barrier_shared);
10320 bld.sopp(aco_opcode::s_barrier);
10321
10322 if (ctx.stage == vertex_geometry_gs || ctx.stage == tess_eval_geometry_gs) {
10323 ctx.gs_wave_id = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1, m0), bld.def(s1, scc), get_arg(&ctx, args->merged_wave_info), Operand((8u << 16) | 16u));
10324 }
10325 } else if (ctx.stage == geometry_gs)
10326 ctx.gs_wave_id = get_arg(&ctx, args->gs_wave_id);
10327
10328 if (ctx.stage == fragment_fs)
10329 handle_bc_optimize(&ctx);
10330
10331 visit_cf_list(&ctx, &func->body);
10332
10333 if (ctx.program->info->so.num_outputs && (ctx.stage == vertex_vs || ctx.stage == tess_eval_vs))
10334 emit_streamout(&ctx, 0);
10335
10336 if (ctx.stage == vertex_vs || ctx.stage == tess_eval_vs) {
10337 create_vs_exports(&ctx);
10338 } else if (nir->info.stage == MESA_SHADER_GEOMETRY) {
10339 Builder bld(ctx.program, ctx.block);
10340 bld.barrier(aco_opcode::p_memory_barrier_gs_data);
10341 bld.sopp(aco_opcode::s_sendmsg, bld.m0(ctx.gs_wave_id), -1, sendmsg_gs_done(false, false, 0));
10342 } else if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
10343 write_tcs_tess_factors(&ctx);
10344 }
10345
10346 if (ctx.stage == fragment_fs)
10347 create_fs_exports(&ctx);
10348
10349 if (endif_merged_wave_info) {
10350 begin_divergent_if_else(&ctx, &ic_merged_wave_info);
10351 end_divergent_if(&ctx, &ic_merged_wave_info);
10352 }
10353
10354 ralloc_free(ctx.divergent_vals);
10355
10356 if (i == 0 && ctx.stage == vertex_tess_control_hs && ctx.tcs_in_out_eq) {
10357 /* Outputs of the previous stage are inputs to the next stage */
10358 ctx.inputs = ctx.outputs;
10359 ctx.outputs = shader_io_state();
10360 }
10361 }
10362
10363 program->config->float_mode = program->blocks[0].fp_mode.val;
10364
10365 append_logical_end(ctx.block);
10366 ctx.block->kind |= block_kind_uniform | block_kind_export_end;
10367 Builder bld(ctx.program, ctx.block);
10368 if (ctx.program->wb_smem_l1_on_end)
10369 bld.smem(aco_opcode::s_dcache_wb, false);
10370 bld.sopp(aco_opcode::s_endpgm);
10371
10372 cleanup_cfg(program);
10373 }
10374
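/* The GS copy shader runs in the VS stage: for each vertex stream with
 * outputs, it loads the values the GS wrote to the GSVS ring with
 * buffer_load_dword and re-emits them, performing streamout stores for any
 * stream that needs them and the regular VS exports for stream 0. When the
 * stream id is not known at compile time, each stream's body is wrapped in
 * a uniform if on the stream id.
 */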
10375 void select_gs_copy_shader(Program *program, struct nir_shader *gs_shader,
10376 ac_shader_config* config,
10377 struct radv_shader_args *args)
10378 {
10379 isel_context ctx = setup_isel_context(program, 1, &gs_shader, config, args, true);
10380
10381 program->next_fp_mode.preserve_signed_zero_inf_nan32 = false;
10382 program->next_fp_mode.preserve_signed_zero_inf_nan16_64 = false;
10383 program->next_fp_mode.must_flush_denorms32 = false;
10384 program->next_fp_mode.must_flush_denorms16_64 = false;
10385 program->next_fp_mode.care_about_round32 = false;
10386 program->next_fp_mode.care_about_round16_64 = false;
10387 program->next_fp_mode.denorm16_64 = fp_denorm_keep;
10388 program->next_fp_mode.denorm32 = 0;
10389 program->next_fp_mode.round32 = fp_round_ne;
10390 program->next_fp_mode.round16_64 = fp_round_ne;
10391 ctx.block->fp_mode = program->next_fp_mode;
10392
10393 add_startpgm(&ctx);
10394 append_logical_start(ctx.block);
10395
10396 Builder bld(ctx.program, ctx.block);
10397
10398 Temp gsvs_ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), program->private_segment_buffer, Operand(RING_GSVS_VS * 16u));
10399
10400 Operand stream_id(0u);
10401 if (args->shader_info->so.num_outputs)
10402 stream_id = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
10403 get_arg(&ctx, ctx.args->streamout_config), Operand(0x20018u));
10404
10405 Temp vtx_offset = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), get_arg(&ctx, ctx.args->ac.vertex_id));
10406
10407 std::stack<Block> endif_blocks;
10408
10409 for (unsigned stream = 0; stream < 4; stream++) {
10410 if (stream_id.isConstant() && stream != stream_id.constantValue())
10411 continue;
10412
10413 unsigned num_components = args->shader_info->gs.num_stream_output_components[stream];
10414 if (stream > 0 && (!num_components || !args->shader_info->so.num_outputs))
10415 continue;
10416
10417 memset(ctx.outputs.mask, 0, sizeof(ctx.outputs.mask));
10418
10419 unsigned BB_if_idx = ctx.block->index;
10420 Block BB_endif = Block();
10421 if (!stream_id.isConstant()) {
10422 /* begin IF */
10423 Temp cond = bld.sopc(aco_opcode::s_cmp_eq_u32, bld.def(s1, scc), stream_id, Operand(stream));
10424 append_logical_end(ctx.block);
10425 ctx.block->kind |= block_kind_uniform;
10426 bld.branch(aco_opcode::p_cbranch_z, cond);
10427
10428 BB_endif.kind |= ctx.block->kind & block_kind_top_level;
10429
10430 ctx.block = ctx.program->create_and_insert_block();
10431 add_edge(BB_if_idx, ctx.block);
10432 bld.reset(ctx.block);
10433 append_logical_start(ctx.block);
10434 }
10435
10436 unsigned offset = 0;
10437 for (unsigned i = 0; i <= VARYING_SLOT_VAR31; ++i) {
10438 if (args->shader_info->gs.output_streams[i] != stream)
10439 continue;
10440
10441 unsigned output_usage_mask = args->shader_info->gs.output_usage_mask[i];
10442 unsigned length = util_last_bit(output_usage_mask);
10443 for (unsigned j = 0; j < length; ++j) {
10444 if (!(output_usage_mask & (1 << j)))
10445 continue;
10446
10447 unsigned const_offset = offset * args->shader_info->gs.vertices_out * 16 * 4;
10448 Temp voffset = vtx_offset;
10449 if (const_offset >= 4096u) {
10450 voffset = bld.vadd32(bld.def(v1), Operand(const_offset / 4096u * 4096u), voffset);
10451 const_offset %= 4096u;
10452 }
10453
10454 aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(aco_opcode::buffer_load_dword, Format::MUBUF, 3, 1)};
10455 mubuf->definitions[0] = bld.def(v1);
10456 mubuf->operands[0] = Operand(gsvs_ring);
10457 mubuf->operands[1] = Operand(voffset);
10458 mubuf->operands[2] = Operand(0u);
10459 mubuf->offen = true;
10460 mubuf->offset = const_offset;
10461 mubuf->glc = true;
10462 mubuf->slc = true;
10463 mubuf->dlc = args->options->chip_class >= GFX10;
10464 mubuf->barrier = barrier_none;
10465 mubuf->can_reorder = true;
10466
10467 ctx.outputs.mask[i] |= 1 << j;
10468 ctx.outputs.temps[i * 4u + j] = mubuf->definitions[0].getTemp();
10469
10470 bld.insert(std::move(mubuf));
10471
10472 offset++;
10473 }
10474 }
10475
10476 if (args->shader_info->so.num_outputs) {
10477 emit_streamout(&ctx, stream);
10478 bld.reset(ctx.block);
10479 }
10480
10481 if (stream == 0) {
10482 create_vs_exports(&ctx);
10483 ctx.block->kind |= block_kind_export_end;
10484 }
10485
10486 if (!stream_id.isConstant()) {
10487 append_logical_end(ctx.block);
10488
10489 /* branch from then block to endif block */
10490 bld.branch(aco_opcode::p_branch);
10491 add_edge(ctx.block->index, &BB_endif);
10492 ctx.block->kind |= block_kind_uniform;
10493
10494 /* emit else block */
10495 ctx.block = ctx.program->create_and_insert_block();
10496 add_edge(BB_if_idx, ctx.block);
10497 bld.reset(ctx.block);
10498 append_logical_start(ctx.block);
10499
10500 endif_blocks.push(std::move(BB_endif));
10501 }
10502 }
10503
10504 while (!endif_blocks.empty()) {
10505 Block BB_endif = std::move(endif_blocks.top());
10506 endif_blocks.pop();
10507
10508 Block *BB_else = ctx.block;
10509
10510 append_logical_end(BB_else);
10511 /* branch from else block to endif block */
10512 bld.branch(aco_opcode::p_branch);
10513 add_edge(BB_else->index, &BB_endif);
10514 BB_else->kind |= block_kind_uniform;
10515
10516       /* emit endif merge block */
10517 ctx.block = program->insert_block(std::move(BB_endif));
10518 bld.reset(ctx.block);
10519 append_logical_start(ctx.block);
10520 }
10521
10522 program->config->float_mode = program->blocks[0].fp_mode.val;
10523
10524 append_logical_end(ctx.block);
10525 ctx.block->kind |= block_kind_uniform;
10526 bld.sopp(aco_opcode::s_endpgm);
10527
10528 cleanup_cfg(program);
10529 }
10530 }