/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
 *
 */
#include <map>

#include "aco_ir.h"
#include "aco_builder.h"
#include "util/u_math.h"
#include "sid.h"
#include "vulkan/radv_shader.h"


namespace aco {
struct lower_context {
   Program *program;
   std::vector<aco_ptr<Instruction>> instructions;
};
aco_opcode get_reduce_opcode(chip_class chip, ReduceOp op) {
   switch (op) {
   case iadd32: return chip >= GFX9 ? aco_opcode::v_add_u32 : aco_opcode::v_add_co_u32;
   case imul32: return aco_opcode::v_mul_lo_u32;
   case fadd32: return aco_opcode::v_add_f32;
   case fmul32: return aco_opcode::v_mul_f32;
   case imax32: return aco_opcode::v_max_i32;
   case imin32: return aco_opcode::v_min_i32;
   case umin32: return aco_opcode::v_min_u32;
   case umax32: return aco_opcode::v_max_u32;
   case fmin32: return aco_opcode::v_min_f32;
   case fmax32: return aco_opcode::v_max_f32;
   case iand32: return aco_opcode::v_and_b32;
   case ixor32: return aco_opcode::v_xor_b32;
   case ior32: return aco_opcode::v_or_b32;
   case iadd64: return aco_opcode::num_opcodes;
   case imul64: return aco_opcode::num_opcodes;
   case fadd64: return aco_opcode::v_add_f64;
   case fmul64: return aco_opcode::v_mul_f64;
   case imin64: return aco_opcode::num_opcodes;
   case imax64: return aco_opcode::num_opcodes;
   case umin64: return aco_opcode::num_opcodes;
   case umax64: return aco_opcode::num_opcodes;
   case fmin64: return aco_opcode::v_min_f64;
   case fmax64: return aco_opcode::v_max_f64;
   case iand64: return aco_opcode::num_opcodes;
   case ior64: return aco_opcode::num_opcodes;
   case ixor64: return aco_opcode::num_opcodes;
   /* num_opcodes marks operations with no single hardware opcode;
    * they are expanded by emit_int64_op()/emit_int64_dpp_op() instead. */
   default: return aco_opcode::num_opcodes;
   }
}
void emit_vadd32(Builder& bld, Definition def, Operand src0, Operand src1)
{
   Instruction *instr = bld.vadd32(def, src0, src1, false, Operand(s2), true);
   if (instr->definitions.size() >= 2) {
      assert(instr->definitions[1].regClass() == bld.lm);
      instr->definitions[1].setFixed(vcc);
   }
}
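/* Note on the helper above (my reading, not stated in the original): the
 * wrapper exists because vadd32 may lower to v_add_co_u32, whose VOP2
 * encoding can only write its carry to vcc, so any carry definition the
 * builder allocated elsewhere is re-pinned to vcc here. Callers should
 * therefore treat vcc as clobbered by emit_vadd32. */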
void emit_int64_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
                       PhysReg vtmp_reg, ReduceOp op,
                       unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
                       Operand *identity=NULL)
{
   Builder bld(ctx->program, &ctx->instructions);
   Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
   Definition vtmp_def[] = {Definition(vtmp_reg, v1), Definition(PhysReg{vtmp_reg+1}, v1)};
   Operand src0[] = {Operand(src0_reg, v1), Operand(PhysReg{src0_reg+1}, v1)};
   Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
   Operand src1_64 = Operand(src1_reg, v2);
   Operand vtmp_op[] = {Operand(vtmp_reg, v1), Operand(PhysReg{vtmp_reg+1}, v1)};
   Operand vtmp_op64 = Operand(vtmp_reg, v2);

   if (op == iadd64) {
      if (ctx->program->chip_class >= GFX10) {
         if (identity)
            bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
         bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                      dpp_ctrl, row_mask, bank_mask, bound_ctrl);
         bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), vtmp_op[0], src1[0]);
      } else {
         bld.vop2_dpp(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0],
                      dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      }
      bld.vop2_dpp(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == iand64) {
      bld.vop2_dpp(aco_opcode::v_and_b32, dst[0], src0[0], src1[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop2_dpp(aco_opcode::v_and_b32, dst[1], src0[1], src1[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == ior64) {
      bld.vop2_dpp(aco_opcode::v_or_b32, dst[0], src0[0], src1[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop2_dpp(aco_opcode::v_or_b32, dst[1], src0[1], src1[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == ixor64) {
      bld.vop2_dpp(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop2_dpp(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
      aco_opcode cmp = aco_opcode::num_opcodes;
      switch (op) {
      case umin64:
         cmp = aco_opcode::v_cmp_gt_u64;
         break;
      case umax64:
         cmp = aco_opcode::v_cmp_lt_u64;
         break;
      case imin64:
         cmp = aco_opcode::v_cmp_gt_i64;
         break;
      case imax64:
         cmp = aco_opcode::v_cmp_lt_i64;
         break;
      default:
         break;
      }

      if (identity) {
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[1], identity[1]);
      }
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[1], src0[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);

      bld.vopc(cmp, bld.def(bld.lm, vcc), vtmp_op64, src1_64);
      bld.vop2(aco_opcode::v_cndmask_b32, dst[0], vtmp_op[0], src1[0], Operand(vcc, bld.lm));
      bld.vop2(aco_opcode::v_cndmask_b32, dst[1], vtmp_op[1], src1[1], Operand(vcc, bld.lm));
   } else if (op == imul64) {
      /* t4 = dpp(x_hi)
       * t1 = umul_lo(t4, y_lo)
       * t3 = dpp(x_lo)
       * t0 = umul_lo(t3, y_hi)
       * t2 = iadd(t0, t1)
       * t5 = umul_hi(t3, y_lo)
       * res_hi = iadd(t2, t5)
       * res_lo = umul_lo(t3, y_lo)
       * Requires that res_hi != src0[0] and res_hi != src1[0]
       * and that vtmp[0] != res_hi.
       */
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[1]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[1], vtmp_op[0], src1[0]);
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[0], vtmp_op[0], src1[1]);
      emit_vadd32(bld, vtmp_def[1], vtmp_op[0], vtmp_op[1]);
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_hi_u32, vtmp_def[0], vtmp_op[0], src1[0]);
      emit_vadd32(bld, dst[1], vtmp_op[1], vtmp_op[0]);
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], vtmp_op[0], src1[0]);
   }
}
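/* A quick reference for the DPP arguments used above, as I understand the
 * GFX8+ encoding: dpp_ctrl selects the lane-shuffle pattern (quad_perm,
 * row_shr, row_mirror, ...), row_mask/bank_mask gate which rows/banks of the
 * wave commit the result, and bound_ctrl makes reads from disabled or
 * invalid lanes return 0 instead of the lane's old value. That is why the
 * scan code can pass an explicit identity: for operations whose identity is
 * not 0, reading an invalid lane as 0 would corrupt the reduction. */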
void emit_int64_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp, ReduceOp op)
{
   Builder bld(ctx->program, &ctx->instructions);
   Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
   RegClass src0_rc = src0_reg.reg() >= 256 ? v1 : s1;
   Operand src0[] = {Operand(src0_reg, src0_rc), Operand(PhysReg{src0_reg+1}, src0_rc)};
   Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
   Operand src0_64 = Operand(src0_reg, src0_reg.reg() >= 256 ? v2 : s2);
   Operand src1_64 = Operand(src1_reg, v2);

   if (src0_rc == s1 &&
       (op == imul64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)) {
      assert(vtmp.reg() != 0);
      bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), src0[0]);
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
      src0_reg = vtmp;
      src0[0] = Operand(vtmp, v1);
      src0[1] = Operand(PhysReg{vtmp+1}, v1);
      src0_64 = Operand(vtmp, v2);
   } else if (src0_rc == s1 && op == iadd64) {
      assert(vtmp.reg() != 0);
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
      src0[1] = Operand(PhysReg{vtmp+1}, v1);
   }

   if (op == iadd64) {
      if (ctx->program->chip_class >= GFX10) {
         bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
      } else {
         bld.vop2(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
      }
      bld.vop2(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm));
   } else if (op == iand64) {
      bld.vop2(aco_opcode::v_and_b32, dst[0], src0[0], src1[0]);
      bld.vop2(aco_opcode::v_and_b32, dst[1], src0[1], src1[1]);
   } else if (op == ior64) {
      bld.vop2(aco_opcode::v_or_b32, dst[0], src0[0], src1[0]);
      bld.vop2(aco_opcode::v_or_b32, dst[1], src0[1], src1[1]);
   } else if (op == ixor64) {
      bld.vop2(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0]);
      bld.vop2(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1]);
   } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
      aco_opcode cmp = aco_opcode::num_opcodes;
      switch (op) {
      case umin64:
         cmp = aco_opcode::v_cmp_gt_u64;
         break;
      case umax64:
         cmp = aco_opcode::v_cmp_lt_u64;
         break;
      case imin64:
         cmp = aco_opcode::v_cmp_gt_i64;
         break;
      case imax64:
         cmp = aco_opcode::v_cmp_lt_i64;
         break;
      default:
         break;
      }

      bld.vopc(cmp, bld.def(bld.lm, vcc), src0_64, src1_64);
      bld.vop2(aco_opcode::v_cndmask_b32, dst[0], src0[0], src1[0], Operand(vcc, bld.lm));
      bld.vop2(aco_opcode::v_cndmask_b32, dst[1], src0[1], src1[1], Operand(vcc, bld.lm));
   } else if (op == imul64) {
      if (src1_reg == dst_reg) {
         /* it's fine if src0==dst but not if src1==dst */
         std::swap(src0_reg, src1_reg);
         std::swap(src0[0], src1[0]);
         std::swap(src0[1], src1[1]);
         std::swap(src0_64, src1_64);
      }
      assert(!(src0_reg == src1_reg));
      /* t1 = umul_lo(x_hi, y_lo)
       * t0 = umul_lo(x_lo, y_hi)
       * t2 = iadd(t0, t1)
       * t5 = umul_hi(x_lo, y_lo)
       * res_hi = iadd(t2, t5)
       * res_lo = umul_lo(x_lo, y_lo)
       * assumes that it's ok to modify x_hi/y_hi, since we might not have vtmp
       */
      Definition tmp0_def(PhysReg{src0_reg+1}, v1);
      Definition tmp1_def(PhysReg{src1_reg+1}, v1);
      Operand tmp0_op = src0[1];
      Operand tmp1_op = src1[1];
      bld.vop3(aco_opcode::v_mul_lo_u32, tmp0_def, src0[1], src1[0]);
      bld.vop3(aco_opcode::v_mul_lo_u32, tmp1_def, src0[0], src1[1]);
      emit_vadd32(bld, tmp0_def, tmp1_op, tmp0_op);
      bld.vop3(aco_opcode::v_mul_hi_u32, tmp1_def, src0[0], src1[0]);
      emit_vadd32(bld, dst[1], tmp0_op, tmp1_op);
      bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], src0[0], src1[0]);
   }
}
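/* Why the imul64 expansion above is correct, as a worked equation (all
 * arithmetic mod 2^64, with x = x_hi*2^32 + x_lo and y = y_hi*2^32 + y_lo):
 *
 *    x * y = x_lo*y_lo
 *          + (x_lo*y_hi + x_hi*y_lo) * 2^32
 *          + x_hi*y_hi * 2^64            (vanishes mod 2^64)
 *
 * so res_lo = lo32(x_lo*y_lo) and
 *    res_hi = hi32(x_lo*y_lo) + lo32(x_lo*y_hi) + lo32(x_hi*y_lo)  (mod 2^32),
 * which is exactly the t0/t1/t2/t5 sequence in the comment: two v_mul_lo_u32
 * for the cross terms, one v_mul_hi_u32 for the carry out of the low
 * product, and two 32-bit adds. */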
void emit_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
                 PhysReg vtmp, ReduceOp op, unsigned size,
                 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
                 Operand *identity=NULL) /* for VOP3 with sparse writes */
{
   Builder bld(ctx->program, &ctx->instructions);
   RegClass rc = RegClass(RegType::vgpr, size);
   Definition dst(dst_reg, rc);
   Operand src0(src0_reg, rc);
   Operand src1(src1_reg, rc);

   aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
   bool vop3 = op == imul32 || size == 2;

   if (!vop3) {
      if (opcode == aco_opcode::v_add_co_u32)
         bld.vop2_dpp(opcode, dst, bld.def(bld.lm, vcc), src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      else
         bld.vop2_dpp(opcode, dst, src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      return;
   }

   if (opcode == aco_opcode::num_opcodes) {
      emit_int64_dpp_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op,
                        dpp_ctrl, row_mask, bank_mask, bound_ctrl, identity);
      return;
   }

   if (identity)
      bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), identity[0]);
   if (identity && size >= 2)
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), identity[1]);

   for (unsigned i = 0; i < size; i++)
      bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{src0_reg+i}, v1),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);

   bld.vop3(opcode, dst, Operand(vtmp, rc), src1);
}
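/* Design note (my reading of the code above): DPP is a VOP1/VOP2 encoding
 * feature on these chips, so operations that need the VOP3 encoding
 * (v_mul_lo_u32, and any two-dword op) cannot carry a dpp_ctrl themselves.
 * The fallback is a two-step sequence: v_mov_b32 with DPP shuffles src0 into
 * vtmp (seeded with the identity so inactive lanes stay harmless), then a
 * plain VOP3 combines vtmp with src1. */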
void emit_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
             PhysReg vtmp, ReduceOp op, unsigned size)
{
   Builder bld(ctx->program, &ctx->instructions);
   RegClass rc = RegClass(RegType::vgpr, size);
   Definition dst(dst_reg, rc);
   Operand src0(src0_reg, RegClass(src0_reg.reg() >= 256 ? RegType::vgpr : RegType::sgpr, size));
   Operand src1(src1_reg, rc);

   aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
   bool vop3 = op == imul32 || size == 2;

   if (opcode == aco_opcode::num_opcodes) {
      emit_int64_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op);
      return;
   }

   if (vop3) {
      bld.vop3(opcode, dst, src0, src1);
   } else if (opcode == aco_opcode::v_add_co_u32) {
      bld.vop2(opcode, dst, bld.def(bld.lm, vcc), src0, src1);
   } else {
      bld.vop2(opcode, dst, src0, src1);
   }
}
void emit_dpp_mov(lower_context *ctx, PhysReg dst, PhysReg src0, unsigned size,
                  unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl)
{
   Builder bld(ctx->program, &ctx->instructions);
   for (unsigned i = 0; i < size; i++) {
      bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{dst+i}, v1), Operand(PhysReg{src0+i}, v1),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   }
}
uint32_t get_reduction_identity(ReduceOp op, unsigned idx)
{
   switch (op) {
   case iadd32:
   case iadd64:
   case fadd32:
   case fadd64:
   case ior32:
   case ior64:
   case ixor32:
   case ixor64:
   case umax32:
   case umax64:
      return 0;
   case imul32:
   case imul64:
      return idx ? 0 : 1;
   case fmul32:
      return 0x3f800000u; /* 1.0 */
   case fmul64:
      return idx ? 0x3ff00000u : 0u; /* 1.0 */
   case imin32:
      return INT32_MAX;
   case imin64:
      return idx ? 0x7fffffffu : 0xffffffffu;
   case imax32:
      return INT32_MIN;
   case imax64:
      return idx ? 0x80000000u : 0;
   case umin32:
   case umin64:
   case iand32:
   case iand64:
      return 0xffffffffu;
   case fmin32:
      return 0x7f800000u; /* infinity */
   case fmin64:
      return idx ? 0x7ff00000u : 0u; /* infinity */
   case fmax32:
      return 0xff800000u; /* negative infinity */
   case fmax64:
      return idx ? 0xfff00000u : 0u; /* negative infinity */
   default:
      unreachable("Invalid reduction operation");
   }
   return 0;
}
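/* The identity element e of each reduction satisfies op(e, x) == x, which is
 * what makes it safe to write into inactive or bound-ctrl lanes. A few spot
 * checks against the table above: fmul uses 1.0 (0x3f800000 as an f32 bit
 * pattern; 0x3ff00000 is the high dword of the f64 encoding); fmin uses
 * +infinity since min(+inf, x) == x; fmax uses -infinity; imin uses
 * INT32_MAX; iand uses all-ones. The idx parameter selects the low (0) or
 * high (1) dword of a 64-bit identity. */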
void emit_ds_swizzle(Builder bld, PhysReg dst, PhysReg src, unsigned size, unsigned ds_pattern)
{
   for (unsigned i = 0; i < size; i++) {
      bld.ds(aco_opcode::ds_swizzle_b32, Definition(PhysReg{dst+i}, v1),
             Operand(PhysReg{src+i}, v1), ds_pattern);
   }
}
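/* Sketch of the ds_swizzle_b32 patterns used below, assuming the usual
 * encoding of the 15-bit offset: with bit 15 clear, the "bitmode" fields
 * (and_mask, or_mask, xor_mask over lane_id[4:0]) remap each lane within a
 * group of 32 as lane' = ((lane & and_mask) | or_mask) ^ xor_mask, so e.g.
 * ds_pattern_bitmode(0x1f, 0x00, 0x04) swaps lanes 4 apart. With bit 15 set
 * ((1 << 15) below), the low bits are instead a quad_perm that permutes each
 * group of four lanes. */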
void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsigned cluster_size, PhysReg tmp,
                    PhysReg stmp, PhysReg vtmp, PhysReg sitmp, Operand src, Definition dst)
{
   assert(cluster_size == ctx->program->wave_size || op == aco_opcode::p_reduce);
   assert(cluster_size <= ctx->program->wave_size);

   Builder bld(ctx->program, &ctx->instructions);

   Operand identity[2];
   identity[0] = Operand(get_reduction_identity(reduce_op, 0));
   identity[1] = Operand(get_reduction_identity(reduce_op, 1));
   Operand vcndmask_identity[2] = {identity[0], identity[1]};

   /* First, copy the source to tmp and set inactive lanes to the identity */
   bld.sop1(Builder::s_or_saveexec, Definition(stmp, bld.lm), Definition(scc, s1), Definition(exec, bld.lm), Operand(UINT64_MAX), Operand(exec, bld.lm));

   for (unsigned i = 0; i < src.size(); i++) {
      /* p_exclusive_scan needs it to be a sgpr or inline constant for the v_writelane_b32
       * except on GFX10, where v_writelane_b32 can take a literal. */
      if (identity[i].isLiteral() && op == aco_opcode::p_exclusive_scan && ctx->program->chip_class < GFX10) {
         bld.sop1(aco_opcode::s_mov_b32, Definition(PhysReg{sitmp+i}, s1), identity[i]);
         identity[i] = Operand(PhysReg{sitmp+i}, s1);

         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      } else if (identity[i].isLiteral()) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      }
   }

   for (unsigned i = 0; i < src.size(); i++) {
      bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(PhysReg{tmp + i}, v1),
                   vcndmask_identity[i], Operand(PhysReg{src.physReg() + i}, v1),
                   Operand(stmp, bld.lm));
   }

   bool reduction_needs_last_op = false;
   switch (op) {
   case aco_opcode::p_reduce:
      if (cluster_size == 1) break;

      if (ctx->program->chip_class <= GFX7) {
         reduction_needs_last_op = true;
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(1, 0, 3, 2));
         if (cluster_size == 2) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(2, 3, 0, 1));
         if (cluster_size == 4) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x04));
         if (cluster_size == 8) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x08));
         if (cluster_size == 16) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
         if (cluster_size == 32) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         for (unsigned i = 0; i < src.size(); i++)
            bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp + i}, v1), Operand(0u));
         // TODO: it would be more effective to do the last reduction step on SALU
         emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
         reduction_needs_last_op = false;
         break;
      }

      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(1, 0, 3, 2), 0xf, 0xf, false);
      if (cluster_size == 2) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(2, 3, 0, 1), 0xf, 0xf, false);
      if (cluster_size == 4) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_half_mirror, 0xf, 0xf, false);
      if (cluster_size == 8) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_mirror, 0xf, 0xf, false);
      if (cluster_size == 16) break;

      if (ctx->program->chip_class >= GFX10) {
         /* GFX10+ doesn't support row_bcast15 and row_bcast31 */
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), Operand(0u), Operand(0u));

         if (cluster_size == 32) {
            reduction_needs_last_op = true;
            break;
         }

         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
         for (unsigned i = 0; i < src.size(); i++)
            bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(0u));
         // TODO: it would be more effective to do the last reduction step on SALU
         emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
         break;
      }

      if (cluster_size == 32) {
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
         reduction_needs_last_op = true;
         break;
      }
      assert(cluster_size == 64);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast15, 0xa, 0xf, false);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast31, 0xc, 0xf, false);
      break;
   case aco_opcode::p_exclusive_scan:
      if (ctx->program->chip_class >= GFX10) { /* gfx10 doesn't support wf_sr1, so emulate it */
         /* shift rows right */
         emit_dpp_mov(ctx, vtmp, tmp, src.size(), dpp_row_sr(1), 0xf, 0xf, true);

         /* fill in the gaps in rows 1 and 3 */
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10000u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0x10000u));
         for (unsigned i = 0; i < src.size(); i++) {
            Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
                                         Definition(PhysReg{vtmp+i}, v1),
                                         Operand(PhysReg{tmp+i}, v1),
                                         Operand(0xffffffffu), Operand(0xffffffffu)).instr;
            static_cast<VOP3A_instruction*>(perm)->opsel = 1; /* FI (Fetch Inactive) */
         }
         bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(UINT64_MAX));

         if (ctx->program->wave_size == 64) {
            /* fill in the gap in row 2 */
            for (unsigned i = 0; i < src.size(); i++) {
               bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
               bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
            }
         }
         std::swap(tmp, vtmp);
      } else if (ctx->program->chip_class >= GFX8) {
         emit_dpp_mov(ctx, tmp, tmp, src.size(), dpp_wf_sr1, 0xf, 0xf, true);
      } else {
         // TODO: use LDS on CS with a single write and shifted read
         /* wavefront shift_right by 1 on SI/CI */
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(0, 0, 1, 2));
         emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x07)); /* mirror(8) */
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10101010u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x08)); /* swap(8) */
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x01000100u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x10)); /* swap(16) */
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(1u), Operand(16u));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(1u), Operand(16u));
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         for (unsigned i = 0; i < src.size(); i++) {
            bld.writelane(Definition(PhysReg{vtmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{vtmp+i}, v1));
            bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(0u));
            bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
            identity[i] = Operand(0u); /* prevent further uses of identity */
         }
         std::swap(tmp, vtmp);
      }

      for (unsigned i = 0; i < src.size(); i++) {
         if (!identity[i].isConstant() || identity[i].constantValue()) { /* bound_ctrl should take care of this otherwise */
            if (ctx->program->chip_class < GFX10)
               assert((identity[i].isConstant() && !identity[i].isLiteral()) || identity[i].physReg() == PhysReg{sitmp+i});
            bld.writelane(Definition(PhysReg{tmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{tmp+i}, v1));
         }
      }
      /* fall through */
   case aco_opcode::p_inclusive_scan:
      assert(cluster_size == ctx->program->wave_size);
      if (ctx->program->chip_class <= GFX7) {
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1e, 0x00, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xAAAAAAAAu));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1c, 0x01, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xCCCCCCCCu));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x18, 0x03, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xF0F0F0F0u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x10, 0x07, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xFF00FF00u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x00, 0x0f, 0x00));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(16u), Operand(16u));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(16u), Operand(16u));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         for (unsigned i = 0; i < src.size(); i++)
            bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
         bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
         emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
         break;
      }

      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(1), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(2), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(4), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(8), 0xf, 0xf, false, identity);
      if (ctx->program->chip_class >= GFX10) {
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(16u), Operand(16u));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(16u), Operand(16u));
         for (unsigned i = 0; i < src.size(); i++) {
            Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
                                         Definition(PhysReg{vtmp+i}, v1),
                                         Operand(PhysReg{tmp+i}, v1),
                                         Operand(0xffffffffu), Operand(0xffffffffu)).instr;
            static_cast<VOP3A_instruction*>(perm)->opsel = 1; /* FI (Fetch Inactive) */
         }
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         if (ctx->program->wave_size == 64) {
            bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
            for (unsigned i = 0; i < src.size(); i++)
               bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
            emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
         }
      } else {
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast15, 0xa, 0xf, false, identity);
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast31, 0xc, 0xf, false, identity);
      }
      break;
   default:
      unreachable("Invalid reduction mode");
   }

   if (op == aco_opcode::p_reduce) {
      if (reduction_needs_last_op && dst.regClass().type() == RegType::vgpr) {
         bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
         emit_op(ctx, dst.physReg(), tmp, vtmp, PhysReg{0}, reduce_op, src.size());
         return;
      }

      if (reduction_needs_last_op)
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
   }

   /* restore exec */
   bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));

   if (dst.regClass().type() == RegType::sgpr) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.readlane(Definition(PhysReg{dst.physReg() + k}, s1),
                      Operand(PhysReg{tmp + k}, v1), Operand(ctx->program->wave_size - 1));
      }
   } else if (dst.physReg() != tmp) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{dst.physReg() + k}, v1),
                  Operand(PhysReg{tmp + k}, v1));
      }
   }
}
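/* Shape of the lowering above, in case it helps: a cluster reduction is a
 * butterfly of log2(cluster_size) steps, where step k combines each lane
 * with a partner 2^k lanes away (via DPP on GFX8+, ds_swizzle on GFX6/7, or
 * v_permlanex16/readlane for the cross-16/cross-32 steps newer chips lack).
 * Scans run the same ladder with shift-right patterns so lane i accumulates
 * lanes 0..i; exclusive scans first shift the whole wave right by one and
 * seed lane 0 with the identity. */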
struct copy_operation {
   Operand op;
   Definition def;
   unsigned bytes;
   union {
      uint8_t uses[8];
      uint64_t is_used = 0;
   };
};
void split_copy(unsigned offset, Definition *def, Operand *op, const copy_operation& src,
                bool ignore_uses, unsigned max_size)
{
   PhysReg def_reg = src.def.physReg();
   PhysReg op_reg = src.op.physReg();
   def_reg.reg_b += offset;
   op_reg.reg_b += offset;

   max_size = MIN2(max_size, src.def.regClass().type() == RegType::vgpr ? 4 : 8);

   /* make sure the size is a power of two and reg % bytes == 0 */
   unsigned bytes = 1;
   for (; bytes <= max_size; bytes *= 2) {
      unsigned next = bytes * 2u;
      bool can_increase = def_reg.reg_b % next == 0 &&
                          offset + next <= src.bytes && next <= max_size;
      if (!src.op.isConstant() && can_increase)
         can_increase = op_reg.reg_b % next == 0;
      for (unsigned i = 0; !ignore_uses && can_increase && (i < bytes); i++)
         can_increase = (src.uses[offset + bytes + i] == 0) == (src.uses[offset] == 0);
      if (!can_increase)
         break;
   }

   RegClass def_cls = bytes % 4 == 0 ? RegClass(src.def.regClass().type(), bytes / 4u) :
                      RegClass(src.def.regClass().type(), bytes).as_subdword();
   *def = Definition(src.def.tempId(), def_reg, def_cls);
   if (src.op.isConstant()) {
      assert(offset == 0 || (offset == 4 && src.op.bytes() == 8));
      if (src.op.bytes() == 8 && bytes == 4)
         *op = Operand(uint32_t(src.op.constantValue64() >> (offset * 8u)));
      else
         *op = src.op;
   } else {
      RegClass op_cls = bytes % 4 == 0 ? RegClass(src.op.regClass().type(), bytes / 4u) :
                        RegClass(src.op.regClass().type(), bytes).as_subdword();
      *op = Operand(op_reg, op_cls);
      op->setTemp(Temp(src.op.tempId(), op_cls));
   }
}
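/* Worked example for split_copy, under the rules above: a 6-byte VGPR copy
 * starting at a dword-aligned register splits into a 4-byte piece (the
 * largest power of two that fits and keeps reg % bytes == 0) followed by a
 * 2-byte subdword piece; an 8-byte SGPR copy stays whole because max_size is
 * 8 for sgprs but 4 for vgprs. A piece also stops growing where the uses[]
 * pattern changes, so used and unused bytes never share one split. */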
uint32_t get_intersection_mask(int a_start, int a_size,
                               int b_start, int b_size)
{
   int intersection_start = MAX2(b_start - a_start, 0);
   int intersection_end = MAX2(b_start + b_size - a_start, 0);
   if (intersection_start >= a_size || intersection_end == 0)
      return 0;

   uint32_t mask = u_bit_consecutive(0, a_size);
   return u_bit_consecutive(intersection_start, intersection_end - intersection_start) & mask;
}
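/* Worked example: get_intersection_mask(a_start=0, a_size=4, b_start=2,
 * b_size=4) gives intersection_start = 2 and intersection_end = 6, clamped
 * by the a_size mask 0b1111 to 0b1100 - bytes 2 and 3 of interval a overlap
 * interval b. The callers feed in reg_b byte offsets to find which bytes of
 * one copy are consumed or clobbered by another. */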
bool do_copy(lower_context* ctx, Builder& bld, const copy_operation& copy, bool *preserve_scc)
{
   bool did_copy = false;
   for (unsigned offset = 0; offset < copy.bytes;) {
      if (copy.uses[offset]) {
         offset++;
         continue;
      }

      Definition def;
      Operand op;
      split_copy(offset, &def, &op, copy, false, 8);

      if (def.physReg() == scc) {
         bld.sopc(aco_opcode::s_cmp_lg_i32, def, op, Operand(0u));
         *preserve_scc = true;
      } else if (def.bytes() == 8 && def.getTemp().type() == RegType::sgpr) {
         bld.sop1(aco_opcode::s_mov_b64, def, Operand(op.physReg(), s2));
      } else {
         bld.copy(def, op);
      }

      ctx->program->statistics[statistic_copies]++;

      did_copy = true;
      offset += def.bytes();
   }
   return did_copy;
}
void do_swap(lower_context *ctx, Builder& bld, const copy_operation& copy, bool preserve_scc, Pseudo_instruction *pi)
{
   unsigned offset = 0;

   if (copy.bytes == 3 && (copy.def.physReg().reg_b % 4 <= 1) &&
       (copy.def.physReg().reg_b % 4) == (copy.op.physReg().reg_b % 4)) {
      /* instead of doing a 2-byte and 1-byte swap, do a 4-byte swap and then fixup with a 1-byte swap */
      PhysReg op = copy.op.physReg();
      PhysReg def = copy.def.physReg();
      op.reg_b &= ~0x3;
      def.reg_b &= ~0x3;

      copy_operation tmp;
      tmp.op = Operand(op, v1);
      tmp.def = Definition(def, v1);
      tmp.bytes = 4;
      memset(tmp.uses, 1, 4);
      do_swap(ctx, bld, tmp, preserve_scc, pi);

      op.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
      def.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
      tmp.op = Operand(op, v1b);
      tmp.def = Definition(def, v1b);
      tmp.bytes = 1;
      memset(tmp.uses, 1, 1);
      do_swap(ctx, bld, tmp, preserve_scc, pi);
      return;
   }

   for (; offset < copy.bytes;) {
      Definition def;
      Operand op;
      split_copy(offset, &def, &op, copy, true, 8);

      assert(op.regClass() == def.regClass());
      Operand def_as_op = Operand(def.physReg(), def.regClass());
      Definition op_as_def = Definition(op.physReg(), op.regClass());
      if (ctx->program->chip_class >= GFX9 && def.regClass() == v1) {
         bld.vop1(aco_opcode::v_swap_b32, def, op_as_def, op, def_as_op);
         ctx->program->statistics[statistic_copies]++;
      } else if (def.regClass() == v1) {
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, def, op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         ctx->program->statistics[statistic_copies] += 3;
      } else if (op.physReg() == scc || def.physReg() == scc) {
         /* we need to swap scc and another sgpr */
         assert(!preserve_scc);

         PhysReg other = op.physReg() == scc ? def.physReg() : op.physReg();

         bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
         bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1), Operand(0u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1));
         ctx->program->statistics[statistic_copies] += 3;
      } else if (def.regClass() == s1) {
         if (preserve_scc) {
            bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), op);
            bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op);
            bld.sop1(aco_opcode::s_mov_b32, def, Operand(pi->scratch_sgpr, s1));
         } else {
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, def, Definition(scc, s1), op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
         }
         ctx->program->statistics[statistic_copies] += 3;
      } else if (def.regClass() == s2) {
         if (preserve_scc)
            bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
         bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
         bld.sop2(aco_opcode::s_xor_b64, def, Definition(scc, s1), op, def_as_op);
         bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
         if (preserve_scc)
            bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(pi->scratch_sgpr, s1), Operand(0u));
         ctx->program->statistics[statistic_copies] += 3;
      } else if (ctx->program->chip_class >= GFX9 && def.bytes() == 2 && def.physReg().reg() == op.physReg().reg()) {
         aco_ptr<VOP3P_instruction> vop3p{create_instruction<VOP3P_instruction>(aco_opcode::v_pk_add_u16, Format::VOP3P, 2, 1)};
         vop3p->operands[0] = Operand(PhysReg{op.physReg().reg()}, v1);
         vop3p->operands[1] = Operand(0u);
         vop3p->definitions[0] = Definition(PhysReg{op.physReg().reg()}, v1);
         vop3p->opsel_lo = 0x1;
         vop3p->opsel_hi = 0x2;
         bld.insert(std::move(vop3p));
      } else {
         assert(def.regClass().is_subdword());
         bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         bld.vop2_sdwa(aco_opcode::v_xor_b32, def, op, def_as_op);
         bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         ctx->program->statistics[statistic_copies] += 3;
      }

      offset += def.bytes();
   }

   /* fixup in case we swapped bytes we shouldn't have */
   copy_operation tmp_copy = copy;
   tmp_copy.op.setFixed(copy.def.physReg());
   tmp_copy.def.setFixed(copy.op.physReg());
   do_copy(ctx, bld, tmp_copy, &preserve_scc);
}
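/* The three-xor sequences above are the classic in-place xor swap
 * (a ^= b; b ^= a; a ^= b), chosen because it needs no scratch register;
 * it works for any register pair as long as the two operands don't alias.
 * v_swap_b32 replaces it on GFX9+ where that opcode exists, and the SDWA
 * variant handles sub-dword swaps. */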
void handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context* ctx, chip_class chip_class, Pseudo_instruction *pi)
{
   Builder bld(ctx->program, &ctx->instructions);
   aco_ptr<Instruction> mov;
   std::map<PhysReg, copy_operation>::iterator it = copy_map.begin();
   std::map<PhysReg, copy_operation>::iterator target;
   bool writes_scc = false;

   /* count the number of uses for each dst reg */
   while (it != copy_map.end()) {

      if (it->second.def.physReg() == scc)
         writes_scc = true;

      assert(!pi->tmp_in_scc || !(it->second.def.physReg() == pi->scratch_sgpr));

      /* if src and dst reg are the same, remove operation */
      if (it->first == it->second.op.physReg()) {
         it = copy_map.erase(it);
         continue;
      }

      /* split large copies */
      if (it->second.bytes > 8) {
         assert(!it->second.op.isConstant());
         assert(!it->second.def.regClass().is_subdword());
         RegClass rc = RegClass(it->second.def.regClass().type(), it->second.def.size() - 2);
         Definition hi_def = Definition(PhysReg{it->first + 2}, rc);
         rc = RegClass(it->second.op.regClass().type(), it->second.op.size() - 2);
         Operand hi_op = Operand(PhysReg{it->second.op.physReg() + 2}, rc);
         copy_operation copy = {hi_op, hi_def, it->second.bytes - 8};
         copy_map[hi_def.physReg()] = copy;
         assert(it->second.op.physReg().byte() == 0 && it->second.def.physReg().byte() == 0);
         it->second.op = Operand(it->second.op.physReg(), it->second.op.regClass().type() == RegType::sgpr ? s2 : v2);
         it->second.def = Definition(it->second.def.physReg(), it->second.def.regClass().type() == RegType::sgpr ? s2 : v2);
         it->second.bytes = 8;
      }

      /* check if the definition reg is used by another copy operation */
      for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
         if (copy.second.op.isConstant())
            continue;
         for (uint16_t i = 0; i < it->second.bytes; i++) {
            /* distance might underflow */
            unsigned distance = it->first.reg_b + i - copy.second.op.physReg().reg_b;
            if (distance < copy.second.bytes)
               it->second.uses[i] += 1;
         }
      }
      ++it;
   }
   /* first, handle paths in the location transfer graph */
   bool preserve_scc = pi->tmp_in_scc && !writes_scc;
   it = copy_map.begin();
   while (it != copy_map.end()) {

      /* try to coalesce 32-bit sgpr copies to 64-bit copies */
      if (it->second.is_used == 0 &&
          it->second.def.getTemp().type() == RegType::sgpr && it->second.bytes == 4 &&
          !it->second.op.isConstant() && it->first % 2 == it->second.op.physReg() % 2) {

         PhysReg other_def_reg = PhysReg{it->first % 2 ? it->first - 1 : it->first + 1};
         PhysReg other_op_reg = PhysReg{it->first % 2 ? it->second.op.physReg() - 1 : it->second.op.physReg() + 1};
         std::map<PhysReg, copy_operation>::iterator other = copy_map.find(other_def_reg);

         if (other != copy_map.end() && !other->second.is_used && other->second.bytes == 4 &&
             other->second.op.physReg() == other_op_reg && !other->second.op.isConstant()) {
            std::map<PhysReg, copy_operation>::iterator to_erase = it->first % 2 ? it : other;
            it = it->first % 2 ? other : it;
            copy_map.erase(to_erase);
            it->second.bytes = 8;
         }
      }
      // TODO: try to coalesce subdword copies

      /* find portions where the target reg is not used as operand for any other copy */
      if (it->second.is_used) {
         if (it->second.op.isConstant()) {
            /* we have to skip constants until is_used=0 */
            ++it;
            continue;
         }

         unsigned has_zero_use_bytes = 0;
         for (unsigned i = 0; i < it->second.bytes; i++)
            has_zero_use_bytes |= (it->second.uses[i] == 0) << i;

         if (has_zero_use_bytes) {
            /* Skipping partial copying and doing a v_swap_b32 and then fixup
             * copies is usually beneficial for sub-dword copies, but if doing
             * a partial copy allows further copies, it should be done instead. */
            bool partial_copy = (has_zero_use_bytes == 0xf) || (has_zero_use_bytes == 0xf0);
            for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
               if (partial_copy)
                  break;
               for (uint16_t i = 0; i < copy.second.bytes; i++) {
                  /* distance might underflow */
                  unsigned distance = copy.first.reg_b + i - it->second.op.physReg().reg_b;
                  if (distance < it->second.bytes && copy.second.uses[i] == 1 &&
                      !it->second.uses[distance])
                     partial_copy = true;
               }
            }

            if (!partial_copy) {
               ++it;
               continue;
            }
         } else {
            /* full target reg is used: register swapping needed */
            ++it;
            continue;
         }
      }

      bool did_copy = do_copy(ctx, bld, it->second, &preserve_scc);

      /* reduce the number of uses of the operand reg by one */
      if (did_copy && !it->second.op.isConstant()) {
         for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
            for (uint16_t i = 0; i < copy.second.bytes; i++) {
               /* distance might underflow */
               unsigned distance = copy.first.reg_b + i - it->second.op.physReg().reg_b;
               if (distance < it->second.bytes && !it->second.uses[distance])
                  copy.second.uses[i] -= 1;
            }
         }
      }

      if (it->second.is_used == 0) {
         /* the target reg is not used as operand for any other copy, so we
          * copied to all of it */
         copy_map.erase(it);
         it = copy_map.begin();
      } else {
         /* we only performed some portions of this copy, so split it to only
          * leave the portions that still need to be done */
         copy_operation original = it->second; /* the map insertion below can overwrite this */
         copy_map.erase(it);
         for (unsigned offset = 0; offset < original.bytes;) {
            if (original.uses[offset] == 0) {
               offset++;
               continue;
            }

            Definition def;
            Operand op;
            split_copy(offset, &def, &op, original, false, 8);

            copy_operation copy = {op, def, def.bytes()};
            for (unsigned i = 0; i < copy.bytes; i++)
               copy.uses[i] = original.uses[i + offset];
            copy_map[def.physReg()] = copy;

            offset += def.bytes();
         }

         it = copy_map.begin();
      }
   }
   if (copy_map.empty())
      return;

   /* all target regs are needed as operand somewhere which means, all entries are part of a cycle */
   unsigned largest = 0;
   for (const std::pair<PhysReg, copy_operation>& op : copy_map)
      largest = MAX2(largest, op.second.bytes);

   while (!copy_map.empty()) {

      /* Perform larger swaps first, so that we don't have to split the uses of
       * registers we swap (we don't have to because of alignment restrictions) and
       * larger swaps can make other swaps unnecessary. */
      auto it = copy_map.begin();
      for (auto it2 = copy_map.begin(); it2 != copy_map.end(); ++it2) {
         if (it2->second.bytes > it->second.bytes) {
            it = it2;
            if (it->second.bytes == largest)
               break;
         }
      }

      /* should already be done */
      assert(!it->second.op.isConstant());

      assert(it->second.op.isFixed());
      assert(it->second.def.regClass() == it->second.op.regClass());

      if (it->first == it->second.op.physReg()) {
         copy_map.erase(it);
         continue;
      }

      if (preserve_scc && it->second.def.getTemp().type() == RegType::sgpr)
         assert(!(it->second.def.physReg() == pi->scratch_sgpr));

      /* to resolve the cycle, we have to swap the src reg with the dst reg */
      copy_operation swap = it->second;
      do_swap(ctx, bld, swap, preserve_scc, pi);

      /* remove from map */
      copy_map.erase(it);

      /* change the operand reg of the target's use and split uses if needed */
      target = copy_map.begin();
      uint32_t bytes_left = u_bit_consecutive(0, swap.bytes);
      for (; target != copy_map.end(); ++target) {
         if (target->second.op.physReg() == swap.def.physReg() && swap.bytes == target->second.bytes) {
            target->second.op.setFixed(swap.op.physReg());
            break;
         }

         uint32_t imask = get_intersection_mask(swap.def.physReg().reg_b, swap.bytes,
                                                target->second.op.physReg().reg_b, target->second.bytes);

         if (!imask)
            continue;

         assert(target->second.bytes < swap.bytes);

         PhysReg new_reg = swap.op.physReg();
         new_reg.reg_b += target->second.op.physReg().reg_b - swap.def.physReg().reg_b;
         target->second.op.setFixed(new_reg);

         bytes_left &= ~imask;
         if (!bytes_left)
            break;
      }
   }
}
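/* A small scenario for handle_operands, matching the two phases above: the
 * map {s0 := s1, s1 := s2} is a path - s0 is not an operand of any copy, so
 * s0 := s1 is emitted first, s1's use count drops to zero, and s1 := s2
 * follows. The map {s0 := s1, s1 := s0} is a cycle - every destination is
 * still needed as an operand - so the second phase breaks it with a single
 * swap and rewrites the remaining operands. */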
void lower_to_hw_instr(Program* program)
{
   Block *discard_block = NULL;

   for (size_t i = 0; i < program->blocks.size(); i++)
   {
      Block *block = &program->blocks[i];
      lower_context ctx;
      ctx.program = program;
      Builder bld(program, &ctx.instructions);

      bool set_mode = i == 0 && block->fp_mode.val != program->config->float_mode;
      for (unsigned pred : block->linear_preds) {
         if (program->blocks[pred].fp_mode.val != block->fp_mode.val) {
            set_mode = true;
            break;
         }
      }
      if (set_mode) {
         /* only allow changing modes at top-level blocks so this doesn't break
          * the "jump over empty blocks" optimization */
         assert(block->kind & block_kind_top_level);
         uint32_t mode = block->fp_mode.val;
         /* "((size - 1) << 11) | register" (MODE is encoded as register 1) */
         bld.sopk(aco_opcode::s_setreg_imm32_b32, Operand(mode), (7 << 11) | 1);
      }
      for (size_t j = 0; j < block->instructions.size(); j++) {
         aco_ptr<Instruction>& instr = block->instructions[j];
         aco_ptr<Instruction> mov;
         if (instr->format == Format::PSEUDO) {
            Pseudo_instruction *pi = (Pseudo_instruction*)instr.get();

            switch (instr->opcode)
            {
            case aco_opcode::p_extract_vector:
            {
               PhysReg reg = instr->operands[0].physReg();
               Definition& def = instr->definitions[0];
               reg.reg_b += instr->operands[1].constantValue() * def.bytes();

               if (reg == def.physReg())
                  break;

               RegClass op_rc = def.regClass().is_subdword() ? def.regClass() :
                                RegClass(instr->operands[0].getTemp().type(), def.size());
               std::map<PhysReg, copy_operation> copy_operations;
               copy_operations[def.physReg()] = {Operand(reg, op_rc), def, def.bytes()};
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_create_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               PhysReg reg = instr->definitions[0].physReg();

               for (const Operand& op : instr->operands) {
                  if (op.isConstant()) {
                     const Definition def = Definition(reg, RegClass(instr->definitions[0].getTemp().type(), op.size()));
                     copy_operations[reg] = {op, def, op.bytes()};
                     reg.reg_b += op.bytes();
                     continue;
                  }
                  if (op.isUndefined()) {
                     // TODO: coalesce subdword copies if dst byte is 0
                     reg.reg_b += op.bytes();
                     continue;
                  }

                  RegClass rc_def = op.regClass().is_subdword() ? op.regClass() :
                                    RegClass(instr->definitions[0].getTemp().type(), op.size());
                  const Definition def = Definition(reg, rc_def);
                  copy_operations[def.physReg()] = {op, def, op.bytes()};
                  reg.reg_b += op.bytes();
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_split_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               PhysReg reg = instr->operands[0].physReg();

               for (const Definition& def : instr->definitions) {
                  RegClass rc_op = def.regClass().is_subdword() ? def.regClass() :
                                   RegClass(instr->operands[0].getTemp().type(), def.size());
                  const Operand op = Operand(reg, rc_op);
                  copy_operations[def.physReg()] = {op, def, def.bytes()};
                  reg.reg_b += def.bytes();
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_parallelcopy:
            case aco_opcode::p_wqm:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               for (unsigned i = 0; i < instr->operands.size(); i++) {
                  assert(instr->definitions[i].bytes() == instr->operands[i].bytes());
                  copy_operations[instr->definitions[i].physReg()] = {instr->operands[i], instr->definitions[i], instr->operands[i].bytes()};
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_exit_early_if:
            {
               /* don't bother with an early exit near the end of the program */
               if ((block->instructions.size() - 1 - j) <= 4 &&
                    block->instructions.back()->opcode == aco_opcode::s_endpgm) {
                  unsigned null_exp_dest = (ctx.program->stage & hw_fs) ? 9 /* NULL */ : V_008DFC_SQ_EXP_POS;
                  bool ignore_early_exit = true;

                  for (unsigned k = j + 1; k < block->instructions.size(); ++k) {
                     const aco_ptr<Instruction> &instr = block->instructions[k];
                     if (instr->opcode == aco_opcode::s_endpgm ||
                         instr->opcode == aco_opcode::p_logical_end)
                        continue;
                     else if (instr->opcode == aco_opcode::exp &&
                              static_cast<Export_instruction *>(instr.get())->dest == null_exp_dest)
                        continue;
                     else if (instr->opcode == aco_opcode::p_parallelcopy &&
                              instr->definitions[0].isFixed() &&
                              instr->definitions[0].physReg() == exec)
                        continue;

                     ignore_early_exit = false;
                  }

                  if (ignore_early_exit)
                     break;
               }

               if (!discard_block) {
                  discard_block = program->create_and_insert_block();
                  block = &program->blocks[i];

                  bld.reset(discard_block);
                  bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
                          0, V_008DFC_SQ_EXP_NULL, false, true, true);
                  if (program->wb_smem_l1_on_end)
                     bld.smem(aco_opcode::s_dcache_wb);
                  bld.sopp(aco_opcode::s_endpgm);

                  bld.reset(&ctx.instructions);
               }

               //TODO: exec can be zero here with block_kind_discard

               assert(instr->operands[0].physReg() == scc);
               bld.sopp(aco_opcode::s_cbranch_scc0, instr->operands[0], discard_block->index);

               discard_block->linear_preds.push_back(block->index);
               block->linear_succs.push_back(discard_block->index);
               break;
            }
            case aco_opcode::p_spill:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->operands[2].size(); i++)
                  bld.writelane(bld.def(v1, instr->operands[0].physReg()),
                                Operand(PhysReg{instr->operands[2].physReg() + i}, s1),
                                Operand(instr->operands[1].constantValue() + i),
                                instr->operands[0]);
               break;
            }
            case aco_opcode::p_reload:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->definitions[0].size(); i++)
                  bld.readlane(bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                               instr->operands[0],
                               Operand(instr->operands[1].constantValue() + i));
               break;
            }
            case aco_opcode::p_as_uniform:
            {
               if (instr->operands[0].isConstant() || instr->operands[0].regClass().type() == RegType::sgpr) {
                  std::map<PhysReg, copy_operation> copy_operations;
                  copy_operations[instr->definitions[0].physReg()] = {instr->operands[0], instr->definitions[0], instr->definitions[0].bytes()};
                  handle_operands(copy_operations, &ctx, program->chip_class, pi);
               } else {
                  assert(instr->operands[0].regClass().type() == RegType::vgpr);
                  assert(instr->definitions[0].regClass().type() == RegType::sgpr);
                  assert(instr->operands[0].size() == instr->definitions[0].size());
                  for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                     bld.vop1(aco_opcode::v_readfirstlane_b32,
                              bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                              Operand(PhysReg{instr->operands[0].physReg() + i}, v1));
                  }
               }
               break;
            }
            default:
               break;
            }
         } else if (instr->format == Format::PSEUDO_BRANCH) {
            Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(instr.get());
            /* check if all blocks from current to target are empty */
            bool can_remove = block->index < branch->target[0];
            for (unsigned i = block->index + 1; can_remove && i < branch->target[0]; i++) {
               if (program->blocks[i].instructions.size())
                  can_remove = false;
            }
            if (can_remove)
               continue;

            switch (instr->opcode) {
               case aco_opcode::p_branch:
                  assert(block->linear_succs[0] == branch->target[0]);
                  bld.sopp(aco_opcode::s_branch, branch->target[0]);
                  break;
               case aco_opcode::p_cbranch_nz:
                  assert(block->linear_succs[1] == branch->target[0]);
                  if (branch->operands[0].physReg() == exec)
                     bld.sopp(aco_opcode::s_cbranch_execnz, branch->target[0]);
                  else if (branch->operands[0].physReg() == vcc)
                     bld.sopp(aco_opcode::s_cbranch_vccnz, branch->target[0]);
                  else {
                     assert(branch->operands[0].physReg() == scc);
                     bld.sopp(aco_opcode::s_cbranch_scc1, branch->target[0]);
                  }
                  break;
               case aco_opcode::p_cbranch_z:
                  assert(block->linear_succs[1] == branch->target[0]);
                  if (branch->operands[0].physReg() == exec)
                     bld.sopp(aco_opcode::s_cbranch_execz, branch->target[0]);
                  else if (branch->operands[0].physReg() == vcc)
                     bld.sopp(aco_opcode::s_cbranch_vccz, branch->target[0]);
                  else {
                     assert(branch->operands[0].physReg() == scc);
                     bld.sopp(aco_opcode::s_cbranch_scc0, branch->target[0]);
                  }
                  break;
               default:
                  unreachable("Unknown Pseudo branch instruction!");
            }
         } else if (instr->format == Format::PSEUDO_REDUCTION) {
            Pseudo_reduction_instruction* reduce = static_cast<Pseudo_reduction_instruction*>(instr.get());
            if (reduce->reduce_op == gfx10_wave64_bpermute) {
               /* Only makes sense on GFX10 wave64 */
               assert(program->chip_class >= GFX10);
               assert(program->info->wave_size == 64);
               assert(instr->definitions[0].regClass() == v1); /* Destination */
               assert(instr->definitions[1].regClass() == s2); /* Temp EXEC */
               assert(instr->definitions[1].physReg() != vcc);
               assert(instr->definitions[2].physReg() == scc); /* SCC clobber */
               assert(instr->operands[0].physReg() == vcc); /* Compare */
               assert(instr->operands[1].regClass() == v2.as_linear()); /* Temp VGPR pair */
               assert(instr->operands[2].regClass() == v1); /* Indices x4 */
               assert(instr->operands[3].regClass() == v1); /* Input data */

               PhysReg shared_vgpr_reg_lo = PhysReg(align(program->config->num_vgprs, 4) + 256);
               PhysReg shared_vgpr_reg_hi = PhysReg(shared_vgpr_reg_lo + 1);
               Operand compare = instr->operands[0];
               Operand tmp1(instr->operands[1].physReg(), v1);
               Operand tmp2(PhysReg(instr->operands[1].physReg() + 1), v1);
               Operand index_x4 = instr->operands[2];
               Operand input_data = instr->operands[3];
               Definition shared_vgpr_lo(shared_vgpr_reg_lo, v1);
               Definition shared_vgpr_hi(shared_vgpr_reg_hi, v1);
               Definition def_temp1(tmp1.physReg(), v1);
               Definition def_temp2(tmp2.physReg(), v1);

               /* Save EXEC and set it for all lanes */
               bld.sop1(aco_opcode::s_or_saveexec_b64, instr->definitions[1], instr->definitions[2],
                        Definition(exec, s2), Operand((uint64_t)-1), Operand(exec, s2));

               /* HI: Copy data from high lanes 32-63 to shared vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_hi, input_data, dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);

               /* LO: Copy data from low lanes 0-31 to shared vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_lo, input_data, dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
               /* LO: Copy shared vgpr (high lanes' data) to output vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_hi, v1), dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);

               /* HI: Copy shared vgpr (low lanes' data) to output vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_lo, v1), dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);

               /* Permute the original input */
               bld.ds(aco_opcode::ds_bpermute_b32, def_temp2, index_x4, input_data);
               /* Permute the swapped input */
               bld.ds(aco_opcode::ds_bpermute_b32, def_temp1, index_x4, tmp1);

               /* Restore saved EXEC */
               bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(instr->definitions[1].physReg(), s2));
               /* Choose whether to use the original or swapped */
               bld.vop2(aco_opcode::v_cndmask_b32, instr->definitions[0], tmp1, tmp2, compare);
            } else {
               emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size,
                              reduce->operands[1].physReg(), // tmp
                              reduce->definitions[1].physReg(), // stmp
                              reduce->operands[2].physReg(), // vtmp
                              reduce->definitions[2].physReg(), // sitmp
                              reduce->operands[0], reduce->definitions[0]);
            }
         } else {
            ctx.instructions.emplace_back(std::move(instr));
         }
      }
      block->instructions.swap(ctx.instructions);
   }
}

}