/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
 */
#include <map>

#include "aco_ir.h"
#include "aco_builder.h"
#include "util/u_math.h"
#include "vulkan/radv_shader.h"

namespace aco {
struct lower_context {
   Program *program;
   std::vector<aco_ptr<Instruction>> instructions;
};
aco_opcode get_reduce_opcode(chip_class chip, ReduceOp op) {
   /* Because some 16-bit instructions are already VOP3 on GFX10, we use the
    * 32-bit opcodes (VOP2) instead, which allows us to remove the temporary
    * VGPR and to use DPP with the arithmetic instructions. This requires
    * sign-extension of the 16-bit inputs.
    */
   switch (op) {
   case iadd16: return chip >= GFX10 ? aco_opcode::v_add_u32 : aco_opcode::v_add_u16;
   case imul16: return chip >= GFX10 ? aco_opcode::v_mul_lo_u16_e64 : aco_opcode::v_mul_lo_u16;
   case fadd16: return aco_opcode::v_add_f16;
   case fmul16: return aco_opcode::v_mul_f16;
   case imax16: return chip >= GFX10 ? aco_opcode::v_max_i32 : aco_opcode::v_max_i16;
   case imin16: return chip >= GFX10 ? aco_opcode::v_min_i32 : aco_opcode::v_min_i16;
   case umin16: return chip >= GFX10 ? aco_opcode::v_min_u32 : aco_opcode::v_min_u16;
   case umax16: return chip >= GFX10 ? aco_opcode::v_max_u32 : aco_opcode::v_max_u16;
   case fmin16: return aco_opcode::v_min_f16;
   case fmax16: return aco_opcode::v_max_f16;
   case iadd32: return chip >= GFX9 ? aco_opcode::v_add_u32 : aco_opcode::v_add_co_u32;
   case imul32: return aco_opcode::v_mul_lo_u32;
   case fadd32: return aco_opcode::v_add_f32;
   case fmul32: return aco_opcode::v_mul_f32;
   case imax32: return aco_opcode::v_max_i32;
   case imin32: return aco_opcode::v_min_i32;
   case umin32: return aco_opcode::v_min_u32;
   case umax32: return aco_opcode::v_max_u32;
   case fmin32: return aco_opcode::v_min_f32;
   case fmax32: return aco_opcode::v_max_f32;
   case iand32: return aco_opcode::v_and_b32;
   case ixor32: return aco_opcode::v_xor_b32;
   case ior32: return aco_opcode::v_or_b32;
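   /* The 64-bit integer reductions below have no single hardware opcode;
    * aco_opcode::num_opcodes is returned as a sentinel and the callers
    * expand them via emit_int64_op()/emit_int64_dpp_op() instead. */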
   case iadd64: return aco_opcode::num_opcodes;
   case imul64: return aco_opcode::num_opcodes;
   case fadd64: return aco_opcode::v_add_f64;
   case fmul64: return aco_opcode::v_mul_f64;
   case imin64: return aco_opcode::num_opcodes;
   case imax64: return aco_opcode::num_opcodes;
   case umin64: return aco_opcode::num_opcodes;
   case umax64: return aco_opcode::num_opcodes;
   case fmin64: return aco_opcode::v_min_f64;
   case fmax64: return aco_opcode::v_max_f64;
   case iand64: return aco_opcode::num_opcodes;
   case ior64: return aco_opcode::num_opcodes;
   case ixor64: return aco_opcode::num_opcodes;
   default: return aco_opcode::num_opcodes;
   }
}
bool is_vop3_reduce_opcode(aco_opcode opcode)
{
   /* 64-bit reductions are VOP3. */
   if (opcode == aco_opcode::num_opcodes)
      return true;

   return instr_info.format[(int)opcode] == Format::VOP3;
}
void emit_vadd32(Builder& bld, Definition def, Operand src0, Operand src1)
{
   Instruction *instr = bld.vadd32(def, src0, src1, false, Operand(s2), true);
   if (instr->definitions.size() >= 2) {
      assert(instr->definitions[1].regClass() == bld.lm);
      instr->definitions[1].setFixed(vcc);
   }
}
void emit_int64_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
                       PhysReg vtmp_reg, ReduceOp op,
                       unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
                       Operand *identity=NULL)
{
   Builder bld(ctx->program, &ctx->instructions);
   Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
   Definition vtmp_def[] = {Definition(vtmp_reg, v1), Definition(PhysReg{vtmp_reg+1}, v1)};
   Operand src0[] = {Operand(src0_reg, v1), Operand(PhysReg{src0_reg+1}, v1)};
   Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
   Operand src1_64 = Operand(src1_reg, v2);
   Operand vtmp_op[] = {Operand(vtmp_reg, v1), Operand(PhysReg{vtmp_reg+1}, v1)};
   Operand vtmp_op64 = Operand(vtmp_reg, v2);
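   /* Note: on GFX10 the carry-out add (v_add_co_u32_e64) is VOP3, and VOP3
    * cannot encode DPP, so the lane shuffle goes through a separate DPP mov
    * into vtmp first; pre-GFX10 the VOP2 add takes DPP directly. */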
   if (op == iadd64) {
      if (ctx->program->chip_class >= GFX10) {
         if (identity)
            bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
         bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                      dpp_ctrl, row_mask, bank_mask, bound_ctrl);
         bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), vtmp_op[0], src1[0]);
      } else {
         bld.vop2_dpp(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0],
                      dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      }
      bld.vop2_dpp(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == iand64) {
      bld.vop2_dpp(aco_opcode::v_and_b32, dst[0], src0[0], src1[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop2_dpp(aco_opcode::v_and_b32, dst[1], src0[1], src1[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == ior64) {
      bld.vop2_dpp(aco_opcode::v_or_b32, dst[0], src0[0], src1[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop2_dpp(aco_opcode::v_or_b32, dst[1], src0[1], src1[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == ixor64) {
      bld.vop2_dpp(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop2_dpp(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
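   /* 64-bit min/max: one full 64-bit compare of the shuffled value against
    * src1, then a per-half select with v_cndmask_b32. */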
   } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
      aco_opcode cmp = aco_opcode::num_opcodes;
      switch (op) {
      case umin64:
         cmp = aco_opcode::v_cmp_gt_u64;
         break;
      case umax64:
         cmp = aco_opcode::v_cmp_lt_u64;
         break;
      case imin64:
         cmp = aco_opcode::v_cmp_gt_i64;
         break;
      case imax64:
         cmp = aco_opcode::v_cmp_lt_i64;
         break;
      default:
         break;
      }

      if (identity) {
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[1], identity[1]);
      }
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[1], src0[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);

      bld.vopc(cmp, bld.def(bld.lm, vcc), vtmp_op64, src1_64);
      bld.vop2(aco_opcode::v_cndmask_b32, dst[0], vtmp_op[0], src1[0], Operand(vcc, bld.lm));
      bld.vop2(aco_opcode::v_cndmask_b32, dst[1], vtmp_op[1], src1[1], Operand(vcc, bld.lm));
   } else if (op == imul64) {
      /* t4 = dpp(x_hi)
       * t1 = umul_lo(t4, y_lo)
       * t3 = dpp(x_lo)
       * t0 = umul_lo(t3, y_hi)
       * t2 = iadd(t0, t1)
       * t5 = umul_hi(t3, y_lo)
       * res_hi = iadd(t2, t5)
       * res_lo = umul_lo(t3, y_lo)
       * Requires that res_hi != src0[0] and res_hi != src1[0]
       * and that vtmp[0] != res_hi.
       */
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[1]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[1], vtmp_op[0], src1[0]);
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[0], vtmp_op[0], src1[1]);
      emit_vadd32(bld, vtmp_def[1], vtmp_op[0], vtmp_op[1]);
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_hi_u32, vtmp_def[0], vtmp_op[0], src1[0]);
      emit_vadd32(bld, dst[1], vtmp_op[1], vtmp_op[0]);
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], vtmp_op[0], src1[0]);
   }
}
void emit_int64_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp, ReduceOp op)
{
   Builder bld(ctx->program, &ctx->instructions);
   Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
   RegClass src0_rc = src0_reg.reg() >= 256 ? v1 : s1;
   Operand src0[] = {Operand(src0_reg, src0_rc), Operand(PhysReg{src0_reg+1}, src0_rc)};
   Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
   Operand src0_64 = Operand(src0_reg, src0_reg.reg() >= 256 ? v2 : s2);
   Operand src1_64 = Operand(src1_reg, v2);
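   /* The multiply/min-max lowerings below need src0 in VGPRs (the imul64
    * path even writes to src0's second register), so SGPR sources are
    * copied into the vtmp scratch first. */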
   if (src0_rc == s1 &&
       (op == imul64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)) {
      assert(vtmp.reg() != 0);
      bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), src0[0]);
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
      src0_rc = v1;
      src0[0] = Operand(vtmp, v1);
      src0[1] = Operand(PhysReg{vtmp+1}, v1);
      src0_64 = Operand(vtmp, v2);
   } else if (src0_rc == s1 && op == iadd64) {
      assert(vtmp.reg() != 0);
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
      src0[1] = Operand(PhysReg{vtmp+1}, v1);
   }
   if (op == iadd64) {
      if (ctx->program->chip_class >= GFX10) {
         bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
      } else {
         bld.vop2(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
      }
      bld.vop2(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm));
   } else if (op == iand64) {
      bld.vop2(aco_opcode::v_and_b32, dst[0], src0[0], src1[0]);
      bld.vop2(aco_opcode::v_and_b32, dst[1], src0[1], src1[1]);
   } else if (op == ior64) {
      bld.vop2(aco_opcode::v_or_b32, dst[0], src0[0], src1[0]);
      bld.vop2(aco_opcode::v_or_b32, dst[1], src0[1], src1[1]);
   } else if (op == ixor64) {
      bld.vop2(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0]);
      bld.vop2(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1]);
   } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
      aco_opcode cmp = aco_opcode::num_opcodes;
      switch (op) {
      case umin64:
         cmp = aco_opcode::v_cmp_gt_u64;
         break;
      case umax64:
         cmp = aco_opcode::v_cmp_lt_u64;
         break;
      case imin64:
         cmp = aco_opcode::v_cmp_gt_i64;
         break;
      case imax64:
         cmp = aco_opcode::v_cmp_lt_i64;
         break;
      default:
         break;
      }

      bld.vopc(cmp, bld.def(bld.lm, vcc), src0_64, src1_64);
      bld.vop2(aco_opcode::v_cndmask_b32, dst[0], src0[0], src1[0], Operand(vcc, bld.lm));
      bld.vop2(aco_opcode::v_cndmask_b32, dst[1], src0[1], src1[1], Operand(vcc, bld.lm));
   } else if (op == imul64) {
      if (src1_reg == dst_reg) {
         /* it's fine if src0==dst but not if src1==dst */
         std::swap(src0_reg, src1_reg);
         std::swap(src0[0], src1[0]);
         std::swap(src0[1], src1[1]);
         std::swap(src0_64, src1_64);
      }
      assert(!(src0_reg == src1_reg));
      /* t1 = umul_lo(x_hi, y_lo)
       * t0 = umul_lo(x_lo, y_hi)
       * t2 = iadd(t0, t1)
       * t5 = umul_hi(x_lo, y_lo)
       * res_hi = iadd(t2, t5)
       * res_lo = umul_lo(x_lo, y_lo)
       * assumes that it's ok to modify x_hi/y_hi, since we might not have vtmp
       */
      Definition tmp0_def(PhysReg{src0_reg+1}, v1);
      Definition tmp1_def(PhysReg{src1_reg+1}, v1);
      Operand tmp0_op = src0[1];
      Operand tmp1_op = src1[1];
      bld.vop3(aco_opcode::v_mul_lo_u32, tmp0_def, src0[1], src1[0]);
      bld.vop3(aco_opcode::v_mul_lo_u32, tmp1_def, src0[0], src1[1]);
      emit_vadd32(bld, tmp0_def, tmp1_op, tmp0_op);
      bld.vop3(aco_opcode::v_mul_hi_u32, tmp1_def, src0[0], src1[0]);
      emit_vadd32(bld, dst[1], tmp0_op, tmp1_op);
      bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], src0[0], src1[0]);
   }
}
void emit_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
                 PhysReg vtmp, ReduceOp op, unsigned size,
                 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
                 Operand *identity=NULL) /* for VOP3 with sparse writes */
{
   Builder bld(ctx->program, &ctx->instructions);
   RegClass rc = RegClass(RegType::vgpr, size);
   Definition dst(dst_reg, rc);
   Operand src0(src0_reg, rc);
   Operand src1(src1_reg, rc);

   aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
   bool vop3 = is_vop3_reduce_opcode(opcode);

   if (!vop3) {
      if (opcode == aco_opcode::v_add_co_u32)
         bld.vop2_dpp(opcode, dst, bld.def(bld.lm, vcc), src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      else
         bld.vop2_dpp(opcode, dst, src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      return;
   }
   if (opcode == aco_opcode::num_opcodes) {
      emit_int64_dpp_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op,
                        dpp_ctrl, row_mask, bank_mask, bound_ctrl, identity);
      return;
   }

   if (identity)
      bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), identity[0]);
   if (identity && size >= 2)
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), identity[1]);

   for (unsigned i = 0; i < size; i++)
      bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{src0_reg+i}, v1),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);

   bld.vop3(opcode, dst, Operand(vtmp, rc), src1);
}
void emit_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
             PhysReg vtmp, ReduceOp op, unsigned size)
{
   Builder bld(ctx->program, &ctx->instructions);
   RegClass rc = RegClass(RegType::vgpr, size);
   Definition dst(dst_reg, rc);
   Operand src0(src0_reg, RegClass(src0_reg.reg() >= 256 ? RegType::vgpr : RegType::sgpr, size));
   Operand src1(src1_reg, rc);

   aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
   bool vop3 = is_vop3_reduce_opcode(opcode);

   if (opcode == aco_opcode::num_opcodes) {
      emit_int64_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op);
      return;
   }

   if (vop3) {
      bld.vop3(opcode, dst, src0, src1);
   } else if (opcode == aco_opcode::v_add_co_u32) {
      bld.vop2(opcode, dst, bld.def(bld.lm, vcc), src0, src1);
   } else {
      bld.vop2(opcode, dst, src0, src1);
   }
}
void emit_dpp_mov(lower_context *ctx, PhysReg dst, PhysReg src0, unsigned size,
                  unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl)
{
   Builder bld(ctx->program, &ctx->instructions);
   for (unsigned i = 0; i < size; i++) {
      bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{dst+i}, v1), Operand(PhysReg{src0+i}, v1),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   }
}
uint32_t get_reduction_identity(ReduceOp op, unsigned idx)
{
   switch (op) {
   case iadd8: case iadd16: case iadd32: case iadd64:
   case fadd16: case fadd32: case fadd64:
   case ior8: case ior16: case ior32: case ior64:
   case ixor8: case ixor16: case ixor32: case ixor64:
   case umax8: case umax16: case umax32: case umax64:
      return 0;
   case imul8: case imul16: case imul32: case imul64:
      return idx ? 0 : 1;
   case fmul16:
      return 0x3c00u; /* 1.0 */
   case fmul32:
      return 0x3f800000u; /* 1.0 */
   case fmul64:
      return idx ? 0x3ff00000u : 0u; /* 1.0 */
   case imin8:
      return INT8_MAX;
   case imin16:
      return INT16_MAX;
   case imin32:
      return INT32_MAX;
   case imin64:
      return idx ? 0x7fffffffu : 0xffffffffu;
   case imax8:
      return INT8_MIN;
   case imax16:
      return INT16_MIN;
   case imax32:
      return INT32_MIN;
   case imax64:
      return idx ? 0x80000000u : 0;
   case umin8: case umin16: case umin32: case umin64:
   case iand8: case iand16: case iand32: case iand64:
      return 0xffffffffu;
   case fmin16:
      return 0x7c00u; /* infinity */
   case fmin32:
      return 0x7f800000u; /* infinity */
   case fmin64:
      return idx ? 0x7ff00000u : 0u; /* infinity */
   case fmax16:
      return 0xfc00u; /* negative infinity */
   case fmax32:
      return 0xff800000u; /* negative infinity */
   case fmax64:
      return idx ? 0xfff00000u : 0u; /* negative infinity */
   default:
      unreachable("Invalid reduction operation");
   }
   return 0;
}
void emit_ds_swizzle(Builder bld, PhysReg dst, PhysReg src, unsigned size, unsigned ds_pattern)
{
   for (unsigned i = 0; i < size; i++) {
      bld.ds(aco_opcode::ds_swizzle_b32, Definition(PhysReg{dst+i}, v1),
             Operand(PhysReg{src+i}, v1), ds_pattern);
   }
}
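/* Register roles, as used below:
 *   tmp   - VGPRs holding the running reduction value
 *   stmp  - SGPRs holding the exec mask saved by s_*_saveexec
 *   vtmp  - VGPR scratch for the DPP/swizzle steps
 *   sitmp - SGPR scratch for readlane/writelane fixups */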
void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsigned cluster_size, PhysReg tmp,
                    PhysReg stmp, PhysReg vtmp, PhysReg sitmp, Operand src, Definition dst)
{
   assert(cluster_size == ctx->program->wave_size || op == aco_opcode::p_reduce);
   assert(cluster_size <= ctx->program->wave_size);

   Builder bld(ctx->program, &ctx->instructions);

   Operand identity[2];
   identity[0] = Operand(get_reduction_identity(reduce_op, 0));
   identity[1] = Operand(get_reduction_identity(reduce_op, 1));
   Operand vcndmask_identity[2] = {identity[0], identity[1]};

   /* First, copy the source to tmp and set inactive lanes to the identity */
   bld.sop1(Builder::s_or_saveexec, Definition(stmp, bld.lm), Definition(scc, s1), Definition(exec, bld.lm), Operand(UINT64_MAX), Operand(exec, bld.lm));

   for (unsigned i = 0; i < src.size(); i++) {
      /* p_exclusive_scan needs it to be a sgpr or inline constant for the v_writelane_b32
       * except on GFX10, where v_writelane_b32 can take a literal. */
      if (identity[i].isLiteral() && op == aco_opcode::p_exclusive_scan && ctx->program->chip_class < GFX10) {
         bld.sop1(aco_opcode::s_mov_b32, Definition(PhysReg{sitmp+i}, s1), identity[i]);
         identity[i] = Operand(PhysReg{sitmp+i}, s1);

         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      } else if (identity[i].isLiteral()) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      }
   }
   for (unsigned i = 0; i < src.size(); i++) {
      bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(PhysReg{tmp + i}, v1),
                   vcndmask_identity[i], Operand(PhysReg{src.physReg() + i}, v1),
                   Operand(stmp, bld.lm));
   }
   if (src.regClass() == v1b) {
      aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
      sdwa->operands[0] = Operand(PhysReg{tmp}, v1);
      sdwa->definitions[0] = Definition(PhysReg{tmp}, v1);
      if (reduce_op == imin8 || reduce_op == imax8)
         sdwa->sel[0] = sdwa_sbyte;
      else
         sdwa->sel[0] = sdwa_ubyte;
      sdwa->dst_sel = sdwa_udword;
      bld.insert(std::move(sdwa));
   } else if (src.regClass() == v2b) {
      if (ctx->program->chip_class >= GFX10 &&
          (reduce_op == iadd16 || reduce_op == imax16 ||
           reduce_op == imin16 || reduce_op == umin16 || reduce_op == umax16)) {
         aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
         sdwa->operands[0] = Operand(PhysReg{tmp}, v1);
         sdwa->definitions[0] = Definition(PhysReg{tmp}, v1);
         if (reduce_op == imin16 || reduce_op == imax16 || reduce_op == iadd16)
            sdwa->sel[0] = sdwa_sword;
         else
            sdwa->sel[0] = sdwa_uword;
         sdwa->dst_sel = sdwa_udword;
         bld.insert(std::move(sdwa));
      }
   }
   bool reduction_needs_last_op = false;
   switch (op) {
   case aco_opcode::p_reduce:
      if (cluster_size == 1) break;
      if (ctx->program->chip_class <= GFX7) {
         reduction_needs_last_op = true;
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(1, 0, 3, 2));
         if (cluster_size == 2) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(2, 3, 0, 1));
         if (cluster_size == 4) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x04));
         if (cluster_size == 8) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x08));
         if (cluster_size == 16) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
         if (cluster_size == 32) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         for (unsigned i = 0; i < src.size(); i++)
            bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp + i}, v1), Operand(0u));
         // TODO: it would be more effective to do the last reduction step on SALU
         emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
         reduction_needs_last_op = false;
         break;
      }
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(1, 0, 3, 2), 0xf, 0xf, false);
      if (cluster_size == 2) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(2, 3, 0, 1), 0xf, 0xf, false);
      if (cluster_size == 4) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_half_mirror, 0xf, 0xf, false);
      if (cluster_size == 8) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_mirror, 0xf, 0xf, false);
      if (cluster_size == 16) break;

      if (ctx->program->chip_class >= GFX10) {
         /* GFX10+ doesn't support row_bcast15 and row_bcast31 */
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), Operand(0u), Operand(0u));

         if (cluster_size == 32) {
            reduction_needs_last_op = true;
            break;
         }

         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
         for (unsigned i = 0; i < src.size(); i++)
            bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(0u));
         // TODO: it would be more effective to do the last reduction step on SALU
         emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
         break;
      }

      if (cluster_size == 32) {
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
         reduction_needs_last_op = true;
         break;
      }
      assert(cluster_size == 64);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast15, 0xa, 0xf, false);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast31, 0xc, 0xf, false);
      break;
   case aco_opcode::p_exclusive_scan:
      if (ctx->program->chip_class >= GFX10) { /* gfx10 doesn't support wf_sr1, so emulate it */
         /* shift rows right */
         emit_dpp_mov(ctx, vtmp, tmp, src.size(), dpp_row_sr(1), 0xf, 0xf, true);

         /* fill in the gaps in rows 1 and 3 */
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10000u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0x10000u));
         for (unsigned i = 0; i < src.size(); i++) {
            Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
                                         Definition(PhysReg{vtmp+i}, v1),
                                         Operand(PhysReg{tmp+i}, v1),
                                         Operand(0xffffffffu), Operand(0xffffffffu)).instr;
            static_cast<VOP3A_instruction*>(perm)->opsel = 1; /* FI (Fetch Inactive) */
         }
         bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(UINT64_MAX));

         if (ctx->program->wave_size == 64) {
            /* fill in the gap in row 2 */
            for (unsigned i = 0; i < src.size(); i++) {
               bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
               bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
            }
         }
         std::swap(tmp, vtmp);
      } else if (ctx->program->chip_class >= GFX8) {
         emit_dpp_mov(ctx, tmp, tmp, src.size(), dpp_wf_sr1, 0xf, 0xf, true);
      } else {
         // TODO: use LDS on CS with a single write and shifted read
         /* wavefront shift_right by 1 on SI/CI */
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(0, 0, 1, 2));
         emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x07)); /* mirror(8) */
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10101010u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x08)); /* swap(8) */
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x01000100u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x10)); /* swap(16) */
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(1u), Operand(16u));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(1u), Operand(16u));
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         for (unsigned i = 0; i < src.size(); i++) {
            bld.writelane(Definition(PhysReg{vtmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{vtmp+i}, v1));
            bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(0u));
            bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
            identity[i] = Operand(0u); /* prevent further uses of identity */
         }
         std::swap(tmp, vtmp);
      }

      for (unsigned i = 0; i < src.size(); i++) {
         if (!identity[i].isConstant() || identity[i].constantValue()) { /* bound_ctrl should take care of this otherwise */
            if (ctx->program->chip_class < GFX10)
               assert((identity[i].isConstant() && !identity[i].isLiteral()) || identity[i].physReg() == PhysReg{sitmp+i});
            bld.writelane(Definition(PhysReg{tmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{tmp+i}, v1));
         }
      }
      /* fall through */
   case aco_opcode::p_inclusive_scan:
      assert(cluster_size == ctx->program->wave_size);
      if (ctx->program->chip_class <= GFX7) {
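         /* This is essentially a Hillis-Steele inclusive scan: ds_swizzle
          * shifts with strides 1, 2, 4, 8, 16 plus a final readlane for the
          * cross-half step, with exec masking keeping shifted-in lanes inert. */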
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1e, 0x00, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xAAAAAAAAu));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1c, 0x01, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xCCCCCCCCu));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x18, 0x03, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xF0F0F0F0u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x10, 0x07, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xFF00FF00u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x00, 0x0f, 0x00));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(16u), Operand(16u));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(16u), Operand(16u));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         for (unsigned i = 0; i < src.size(); i++)
            bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
         bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
         emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
         break;
      }
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(1), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(2), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(4), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(8), 0xf, 0xf, false, identity);
      if (ctx->program->chip_class >= GFX10) {
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(16u), Operand(16u));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(16u), Operand(16u));
         for (unsigned i = 0; i < src.size(); i++) {
            Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
                                         Definition(PhysReg{vtmp+i}, v1),
                                         Operand(PhysReg{tmp+i}, v1),
                                         Operand(0xffffffffu), Operand(0xffffffffu)).instr;
            static_cast<VOP3A_instruction*>(perm)->opsel = 1; /* FI (Fetch Inactive) */
         }
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         if (ctx->program->wave_size == 64) {
            bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
            for (unsigned i = 0; i < src.size(); i++)
               bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
            emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
         }
      } else {
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast15, 0xa, 0xf, false, identity);
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast31, 0xc, 0xf, false, identity);
      }
      break;
   default:
      unreachable("Invalid reduction mode");
   }
   if (op == aco_opcode::p_reduce) {
      if (reduction_needs_last_op && dst.regClass().type() == RegType::vgpr) {
         bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
         emit_op(ctx, dst.physReg(), tmp, vtmp, PhysReg{0}, reduce_op, src.size());
         return;
      }

      if (reduction_needs_last_op)
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
   }

   bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));

   if (dst.regClass().type() == RegType::sgpr) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.readlane(Definition(PhysReg{dst.physReg() + k}, s1),
                      Operand(PhysReg{tmp + k}, v1), Operand(ctx->program->wave_size - 1));
      }
   } else if (dst.physReg() != tmp) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{dst.physReg() + k}, v1),
                  Operand(PhysReg{tmp + k}, v1));
      }
   }
}
void emit_gfx10_wave64_bpermute(Program *program, aco_ptr<Instruction> &instr, Builder &bld)
{
   /* Emulates proper bpermute on GFX10 in wave64 mode.
    *
    * This is necessary because on GFX10 the bpermute instruction only works
    * on half waves (you can think of it as having a cluster size of 32), so we
    * manually swap the data between the two halves using two shared VGPRs.
    */
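   /* Note: ds_bpermute_b32 addresses lanes in bytes, which is why the index
    * operand below (index_x4) arrives pre-multiplied by 4. */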
   assert(program->chip_class >= GFX10);
   assert(program->info->wave_size == 64);

   unsigned shared_vgpr_reg_0 = align(program->config->num_vgprs, 4) + 256;
   Definition dst = instr->definitions[0];
   Definition tmp_exec = instr->definitions[1];
   Definition clobber_scc = instr->definitions[2];
   Operand index_x4 = instr->operands[0];
   Operand input_data = instr->operands[1];
   Operand same_half = instr->operands[2];

   assert(dst.regClass() == v1);
   assert(tmp_exec.regClass() == bld.lm);
   assert(clobber_scc.isFixed() && clobber_scc.physReg() == scc);
   assert(same_half.regClass() == bld.lm);
   assert(index_x4.regClass() == v1);
   assert(input_data.regClass().type() == RegType::vgpr);
   assert(input_data.bytes() <= 4);
   assert(dst.physReg() != index_x4.physReg());
   assert(dst.physReg() != input_data.physReg());
   assert(tmp_exec.physReg() != same_half.physReg());

   PhysReg shared_vgpr_lo(shared_vgpr_reg_0);
   PhysReg shared_vgpr_hi(shared_vgpr_reg_0 + 1);

   /* Permute the input within the same half-wave */
   bld.ds(aco_opcode::ds_bpermute_b32, dst, index_x4, input_data);

   /* HI: Copy data from high lanes 32-63 to shared vgpr */
   bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(shared_vgpr_hi, v1), input_data, dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
   bld.sop1(aco_opcode::s_mov_b64, tmp_exec, Operand(exec, s2));
   /* Set EXEC to enable LO lanes only */
   bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(0u));
   /* LO: Copy data from low lanes 0-31 to shared vgpr */
   bld.vop1(aco_opcode::v_mov_b32, Definition(shared_vgpr_lo, v1), input_data);
   /* LO: bpermute shared vgpr (high lanes' data) */
   bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_hi, v1), index_x4, Operand(shared_vgpr_hi, v1));
   /* Set EXEC to enable HI lanes only */
   bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
   /* HI: bpermute shared vgpr (low lanes' data) */
   bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_lo, v1), index_x4, Operand(shared_vgpr_lo, v1));

   /* Only enable lanes which use the other half's data */
   bld.sop2(aco_opcode::s_andn2_b64, Definition(exec, s2), clobber_scc, Operand(tmp_exec.physReg(), s2), same_half);
   /* LO: Copy shared vgpr (high lanes' bpermuted data) to output vgpr */
   bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_hi, v1), dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
   /* HI: Copy shared vgpr (low lanes' bpermuted data) to output vgpr */
   bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_lo, v1), dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);

   /* Restore saved EXEC */
   bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(tmp_exec.physReg(), s2));

   /* RA assumes that the result is always in the low part of the register, so we have to shift, if it's not there already */
   if (input_data.physReg().byte()) {
      unsigned right_shift = input_data.physReg().byte() * 8;
      bld.vop2(aco_opcode::v_lshrrev_b32, dst, Operand(right_shift), Operand(dst.physReg(), v1));
   }
}
struct copy_operation {
   Operand op;
   Definition def;
   unsigned bytes;
   union {
      uint8_t uses[8];
      uint64_t is_used = 0;
   };
};
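/* Carve an aligned, power-of-two-sized piece (at most max_size bytes) out of
 * `src` starting at byte `offset`, returning it through *def and *op. */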
void split_copy(unsigned offset, Definition *def, Operand *op, const copy_operation& src, bool ignore_uses, unsigned max_size)
{
   PhysReg def_reg = src.def.physReg();
   PhysReg op_reg = src.op.physReg();
   def_reg.reg_b += offset;
   op_reg.reg_b += offset;

   max_size = MIN2(max_size, src.def.regClass().type() == RegType::vgpr ? 4 : 8);

   /* make sure the size is a power of two and reg % bytes == 0 */
   unsigned bytes = 1;
   for (; bytes <= max_size; bytes *= 2) {
      unsigned next = bytes * 2u;
      bool can_increase = def_reg.reg_b % next == 0 &&
                          offset + next <= src.bytes && next <= max_size;
      if (!src.op.isConstant() && can_increase)
         can_increase = op_reg.reg_b % next == 0;
      for (unsigned i = 0; !ignore_uses && can_increase && (i < bytes); i++)
         can_increase = (src.uses[offset + bytes + i] == 0) == (src.uses[offset] == 0);
      if (!can_increase)
         break;
   }

   RegClass def_cls = bytes % 4 == 0 ? RegClass(src.def.regClass().type(), bytes / 4u) :
                      RegClass(src.def.regClass().type(), bytes).as_subdword();
   *def = Definition(src.def.tempId(), def_reg, def_cls);
   if (src.op.isConstant()) {
      assert(offset == 0 || (offset == 4 && src.op.bytes() == 8));
      if (src.op.bytes() == 8 && bytes == 4)
         *op = Operand(uint32_t(src.op.constantValue64() >> (offset * 8u)));
      else
         *op = src.op;
   } else {
      RegClass op_cls = bytes % 4 == 0 ? RegClass(src.op.regClass().type(), bytes / 4u) :
                        RegClass(src.op.regClass().type(), bytes).as_subdword();
      *op = Operand(op_reg, op_cls);
      op->setTemp(Temp(src.op.tempId(), op_cls));
   }
}
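/* Returns a byte mask, relative to a_start, of where [a_start, a_start + a_size)
 * overlaps [b_start, b_start + b_size). */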
uint32_t get_intersection_mask(int a_start, int a_size,
                               int b_start, int b_size)
{
   int intersection_start = MAX2(b_start - a_start, 0);
   int intersection_end = MAX2(b_start + b_size - a_start, 0);
   if (intersection_start >= a_size || intersection_end == 0)
      return 0;

   uint32_t mask = u_bit_consecutive(0, a_size);
   return u_bit_consecutive(intersection_start, intersection_end - intersection_start) & mask;
}
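/* Emits every portion of `copy` whose destination bytes are not still needed
 * as a source by another pending copy; returns true if anything was emitted. */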
bool do_copy(lower_context* ctx, Builder& bld, const copy_operation& copy, bool *preserve_scc)
{
   bool did_copy = false;
   for (unsigned offset = 0; offset < copy.bytes;) {
      if (copy.uses[offset]) {
         offset++;
         continue;
      }

      Definition def;
      Operand op;
      split_copy(offset, &def, &op, copy, false, 8);

      if (def.physReg() == scc) {
         bld.sopc(aco_opcode::s_cmp_lg_i32, def, op, Operand(0u));
         *preserve_scc = true;
      } else if (def.bytes() == 8 && def.getTemp().type() == RegType::sgpr) {
         bld.sop1(aco_opcode::s_mov_b64, def, Operand(op.physReg(), s2));
      } else {
         bld.copy(def, op);
      }

      ctx->program->statistics[statistic_copies]++;

      did_copy = true;
      offset += def.bytes();
   }
   return did_copy;
}
void do_swap(lower_context *ctx, Builder& bld, const copy_operation& copy, bool preserve_scc, Pseudo_instruction *pi)
{
   unsigned offset = 0;

   if (copy.bytes == 3 && (copy.def.physReg().reg_b % 4 <= 1) &&
       (copy.def.physReg().reg_b % 4) == (copy.op.physReg().reg_b % 4)) {
      /* instead of doing a 2-byte and 1-byte swap, do a 4-byte swap and then fixup with a 1-byte swap */
      PhysReg op = copy.op.physReg();
      PhysReg def = copy.def.physReg();
      op.reg_b &= ~0x3;
      def.reg_b &= ~0x3;

      copy_operation tmp;
      tmp.op = Operand(op, v1);
      tmp.def = Definition(def, v1);
      tmp.bytes = 4;
      memset(tmp.uses, 1, 4);
      do_swap(ctx, bld, tmp, preserve_scc, pi);

      op.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
      def.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
      tmp.op = Operand(op, v1b);
      tmp.def = Definition(def, v1b);
      tmp.bytes = 1;
      memset(tmp.uses, 1, 1);
      do_swap(ctx, bld, tmp, preserve_scc, pi);
      return;
   }
   for (; offset < copy.bytes;) {
      Definition def;
      Operand op;
      split_copy(offset, &def, &op, copy, true, 8);

      assert(op.regClass() == def.regClass());
      Operand def_as_op = Operand(def.physReg(), def.regClass());
      Definition op_as_def = Definition(op.physReg(), op.regClass());
      if (ctx->program->chip_class >= GFX9 && def.regClass() == v1) {
         bld.vop1(aco_opcode::v_swap_b32, def, op_as_def, op, def_as_op);
         ctx->program->statistics[statistic_copies]++;
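      /* Pre-GFX9 has no v_swap_b32, so two VGPRs are swapped with the classic
       * three-XOR trick (a ^= b; b ^= a; a ^= b), which needs no scratch. */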
      } else if (def.regClass() == v1) {
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, def, op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         ctx->program->statistics[statistic_copies] += 3;
      } else if (op.physReg() == scc || def.physReg() == scc) {
         /* we need to swap scc and another sgpr */
         assert(!preserve_scc);

         PhysReg other = op.physReg() == scc ? def.physReg() : op.physReg();

         bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
         bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1), Operand(0u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1));
         ctx->program->statistics[statistic_copies] += 3;
      } else if (def.regClass() == s1) {
         if (preserve_scc) {
            bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), op);
            bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op);
            bld.sop1(aco_opcode::s_mov_b32, def, Operand(pi->scratch_sgpr, s1));
         } else {
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, def, Definition(scc, s1), op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
         }
         ctx->program->statistics[statistic_copies] += 3;
      } else if (def.regClass() == s2) {
         if (preserve_scc)
            bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
         bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
         bld.sop2(aco_opcode::s_xor_b64, def, Definition(scc, s1), op, def_as_op);
         bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
         if (preserve_scc)
            bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(pi->scratch_sgpr, s1), Operand(0u));
         ctx->program->statistics[statistic_copies] += 3;
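      /* Swapping the two 16-bit halves within a single VGPR: v_pk_add_u16 with
       * a +0 operand and crossed opsel reads each half into the opposite half,
       * doing the whole swap in one packed instruction. */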
      } else if (ctx->program->chip_class >= GFX9 && def.bytes() == 2 && def.physReg().reg() == op.physReg().reg()) {
         aco_ptr<VOP3P_instruction> vop3p{create_instruction<VOP3P_instruction>(aco_opcode::v_pk_add_u16, Format::VOP3P, 2, 1)};
         vop3p->operands[0] = Operand(PhysReg{op.physReg().reg()}, v1);
         vop3p->operands[1] = Operand(0u);
         vop3p->definitions[0] = Definition(PhysReg{op.physReg().reg()}, v1);
         vop3p->opsel_lo = 0x1;
         vop3p->opsel_hi = 0x2;
         bld.insert(std::move(vop3p));
      } else {
         assert(def.regClass().is_subdword());
         bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         bld.vop2_sdwa(aco_opcode::v_xor_b32, def, op, def_as_op);
         bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         ctx->program->statistics[statistic_copies] += 3;
      }

      offset += def.bytes();
   }

   /* fixup in case we swapped bytes we shouldn't have */
   copy_operation tmp_copy = copy;
   tmp_copy.op.setFixed(copy.def.physReg());
   tmp_copy.def.setFixed(copy.op.physReg());
   do_copy(ctx, bld, tmp_copy, &preserve_scc);
}
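/* Lowers a set of parallel copies (copy_map: dst reg -> copy) to hardware
 * moves: copies whose destination is not itself a pending source (the acyclic
 * part of the location-transfer graph) are emitted first; the remaining
 * entries form cycles, which are broken with swaps. */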
void handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context* ctx, chip_class chip_class, Pseudo_instruction *pi)
{
   Builder bld(ctx->program, &ctx->instructions);
   aco_ptr<Instruction> mov;
   std::map<PhysReg, copy_operation>::iterator it = copy_map.begin();
   std::map<PhysReg, copy_operation>::iterator target;
   bool writes_scc = false;
   /* count the number of uses for each dst reg */
   while (it != copy_map.end()) {

      if (it->second.def.physReg() == scc)
         writes_scc = true;

      assert(!pi->tmp_in_scc || !(it->second.def.physReg() == pi->scratch_sgpr));

      /* if src and dst reg are the same, remove operation */
      if (it->first == it->second.op.physReg()) {
         it = copy_map.erase(it);
         continue;
      }

      /* split large copies */
      if (it->second.bytes > 8) {
         assert(!it->second.op.isConstant());
         assert(!it->second.def.regClass().is_subdword());
         RegClass rc = RegClass(it->second.def.regClass().type(), it->second.def.size() - 2);
         Definition hi_def = Definition(PhysReg{it->first + 2}, rc);
         rc = RegClass(it->second.op.regClass().type(), it->second.op.size() - 2);
         Operand hi_op = Operand(PhysReg{it->second.op.physReg() + 2}, rc);
         copy_operation copy = {hi_op, hi_def, it->second.bytes - 8};
         copy_map[hi_def.physReg()] = copy;
         assert(it->second.op.physReg().byte() == 0 && it->second.def.physReg().byte() == 0);
         it->second.op = Operand(it->second.op.physReg(), it->second.op.regClass().type() == RegType::sgpr ? s2 : v2);
         it->second.def = Definition(it->second.def.physReg(), it->second.def.regClass().type() == RegType::sgpr ? s2 : v2);
         it->second.bytes = 8;
      }

      /* check if the definition reg is used by another copy operation */
      for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
         if (copy.second.op.isConstant())
            continue;
         for (uint16_t i = 0; i < it->second.bytes; i++) {
            /* distance might underflow */
            unsigned distance = it->first.reg_b + i - copy.second.op.physReg().reg_b;
            if (distance < copy.second.bytes)
               it->second.uses[i] += 1;
         }
      }

      ++it;
   }
   /* first, handle paths in the location transfer graph */
   bool preserve_scc = pi->tmp_in_scc && !writes_scc;
   it = copy_map.begin();
   while (it != copy_map.end()) {

      /* try to coalesce 32-bit sgpr copies to 64-bit copies */
      if (it->second.is_used == 0 &&
          it->second.def.getTemp().type() == RegType::sgpr && it->second.bytes == 4 &&
          !it->second.op.isConstant() && it->first % 2 == it->second.op.physReg() % 2) {

         PhysReg other_def_reg = PhysReg{it->first % 2 ? it->first - 1 : it->first + 1};
         PhysReg other_op_reg = PhysReg{it->first % 2 ? it->second.op.physReg() - 1 : it->second.op.physReg() + 1};
         std::map<PhysReg, copy_operation>::iterator other = copy_map.find(other_def_reg);

         if (other != copy_map.end() && !other->second.is_used && other->second.bytes == 4 &&
             other->second.op.physReg() == other_op_reg && !other->second.op.isConstant()) {
            std::map<PhysReg, copy_operation>::iterator to_erase = it->first % 2 ? it : other;
            it = it->first % 2 ? other : it;
            copy_map.erase(to_erase);
            it->second.bytes = 8;
         }
      }
      // TODO: try to coalesce subdword copies
      /* find portions where the target reg is not used as operand for any other copy */
      if (it->second.is_used) {
         if (it->second.op.isConstant()) {
            /* we have to skip constants until is_used=0 */
            ++it;
            continue;
         }

         unsigned has_zero_use_bytes = 0;
         for (unsigned i = 0; i < it->second.bytes; i++)
            has_zero_use_bytes |= (it->second.uses[i] == 0) << i;

         if (has_zero_use_bytes) {
            /* Skipping partial copying and doing a v_swap_b32 and then fixup
             * copies is usually beneficial for sub-dword copies, but if doing
             * a partial copy allows further copies, it should be done instead. */
            bool partial_copy = (has_zero_use_bytes == 0xf) || (has_zero_use_bytes == 0xf0);
            for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
               if (partial_copy)
                  break;
               for (uint16_t i = 0; i < copy.second.bytes; i++) {
                  /* distance might underflow */
                  unsigned distance = copy.first.reg_b + i - it->second.op.physReg().reg_b;
                  if (distance < it->second.bytes && copy.second.uses[i] == 1 &&
                      !it->second.uses[distance])
                     partial_copy = true;
               }
            }

            if (!partial_copy) {
               ++it;
               continue;
            }
         } else {
            /* full target reg is used: register swapping needed */
            ++it;
            continue;
         }
      }
      bool did_copy = do_copy(ctx, bld, it->second, &preserve_scc);

      std::pair<PhysReg, copy_operation> copy = *it;

      if (it->second.is_used == 0) {
         /* the target reg is not used as operand for any other copy, so we
          * copied to all of it */
         copy_map.erase(it);
         it = copy_map.begin();
      } else {
         /* we only performed some portions of this copy, so split it to only
          * leave the portions that still need to be done */
         copy_operation original = it->second; /* the map insertion below can overwrite this */
         copy_map.erase(it);
         for (unsigned offset = 0; offset < original.bytes;) {
            if (original.uses[offset] == 0) {
               offset++;
               continue;
            }
            Definition def;
            Operand op;
            split_copy(offset, &def, &op, original, false, 8);

            copy_operation copy = {op, def, def.bytes()};
            for (unsigned i = 0; i < copy.bytes; i++)
               copy.uses[i] = original.uses[i + offset];
            copy_map[def.physReg()] = copy;

            offset += def.bytes();
         }

         it = copy_map.begin();
      }
      /* Reduce the number of uses of the operand reg by one. Do this after
       * splitting the copy or removing it in case the copy writes to its own
       * operand (for example, v[7:8] = v[8:9]) */
      if (did_copy && !copy.second.op.isConstant()) {
         for (std::pair<const PhysReg, copy_operation>& other : copy_map) {
            for (uint16_t i = 0; i < other.second.bytes; i++) {
               /* distance might underflow */
               unsigned distance = other.first.reg_b + i - copy.second.op.physReg().reg_b;
               if (distance < copy.second.bytes && !copy.second.uses[distance])
                  other.second.uses[i] -= 1;
            }
         }
      }
   }

   if (copy_map.empty())
      return;
   /* all target regs are needed as operand somewhere, which means all entries are part of a cycle */
   unsigned largest = 0;
   for (const std::pair<PhysReg, copy_operation>& op : copy_map)
      largest = MAX2(largest, op.second.bytes);
   while (!copy_map.empty()) {

      /* Perform larger swaps first, because larger swaps can make other
       * swaps unnecessary. */
      auto it = copy_map.begin();
      for (auto it2 = copy_map.begin(); it2 != copy_map.end(); ++it2) {
         if (it2->second.bytes > it->second.bytes) {
            it = it2;
            if (it->second.bytes == largest)
               break;
         }
      }

      /* should already be done */
      assert(!it->second.op.isConstant());

      assert(it->second.op.isFixed());
      assert(it->second.def.regClass() == it->second.op.regClass());

      if (it->first == it->second.op.physReg()) {
         copy_map.erase(it);
         continue;
      }

      if (preserve_scc && it->second.def.getTemp().type() == RegType::sgpr)
         assert(!(it->second.def.physReg() == pi->scratch_sgpr));

      /* to resolve the cycle, we have to swap the src reg with the dst reg */
      copy_operation swap = it->second;

      /* if this is self-intersecting, we have to split it because
       * self-intersecting swaps don't make sense */
      PhysReg lower = swap.def.physReg();
      PhysReg higher = swap.op.physReg();
      if (lower.reg_b > higher.reg_b)
         std::swap(lower, higher);
      if (higher.reg_b - lower.reg_b < (int)swap.bytes) {
         unsigned offset = higher.reg_b - lower.reg_b;
         RegType type = swap.def.regClass().type();

         copy_operation middle;
         lower.reg_b += offset;
         higher.reg_b += offset;
         middle.bytes = swap.bytes - offset * 2;
         memcpy(middle.uses, swap.uses + offset, middle.bytes);
         middle.op = Operand(lower, RegClass::get(type, middle.bytes));
         middle.def = Definition(higher, RegClass::get(type, middle.bytes));
         copy_map[higher] = middle;

         copy_operation end;
         lower.reg_b += middle.bytes;
         higher.reg_b += middle.bytes;
         end.bytes = swap.bytes - (offset + middle.bytes);
         memcpy(end.uses, swap.uses + offset + middle.bytes, end.bytes);
         end.op = Operand(lower, RegClass::get(type, end.bytes));
         end.def = Definition(higher, RegClass::get(type, end.bytes));
         copy_map[higher] = end;

         memset(swap.uses + offset, 0, swap.bytes - offset);
         swap.bytes = offset;
      }
      do_swap(ctx, bld, swap, preserve_scc, pi);

      /* remove from map */
      copy_map.erase(it);

      /* change the operand reg of the target's uses and split uses if needed */
      target = copy_map.begin();
      uint32_t bytes_left = u_bit_consecutive(0, swap.bytes);
      for (; target != copy_map.end(); ++target) {
         if (target->second.op.physReg() == swap.def.physReg() && swap.bytes == target->second.bytes) {
            target->second.op.setFixed(swap.op.physReg());
            break;
         }

         uint32_t imask = get_intersection_mask(swap.def.physReg().reg_b, swap.bytes,
                                                target->second.op.physReg().reg_b, target->second.bytes);

         if (!imask)
            continue;

         assert(target->second.bytes < swap.bytes);

         int offset = (int)target->second.op.physReg().reg_b - (int)swap.def.physReg().reg_b;

         /* split and update the middle (the portion that reads the swap's
          * definition) to read the swap's operand instead */
         int target_op_end = target->second.op.physReg().reg_b + target->second.bytes;
         int swap_def_end = swap.def.physReg().reg_b + swap.bytes;
         int before_bytes = MAX2(-offset, 0);
         int after_bytes = MAX2(target_op_end - swap_def_end, 0);
         int middle_bytes = target->second.bytes - before_bytes - after_bytes;

         if (after_bytes) {
            unsigned after_offset = before_bytes + middle_bytes;
            assert(after_offset > 0);
            copy_operation copy;
            copy.bytes = after_bytes;
            memcpy(copy.uses, target->second.uses + after_offset, copy.bytes);
            RegClass rc = RegClass::get(target->second.op.regClass().type(), after_bytes);
            copy.op = Operand(target->second.op.physReg().advance(after_offset), rc);
            copy.def = Definition(target->second.def.physReg().advance(after_offset), rc);
            copy_map[copy.def.physReg()] = copy;
         }

         if (middle_bytes) {
            copy_operation copy;
            copy.bytes = middle_bytes;
            memcpy(copy.uses, target->second.uses + before_bytes, copy.bytes);
            RegClass rc = RegClass::get(target->second.op.regClass().type(), middle_bytes);
            copy.op = Operand(swap.op.physReg().advance(MAX2(offset, 0)), rc);
            copy.def = Definition(target->second.def.physReg().advance(before_bytes), rc);
            copy_map[copy.def.physReg()] = copy;
         }

         if (before_bytes) {
            copy_operation copy;
            target->second.bytes = before_bytes;
            RegClass rc = RegClass::get(target->second.op.regClass().type(), before_bytes);
            target->second.op = Operand(target->second.op.physReg(), rc);
            target->second.def = Definition(target->second.def.physReg(), rc);
            memset(target->second.uses + target->second.bytes, 0, 8 - target->second.bytes);
         }

         /* break early since we know each byte of the swap's definition is used
          * somewhere */
         bytes_left &= ~imask;
         if (!bytes_left)
            break;
      }
   }
}
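/* Entry point: walks every block and replaces the pseudo instructions
 * (parallel copies, reductions, branches, spills, ...) with real machine
 * instructions. */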
void lower_to_hw_instr(Program* program)
{
   Block *discard_block = NULL;

   for (size_t i = 0; i < program->blocks.size(); i++)
   {
      Block *block = &program->blocks[i];
      lower_context ctx;
      ctx.program = program;
      Builder bld(program, &ctx.instructions);

      bool set_mode = i == 0 && block->fp_mode.val != program->config->float_mode;
      for (unsigned pred : block->linear_preds) {
         if (program->blocks[pred].fp_mode.val != block->fp_mode.val) {
            set_mode = true;
            break;
         }
      }

      if (set_mode) {
         /* only allow changing modes at top-level blocks so this doesn't break
          * the "jump over empty blocks" optimization */
         assert(block->kind & block_kind_top_level);
         uint32_t mode = block->fp_mode.val;
         /* "((size - 1) << 11) | register" (MODE is encoded as register 1) */
         bld.sopk(aco_opcode::s_setreg_imm32_b32, Operand(mode), (7 << 11) | 1);
      }
      for (size_t j = 0; j < block->instructions.size(); j++) {
         aco_ptr<Instruction>& instr = block->instructions[j];
         aco_ptr<Instruction> mov;
         if (instr->format == Format::PSEUDO) {
            Pseudo_instruction *pi = (Pseudo_instruction*)instr.get();

            switch (instr->opcode)
            {
            case aco_opcode::p_extract_vector:
            {
               PhysReg reg = instr->operands[0].physReg();
               Definition& def = instr->definitions[0];
               reg.reg_b += instr->operands[1].constantValue() * def.bytes();

               if (reg == def.physReg())
                  break;

               RegClass op_rc = def.regClass().is_subdword() ? def.regClass() :
                                RegClass(instr->operands[0].getTemp().type(), def.size());
               std::map<PhysReg, copy_operation> copy_operations;
               copy_operations[def.physReg()] = {Operand(reg, op_rc), def, def.bytes()};
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_create_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               PhysReg reg = instr->definitions[0].physReg();

               for (const Operand& op : instr->operands) {
                  if (op.isConstant()) {
                     const Definition def = Definition(reg, RegClass(instr->definitions[0].getTemp().type(), op.size()));
                     copy_operations[reg] = {op, def, op.bytes()};
                     reg.reg_b += op.bytes();
                     continue;
                  }
                  if (op.isUndefined()) {
                     // TODO: coalesce subdword copies if dst byte is 0
                     reg.reg_b += op.bytes();
                     continue;
                  }

                  RegClass rc_def = op.regClass().is_subdword() ? op.regClass() :
                                    RegClass(instr->definitions[0].getTemp().type(), op.size());
                  const Definition def = Definition(reg, rc_def);
                  copy_operations[def.physReg()] = {op, def, op.bytes()};
                  reg.reg_b += op.bytes();
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_split_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               PhysReg reg = instr->operands[0].physReg();

               for (const Definition& def : instr->definitions) {
                  RegClass rc_op = def.regClass().is_subdword() ? def.regClass() :
                                   RegClass(instr->operands[0].getTemp().type(), def.size());
                  const Operand op = Operand(reg, rc_op);
                  copy_operations[def.physReg()] = {op, def, def.bytes()};
                  reg.reg_b += def.bytes();
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_parallelcopy:
            case aco_opcode::p_wqm:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               for (unsigned i = 0; i < instr->operands.size(); i++) {
                  assert(instr->definitions[i].bytes() == instr->operands[i].bytes());
                  copy_operations[instr->definitions[i].physReg()] = {instr->operands[i], instr->definitions[i], instr->operands[i].bytes()};
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_exit_early_if:
            {
               /* don't bother with an early exit near the end of the program */
               if ((block->instructions.size() - 1 - j) <= 4 &&
                   block->instructions.back()->opcode == aco_opcode::s_endpgm) {
                  unsigned null_exp_dest = (ctx.program->stage & hw_fs) ? 9 /* NULL */ : V_008DFC_SQ_EXP_POS;
                  bool ignore_early_exit = true;

                  for (unsigned k = j + 1; k < block->instructions.size(); ++k) {
                     const aco_ptr<Instruction> &instr = block->instructions[k];
                     if (instr->opcode == aco_opcode::s_endpgm ||
                         instr->opcode == aco_opcode::p_logical_end)
                        continue;
                     else if (instr->opcode == aco_opcode::exp &&
                              static_cast<Export_instruction *>(instr.get())->dest == null_exp_dest)
                        continue;
                     else if (instr->opcode == aco_opcode::p_parallelcopy &&
                              instr->definitions[0].isFixed() &&
                              instr->definitions[0].physReg() == exec)
                        continue;

                     ignore_early_exit = false;
                  }

                  if (ignore_early_exit)
                     break;
               }

               if (!discard_block) {
                  discard_block = program->create_and_insert_block();
                  block = &program->blocks[i];

                  bld.reset(discard_block);
                  bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
                          0, V_008DFC_SQ_EXP_NULL, false, true, true);
                  if (program->wb_smem_l1_on_end)
                     bld.smem(aco_opcode::s_dcache_wb);
                  bld.sopp(aco_opcode::s_endpgm);

                  bld.reset(&ctx.instructions);
               }

               //TODO: exec can be zero here with block_kind_discard

               assert(instr->operands[0].physReg() == scc);
               bld.sopp(aco_opcode::s_cbranch_scc0, instr->operands[0], discard_block->index);

               discard_block->linear_preds.push_back(block->index);
               block->linear_succs.push_back(discard_block->index);
               break;
            }
            case aco_opcode::p_spill:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->operands[2].size(); i++)
                  bld.writelane(bld.def(v1, instr->operands[0].physReg()),
                                Operand(PhysReg{instr->operands[2].physReg() + i}, s1),
                                Operand(instr->operands[1].constantValue() + i),
                                instr->operands[0]);
               break;
            }
            case aco_opcode::p_reload:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->definitions[0].size(); i++)
                  bld.readlane(bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                               instr->operands[0],
                               Operand(instr->operands[1].constantValue() + i));
               break;
            }
            case aco_opcode::p_as_uniform:
            {
               if (instr->operands[0].isConstant() || instr->operands[0].regClass().type() == RegType::sgpr) {
                  std::map<PhysReg, copy_operation> copy_operations;
                  copy_operations[instr->definitions[0].physReg()] = {instr->operands[0], instr->definitions[0], instr->definitions[0].bytes()};
                  handle_operands(copy_operations, &ctx, program->chip_class, pi);
               } else {
                  assert(instr->operands[0].regClass().type() == RegType::vgpr);
                  assert(instr->definitions[0].regClass().type() == RegType::sgpr);
                  assert(instr->operands[0].size() == instr->definitions[0].size());
                  for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                     bld.vop1(aco_opcode::v_readfirstlane_b32,
                              bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                              Operand(PhysReg{instr->operands[0].physReg() + i}, v1));
                  }
               }
               break;
            }
            case aco_opcode::p_bpermute:
            {
               if (ctx.program->chip_class <= GFX7)
                  unreachable("Not implemented yet on GFX6-7"); /* TODO */
               else if (ctx.program->chip_class == GFX10 && ctx.program->wave_size == 64)
                  emit_gfx10_wave64_bpermute(program, instr, bld);
               else
                  unreachable("Current hardware supports ds_bpermute, don't emit p_bpermute.");
               break;
            }
            default:
               break;
            }
         } else if (instr->format == Format::PSEUDO_BRANCH) {
            Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(instr.get());
            /* check if all blocks from current to target are empty */
            bool can_remove = block->index < branch->target[0];
            for (unsigned i = block->index + 1; can_remove && i < branch->target[0]; i++) {
               if (program->blocks[i].instructions.size())
                  can_remove = false;
            }
            if (can_remove)
               continue;
            switch (instr->opcode) {
            case aco_opcode::p_branch:
               assert(block->linear_succs[0] == branch->target[0]);
               bld.sopp(aco_opcode::s_branch, branch->target[0]);
               break;
            case aco_opcode::p_cbranch_nz:
               assert(block->linear_succs[1] == branch->target[0]);
               if (branch->operands[0].physReg() == exec)
                  bld.sopp(aco_opcode::s_cbranch_execnz, branch->target[0]);
               else if (branch->operands[0].physReg() == vcc)
                  bld.sopp(aco_opcode::s_cbranch_vccnz, branch->target[0]);
               else {
                  assert(branch->operands[0].physReg() == scc);
                  bld.sopp(aco_opcode::s_cbranch_scc1, branch->target[0]);
               }
               break;
            case aco_opcode::p_cbranch_z:
               assert(block->linear_succs[1] == branch->target[0]);
               if (branch->operands[0].physReg() == exec)
                  bld.sopp(aco_opcode::s_cbranch_execz, branch->target[0]);
               else if (branch->operands[0].physReg() == vcc)
                  bld.sopp(aco_opcode::s_cbranch_vccz, branch->target[0]);
               else {
                  assert(branch->operands[0].physReg() == scc);
                  bld.sopp(aco_opcode::s_cbranch_scc0, branch->target[0]);
               }
               break;
            default:
               unreachable("Unknown Pseudo branch instruction!");
            }
         } else if (instr->format == Format::PSEUDO_REDUCTION) {
            Pseudo_reduction_instruction* reduce = static_cast<Pseudo_reduction_instruction*>(instr.get());
            emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size,
                           reduce->operands[1].physReg(), // tmp
                           reduce->definitions[1].physReg(), // stmp
                           reduce->operands[2].physReg(), // vtmp
                           reduce->definitions[2].physReg(), // sitmp
                           reduce->operands[0], reduce->definitions[0]);
         } else {
            ctx.instructions.emplace_back(std::move(instr));
         }
      }

      block->instructions.swap(ctx.instructions);
   }
}

} /* namespace aco */