/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
 *
 */

#include <map>

#include "aco_builder.h"
#include "util/u_math.h"
#include "vulkan/radv_shader.h"
struct lower_context {
   Program *program;
   std::vector<aco_ptr<Instruction>> instructions;
};
aco_opcode get_reduce_opcode(chip_class chip, ReduceOp op) {
   /* Because some 16-bit instructions are already VOP3 on GFX10, we use the
    * 32-bit opcodes (VOP2) which allows us to remove the temporary VGPR and
    * to use DPP with the arithmetic instructions. This requires sign-extension.
    */
   switch (op) {
   case iadd8:
   case iadd16:
      if (chip >= GFX10) {
         return aco_opcode::v_add_u32;
      } else if (chip >= GFX8) {
         return aco_opcode::v_add_u16;
      } else {
         return aco_opcode::v_add_co_u32;
      }
      break;
   case imul8:
   case imul16:
      if (chip >= GFX10) {
         return aco_opcode::v_mul_lo_u16_e64;
      } else if (chip >= GFX8) {
         return aco_opcode::v_mul_lo_u16;
      } else {
         return aco_opcode::v_mul_u32_u24;
      }
      break;
   case fadd16: return aco_opcode::v_add_f16;
   case fmul16: return aco_opcode::v_mul_f16;
   case imax8:
   case imax16:
      if (chip >= GFX10) {
         return aco_opcode::v_max_i32;
      } else if (chip >= GFX8) {
         return aco_opcode::v_max_i16;
      } else {
         return aco_opcode::v_max_i32;
      }
      break;
   case imin8:
   case imin16:
      if (chip >= GFX10) {
         return aco_opcode::v_min_i32;
      } else if (chip >= GFX8) {
         return aco_opcode::v_min_i16;
      } else {
         return aco_opcode::v_min_i32;
      }
      break;
   case umin8:
   case umin16:
      if (chip >= GFX10) {
         return aco_opcode::v_min_u32;
      } else if (chip >= GFX8) {
         return aco_opcode::v_min_u16;
      } else {
         return aco_opcode::v_min_u32;
      }
      break;
   case umax8:
   case umax16:
      if (chip >= GFX10) {
         return aco_opcode::v_max_u32;
      } else if (chip >= GFX8) {
         return aco_opcode::v_max_u16;
      } else {
         return aco_opcode::v_max_u32;
      }
      break;
   case fmin16: return aco_opcode::v_min_f16;
   case fmax16: return aco_opcode::v_max_f16;
   case iadd32: return chip >= GFX9 ? aco_opcode::v_add_u32 : aco_opcode::v_add_co_u32;
   case imul32: return aco_opcode::v_mul_lo_u32;
   case fadd32: return aco_opcode::v_add_f32;
   case fmul32: return aco_opcode::v_mul_f32;
   case imax32: return aco_opcode::v_max_i32;
   case imin32: return aco_opcode::v_min_i32;
   case umin32: return aco_opcode::v_min_u32;
   case umax32: return aco_opcode::v_max_u32;
   case fmin32: return aco_opcode::v_min_f32;
   case fmax32: return aco_opcode::v_max_f32;
   case iand8:
   case iand16:
   case iand32: return aco_opcode::v_and_b32;
   case ixor8:
   case ixor16:
   case ixor32: return aco_opcode::v_xor_b32;
   case ior8:
   case ior16:
   case ior32: return aco_opcode::v_or_b32;
   case iadd64: return aco_opcode::num_opcodes;
   case imul64: return aco_opcode::num_opcodes;
   case fadd64: return aco_opcode::v_add_f64;
   case fmul64: return aco_opcode::v_mul_f64;
   case imin64: return aco_opcode::num_opcodes;
   case imax64: return aco_opcode::num_opcodes;
   case umin64: return aco_opcode::num_opcodes;
   case umax64: return aco_opcode::num_opcodes;
   case fmin64: return aco_opcode::v_min_f64;
   case fmax64: return aco_opcode::v_max_f64;
   case iand64: return aco_opcode::num_opcodes;
   case ior64: return aco_opcode::num_opcodes;
   case ixor64: return aco_opcode::num_opcodes;
   default: return aco_opcode::num_opcodes;
   }
}
bool is_vop3_reduce_opcode(aco_opcode opcode)
{
   /* 64-bit reductions are VOP3. */
   if (opcode == aco_opcode::num_opcodes)
      return true;

   return instr_info.format[(int)opcode] == Format::VOP3;
}
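/* Helper that emits a 32-bit VGPR add through Builder::vadd32 and, when the
 * resulting instruction carries a second (carry-out) definition, pins that
 * carry to vcc so subsequent carry consumers can find it. */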
void emit_vadd32(Builder& bld, Definition def, Operand src0, Operand src1)
{
   Instruction *instr = bld.vadd32(def, src0, src1, false, Operand(s2), true);
   if (instr->definitions.size() >= 2) {
      assert(instr->definitions[1].regClass() == bld.lm);
      instr->definitions[1].setFixed(vcc);
   }
}
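/* 64-bit reduction operations have no single hardware opcode (note that
 * get_reduce_opcode() returns num_opcodes for them), so this helper expands
 * them into pairs of 32-bit instructions: add-with-carry for iadd64, per-half
 * bitwise ops, a 64-bit compare plus v_cndmask_b32 for min/max, and long
 * multiplication for imul64. This DPP variant applies the cross-lane swizzle
 * to the first source while combining. */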
void emit_int64_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
                       PhysReg vtmp_reg, ReduceOp op,
                       unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
                       Operand *identity=NULL)
{
   Builder bld(ctx->program, &ctx->instructions);
   Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
   Definition vtmp_def[] = {Definition(vtmp_reg, v1), Definition(PhysReg{vtmp_reg+1}, v1)};
   Operand src0[] = {Operand(src0_reg, v1), Operand(PhysReg{src0_reg+1}, v1)};
   Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
   Operand src1_64 = Operand(src1_reg, v2);
   Operand vtmp_op[] = {Operand(vtmp_reg, v1), Operand(PhysReg{vtmp_reg+1}, v1)};
   Operand vtmp_op64 = Operand(vtmp_reg, v2);

   if (op == iadd64) {
      if (ctx->program->chip_class >= GFX10) {
         if (identity)
            bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
         bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                      dpp_ctrl, row_mask, bank_mask, bound_ctrl);
         bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), vtmp_op[0], src1[0]);
      } else {
         bld.vop2_dpp(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0],
                      dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      }
      bld.vop2_dpp(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == iand64) {
      bld.vop2_dpp(aco_opcode::v_and_b32, dst[0], src0[0], src1[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop2_dpp(aco_opcode::v_and_b32, dst[1], src0[1], src1[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == ior64) {
      bld.vop2_dpp(aco_opcode::v_or_b32, dst[0], src0[0], src1[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop2_dpp(aco_opcode::v_or_b32, dst[1], src0[1], src1[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == ixor64) {
      bld.vop2_dpp(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop2_dpp(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
      aco_opcode cmp = aco_opcode::num_opcodes;
      switch (op) {
      case umin64:
         cmp = aco_opcode::v_cmp_gt_u64;
         break;
      case umax64:
         cmp = aco_opcode::v_cmp_lt_u64;
         break;
      case imin64:
         cmp = aco_opcode::v_cmp_gt_i64;
         break;
      case imax64:
         cmp = aco_opcode::v_cmp_lt_i64;
         break;
      default:
         break;
      }

      if (identity) {
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[1], identity[1]);
      }
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[1], src0[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);

      bld.vopc(cmp, bld.def(bld.lm, vcc), vtmp_op64, src1_64);
      bld.vop2(aco_opcode::v_cndmask_b32, dst[0], vtmp_op[0], src1[0], Operand(vcc, bld.lm));
      bld.vop2(aco_opcode::v_cndmask_b32, dst[1], vtmp_op[1], src1[1], Operand(vcc, bld.lm));
   } else if (op == imul64) {
      /* t4 = dpp(x_hi)
       * t1 = umul_lo(t4, y_lo)
       * t3 = dpp(x_lo)
       * t0 = umul_lo(t3, y_hi)
       * t2 = iadd(t0, t1)
       * t5 = umul_hi(t3, y_lo)
       * res_hi = iadd(t2, t5)
       * res_lo = umul_lo(t3, y_lo)
       * Requires that res_hi != src0[0] and res_hi != src1[0]
       * and that vtmp[0] != res_hi.
       */
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[1]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[1], vtmp_op[0], src1[0]);
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[0], vtmp_op[0], src1[1]);
      emit_vadd32(bld, vtmp_def[1], vtmp_op[0], vtmp_op[1]);
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_hi_u32, vtmp_def[0], vtmp_op[0], src1[0]);
      emit_vadd32(bld, dst[1], vtmp_op[1], vtmp_op[0]);
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], vtmp_op[0], src1[0]);
   }
}
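/* The imul64 paths above and in emit_int64_op below implement schoolbook
 * long multiplication modulo 2^64:
 *   (x_lo + 2^32 x_hi) * (y_lo + 2^32 y_hi) mod 2^64
 *     = x_lo*y_lo + 2^32 * (x_lo*y_hi + x_hi*y_lo) mod 2^64
 * so res_lo = mul_lo(x_lo, y_lo) and
 *    res_hi = mul_hi(x_lo, y_lo) + mul_lo(x_lo, y_hi) + mul_lo(x_hi, y_lo).
 * The x_hi*y_hi term is shifted out of the low 64 bits entirely. */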
void emit_int64_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp, ReduceOp op)
{
   Builder bld(ctx->program, &ctx->instructions);
   Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
   RegClass src0_rc = src0_reg.reg() >= 256 ? v1 : s1;
   Operand src0[] = {Operand(src0_reg, src0_rc), Operand(PhysReg{src0_reg+1}, src0_rc)};
   Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
   Operand src0_64 = Operand(src0_reg, src0_reg.reg() >= 256 ? v2 : s2);
   Operand src1_64 = Operand(src1_reg, v2);

   if (src0_rc == s1 &&
       (op == imul64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)) {
      assert(vtmp.reg() != 0);
      bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), src0[0]);
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
      src0_reg = vtmp;
      src0[0] = Operand(vtmp, v1);
      src0[1] = Operand(PhysReg{vtmp+1}, v1);
      src0_64 = Operand(vtmp, v2);
   } else if (src0_rc == s1 && op == iadd64) {
      assert(vtmp.reg() != 0);
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
      src0[1] = Operand(PhysReg{vtmp+1}, v1);
   }

   if (op == iadd64) {
      if (ctx->program->chip_class >= GFX10) {
         bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
      } else {
         bld.vop2(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
      }
      bld.vop2(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm));
   } else if (op == iand64) {
      bld.vop2(aco_opcode::v_and_b32, dst[0], src0[0], src1[0]);
      bld.vop2(aco_opcode::v_and_b32, dst[1], src0[1], src1[1]);
   } else if (op == ior64) {
      bld.vop2(aco_opcode::v_or_b32, dst[0], src0[0], src1[0]);
      bld.vop2(aco_opcode::v_or_b32, dst[1], src0[1], src1[1]);
   } else if (op == ixor64) {
      bld.vop2(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0]);
      bld.vop2(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1]);
   } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
      aco_opcode cmp = aco_opcode::num_opcodes;
      switch (op) {
      case umin64:
         cmp = aco_opcode::v_cmp_gt_u64;
         break;
      case umax64:
         cmp = aco_opcode::v_cmp_lt_u64;
         break;
      case imin64:
         cmp = aco_opcode::v_cmp_gt_i64;
         break;
      case imax64:
         cmp = aco_opcode::v_cmp_lt_i64;
         break;
      default:
         break;
      }

      bld.vopc(cmp, bld.def(bld.lm, vcc), src0_64, src1_64);
      bld.vop2(aco_opcode::v_cndmask_b32, dst[0], src0[0], src1[0], Operand(vcc, bld.lm));
      bld.vop2(aco_opcode::v_cndmask_b32, dst[1], src0[1], src1[1], Operand(vcc, bld.lm));
   } else if (op == imul64) {
      if (src1_reg == dst_reg) {
         /* it's fine if src0==dst but not if src1==dst */
         std::swap(src0_reg, src1_reg);
         std::swap(src0[0], src1[0]);
         std::swap(src0[1], src1[1]);
         std::swap(src0_64, src1_64);
      }
      assert(!(src0_reg == src1_reg));
      /* t1 = umul_lo(x_hi, y_lo)
       * t0 = umul_lo(x_lo, y_hi)
       * t2 = iadd(t0, t1)
       * t5 = umul_hi(x_lo, y_lo)
       * res_hi = iadd(t2, t5)
       * res_lo = umul_lo(x_lo, y_lo)
       * assumes that it's ok to modify x_hi/y_hi, since we might not have vtmp
       */
      Definition tmp0_def(PhysReg{src0_reg+1}, v1);
      Definition tmp1_def(PhysReg{src1_reg+1}, v1);
      Operand tmp0_op = src0[1];
      Operand tmp1_op = src1[1];
      bld.vop3(aco_opcode::v_mul_lo_u32, tmp0_def, src0[1], src1[0]);
      bld.vop3(aco_opcode::v_mul_lo_u32, tmp1_def, src0[0], src1[1]);
      emit_vadd32(bld, tmp0_def, tmp1_op, tmp0_op);
      bld.vop3(aco_opcode::v_mul_hi_u32, tmp1_def, src0[0], src1[0]);
      emit_vadd32(bld, dst[1], tmp0_op, tmp1_op);
      bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], src0[0], src1[0]);
   }
}
void emit_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
                 PhysReg vtmp, ReduceOp op, unsigned size,
                 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
                 Operand *identity=NULL) /* for VOP3 with sparse writes */
{
   Builder bld(ctx->program, &ctx->instructions);
   RegClass rc = RegClass(RegType::vgpr, size);
   Definition dst(dst_reg, rc);
   Operand src0(src0_reg, rc);
   Operand src1(src1_reg, rc);

   aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
   bool vop3 = is_vop3_reduce_opcode(opcode);

   if (!vop3) {
      if (opcode == aco_opcode::v_add_co_u32)
         bld.vop2_dpp(opcode, dst, bld.def(bld.lm, vcc), src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      else
         bld.vop2_dpp(opcode, dst, src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      return;
   }

   if (opcode == aco_opcode::num_opcodes) {
      emit_int64_dpp_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op,
                        dpp_ctrl, row_mask, bank_mask, bound_ctrl, identity);
      return;
   }

   if (identity)
      bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), identity[0]);
   if (identity && size >= 2)
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), identity[1]);

   for (unsigned i = 0; i < size; i++)
      bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{src0_reg+i}, v1),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);

   bld.vop3(opcode, dst, Operand(vtmp, rc), src1);
}
void emit_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
             PhysReg vtmp, ReduceOp op, unsigned size)
{
   Builder bld(ctx->program, &ctx->instructions);
   RegClass rc = RegClass(RegType::vgpr, size);
   Definition dst(dst_reg, rc);
   Operand src0(src0_reg, RegClass(src0_reg.reg() >= 256 ? RegType::vgpr : RegType::sgpr, size));
   Operand src1(src1_reg, rc);

   aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
   bool vop3 = is_vop3_reduce_opcode(opcode);

   if (opcode == aco_opcode::num_opcodes) {
      emit_int64_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op);
      return;
   }

   if (vop3) {
      bld.vop3(opcode, dst, src0, src1);
   } else if (opcode == aco_opcode::v_add_co_u32) {
      bld.vop2(opcode, dst, bld.def(bld.lm, vcc), src0, src1);
   } else {
      bld.vop2(opcode, dst, src0, src1);
   }
}
void emit_dpp_mov(lower_context *ctx, PhysReg dst, PhysReg src0, unsigned size,
                  unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl)
{
   Builder bld(ctx->program, &ctx->instructions);
   for (unsigned i = 0; i < size; i++) {
      bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{dst+i}, v1), Operand(PhysReg{src0+i}, v1),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   }
}
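/* Returns the neutral element of the reduction operation for the given dword
 * (idx selects the low or high half of a 64-bit identity): 0 for adds, ors
 * and xors, 1 for multiplies, the type's extreme value for integer min/max,
 * and (negative) infinity for float min/max. Inactive lanes are initialized
 * with this value so they don't affect the result. */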
uint32_t get_reduction_identity(ReduceOp op, unsigned idx)
{
   switch (op) {
   case iadd8:
   case iadd16:
   case iadd32:
   case iadd64:
   case fadd16:
   case fadd32:
   case fadd64:
   case ior8:
   case ior16:
   case ior32:
   case ior64:
   case ixor8:
   case ixor16:
   case ixor32:
   case ixor64:
   case umax8:
   case umax16:
   case umax32:
   case umax64:
      return 0;
   case imul8:
   case imul16:
   case imul32:
   case imul64:
      return idx ? 0 : 1;
   case fmul16:
      return 0x3c00u; /* 1.0 */
   case fmul32:
      return 0x3f800000u; /* 1.0 */
   case fmul64:
      return idx ? 0x3ff00000u : 0u; /* 1.0 */
   case imin8:
      return INT8_MAX;
   case imin16:
      return INT16_MAX;
   case imin32:
      return INT32_MAX;
   case imin64:
      return idx ? 0x7fffffffu : 0xffffffffu;
   case imax8:
      return INT8_MIN;
   case imax16:
      return INT16_MIN;
   case imax32:
      return INT32_MIN;
   case imax64:
      return idx ? 0x80000000u : 0;
   case umin8:
   case umin16:
   case umin32:
   case umin64:
   case iand8:
   case iand16:
   case iand32:
   case iand64:
      return 0xffffffffu;
   case fmin16:
      return 0x7c00u; /* infinity */
   case fmin32:
      return 0x7f800000u; /* infinity */
   case fmin64:
      return idx ? 0x7ff00000u : 0u; /* infinity */
   case fmax16:
      return 0xfc00u; /* negative infinity */
   case fmax32:
      return 0xff800000u; /* negative infinity */
   case fmax64:
      return idx ? 0xfff00000u : 0u; /* negative infinity */
   default:
      unreachable("Invalid reduction operation");
      break;
   }
   return 0;
}
void emit_ds_swizzle(Builder bld, PhysReg dst, PhysReg src, unsigned size, unsigned ds_pattern)
{
   for (unsigned i = 0; i < size; i++) {
      bld.ds(aco_opcode::ds_swizzle_b32, Definition(PhysReg{dst+i}, v1),
             Operand(PhysReg{src+i}, v1), ds_pattern);
   }
}
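/* Lowers p_reduce/p_inclusive_scan/p_exclusive_scan to a butterfly sequence:
 * each step combines lanes that are 1, 2, 4, ... apart, doubling the cluster
 * width, using DPP swizzles on GFX8+ and ds_swizzle_b32 on GFX6/GFX7 (plus
 * v_permlanex16_b32 on GFX10, where row_bcast is unavailable). */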
void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsigned cluster_size, PhysReg tmp,
                    PhysReg stmp, PhysReg vtmp, PhysReg sitmp, Operand src, Definition dst)
{
   assert(cluster_size == ctx->program->wave_size || op == aco_opcode::p_reduce);
   assert(cluster_size <= ctx->program->wave_size);

   Builder bld(ctx->program, &ctx->instructions);

   Operand identity[2];
   identity[0] = Operand(get_reduction_identity(reduce_op, 0));
   identity[1] = Operand(get_reduction_identity(reduce_op, 1));
   Operand vcndmask_identity[2] = {identity[0], identity[1]};

   /* First, copy the source to tmp and set inactive lanes to the identity */
   bld.sop1(Builder::s_or_saveexec, Definition(stmp, bld.lm), Definition(scc, s1), Definition(exec, bld.lm), Operand(UINT64_MAX), Operand(exec, bld.lm));

   for (unsigned i = 0; i < src.size(); i++) {
      /* p_exclusive_scan needs it to be a sgpr or inline constant for the v_writelane_b32
       * except on GFX10, where v_writelane_b32 can take a literal. */
      if (identity[i].isLiteral() && op == aco_opcode::p_exclusive_scan && ctx->program->chip_class < GFX10) {
         bld.sop1(aco_opcode::s_mov_b32, Definition(PhysReg{sitmp+i}, s1), identity[i]);
         identity[i] = Operand(PhysReg{sitmp+i}, s1);

         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      } else if (identity[i].isLiteral()) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      }
   }

   for (unsigned i = 0; i < src.size(); i++) {
      bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(PhysReg{tmp + i}, v1),
                   vcndmask_identity[i], Operand(PhysReg{src.physReg() + i}, v1),
                   Operand(stmp, bld.lm));
   }

   if (src.regClass() == v1b) {
      aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
      sdwa->operands[0] = Operand(PhysReg{tmp}, v1);
      sdwa->definitions[0] = Definition(PhysReg{tmp}, v1);
      if (reduce_op == imin8 || reduce_op == imax8)
         sdwa->sel[0] = sdwa_sbyte;
      else
         sdwa->sel[0] = sdwa_ubyte;
      sdwa->dst_sel = sdwa_udword;
      bld.insert(std::move(sdwa));
   } else if (src.regClass() == v2b) {
      if (ctx->program->chip_class >= GFX10 &&
          (reduce_op == iadd16 || reduce_op == imax16 ||
           reduce_op == imin16 || reduce_op == umin16 || reduce_op == umax16)) {
         aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
         sdwa->operands[0] = Operand(PhysReg{tmp}, v1);
         sdwa->definitions[0] = Definition(PhysReg{tmp}, v1);
         if (reduce_op == imin16 || reduce_op == imax16 || reduce_op == iadd16)
            sdwa->sel[0] = sdwa_sword;
         else
            sdwa->sel[0] = sdwa_uword;
         sdwa->dst_sel = sdwa_udword;
         bld.insert(std::move(sdwa));
      } else if (ctx->program->chip_class == GFX6 || ctx->program->chip_class == GFX7) {
         bld.vop3(aco_opcode::v_bfe_i32, Definition(PhysReg{tmp}, v1),
                  Operand(PhysReg{tmp}, v1), Operand(0u), Operand(16u));
      }
   }
   bool reduction_needs_last_op = false;
   switch (op) {
   case aco_opcode::p_reduce:
      if (cluster_size == 1) break;

      if (ctx->program->chip_class <= GFX7) {
         reduction_needs_last_op = true;
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(1, 0, 3, 2));
         if (cluster_size == 2) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(2, 3, 0, 1));
         if (cluster_size == 4) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x04));
         if (cluster_size == 8) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x08));
         if (cluster_size == 16) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
         if (cluster_size == 32) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         for (unsigned i = 0; i < src.size(); i++)
            bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp + i}, v1), Operand(0u));
         // TODO: it would be more effective to do the last reduction step on SALU
         emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
         reduction_needs_last_op = false;
         break;
      }

      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(1, 0, 3, 2), 0xf, 0xf, false);
      if (cluster_size == 2) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(2, 3, 0, 1), 0xf, 0xf, false);
      if (cluster_size == 4) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_half_mirror, 0xf, 0xf, false);
      if (cluster_size == 8) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_mirror, 0xf, 0xf, false);
      if (cluster_size == 16) break;

      if (ctx->program->chip_class >= GFX10) {
         /* GFX10+ doesn't support row_bcast15 and row_bcast31 */
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), Operand(0u), Operand(0u));

         if (cluster_size == 32) {
            reduction_needs_last_op = true;
            break;
         }

         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
         for (unsigned i = 0; i < src.size(); i++)
            bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(0u));
         // TODO: it would be more effective to do the last reduction step on SALU
         emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
         break;
      }

      if (cluster_size == 32) {
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
         reduction_needs_last_op = true;
         break;
      }
      assert(cluster_size == 64);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast15, 0xa, 0xf, false);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast31, 0xc, 0xf, false);
      break;
   case aco_opcode::p_exclusive_scan:
      if (ctx->program->chip_class >= GFX10) { /* gfx10 doesn't support wf_sr1, so emulate it */
         /* shift rows right */
         emit_dpp_mov(ctx, vtmp, tmp, src.size(), dpp_row_sr(1), 0xf, 0xf, true);

         /* fill in the gaps in rows 1 and 3 */
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10000u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0x10000u));
         for (unsigned i = 0; i < src.size(); i++) {
            Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
                                         Definition(PhysReg{vtmp+i}, v1),
                                         Operand(PhysReg{tmp+i}, v1),
                                         Operand(0xffffffffu), Operand(0xffffffffu)).instr;
            static_cast<VOP3A_instruction*>(perm)->opsel = 1; /* FI (Fetch Inactive) */
         }
         bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(UINT64_MAX));

         if (ctx->program->wave_size == 64) {
            /* fill in the gap in row 2 */
            for (unsigned i = 0; i < src.size(); i++) {
               bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
               bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
            }
         }
         std::swap(tmp, vtmp);
      } else if (ctx->program->chip_class >= GFX8) {
         emit_dpp_mov(ctx, tmp, tmp, src.size(), dpp_wf_sr1, 0xf, 0xf, true);
      } else {
         // TODO: use LDS on CS with a single write and shifted read
         /* wavefront shift_right by 1 on SI/CI */
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(0, 0, 1, 2));
         emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x07)); /* mirror(8) */
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10101010u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x08)); /* swap(8) */
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x01000100u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x10)); /* swap(16) */
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(1u), Operand(16u));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(1u), Operand(16u));
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         for (unsigned i = 0; i < src.size(); i++) {
            bld.writelane(Definition(PhysReg{vtmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{vtmp+i}, v1));
            bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(0u));
            bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
            identity[i] = Operand(0u); /* prevent further uses of identity */
         }
         std::swap(tmp, vtmp);
      }

      for (unsigned i = 0; i < src.size(); i++) {
         if (!identity[i].isConstant() || identity[i].constantValue()) { /* bound_ctrl should take care of this otherwise */
            if (ctx->program->chip_class < GFX10)
               assert((identity[i].isConstant() && !identity[i].isLiteral()) || identity[i].physReg() == PhysReg{sitmp+i});
            bld.writelane(Definition(PhysReg{tmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{tmp+i}, v1));
         }
      }
      /* fall through */
   case aco_opcode::p_inclusive_scan:
      assert(cluster_size == ctx->program->wave_size);
      if (ctx->program->chip_class <= GFX7) {
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1e, 0x00, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xAAAAAAAAu));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1c, 0x01, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xCCCCCCCCu));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x18, 0x03, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xF0F0F0F0u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x10, 0x07, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xFF00FF00u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x00, 0x0f, 0x00));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(16u), Operand(16u));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(16u), Operand(16u));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         for (unsigned i = 0; i < src.size(); i++)
            bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
         bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
         emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
         break;
      }

      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(1), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(2), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(4), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(8), 0xf, 0xf, false, identity);
      if (ctx->program->chip_class >= GFX10) {
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(16u), Operand(16u));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(16u), Operand(16u));
         for (unsigned i = 0; i < src.size(); i++) {
            Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
                                         Definition(PhysReg{vtmp+i}, v1),
                                         Operand(PhysReg{tmp+i}, v1),
                                         Operand(0xffffffffu), Operand(0xffffffffu)).instr;
            static_cast<VOP3A_instruction*>(perm)->opsel = 1; /* FI (Fetch Inactive) */
         }
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         if (ctx->program->wave_size == 64) {
            bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
            for (unsigned i = 0; i < src.size(); i++)
               bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
            emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
         }
      } else {
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast15, 0xa, 0xf, false, identity);
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast31, 0xc, 0xf, false, identity);
      }
      break;
   default:
      unreachable("Invalid reduction mode");
   }
   if (op == aco_opcode::p_reduce) {
      if (reduction_needs_last_op && dst.regClass().type() == RegType::vgpr) {
         bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
         emit_op(ctx, dst.physReg(), tmp, vtmp, PhysReg{0}, reduce_op, src.size());
         return;
      }

      if (reduction_needs_last_op)
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
   }

   /* restore exec */
   bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));

   if (dst.regClass().type() == RegType::sgpr) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.readlane(Definition(PhysReg{dst.physReg() + k}, s1),
                      Operand(PhysReg{tmp + k}, v1), Operand(ctx->program->wave_size - 1));
      }
   } else if (dst.physReg() != tmp) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{dst.physReg() + k}, v1),
                  Operand(PhysReg{tmp + k}, v1));
      }
   }
}
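/* For illustration, a rough trace of a wave64 p_reduce with cluster_size=64
 * on GFX9 (one-dword source): save exec, v_cndmask the identity into inactive
 * lanes, then combine via quad_perm(1,0,3,2), quad_perm(2,3,0,1),
 * row_half_mirror, row_mirror, row_bcast15 and row_bcast31, restore exec,
 * and finally read the result out of the last lane. */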
void emit_gfx10_wave64_bpermute(Program *program, aco_ptr<Instruction> &instr, Builder &bld)
{
   /* Emulates proper bpermute on GFX10 in wave64 mode.
    *
    * This is necessary because on GFX10 the bpermute instruction only works
    * on half waves (you can think of it as having a cluster size of 32), so we
    * manually swap the data between the two halves using two shared VGPRs.
    */

   assert(program->chip_class >= GFX10);
   assert(program->info->wave_size == 64);

   unsigned shared_vgpr_reg_0 = align(program->config->num_vgprs, 4) + 256;
   Definition dst = instr->definitions[0];
   Definition tmp_exec = instr->definitions[1];
   Definition clobber_scc = instr->definitions[2];
   Operand index_x4 = instr->operands[0];
   Operand input_data = instr->operands[1];
   Operand same_half = instr->operands[2];

   assert(dst.regClass() == v1);
   assert(tmp_exec.regClass() == bld.lm);
   assert(clobber_scc.isFixed() && clobber_scc.physReg() == scc);
   assert(same_half.regClass() == bld.lm);
   assert(index_x4.regClass() == v1);
   assert(input_data.regClass().type() == RegType::vgpr);
   assert(input_data.bytes() <= 4);
   assert(dst.physReg() != index_x4.physReg());
   assert(dst.physReg() != input_data.physReg());
   assert(tmp_exec.physReg() != same_half.physReg());

   PhysReg shared_vgpr_lo(shared_vgpr_reg_0);
   PhysReg shared_vgpr_hi(shared_vgpr_reg_0 + 1);

   /* Permute the input within the same half-wave */
   bld.ds(aco_opcode::ds_bpermute_b32, dst, index_x4, input_data);

   /* HI: Copy data from high lanes 32-63 to shared vgpr */
   bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(shared_vgpr_hi, v1), input_data, dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
   /* Save EXEC */
   bld.sop1(aco_opcode::s_mov_b64, tmp_exec, Operand(exec, s2));
   /* Set EXEC to enable LO lanes only */
   bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(0u));
   /* LO: Copy data from low lanes 0-31 to shared vgpr */
   bld.vop1(aco_opcode::v_mov_b32, Definition(shared_vgpr_lo, v1), input_data);
   /* LO: bpermute shared vgpr (high lanes' data) */
   bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_hi, v1), index_x4, Operand(shared_vgpr_hi, v1));
   /* Set EXEC to enable HI lanes only */
   bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
   /* HI: bpermute shared vgpr (low lanes' data) */
   bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_lo, v1), index_x4, Operand(shared_vgpr_lo, v1));

   /* Only enable lanes which use the other half's data */
   bld.sop2(aco_opcode::s_andn2_b64, Definition(exec, s2), clobber_scc, Operand(tmp_exec.physReg(), s2), same_half);
   /* LO: Copy shared vgpr (high lanes' bpermuted data) to output vgpr */
   bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_hi, v1), dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
   /* HI: Copy shared vgpr (low lanes' bpermuted data) to output vgpr */
   bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_lo, v1), dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);

   /* Restore saved EXEC */
   bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(tmp_exec.physReg(), s2));

   /* RA assumes that the result is always in the low part of the register, so we have to shift, if it's not there already */
   if (input_data.physReg().byte()) {
      unsigned right_shift = input_data.physReg().byte() * 8;
      bld.vop2(aco_opcode::v_lshrrev_b32, dst, Operand(right_shift), Operand(dst.physReg(), v1));
   }
}
void emit_gfx6_bpermute(Program *program, aco_ptr<Instruction> &instr, Builder &bld)
{
   /* Emulates bpermute using readlane instructions */

   Operand index = instr->operands[0];
   Operand input = instr->operands[1];
   Definition dst = instr->definitions[0];
   Definition temp_exec = instr->definitions[1];
   Definition clobber_vcc = instr->definitions[2];

   assert(dst.regClass() == v1);
   assert(temp_exec.regClass() == bld.lm);
   assert(clobber_vcc.regClass() == bld.lm);
   assert(clobber_vcc.physReg() == vcc);
   assert(index.regClass() == v1);
   assert(index.physReg() != dst.physReg());
   assert(input.regClass().type() == RegType::vgpr);
   assert(input.bytes() <= 4);
   assert(input.physReg() != dst.physReg());

   /* Save original EXEC */
   bld.sop1(aco_opcode::s_mov_b64, temp_exec, Operand(exec, s2));

   /* An "unrolled loop" that is executed per each lane.
    * This takes only a few instructions per lane, as opposed to a "real" loop
    * with branching, where the branch instruction alone would take 16+ cycles.
    */
   for (unsigned n = 0; n < program->wave_size; ++n) {
      /* Activate the lane which has N for its source index */
      bld.vopc(aco_opcode::v_cmpx_eq_u32, Definition(exec, bld.lm), clobber_vcc, Operand(n), index);
      /* Read the data from lane N */
      bld.readlane(Definition(vcc, s1), input, Operand(n));
      /* On the active lane, move the data we read from lane N to the destination VGPR */
      bld.vop1(aco_opcode::v_mov_b32, dst, Operand(vcc, s1));
      /* Restore original EXEC */
      bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(temp_exec.physReg(), s2));
   }
}
struct copy_operation {
   Operand op;
   Definition def;
   unsigned bytes;
   union {
      uint8_t uses[8];
      uint64_t is_used = 0;
   };
};
void split_copy(unsigned offset, Definition *def, Operand *op, const copy_operation& src, bool ignore_uses, unsigned max_size)
{
   PhysReg def_reg = src.def.physReg();
   PhysReg op_reg = src.op.physReg();
   def_reg.reg_b += offset;
   op_reg.reg_b += offset;

   max_size = MIN2(max_size, src.def.regClass().type() == RegType::vgpr ? 4 : 8);

   /* make sure the size is a power of two and reg % bytes == 0 */
   unsigned bytes = 1;
   for (; bytes <= max_size; bytes *= 2) {
      unsigned next = bytes * 2u;
      bool can_increase = def_reg.reg_b % next == 0 &&
                          offset + next <= src.bytes && next <= max_size;
      if (!src.op.isConstant() && can_increase)
         can_increase = op_reg.reg_b % next == 0;
      for (unsigned i = 0; !ignore_uses && can_increase && (i < bytes); i++)
         can_increase = (src.uses[offset + bytes + i] == 0) == (src.uses[offset] == 0);
      if (!can_increase)
         break;
   }

   RegClass def_cls = bytes % 4 == 0 ? RegClass(src.def.regClass().type(), bytes / 4u) :
                      RegClass(src.def.regClass().type(), bytes).as_subdword();
   *def = Definition(src.def.tempId(), def_reg, def_cls);
   if (src.op.isConstant()) {
      assert(offset == 0 || (offset == 4 && src.op.bytes() == 8));
      if (src.op.bytes() == 8 && bytes == 4)
         *op = Operand(uint32_t(src.op.constantValue64() >> (offset * 8u)));
      else
         *op = src.op;
   } else {
      RegClass op_cls = bytes % 4 == 0 ? RegClass(src.op.regClass().type(), bytes / 4u) :
                        RegClass(src.op.regClass().type(), bytes).as_subdword();
      *op = Operand(op_reg, op_cls);
      op->setTemp(Temp(src.op.tempId(), op_cls));
   }
}
uint32_t get_intersection_mask(int a_start, int a_size,
                               int b_start, int b_size)
{
   int intersection_start = MAX2(b_start - a_start, 0);
   int intersection_end = MAX2(b_start + b_size - a_start, 0);
   if (intersection_start >= a_size || intersection_end == 0)
      return 0;

   uint32_t mask = u_bit_consecutive(0, a_size);
   return u_bit_consecutive(intersection_start, intersection_end - intersection_start) & mask;
}
bool do_copy(lower_context* ctx, Builder& bld, const copy_operation& copy, bool *preserve_scc)
{
   bool did_copy = false;
   for (unsigned offset = 0; offset < copy.bytes;) {
      if (copy.uses[offset]) {
         offset++;
         continue;
      }

      Definition def;
      Operand op;
      split_copy(offset, &def, &op, copy, false, 8);

      if (def.physReg() == scc) {
         bld.sopc(aco_opcode::s_cmp_lg_i32, def, op, Operand(0u));
         *preserve_scc = true;
      } else if (def.bytes() == 8 && def.getTemp().type() == RegType::sgpr) {
         bld.sop1(aco_opcode::s_mov_b64, def, Operand(op.physReg(), s2));
      } else {
         bld.copy(def, op);
      }
      ctx->program->statistics[statistic_copies]++;

      did_copy = true;
      offset += def.bytes();
   }
   return did_copy;
}
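/* Swaps the contents of two registers without a spare temporary where
 * possible: v_swap_b32 on GFX9+, otherwise the classic three-xor trick
 * (a ^= b; b ^= a; a ^= b) for VGPRs and SGPRs, with special handling for
 * scc and for sub-dword swaps. */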
void do_swap(lower_context *ctx, Builder& bld, const copy_operation& copy, bool preserve_scc, Pseudo_instruction *pi)
{
   unsigned offset = 0;

   if (copy.bytes == 3 && (copy.def.physReg().reg_b % 4 <= 1) &&
       (copy.def.physReg().reg_b % 4) == (copy.op.physReg().reg_b % 4)) {
      /* instead of doing a 2-byte and 1-byte swap, do a 4-byte swap and then fixup with a 1-byte swap */
      PhysReg op = copy.op.physReg();
      PhysReg def = copy.def.physReg();
      op.reg_b &= ~0x3;
      def.reg_b &= ~0x3;

      copy_operation tmp;
      tmp.op = Operand(op, v1);
      tmp.def = Definition(def, v1);
      tmp.bytes = 4;
      memset(tmp.uses, 1, 4);
      do_swap(ctx, bld, tmp, preserve_scc, pi);

      op.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
      def.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
      tmp.op = Operand(op, v1b);
      tmp.def = Definition(def, v1b);
      tmp.bytes = 1;
      memset(tmp.uses, 1, 1);
      do_swap(ctx, bld, tmp, preserve_scc, pi);

      offset = copy.bytes;
   }

   for (; offset < copy.bytes;) {
      Definition def;
      Operand op;
      split_copy(offset, &def, &op, copy, true, 8);

      assert(op.regClass() == def.regClass());
      Operand def_as_op = Operand(def.physReg(), def.regClass());
      Definition op_as_def = Definition(op.physReg(), op.regClass());
      if (ctx->program->chip_class >= GFX9 && def.regClass() == v1) {
         bld.vop1(aco_opcode::v_swap_b32, def, op_as_def, op, def_as_op);
         ctx->program->statistics[statistic_copies]++;
      } else if (def.regClass() == v1) {
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, def, op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         ctx->program->statistics[statistic_copies] += 3;
      } else if (op.physReg() == scc || def.physReg() == scc) {
         /* we need to swap scc and another sgpr */
         assert(!preserve_scc);

         PhysReg other = op.physReg() == scc ? def.physReg() : op.physReg();

         bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
         bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1), Operand(0u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1));
         ctx->program->statistics[statistic_copies] += 3;
      } else if (def.regClass() == s1) {
         if (preserve_scc) {
            bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), op);
            bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op);
            bld.sop1(aco_opcode::s_mov_b32, def, Operand(pi->scratch_sgpr, s1));
         } else {
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, def, Definition(scc, s1), op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
         }
         ctx->program->statistics[statistic_copies] += 3;
      } else if (def.regClass() == s2) {
         if (preserve_scc)
            bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
         bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
         bld.sop2(aco_opcode::s_xor_b64, def, Definition(scc, s1), op, def_as_op);
         bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
         if (preserve_scc)
            bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(pi->scratch_sgpr, s1), Operand(0u));
         ctx->program->statistics[statistic_copies] += 3;
      } else if (ctx->program->chip_class >= GFX9 && def.bytes() == 2 && def.physReg().reg() == op.physReg().reg()) {
         aco_ptr<VOP3P_instruction> vop3p{create_instruction<VOP3P_instruction>(aco_opcode::v_pk_add_u16, Format::VOP3P, 2, 1)};
         vop3p->operands[0] = Operand(PhysReg{op.physReg().reg()}, v1);
         vop3p->operands[1] = Operand(0u);
         vop3p->definitions[0] = Definition(PhysReg{op.physReg().reg()}, v1);
         vop3p->opsel_lo = 0x1;
         vop3p->opsel_hi = 0x2;
         bld.insert(std::move(vop3p));
      } else {
         assert(def.regClass().is_subdword());
         bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         bld.vop2_sdwa(aco_opcode::v_xor_b32, def, op, def_as_op);
         bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         ctx->program->statistics[statistic_copies] += 3;
      }

      offset += def.bytes();
   }

   /* fixup in case we swapped bytes we shouldn't have */
   copy_operation tmp_copy = copy;
   tmp_copy.op.setFixed(copy.def.physReg());
   tmp_copy.def.setFixed(copy.op.physReg());
   do_copy(ctx, bld, tmp_copy, &preserve_scc);
}
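/* Emits a parallel copy as hardware moves. First counts, for every
 * destination byte, how many other copies still read it; copies whose
 * destination is unread (paths in the location transfer graph) are emitted
 * directly, which can unblock others. Whatever remains forms cycles, which
 * are resolved with register swaps. */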
void handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context* ctx, chip_class chip_class, Pseudo_instruction *pi)
{
   Builder bld(ctx->program, &ctx->instructions);
   aco_ptr<Instruction> mov;
   std::map<PhysReg, copy_operation>::iterator it = copy_map.begin();
   std::map<PhysReg, copy_operation>::iterator target;
   bool writes_scc = false;

   /* count the number of uses for each dst reg */
   while (it != copy_map.end()) {

      if (it->second.def.physReg() == scc)
         writes_scc = true;

      assert(!pi->tmp_in_scc || !(it->second.def.physReg() == pi->scratch_sgpr));

      /* if src and dst reg are the same, remove operation */
      if (it->first == it->second.op.physReg()) {
         it = copy_map.erase(it);
         continue;
      }

      /* split large copies */
      if (it->second.bytes > 8) {
         assert(!it->second.op.isConstant());
         assert(!it->second.def.regClass().is_subdword());
         RegClass rc = RegClass(it->second.def.regClass().type(), it->second.def.size() - 2);
         Definition hi_def = Definition(PhysReg{it->first + 2}, rc);
         rc = RegClass(it->second.op.regClass().type(), it->second.op.size() - 2);
         Operand hi_op = Operand(PhysReg{it->second.op.physReg() + 2}, rc);
         copy_operation copy = {hi_op, hi_def, it->second.bytes - 8};
         copy_map[hi_def.physReg()] = copy;
         assert(it->second.op.physReg().byte() == 0 && it->second.def.physReg().byte() == 0);
         it->second.op = Operand(it->second.op.physReg(), it->second.op.regClass().type() == RegType::sgpr ? s2 : v2);
         it->second.def = Definition(it->second.def.physReg(), it->second.def.regClass().type() == RegType::sgpr ? s2 : v2);
         it->second.bytes = 8;
      }

      /* check if the definition reg is used by another copy operation */
      for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
         if (copy.second.op.isConstant())
            continue;
         for (uint16_t i = 0; i < it->second.bytes; i++) {
            /* distance might underflow */
            unsigned distance = it->first.reg_b + i - copy.second.op.physReg().reg_b;
            if (distance < copy.second.bytes)
               it->second.uses[i] += 1;
         }
      }

      ++it;
   }
   /* first, handle paths in the location transfer graph */
   bool preserve_scc = pi->tmp_in_scc && !writes_scc;
   it = copy_map.begin();
   while (it != copy_map.end()) {

      /* try to coalesce 32-bit sgpr copies to 64-bit copies */
      if (it->second.is_used == 0 &&
          it->second.def.getTemp().type() == RegType::sgpr && it->second.bytes == 4 &&
          !it->second.op.isConstant() && it->first % 2 == it->second.op.physReg() % 2) {

         PhysReg other_def_reg = PhysReg{it->first % 2 ? it->first - 1 : it->first + 1};
         PhysReg other_op_reg = PhysReg{it->first % 2 ? it->second.op.physReg() - 1 : it->second.op.physReg() + 1};
         std::map<PhysReg, copy_operation>::iterator other = copy_map.find(other_def_reg);

         if (other != copy_map.end() && !other->second.is_used && other->second.bytes == 4 &&
             other->second.op.physReg() == other_op_reg && !other->second.op.isConstant()) {
            std::map<PhysReg, copy_operation>::iterator to_erase = it->first % 2 ? it : other;
            it = it->first % 2 ? other : it;
            copy_map.erase(to_erase);
            it->second.bytes = 8;
         }
      }
      // TODO: try to coalesce subdword copies

      /* find portions where the target reg is not used as operand for any other copy */
      if (it->second.is_used) {
         if (it->second.op.isConstant()) {
            /* we have to skip constants until is_used=0 */
            ++it;
            continue;
         }

         unsigned has_zero_use_bytes = 0;
         for (unsigned i = 0; i < it->second.bytes; i++)
            has_zero_use_bytes |= (it->second.uses[i] == 0) << i;

         if (has_zero_use_bytes) {
            /* Skipping partial copying and doing a v_swap_b32 and then fixup
             * copies is usually beneficial for sub-dword copies, but if doing
             * a partial copy allows further copies, it should be done instead. */
            bool partial_copy = (has_zero_use_bytes == 0xf) || (has_zero_use_bytes == 0xf0);
            for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
               if (partial_copy)
                  break;
               for (uint16_t i = 0; i < copy.second.bytes; i++) {
                  /* distance might underflow */
                  unsigned distance = copy.first.reg_b + i - it->second.op.physReg().reg_b;
                  if (distance < it->second.bytes && copy.second.uses[i] == 1 &&
                      !it->second.uses[distance])
                     partial_copy = true;
               }
            }

            if (!partial_copy) {
               ++it;
               continue;
            }
         } else {
            /* full target reg is used: register swapping needed */
            ++it;
            continue;
         }
      }

      bool did_copy = do_copy(ctx, bld, it->second, &preserve_scc);

      std::pair<PhysReg, copy_operation> copy = *it;

      if (it->second.is_used == 0) {
         /* the target reg is not used as operand for any other copy, so we
          * copied to all of it */
         copy_map.erase(it);
         it = copy_map.begin();
      } else {
         /* we only performed some portions of this copy, so split it to only
          * leave the portions that still need to be done */
         copy_operation original = it->second; /* the map insertion below can overwrite this */
         copy_map.erase(it);
         for (unsigned offset = 0; offset < original.bytes;) {
            if (original.uses[offset] == 0) {
               offset++;
               continue;
            }
            Definition def;
            Operand op;
            split_copy(offset, &def, &op, original, false, 8);

            copy_operation copy = {op, def, def.bytes()};
            for (unsigned i = 0; i < copy.bytes; i++)
               copy.uses[i] = original.uses[i + offset];
            copy_map[def.physReg()] = copy;

            offset += def.bytes();
         }

         it = copy_map.begin();
      }

      /* Reduce the number of uses of the operand reg by one. Do this after
       * splitting the copy or removing it in case the copy writes to it's own
       * operand (for example, v[7:8] = v[8:9]) */
      if (did_copy && !copy.second.op.isConstant()) {
         for (std::pair<const PhysReg, copy_operation>& other : copy_map) {
            for (uint16_t i = 0; i < other.second.bytes; i++) {
               /* distance might underflow */
               unsigned distance = other.first.reg_b + i - copy.second.op.physReg().reg_b;
               if (distance < copy.second.bytes && !copy.second.uses[distance])
                  other.second.uses[i] -= 1;
            }
         }
      }
   }

   if (copy_map.empty())
      return;
   /* all target regs are needed as operand somewhere which means, all entries are part of a cycle */
   unsigned largest = 0;
   for (const std::pair<const PhysReg, copy_operation>& op : copy_map)
      largest = MAX2(largest, op.second.bytes);

   while (!copy_map.empty()) {

      /* Perform larger swaps first, because larger swaps can make other
       * swaps unnecessary. */
      auto it = copy_map.begin();
      for (auto it2 = copy_map.begin(); it2 != copy_map.end(); ++it2) {
         if (it2->second.bytes > it->second.bytes) {
            it = it2;
            if (it->second.bytes == largest)
               break;
         }
      }

      /* should already be done */
      assert(!it->second.op.isConstant());

      assert(it->second.op.isFixed());
      assert(it->second.def.regClass() == it->second.op.regClass());

      if (it->first == it->second.op.physReg()) {
         copy_map.erase(it);
         continue;
      }

      if (preserve_scc && it->second.def.getTemp().type() == RegType::sgpr)
         assert(!(it->second.def.physReg() == pi->scratch_sgpr));

      /* to resolve the cycle, we have to swap the src reg with the dst reg */
      copy_operation swap = it->second;

      /* if this is self-intersecting, we have to split it because
       * self-intersecting swaps don't make sense */
      PhysReg lower = swap.def.physReg();
      PhysReg higher = swap.op.physReg();
      if (lower.reg_b > higher.reg_b)
         std::swap(lower, higher);
      if (higher.reg_b - lower.reg_b < (int)swap.bytes) {
         unsigned offset = higher.reg_b - lower.reg_b;
         RegType type = swap.def.regClass().type();

         copy_operation middle;
         lower.reg_b += offset;
         higher.reg_b += offset;
         middle.bytes = swap.bytes - offset * 2;
         memcpy(middle.uses, swap.uses + offset, middle.bytes);
         middle.op = Operand(lower, RegClass::get(type, middle.bytes));
         middle.def = Definition(higher, RegClass::get(type, middle.bytes));
         copy_map[higher] = middle;

         copy_operation end;
         lower.reg_b += middle.bytes;
         higher.reg_b += middle.bytes;
         end.bytes = swap.bytes - (offset + middle.bytes);
         memcpy(end.uses, swap.uses + offset + middle.bytes, end.bytes);
         end.op = Operand(lower, RegClass::get(type, end.bytes));
         end.def = Definition(higher, RegClass::get(type, end.bytes));
         copy_map[higher] = end;

         memset(swap.uses + offset, 0, swap.bytes - offset);
         swap.bytes = offset;
      }

      do_swap(ctx, bld, swap, preserve_scc, pi);

      /* remove from map */
      copy_map.erase(it);

      /* change the operand reg of the target's uses and split uses if needed */
      target = copy_map.begin();
      uint32_t bytes_left = u_bit_consecutive(0, swap.bytes);
      for (; target != copy_map.end(); ++target) {
         if (target->second.op.physReg() == swap.def.physReg() && swap.bytes == target->second.bytes) {
            target->second.op.setFixed(swap.op.physReg());
            break;
         }

         uint32_t imask = get_intersection_mask(swap.def.physReg().reg_b, swap.bytes,
                                                target->second.op.physReg().reg_b, target->second.bytes);

         if (!imask)
            continue;

         assert(target->second.bytes < swap.bytes);

         int offset = (int)target->second.op.physReg().reg_b - (int)swap.def.physReg().reg_b;

         /* split and update the middle (the portion that reads the swap's
          * definition) to read the swap's operand instead */
         int target_op_end = target->second.op.physReg().reg_b + target->second.bytes;
         int swap_def_end = swap.def.physReg().reg_b + swap.bytes;
         int before_bytes = MAX2(-offset, 0);
         int after_bytes = MAX2(target_op_end - swap_def_end, 0);
         int middle_bytes = target->second.bytes - before_bytes - after_bytes;

         if (after_bytes) {
            unsigned after_offset = before_bytes + middle_bytes;
            assert(after_offset > 0);
            copy_operation copy;
            copy.bytes = after_bytes;
            memcpy(copy.uses, target->second.uses + after_offset, copy.bytes);
            RegClass rc = RegClass::get(target->second.op.regClass().type(), after_bytes);
            copy.op = Operand(target->second.op.physReg().advance(after_offset), rc);
            copy.def = Definition(target->second.def.physReg().advance(after_offset), rc);
            copy_map[copy.def.physReg()] = copy;
         }

         if (middle_bytes) {
            copy_operation copy;
            copy.bytes = middle_bytes;
            memcpy(copy.uses, target->second.uses + before_bytes, copy.bytes);
            RegClass rc = RegClass::get(target->second.op.regClass().type(), middle_bytes);
            copy.op = Operand(swap.op.physReg().advance(MAX2(offset, 0)), rc);
            copy.def = Definition(target->second.def.physReg().advance(before_bytes), rc);
            copy_map[copy.def.physReg()] = copy;
         }

         /* trim the original copy down to the before_bytes portion */
         target->second.bytes = before_bytes;
         RegClass rc = RegClass::get(target->second.op.regClass().type(), before_bytes);
         target->second.op = Operand(target->second.op.physReg(), rc);
         target->second.def = Definition(target->second.def.physReg(), rc);
         memset(target->second.uses + target->second.bytes, 0, 8 - target->second.bytes);

         /* break early since we know each byte of the swap's definition is used
          * at most once */
         bytes_left &= ~imask;
         if (!bytes_left)
            break;
      }
   }
}
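/* Entry point: walks every block and replaces pseudo instructions
 * (p_extract_vector, p_create_vector, p_split_vector, p_parallelcopy, ...)
 * with real hardware instructions, emitting fp-mode changes where a block's
 * float_mode differs from its predecessors'. */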
void lower_to_hw_instr(Program* program)
{
   Block *discard_block = NULL;

   for (size_t i = 0; i < program->blocks.size(); i++)
   {
      Block *block = &program->blocks[i];
      lower_context ctx;
      ctx.program = program;
      Builder bld(program, &ctx.instructions);

      bool set_mode = i == 0 && block->fp_mode.val != program->config->float_mode;
      for (unsigned pred : block->linear_preds) {
         if (program->blocks[pred].fp_mode.val != block->fp_mode.val) {
            set_mode = true;
            break;
         }
      }

      if (set_mode) {
         /* only allow changing modes at top-level blocks so this doesn't break
          * the "jump over empty blocks" optimization */
         assert(block->kind & block_kind_top_level);
         uint32_t mode = block->fp_mode.val;
         /* "((size - 1) << 11) | register" (MODE is encoded as register 1) */
         bld.sopk(aco_opcode::s_setreg_imm32_b32, Operand(mode), (7 << 11) | 1);
      }
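      /* For reference (added note): the s_setreg immediate packs the hwreg id
       * into bits [5:0], the bit offset into bits [10:6] and (size - 1) into
       * bits [15:11], so (7 << 11) | 1 writes 8 bits (the FP round and denorm
       * modes) starting at bit 0 of HW_REG_MODE, register 1. */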

      for (size_t j = 0; j < block->instructions.size(); j++) {
         aco_ptr<Instruction>& instr = block->instructions[j];
         aco_ptr<Instruction> mov;
         if (instr->format == Format::PSEUDO) {
            Pseudo_instruction *pi = (Pseudo_instruction*)instr.get();

            switch (instr->opcode)
            {
            case aco_opcode::p_extract_vector:
            {
               PhysReg reg = instr->operands[0].physReg();
               Definition& def = instr->definitions[0];
               reg.reg_b += instr->operands[1].constantValue() * def.bytes();

               if (reg == def.physReg())
                  break;

               RegClass op_rc = def.regClass().is_subdword() ? def.regClass() :
                                RegClass(instr->operands[0].getTemp().type(), def.size());
               std::map<PhysReg, copy_operation> copy_operations;
               copy_operations[def.physReg()] = {Operand(reg, op_rc), def, def.bytes()};
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
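            /* e.g. (hypothetical registers) extracting element 2 of a vector
             * at v[0-3] into v5 advances the source to v2 and reduces to a
             * single v2 -> v5 copy; if source and destination already match,
             * the pseudo instruction disappears entirely. */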
            case aco_opcode::p_create_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               PhysReg reg = instr->definitions[0].physReg();

               for (const Operand& op : instr->operands) {
                  if (op.isConstant()) {
                     const Definition def = Definition(reg, RegClass(instr->definitions[0].getTemp().type(), op.size()));
                     copy_operations[reg] = {op, def, op.bytes()};
                     reg.reg_b += op.bytes();
                     continue;
                  }
                  if (op.isUndefined()) {
                     // TODO: coalesce subdword copies if dst byte is 0
                     reg.reg_b += op.bytes();
                     continue;
                  }

                  RegClass rc_def = op.regClass().is_subdword() ? op.regClass() :
                                    RegClass(instr->definitions[0].getTemp().type(), op.size());
                  const Definition def = Definition(reg, rc_def);
                  copy_operations[def.physReg()] = {op, def, op.bytes()};
                  reg.reg_b += op.bytes();
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
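            /* p_create_vector packs its operands back-to-back starting at the
             * definition's first byte: constants and register operands become
             * copy_map entries, undefined operands just leave a hole. */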
            case aco_opcode::p_split_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               PhysReg reg = instr->operands[0].physReg();

               for (const Definition& def : instr->definitions) {
                  RegClass rc_op = def.regClass().is_subdword() ? def.regClass() :
                                   RegClass(instr->operands[0].getTemp().type(), def.size());
                  const Operand op = Operand(reg, rc_op);
                  copy_operations[def.physReg()] = {op, def, def.bytes()};
                  reg.reg_b += def.bytes();
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
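            /* the inverse of p_create_vector: consecutive slices of the
             * source vector are copied out into the individual definitions. */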
            case aco_opcode::p_parallelcopy:
            case aco_opcode::p_wqm:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               for (unsigned i = 0; i < instr->operands.size(); i++) {
                  assert(instr->definitions[i].bytes() == instr->operands[i].bytes());
                  copy_operations[instr->definitions[i].physReg()] = {instr->operands[i], instr->definitions[i], instr->operands[i].bytes()};
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
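            /* note that p_wqm needs no special treatment at this point and is
             * lowered exactly like a parallelcopy. */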
            case aco_opcode::p_exit_early_if:
            {
               /* don't bother with an early exit near the end of the program */
               if ((block->instructions.size() - 1 - j) <= 4 &&
                   block->instructions.back()->opcode == aco_opcode::s_endpgm) {
                  unsigned null_exp_dest = (ctx.program->stage & hw_fs) ? 9 /* NULL */ : V_008DFC_SQ_EXP_POS;
                  bool ignore_early_exit = true;

                  for (unsigned k = j + 1; k < block->instructions.size(); ++k) {
                     const aco_ptr<Instruction> &instr = block->instructions[k];
                     if (instr->opcode == aco_opcode::s_endpgm ||
                         instr->opcode == aco_opcode::p_logical_end)
                        continue;
                     else if (instr->opcode == aco_opcode::exp &&
                              static_cast<Export_instruction *>(instr.get())->dest == null_exp_dest)
                        continue;
                     else if (instr->opcode == aco_opcode::p_parallelcopy &&
                              instr->definitions[0].isFixed() &&
                              instr->definitions[0].physReg() == exec)
                        continue;

                     ignore_early_exit = false;
                  }

                  if (ignore_early_exit)
                     break;
               }

               if (!discard_block) {
                  discard_block = program->create_and_insert_block();
                  block = &program->blocks[i];

                  bld.reset(discard_block);
                  bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
                          0, V_008DFC_SQ_EXP_NULL, false, true, true);
                  if (program->wb_smem_l1_on_end)
                     bld.smem(aco_opcode::s_dcache_wb);
                  bld.sopp(aco_opcode::s_endpgm);

                  bld.reset(&ctx.instructions);
               }

               //TODO: exec can be zero here with block_kind_discard

               assert(instr->operands[0].physReg() == scc);
               bld.sopp(aco_opcode::s_cbranch_scc0, instr->operands[0], discard_block->index);

               discard_block->linear_preds.push_back(block->index);
               block->linear_succs.push_back(discard_block->index);
               break;
            }
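            /* a single discard block is shared by all early exits: it exports
             * null, optionally writes back the scalar L1 cache and ends the
             * program, and each p_exit_early_if merely branches there when
             * scc is zero. */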
            case aco_opcode::p_spill:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->operands[2].size(); i++)
                  bld.writelane(bld.def(v1, instr->operands[0].physReg()),
                                Operand(PhysReg{instr->operands[2].physReg() + i}, s1),
                                Operand(instr->operands[1].constantValue() + i),
                                instr->operands[0]);
               break;
            }
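            /* SGPRs are spilled into lanes of a linear VGPR: each dword of
             * the source is written to lane (offset + i) of the spill VGPR
             * via v_writelane, where the offset comes from operand 1. */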
            case aco_opcode::p_reload:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->definitions[0].size(); i++)
                  bld.readlane(bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                               instr->operands[0],
                               Operand(instr->operands[1].constantValue() + i));
               break;
            }
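            /* the mirror of p_spill: v_readlane moves each spilled dword from
             * its lane of the spill VGPR back into the destination SGPRs. */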
            case aco_opcode::p_as_uniform:
            {
               if (instr->operands[0].isConstant() || instr->operands[0].regClass().type() == RegType::sgpr) {
                  std::map<PhysReg, copy_operation> copy_operations;
                  copy_operations[instr->definitions[0].physReg()] = {instr->operands[0], instr->definitions[0], instr->definitions[0].bytes()};
                  handle_operands(copy_operations, &ctx, program->chip_class, pi);
               } else {
                  assert(instr->operands[0].regClass().type() == RegType::vgpr);
                  assert(instr->definitions[0].regClass().type() == RegType::sgpr);
                  assert(instr->operands[0].size() == instr->definitions[0].size());
                  for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                     bld.vop1(aco_opcode::v_readfirstlane_b32,
                              bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                              Operand(PhysReg{instr->operands[0].physReg() + i}, v1));
                  }
               }
               break;
            }
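            /* v_readfirstlane_b32 only transfers a single dword, so
             * multi-dword VGPR -> SGPR conversions are emitted one register
             * at a time. */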
            case aco_opcode::p_bpermute:
            {
               if (ctx.program->chip_class <= GFX7)
                  emit_gfx6_bpermute(program, instr, bld);
               else if (ctx.program->chip_class == GFX10 && ctx.program->wave_size == 64)
                  emit_gfx10_wave64_bpermute(program, instr, bld);
               else
                  unreachable("Current hardware supports ds_bpermute, don't emit p_bpermute.");
               break;
            }
            default:
               break;
            }
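            /* p_bpermute should only be emitted where ds_bpermute cannot do
             * the job directly: GFX6-7 lack the instruction, and on GFX10 in
             * wave64 mode ds_bpermute only works within half-waves. */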
         } else if (instr->format == Format::PSEUDO_BRANCH) {
            Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(instr.get());
            /* check if all blocks from current to target are empty */
            bool can_remove = block->index < branch->target[0];
            for (unsigned i = block->index + 1; can_remove && i < branch->target[0]; i++) {
               if (program->blocks[i].instructions.size())
                  can_remove = false;
            }
            if (can_remove)
               continue;
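
            /* this implements the "jump over empty blocks" optimization
             * referenced earlier: a forward branch whose intervening blocks
             * are all empty simply falls through, so no branch is emitted. */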
            switch (instr->opcode) {
            case aco_opcode::p_branch:
               assert(block->linear_succs[0] == branch->target[0]);
               bld.sopp(aco_opcode::s_branch, branch->target[0]);
               break;
            case aco_opcode::p_cbranch_nz:
               assert(block->linear_succs[1] == branch->target[0]);
               if (branch->operands[0].physReg() == exec)
                  bld.sopp(aco_opcode::s_cbranch_execnz, branch->target[0]);
               else if (branch->operands[0].physReg() == vcc)
                  bld.sopp(aco_opcode::s_cbranch_vccnz, branch->target[0]);
               else {
                  assert(branch->operands[0].physReg() == scc);
                  bld.sopp(aco_opcode::s_cbranch_scc1, branch->target[0]);
               }
               break;
            case aco_opcode::p_cbranch_z:
               assert(block->linear_succs[1] == branch->target[0]);
               if (branch->operands[0].physReg() == exec)
                  bld.sopp(aco_opcode::s_cbranch_execz, branch->target[0]);
               else if (branch->operands[0].physReg() == vcc)
                  bld.sopp(aco_opcode::s_cbranch_vccz, branch->target[0]);
               else {
                  assert(branch->operands[0].physReg() == scc);
                  bld.sopp(aco_opcode::s_cbranch_scc0, branch->target[0]);
               }
               break;
            default:
               unreachable("Unknown Pseudo branch instruction!");
            }
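            /* the condition register picks the hardware branch: exec maps to
             * s_cbranch_exec*, vcc to s_cbranch_vcc*, and scc (the only other
             * legal condition here, as the asserts show) to s_cbranch_scc*. */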
         } else if (instr->format == Format::PSEUDO_REDUCTION) {
            Pseudo_reduction_instruction* reduce = static_cast<Pseudo_reduction_instruction*>(instr.get());
            emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size,
                           reduce->operands[1].physReg(), // tmp
                           reduce->definitions[1].physReg(), // stmp
                           reduce->operands[2].physReg(), // vtmp
                           reduce->definitions[2].physReg(), // sitmp
                           reduce->operands[0], reduce->definitions[0]);
         } else {
            ctx.instructions.emplace_back(std::move(instr));
         }
      }

      block->instructions.swap(ctx.instructions);
   }
}