/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
 *
 */
#include <map>

#include "aco_ir.h"
#include "aco_builder.h"
#include "util/u_math.h"
#include "sid.h"
#include "vulkan/radv_shader.h"


namespace aco {
struct lower_context {
   Program *program;
   std::vector<aco_ptr<Instruction>> instructions;
};
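
/* Maps a reduction operation to the VALU opcode that performs one combine
 * step on a single lane, or returns aco_opcode::num_opcodes as a sentinel
 * when no such single instruction exists (most 64-bit operations), in which
 * case the callers expand the step into a multi-instruction sequence. */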
aco_opcode get_reduce_opcode(chip_class chip, ReduceOp op) {
   switch (op) {
   case iadd8:
   case iadd16: return aco_opcode::v_add_u16;
   case imul8:
   case imul16: return aco_opcode::v_mul_lo_u16;
   case fadd16: return aco_opcode::v_add_f16;
   case fmul16: return aco_opcode::v_mul_f16;
   case imax8:
   case imax16: return aco_opcode::v_max_i16;
   case imin8:
   case imin16: return aco_opcode::v_min_i16;
   case umin8:
   case umin16: return aco_opcode::v_min_u16;
   case umax8:
   case umax16: return aco_opcode::v_max_u16;
   case fmin16: return aco_opcode::v_min_f16;
   case fmax16: return aco_opcode::v_max_f16;
   case iadd32: return chip >= GFX9 ? aco_opcode::v_add_u32 : aco_opcode::v_add_co_u32;
   case imul32: return aco_opcode::v_mul_lo_u32;
   case fadd32: return aco_opcode::v_add_f32;
   case fmul32: return aco_opcode::v_mul_f32;
   case imax32: return aco_opcode::v_max_i32;
   case imin32: return aco_opcode::v_min_i32;
   case umin32: return aco_opcode::v_min_u32;
   case umax32: return aco_opcode::v_max_u32;
   case fmin32: return aco_opcode::v_min_f32;
   case fmax32: return aco_opcode::v_max_f32;
   case iand8:
   case iand16:
   case iand32: return aco_opcode::v_and_b32;
   case ixor8:
   case ixor16:
   case ixor32: return aco_opcode::v_xor_b32;
   case ior8:
   case ior16:
   case ior32: return aco_opcode::v_or_b32;
   case iadd64: return aco_opcode::num_opcodes;
   case imul64: return aco_opcode::num_opcodes;
   case fadd64: return aco_opcode::v_add_f64;
   case fmul64: return aco_opcode::v_mul_f64;
   case imin64: return aco_opcode::num_opcodes;
   case imax64: return aco_opcode::num_opcodes;
   case umin64: return aco_opcode::num_opcodes;
   case umax64: return aco_opcode::num_opcodes;
   case fmin64: return aco_opcode::v_min_f64;
   case fmax64: return aco_opcode::v_max_f64;
   case iand64: return aco_opcode::num_opcodes;
   case ior64: return aco_opcode::num_opcodes;
   case ixor64: return aco_opcode::num_opcodes;
   default: return aco_opcode::num_opcodes;
   }
}
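
/* Emits a 32-bit add through Builder::vadd32. This pass runs after register
 * allocation, so if the created instruction ends up with a second (carry-out)
 * definition, that definition has to be pinned to a fixed register (vcc). */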
void emit_vadd32(Builder& bld, Definition def, Operand src0, Operand src1)
{
   Instruction *instr = bld.vadd32(def, src0, src1, false, Operand(s2), true);
   if (instr->definitions.size() >= 2) {
      assert(instr->definitions[1].regClass() == bld.lm);
      instr->definitions[1].setFixed(vcc);
   }
}
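
/* Emits one step of a 64-bit reduction whose first source is read through a
 * DPP lane-shuffle. The 64-bit operation is scalarized into 32-bit halves;
 * where the required instruction is VOP3-encoded (and thus cannot take DPP
 * directly), the shuffled source is first materialized into vtmp with
 * v_mov_b32_dpp, optionally preloading vtmp with the identity so that
 * inactive lanes contribute a neutral value. */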
void emit_int64_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
                       PhysReg vtmp_reg, ReduceOp op,
                       unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
                       Operand *identity=NULL)
{
   Builder bld(ctx->program, &ctx->instructions);
   Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
   Definition vtmp_def[] = {Definition(vtmp_reg, v1), Definition(PhysReg{vtmp_reg+1}, v1)};
   Operand src0[] = {Operand(src0_reg, v1), Operand(PhysReg{src0_reg+1}, v1)};
   Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
   Operand src1_64 = Operand(src1_reg, v2);
   Operand vtmp_op[] = {Operand(vtmp_reg, v1), Operand(PhysReg{vtmp_reg+1}, v1)};
   Operand vtmp_op64 = Operand(vtmp_reg, v2);
   if (op == iadd64) {
      if (ctx->program->chip_class >= GFX10) {
         if (identity)
            bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
         bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                      dpp_ctrl, row_mask, bank_mask, bound_ctrl);
         bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), vtmp_op[0], src1[0]);
      } else {
         bld.vop2_dpp(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0],
                      dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      }
      bld.vop2_dpp(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == iand64) {
      bld.vop2_dpp(aco_opcode::v_and_b32, dst[0], src0[0], src1[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop2_dpp(aco_opcode::v_and_b32, dst[1], src0[1], src1[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == ior64) {
      bld.vop2_dpp(aco_opcode::v_or_b32, dst[0], src0[0], src1[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop2_dpp(aco_opcode::v_or_b32, dst[1], src0[1], src1[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == ixor64) {
      bld.vop2_dpp(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop2_dpp(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
      aco_opcode cmp = aco_opcode::num_opcodes;
      switch (op) {
      case umin64:
         cmp = aco_opcode::v_cmp_gt_u64;
         break;
      case umax64:
         cmp = aco_opcode::v_cmp_lt_u64;
         break;
      case imin64:
         cmp = aco_opcode::v_cmp_gt_i64;
         break;
      case imax64:
         cmp = aco_opcode::v_cmp_lt_i64;
         break;
      default:
         break;
      }

      if (identity) {
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[1], identity[1]);
      }
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[1], src0[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);

      bld.vopc(cmp, bld.def(bld.lm, vcc), vtmp_op64, src1_64);
      bld.vop2(aco_opcode::v_cndmask_b32, dst[0], vtmp_op[0], src1[0], Operand(vcc, bld.lm));
      bld.vop2(aco_opcode::v_cndmask_b32, dst[1], vtmp_op[1], src1[1], Operand(vcc, bld.lm));
   } else if (op == imul64) {
      /* t4 = dpp(x_hi)
       * t1 = umul_lo(t4, y_lo)
       * t3 = dpp(x_lo)
       * t0 = umul_lo(t3, y_hi)
       * t2 = iadd(t0, t1)
       * t5 = umul_hi(t3, y_lo)
       * res_hi = iadd(t2, t5)
       * res_lo = umul_lo(t3, y_lo)
       * Requires that res_hi != src0[0] and res_hi != src1[0]
       * and that vtmp[0] != res_hi.
       */
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[1]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[1], vtmp_op[0], src1[0]);
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[0], vtmp_op[0], src1[1]);
      emit_vadd32(bld, vtmp_def[1], vtmp_op[0], vtmp_op[1]);
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_hi_u32, vtmp_def[0], vtmp_op[0], src1[0]);
      emit_vadd32(bld, dst[1], vtmp_op[1], vtmp_op[0]);
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], vtmp_op[0], src1[0]);
   }
}
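
/* Non-DPP counterpart of emit_int64_dpp_op: one combine step of a 64-bit
 * reduction with both sources in plain registers. The first source may be an
 * SGPR pair (e.g. a value read back with v_readlane), in which case it is
 * moved into vtmp first whenever the operation cannot take SGPR operands. */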
void emit_int64_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp, ReduceOp op)
{
   Builder bld(ctx->program, &ctx->instructions);
   Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
   RegClass src0_rc = src0_reg.reg() >= 256 ? v1 : s1;
   Operand src0[] = {Operand(src0_reg, src0_rc), Operand(PhysReg{src0_reg+1}, src0_rc)};
   Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
   Operand src0_64 = Operand(src0_reg, src0_reg.reg() >= 256 ? v2 : s2);
   Operand src1_64 = Operand(src1_reg, v2);

   if (src0_rc == s1 &&
       (op == imul64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)) {
      assert(vtmp.reg() != 0);
      bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), src0[0]);
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
      src0_reg = vtmp;
      src0[0] = Operand(vtmp, v1);
      src0[1] = Operand(PhysReg{vtmp+1}, v1);
      src0_64 = Operand(vtmp, v2);
   } else if (src0_rc == s1 && op == iadd64) {
      assert(vtmp.reg() != 0);
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
      src0[1] = Operand(PhysReg{vtmp+1}, v1);
   }

   if (op == iadd64) {
      if (ctx->program->chip_class >= GFX10) {
         bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
      } else {
         bld.vop2(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
      }
      bld.vop2(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm));
   } else if (op == iand64) {
      bld.vop2(aco_opcode::v_and_b32, dst[0], src0[0], src1[0]);
      bld.vop2(aco_opcode::v_and_b32, dst[1], src0[1], src1[1]);
   } else if (op == ior64) {
      bld.vop2(aco_opcode::v_or_b32, dst[0], src0[0], src1[0]);
      bld.vop2(aco_opcode::v_or_b32, dst[1], src0[1], src1[1]);
   } else if (op == ixor64) {
      bld.vop2(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0]);
      bld.vop2(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1]);
   } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
      aco_opcode cmp = aco_opcode::num_opcodes;
      switch (op) {
      case umin64:
         cmp = aco_opcode::v_cmp_gt_u64;
         break;
      case umax64:
         cmp = aco_opcode::v_cmp_lt_u64;
         break;
      case imin64:
         cmp = aco_opcode::v_cmp_gt_i64;
         break;
      case imax64:
         cmp = aco_opcode::v_cmp_lt_i64;
         break;
      default:
         break;
      }

      bld.vopc(cmp, bld.def(bld.lm, vcc), src0_64, src1_64);
      bld.vop2(aco_opcode::v_cndmask_b32, dst[0], src0[0], src1[0], Operand(vcc, bld.lm));
      bld.vop2(aco_opcode::v_cndmask_b32, dst[1], src0[1], src1[1], Operand(vcc, bld.lm));
   } else if (op == imul64) {
      if (src1_reg == dst_reg) {
         /* it's fine if src0==dst but not if src1==dst */
         std::swap(src0_reg, src1_reg);
         std::swap(src0[0], src1[0]);
         std::swap(src0[1], src1[1]);
         std::swap(src0_64, src1_64);
      }
      assert(!(src0_reg == src1_reg));
      /* t1 = umul_lo(x_hi, y_lo)
       * t0 = umul_lo(x_lo, y_hi)
       * t2 = iadd(t0, t1)
       * t5 = umul_hi(x_lo, y_lo)
       * res_hi = iadd(t2, t5)
       * res_lo = umul_lo(x_lo, y_lo)
       * assumes that it's ok to modify x_hi/y_hi, since we might not have vtmp
       */
      Definition tmp0_def(PhysReg{src0_reg+1}, v1);
      Definition tmp1_def(PhysReg{src1_reg+1}, v1);
      Operand tmp0_op = src0[1];
      Operand tmp1_op = src1[1];
      bld.vop3(aco_opcode::v_mul_lo_u32, tmp0_def, src0[1], src1[0]);
      bld.vop3(aco_opcode::v_mul_lo_u32, tmp1_def, src0[0], src1[1]);
      emit_vadd32(bld, tmp0_def, tmp1_op, tmp0_op);
      bld.vop3(aco_opcode::v_mul_hi_u32, tmp1_def, src0[0], src1[0]);
      emit_vadd32(bld, dst[1], tmp0_op, tmp1_op);
      bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], src0[0], src1[0]);
   }
}
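
/* Emits dst = op(shuffle(src0), src1) for one reduction step. VOP2-encodable
 * operations apply the DPP lane-shuffle directly; VOP3 operations (imul32 and
 * all 2-dword ops) cannot carry DPP, so the shuffled source is staged through
 * vtmp, optionally preloaded with the identity for inactive lanes. */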
void emit_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
                 PhysReg vtmp, ReduceOp op, unsigned size,
                 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
                 Operand *identity=NULL) /* for VOP3 with sparse writes */
{
   Builder bld(ctx->program, &ctx->instructions);
   RegClass rc = RegClass(RegType::vgpr, size);
   Definition dst(dst_reg, rc);
   Operand src0(src0_reg, rc);
   Operand src1(src1_reg, rc);

   aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
   bool vop3 = op == imul32 || size == 2;

   if (!vop3) {
      if (opcode == aco_opcode::v_add_co_u32)
         bld.vop2_dpp(opcode, dst, bld.def(bld.lm, vcc), src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      else
         bld.vop2_dpp(opcode, dst, src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      return;
   }

   if (opcode == aco_opcode::num_opcodes) {
      emit_int64_dpp_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op,
                        dpp_ctrl, row_mask, bank_mask, bound_ctrl, identity);
      return;
   }

   if (identity)
      bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), identity[0]);
   if (identity && size >= 2)
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), identity[1]);

   for (unsigned i = 0; i < size; i++)
      bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{src0_reg+i}, v1),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);

   bld.vop3(opcode, dst, Operand(vtmp, rc), src1);
}
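
/* Plain combine step dst = op(src0, src1) without any lane shuffle, used
 * after ds_swizzle/permlane shuffles and for folding read-back scalars;
 * src0 may be an SGPR. */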
void emit_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
             PhysReg vtmp, ReduceOp op, unsigned size)
{
   Builder bld(ctx->program, &ctx->instructions);
   RegClass rc = RegClass(RegType::vgpr, size);
   Definition dst(dst_reg, rc);
   Operand src0(src0_reg, RegClass(src0_reg.reg() >= 256 ? RegType::vgpr : RegType::sgpr, size));
   Operand src1(src1_reg, rc);

   aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
   bool vop3 = op == imul32 || size == 2;

   if (opcode == aco_opcode::num_opcodes) {
      emit_int64_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op);
      return;
   }

   if (vop3) {
      bld.vop3(opcode, dst, src0, src1);
   } else if (opcode == aco_opcode::v_add_co_u32) {
      bld.vop2(opcode, dst, bld.def(bld.lm, vcc), src0, src1);
   } else {
      bld.vop2(opcode, dst, src0, src1);
   }
}
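
/* Lane-shuffles a multi-dword value by applying the same DPP control to each
 * of its dwords with v_mov_b32_dpp. */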
void emit_dpp_mov(lower_context *ctx, PhysReg dst, PhysReg src0, unsigned size,
                  unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl)
{
   Builder bld(ctx->program, &ctx->instructions);
   for (unsigned i = 0; i < size; i++) {
      bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{dst+i}, v1), Operand(PhysReg{src0+i}, v1),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   }
}
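
/* Returns the reduction's identity element as a 32-bit value; for 64-bit
 * operations, idx selects the low (0) or high (1) dword. Inactive and
 * shifted-in lanes are seeded with this value so they can't disturb the
 * result. */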
uint32_t get_reduction_identity(ReduceOp op, unsigned idx)
{
   switch (op) {
   case iadd8:
   case iadd16:
   case iadd32:
   case iadd64:
   case fadd16:
   case fadd32:
   case fadd64:
   case ior8:
   case ior16:
   case ior32:
   case ior64:
   case ixor8:
   case ixor16:
   case ixor32:
   case ixor64:
   case umax8:
   case umax16:
   case umax32:
   case umax64:
      return 0;
   case imul8:
   case imul16:
   case imul32:
   case imul64:
      return idx ? 0 : 1;
   case fmul16:
      return 0x3c00u; /* 1.0 */
   case fmul32:
      return 0x3f800000u; /* 1.0 */
   case fmul64:
      return idx ? 0x3ff00000u : 0u; /* 1.0 */
   case imin8:
      return INT8_MAX;
   case imin16:
      return INT16_MAX;
   case imin32:
      return INT32_MAX;
   case imin64:
      return idx ? 0x7fffffffu : 0xffffffffu;
   case imax8:
      return INT8_MIN;
   case imax16:
      return INT16_MIN;
   case imax32:
      return INT32_MIN;
   case imax64:
      return idx ? 0x80000000u : 0;
   case umin8:
   case umin16:
   case umin32:
   case umin64:
   case iand8:
   case iand16:
   case iand32:
   case iand64:
      return 0xffffffffu;
   case fmin16:
      return 0x7c00u; /* infinity */
   case fmin32:
      return 0x7f800000u; /* infinity */
   case fmin64:
      return idx ? 0x7ff00000u : 0u; /* infinity */
   case fmax16:
      return 0xfc00u; /* negative infinity */
   case fmax32:
      return 0xff800000u; /* negative infinity */
   case fmax64:
      return idx ? 0xfff00000u : 0u; /* negative infinity */
   default:
      unreachable("Invalid reduction operation");
      break;
   }
   return 0;
}
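
/* Shuffles size dwords across lanes with ds_swizzle_b32 (used where DPP is
 * unavailable or insufficient); bit 15 of ds_pattern selects quad-perm mode. */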
void emit_ds_swizzle(Builder bld, PhysReg dst, PhysReg src, unsigned size, unsigned ds_pattern)
{
   for (unsigned i = 0; i < size; i++) {
      bld.ds(aco_opcode::ds_swizzle_b32, Definition(PhysReg{dst+i}, v1),
             Operand(PhysReg{src+i}, v1), ds_pattern);
   }
}
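
/* Lowers p_reduce, p_inclusive_scan and p_exclusive_scan. The source is
 * copied to tmp with inactive lanes set to the identity, combined step by
 * step with lane-shuffled copies of itself (DPP on GFX8+, ds_swizzle on
 * GFX6/7, v_permlanex16/readlane where DPP can't reach), and finally moved
 * to dst, via v_readlane of the last lane for an SGPR destination. */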
void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsigned cluster_size, PhysReg tmp,
                    PhysReg stmp, PhysReg vtmp, PhysReg sitmp, Operand src, Definition dst)
{
   assert(cluster_size == ctx->program->wave_size || op == aco_opcode::p_reduce);
   assert(cluster_size <= ctx->program->wave_size);

   Builder bld(ctx->program, &ctx->instructions);

   Operand identity[2];
   identity[0] = Operand(get_reduction_identity(reduce_op, 0));
   identity[1] = Operand(get_reduction_identity(reduce_op, 1));
   Operand vcndmask_identity[2] = {identity[0], identity[1]};

   /* First, copy the source to tmp and set inactive lanes to the identity */
   bld.sop1(Builder::s_or_saveexec, Definition(stmp, bld.lm), Definition(scc, s1), Definition(exec, bld.lm), Operand(UINT64_MAX), Operand(exec, bld.lm));

   for (unsigned i = 0; i < src.size(); i++) {
      /* p_exclusive_scan needs it to be a sgpr or inline constant for the v_writelane_b32
       * except on GFX10, where v_writelane_b32 can take a literal. */
      if (identity[i].isLiteral() && op == aco_opcode::p_exclusive_scan && ctx->program->chip_class < GFX10) {
         bld.sop1(aco_opcode::s_mov_b32, Definition(PhysReg{sitmp+i}, s1), identity[i]);
         identity[i] = Operand(PhysReg{sitmp+i}, s1);

         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      } else if (identity[i].isLiteral()) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      }
   }

   for (unsigned i = 0; i < src.size(); i++) {
      bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(PhysReg{tmp + i}, v1),
                   vcndmask_identity[i], Operand(PhysReg{src.physReg() + i}, v1),
                   Operand(stmp, bld.lm));
   }

   if (src.regClass() == v1b) {
      aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
      sdwa->operands[0] = Operand(PhysReg{tmp}, v1);
      sdwa->definitions[0] = Definition(PhysReg{tmp}, v1);
      if (reduce_op == imin8 || reduce_op == imax8)
         sdwa->sel[0] = sdwa_sbyte;
      else
         sdwa->sel[0] = sdwa_ubyte;
      sdwa->dst_sel = sdwa_udword;
      bld.insert(std::move(sdwa));
   }

   bool reduction_needs_last_op = false;
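   /* Each mode below repeatedly combines every lane with a lane-shuffled copy
    * of the whole wave, doubling the number of lanes folded together on every
    * step; breaking out of a case early leaves per-cluster results. */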
   switch (op) {
   case aco_opcode::p_reduce:
      if (cluster_size == 1) break;

      if (ctx->program->chip_class <= GFX7) {
         reduction_needs_last_op = true;
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(1, 0, 3, 2));
         if (cluster_size == 2) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(2, 3, 0, 1));
         if (cluster_size == 4) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x04));
         if (cluster_size == 8) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x08));
         if (cluster_size == 16) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
         if (cluster_size == 32) break;
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         for (unsigned i = 0; i < src.size(); i++)
            bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp + i}, v1), Operand(0u));
         // TODO: it would be more effective to do the last reduction step on SALU
         emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
         reduction_needs_last_op = false;
         break;
      }

      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(1, 0, 3, 2), 0xf, 0xf, false);
      if (cluster_size == 2) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(2, 3, 0, 1), 0xf, 0xf, false);
      if (cluster_size == 4) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_half_mirror, 0xf, 0xf, false);
      if (cluster_size == 8) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_mirror, 0xf, 0xf, false);
      if (cluster_size == 16) break;

      if (ctx->program->chip_class >= GFX10) {
         /* GFX10+ doesn't support row_bcast15 and row_bcast31 */
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), Operand(0u), Operand(0u));

         if (cluster_size == 32) {
            reduction_needs_last_op = true;
            break;
         }

         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
         for (unsigned i = 0; i < src.size(); i++)
            bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(0u));
         // TODO: it would be more effective to do the last reduction step on SALU
         emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
         break;
      }

      if (cluster_size == 32) {
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
         reduction_needs_last_op = true;
         break;
      }
      assert(cluster_size == 64);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast15, 0xa, 0xf, false);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast31, 0xc, 0xf, false);
      break;
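   /* The exclusive scan shifts every value to the next higher lane, writes
    * the identity into lane 0, and then falls through to the inclusive scan. */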
   case aco_opcode::p_exclusive_scan:
      if (ctx->program->chip_class >= GFX10) { /* gfx10 doesn't support wf_sr1, so emulate it */
         /* shift rows right */
         emit_dpp_mov(ctx, vtmp, tmp, src.size(), dpp_row_sr(1), 0xf, 0xf, true);

         /* fill in the gaps in rows 1 and 3 */
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10000u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0x10000u));
         for (unsigned i = 0; i < src.size(); i++) {
            Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
                                         Definition(PhysReg{vtmp+i}, v1),
                                         Operand(PhysReg{tmp+i}, v1),
                                         Operand(0xffffffffu), Operand(0xffffffffu)).instr;
            static_cast<VOP3A_instruction*>(perm)->opsel = 1; /* FI (Fetch Inactive) */
         }
         bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(UINT64_MAX));

         if (ctx->program->wave_size == 64) {
            /* fill in the gap in row 2 */
            for (unsigned i = 0; i < src.size(); i++) {
               bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
               bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
            }
         }
         std::swap(tmp, vtmp);
      } else if (ctx->program->chip_class >= GFX8) {
         emit_dpp_mov(ctx, tmp, tmp, src.size(), dpp_wf_sr1, 0xf, 0xf, true);
      } else {
         // TODO: use LDS on CS with a single write and shifted read
         /* wavefront shift_right by 1 on SI/CI */
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(0, 0, 1, 2));
         emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x07)); /* mirror(8) */
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10101010u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x08)); /* swap(8) */
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x01000100u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x10)); /* swap(16) */
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(1u), Operand(16u));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(1u), Operand(16u));
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         for (unsigned i = 0; i < src.size(); i++) {
            bld.writelane(Definition(PhysReg{vtmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{vtmp+i}, v1));
            bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(0u));
            bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
            identity[i] = Operand(0u); /* prevent further uses of identity */
         }
         std::swap(tmp, vtmp);
      }

      for (unsigned i = 0; i < src.size(); i++) {
         if (!identity[i].isConstant() || identity[i].constantValue()) { /* bound_ctrl should take care of this otherwise */
            if (ctx->program->chip_class < GFX10)
               assert((identity[i].isConstant() && !identity[i].isLiteral()) || identity[i].physReg() == PhysReg{sitmp+i});
            bld.writelane(Definition(PhysReg{tmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{tmp+i}, v1));
         }
      }
      /* fall through */
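   /* Inclusive scan: combine each lane with the lanes 1, 2, 4 and 8 below it
    * within each 16-lane row, then stitch the rows together with row
    * broadcasts (or permlane/readlane on GFX10), so every lane ends up with
    * the reduction of all lanes at or below it. */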
   case aco_opcode::p_inclusive_scan:
      assert(cluster_size == ctx->program->wave_size);
      if (ctx->program->chip_class <= GFX7) {
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1e, 0x00, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xAAAAAAAAu));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1c, 0x01, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xCCCCCCCCu));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x18, 0x03, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xF0F0F0F0u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x10, 0x07, 0x00));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xFF00FF00u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
         emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x00, 0x0f, 0x00));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(16u), Operand(16u));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(16u), Operand(16u));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         for (unsigned i = 0; i < src.size(); i++)
            bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
         bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
         emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
         break;
      }

      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(1), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(2), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(4), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(8), 0xf, 0xf, false, identity);
      if (ctx->program->chip_class >= GFX10) {
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(16u), Operand(16u));
         bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(16u), Operand(16u));
         for (unsigned i = 0; i < src.size(); i++) {
            Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
                                         Definition(PhysReg{vtmp+i}, v1),
                                         Operand(PhysReg{tmp+i}, v1),
                                         Operand(0xffffffffu), Operand(0xffffffffu)).instr;
            static_cast<VOP3A_instruction*>(perm)->opsel = 1; /* FI (Fetch Inactive) */
         }
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         if (ctx->program->wave_size == 64) {
            bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
            for (unsigned i = 0; i < src.size(); i++)
               bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
            emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
         }
      } else {
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast15, 0xa, 0xf, false, identity);
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast31, 0xc, 0xf, false, identity);
      }
      break;
   default:
      unreachable("Invalid reduction mode");
   }
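
   /* Finalization: run any deferred last combine step, restore the saved
    * exec mask, and move the result to dst, reading back the last lane when
    * the destination is an SGPR. */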
   if (op == aco_opcode::p_reduce) {
      if (reduction_needs_last_op && dst.regClass().type() == RegType::vgpr) {
         bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
         emit_op(ctx, dst.physReg(), tmp, vtmp, PhysReg{0}, reduce_op, src.size());
         return;
      }

      if (reduction_needs_last_op)
         emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
   }

   /* restore exec */
   bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));

   if (dst.regClass().type() == RegType::sgpr) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.readlane(Definition(PhysReg{dst.physReg() + k}, s1),
                      Operand(PhysReg{tmp + k}, v1), Operand(ctx->program->wave_size - 1));
      }
   } else if (dst.physReg() != tmp) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{dst.physReg() + k}, v1),
                  Operand(PhysReg{tmp + k}, v1));
      }
   }
}
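
/* One pending copy of the parallel-copy lowering below; uses[] counts, per
 * destination byte, how many other pending copies still read that byte. */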
struct copy_operation {
   Operand op;
   Definition def;
   unsigned bytes;
   union {
      uint8_t uses[8];
      uint64_t is_used = 0;
   };
};
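
/* Carves the largest piece possible out of a copy at the given byte offset:
 * power-of-two sized, properly aligned, at most max_size bytes (capped at 4
 * for VGPRs, which have no 8-byte moves), and, unless ignore_uses is set,
 * not mixing bytes that still have pending readers with bytes that don't. */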
void split_copy(unsigned offset, Definition *def, Operand *op, const copy_operation& src, bool ignore_uses, unsigned max_size)
{
   PhysReg def_reg = src.def.physReg();
   PhysReg op_reg = src.op.physReg();
   def_reg.reg_b += offset;
   op_reg.reg_b += offset;

   max_size = MIN2(max_size, src.def.regClass().type() == RegType::vgpr ? 4 : 8);

   /* make sure the size is a power of two and reg % bytes == 0 */
   unsigned bytes = 1;
   for (; bytes <= max_size; bytes *= 2) {
      unsigned next = bytes * 2u;
      bool can_increase = def_reg.reg_b % next == 0 &&
                          offset + next <= src.bytes && next <= max_size;
      if (!src.op.isConstant() && can_increase)
         can_increase = op_reg.reg_b % next == 0;
      for (unsigned i = 0; !ignore_uses && can_increase && (i < bytes); i++)
         can_increase = (src.uses[offset + bytes + i] == 0) == (src.uses[offset] == 0);
      if (!can_increase)
         break;
   }

   RegClass def_cls = bytes % 4 == 0 ? RegClass(src.def.regClass().type(), bytes / 4u) :
                      RegClass(src.def.regClass().type(), bytes).as_subdword();
   *def = Definition(src.def.tempId(), def_reg, def_cls);
   if (src.op.isConstant()) {
      assert(offset == 0 || (offset == 4 && src.op.bytes() == 8));
      if (src.op.bytes() == 8 && bytes == 4)
         *op = Operand(uint32_t(src.op.constantValue64() >> (offset * 8u)));
      else
         *op = src.op;
   } else {
      RegClass op_cls = bytes % 4 == 0 ? RegClass(src.op.regClass().type(), bytes / 4u) :
                        RegClass(src.op.regClass().type(), bytes).as_subdword();
      *op = Operand(op_reg, op_cls);
      op->setTemp(Temp(src.op.tempId(), op_cls));
   }
}
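
/* Returns a bitmask describing which bytes of region a [a_start, a_start +
 * a_size) are also covered by region b [b_start, b_start + b_size); bit i
 * stands for byte a_start + i. */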
uint32_t get_intersection_mask(int a_start, int a_size,
                               int b_start, int b_size)
{
   int intersection_start = MAX2(b_start - a_start, 0);
   int intersection_end = MAX2(b_start + b_size - a_start, 0);
   if (intersection_start >= a_size || intersection_end == 0)
      return 0;

   uint32_t mask = u_bit_consecutive(0, a_size);
   return u_bit_consecutive(intersection_start, intersection_end - intersection_start) & mask;
}
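
/* Emits moves for every byte of the copy whose destination has no remaining
 * readers, picking the widest legal instruction per piece; returns whether
 * anything was emitted. SCC has no plain move, so writes to it go through
 * s_cmp_lg_i32. */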
bool do_copy(lower_context* ctx, Builder& bld, const copy_operation& copy, bool *preserve_scc)
{
   bool did_copy = false;
   for (unsigned offset = 0; offset < copy.bytes;) {
      if (copy.uses[offset]) {
         offset++;
         continue;
      }

      Definition def;
      Operand op;
      split_copy(offset, &def, &op, copy, false, 8);

      if (def.physReg() == scc) {
         bld.sopc(aco_opcode::s_cmp_lg_i32, def, op, Operand(0u));
         *preserve_scc = true;
      } else if (def.bytes() == 8 && def.getTemp().type() == RegType::sgpr) {
         bld.sop1(aco_opcode::s_mov_b64, def, Operand(op.physReg(), s2));
      } else {
         bld.copy(def, op);
      }
      ctx->program->statistics[statistic_copies]++;

      did_copy = true;
      offset += def.bytes();
   }
   return did_copy;
}
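
/* Swaps the operand and definition registers of a copy, using v_swap_b32
 * where available and three-instruction xor sequences otherwise. 3-byte swaps
 * are widened to a dword swap plus a one-byte fixup, and bytes that were
 * swapped along but shouldn't have been are copied back at the end. */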
void do_swap(lower_context *ctx, Builder& bld, const copy_operation& copy, bool preserve_scc, Pseudo_instruction *pi)
{
   unsigned offset = 0;

   if (copy.bytes == 3 && (copy.def.physReg().reg_b % 4 <= 1) &&
       (copy.def.physReg().reg_b % 4) == (copy.op.physReg().reg_b % 4)) {
      /* instead of doing a 2-byte and 1-byte swap, do a 4-byte swap and then fixup with a 1-byte swap */
      PhysReg op = copy.op.physReg();
      PhysReg def = copy.def.physReg();
      op.reg_b &= ~0x3;
      def.reg_b &= ~0x3;

      copy_operation tmp;
      tmp.op = Operand(op, v1);
      tmp.def = Definition(def, v1);
      tmp.bytes = 4;
      memset(tmp.uses, 1, 4);
      do_swap(ctx, bld, tmp, preserve_scc, pi);

      op.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
      def.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
      tmp.op = Operand(op, v1b);
      tmp.def = Definition(def, v1b);
      tmp.bytes = 1;
      memset(tmp.uses, 1, 1);
      do_swap(ctx, bld, tmp, preserve_scc, pi);
      return;
   }

   for (; offset < copy.bytes;) {
      Definition def;
      Operand op;
      split_copy(offset, &def, &op, copy, true, 8);

      assert(op.regClass() == def.regClass());
      Operand def_as_op = Operand(def.physReg(), def.regClass());
      Definition op_as_def = Definition(op.physReg(), op.regClass());
      if (ctx->program->chip_class >= GFX9 && def.regClass() == v1) {
         bld.vop1(aco_opcode::v_swap_b32, def, op_as_def, op, def_as_op);
         ctx->program->statistics[statistic_copies]++;
      } else if (def.regClass() == v1) {
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, def, op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         ctx->program->statistics[statistic_copies] += 3;
      } else if (op.physReg() == scc || def.physReg() == scc) {
         /* we need to swap scc and another sgpr */
         assert(!preserve_scc);

         PhysReg other = op.physReg() == scc ? def.physReg() : op.physReg();

         bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
         bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1), Operand(0u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1));
         ctx->program->statistics[statistic_copies] += 3;
      } else if (def.regClass() == s1) {
         if (preserve_scc) {
            bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), op);
            bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op);
            bld.sop1(aco_opcode::s_mov_b32, def, Operand(pi->scratch_sgpr, s1));
         } else {
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, def, Definition(scc, s1), op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
         }
         ctx->program->statistics[statistic_copies] += 3;
      } else if (def.regClass() == s2) {
         if (preserve_scc)
            bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
         bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
         bld.sop2(aco_opcode::s_xor_b64, def, Definition(scc, s1), op, def_as_op);
         bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
         if (preserve_scc)
            bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(pi->scratch_sgpr, s1), Operand(0u));
         ctx->program->statistics[statistic_copies] += 3;
      } else if (ctx->program->chip_class >= GFX9 && def.bytes() == 2 && def.physReg().reg() == op.physReg().reg()) {
         aco_ptr<VOP3P_instruction> vop3p{create_instruction<VOP3P_instruction>(aco_opcode::v_pk_add_u16, Format::VOP3P, 2, 1)};
         vop3p->operands[0] = Operand(PhysReg{op.physReg().reg()}, v1);
         vop3p->operands[1] = Operand(0u);
         vop3p->definitions[0] = Definition(PhysReg{op.physReg().reg()}, v1);
         vop3p->opsel_lo = 0x1;
         vop3p->opsel_hi = 0x2;
         bld.insert(std::move(vop3p));
      } else {
         assert(def.regClass().is_subdword());
         bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         bld.vop2_sdwa(aco_opcode::v_xor_b32, def, op, def_as_op);
         bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
         ctx->program->statistics[statistic_copies] += 3;
      }

      offset += def.bytes();
   }

   /* fixup in case we swapped bytes we shouldn't have */
   copy_operation tmp_copy = copy;
   tmp_copy.op.setFixed(copy.def.physReg());
   tmp_copy.def.setFixed(copy.op.physReg());
   do_copy(ctx, bld, tmp_copy, &preserve_scc);
}
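
/* Lowers a set of parallel copies: counts per byte how often every
 * destination is still needed as a source, emits all copies whose
 * destinations are free (the acyclic paths of the location transfer graph),
 * and resolves the remaining cycles with register swaps, redirecting the
 * surviving copies to read from the swapped registers. */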
void handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context* ctx, chip_class chip_class, Pseudo_instruction *pi)
{
   Builder bld(ctx->program, &ctx->instructions);
   aco_ptr<Instruction> mov;
   std::map<PhysReg, copy_operation>::iterator it = copy_map.begin();
   std::map<PhysReg, copy_operation>::iterator target;
   bool writes_scc = false;

   /* count the number of uses for each dst reg */
   while (it != copy_map.end()) {

      if (it->second.def.physReg() == scc)
         writes_scc = true;

      assert(!pi->tmp_in_scc || !(it->second.def.physReg() == pi->scratch_sgpr));

      /* if src and dst reg are the same, remove operation */
      if (it->first == it->second.op.physReg()) {
         it = copy_map.erase(it);
         continue;
      }

      /* split large copies */
      if (it->second.bytes > 8) {
         assert(!it->second.op.isConstant());
         assert(!it->second.def.regClass().is_subdword());
         RegClass rc = RegClass(it->second.def.regClass().type(), it->second.def.size() - 2);
         Definition hi_def = Definition(PhysReg{it->first + 2}, rc);
         rc = RegClass(it->second.op.regClass().type(), it->second.op.size() - 2);
         Operand hi_op = Operand(PhysReg{it->second.op.physReg() + 2}, rc);
         copy_operation copy = {hi_op, hi_def, it->second.bytes - 8};
         copy_map[hi_def.physReg()] = copy;
         assert(it->second.op.physReg().byte() == 0 && it->second.def.physReg().byte() == 0);
         it->second.op = Operand(it->second.op.physReg(), it->second.op.regClass().type() == RegType::sgpr ? s2 : v2);
         it->second.def = Definition(it->second.def.physReg(), it->second.def.regClass().type() == RegType::sgpr ? s2 : v2);
         it->second.bytes = 8;
      }

      /* check if the definition reg is used by another copy operation */
      for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
         if (copy.second.op.isConstant())
            continue;
         for (uint16_t i = 0; i < it->second.bytes; i++) {
            /* distance might underflow */
            unsigned distance = it->first.reg_b + i - copy.second.op.physReg().reg_b;
            if (distance < copy.second.bytes)
               it->second.uses[i] += 1;
         }
      }

      ++it;
   }
   /* first, handle paths in the location transfer graph */
   bool preserve_scc = pi->tmp_in_scc && !writes_scc;
   it = copy_map.begin();
   while (it != copy_map.end()) {

      /* try to coalesce 32-bit sgpr copies to 64-bit copies */
      if (it->second.is_used == 0 &&
          it->second.def.getTemp().type() == RegType::sgpr && it->second.bytes == 4 &&
          !it->second.op.isConstant() && it->first % 2 == it->second.op.physReg() % 2) {

         PhysReg other_def_reg = PhysReg{it->first % 2 ? it->first - 1 : it->first + 1};
         PhysReg other_op_reg = PhysReg{it->first % 2 ? it->second.op.physReg() - 1 : it->second.op.physReg() + 1};
         std::map<PhysReg, copy_operation>::iterator other = copy_map.find(other_def_reg);

         if (other != copy_map.end() && !other->second.is_used && other->second.bytes == 4 &&
             other->second.op.physReg() == other_op_reg && !other->second.op.isConstant()) {
            std::map<PhysReg, copy_operation>::iterator to_erase = it->first % 2 ? it : other;
            it = it->first % 2 ? other : it;
            copy_map.erase(to_erase);
            it->second.bytes = 8;
         }
      }
      // TODO: try to coalesce subdword copies

      /* find portions where the target reg is not used as operand for any other copy */
      if (it->second.is_used) {
         if (it->second.op.isConstant()) {
            /* we have to skip constants until is_used=0 */
            ++it;
            continue;
         }

         unsigned has_zero_use_bytes = 0;
         for (unsigned i = 0; i < it->second.bytes; i++)
            has_zero_use_bytes |= (it->second.uses[i] == 0) << i;

         if (has_zero_use_bytes) {
            /* Skipping partial copying and doing a v_swap_b32 and then fixup
             * copies is usually beneficial for sub-dword copies, but if doing
             * a partial copy allows further copies, it should be done instead. */
            bool partial_copy = (has_zero_use_bytes == 0xf) || (has_zero_use_bytes == 0xf0);
            for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
               if (partial_copy)
                  break;
               for (uint16_t i = 0; i < copy.second.bytes; i++) {
                  /* distance might underflow */
                  unsigned distance = copy.first.reg_b + i - it->second.op.physReg().reg_b;
                  if (distance < it->second.bytes && copy.second.uses[i] == 1 &&
                      !it->second.uses[distance])
                     partial_copy = true;
               }
            }

            if (!partial_copy) {
               ++it;
               continue;
            }
         } else {
            /* full target reg is used: register swapping needed */
            ++it;
            continue;
         }
      }

      bool did_copy = do_copy(ctx, bld, it->second, &preserve_scc);

      std::pair<PhysReg, copy_operation> copy = *it;

      if (it->second.is_used == 0) {
         /* the target reg is not used as operand for any other copy, so we
          * copied to all of it */
         copy_map.erase(it);
         it = copy_map.begin();
      } else {
         /* we only performed some portions of this copy, so split it to only
          * leave the portions that still need to be done */
         copy_operation original = it->second; /* the map insertion below can overwrite this */
         copy_map.erase(it);
         for (unsigned offset = 0; offset < original.bytes;) {
            if (original.uses[offset] == 0) {
               offset++;
               continue;
            }
            Definition def;
            Operand op;
            split_copy(offset, &def, &op, original, false, 8);

            copy_operation copy = {op, def, def.bytes()};
            for (unsigned i = 0; i < copy.bytes; i++)
               copy.uses[i] = original.uses[i + offset];
            copy_map[def.physReg()] = copy;

            offset += def.bytes();
         }

         it = copy_map.begin();
      }

      /* Reduce the number of uses of the operand reg by one. Do this after
       * splitting the copy or removing it in case the copy writes to its own
       * operand (for example, v[7:8] = v[8:9]) */
      if (did_copy && !copy.second.op.isConstant()) {
         for (std::pair<const PhysReg, copy_operation>& other : copy_map) {
            for (uint16_t i = 0; i < other.second.bytes; i++) {
               /* distance might underflow */
               unsigned distance = other.first.reg_b + i - copy.second.op.physReg().reg_b;
               if (distance < copy.second.bytes && !copy.second.uses[distance])
                  other.second.uses[i] -= 1;
            }
         }
      }
   }
   if (copy_map.empty())
      return;

   /* all target regs are needed as operand somewhere which means, all entries are part of a cycle */
   unsigned largest = 0;
   for (const std::pair<PhysReg, copy_operation>& op : copy_map)
      largest = MAX2(largest, op.second.bytes);

   while (!copy_map.empty()) {

      /* Perform larger swaps first, because larger swaps can make other
       * swaps unnecessary. */
      auto it = copy_map.begin();
      for (auto it2 = copy_map.begin(); it2 != copy_map.end(); ++it2) {
         if (it2->second.bytes > it->second.bytes) {
            it = it2;
            if (it->second.bytes == largest)
               break;
         }
      }

      /* should already be done */
      assert(!it->second.op.isConstant());

      assert(it->second.op.isFixed());
      assert(it->second.def.regClass() == it->second.op.regClass());

      if (it->first == it->second.op.physReg()) {
         copy_map.erase(it);
         continue;
      }

      if (preserve_scc && it->second.def.getTemp().type() == RegType::sgpr)
         assert(!(it->second.def.physReg() == pi->scratch_sgpr));

      /* to resolve the cycle, we have to swap the src reg with the dst reg */
      copy_operation swap = it->second;

      /* if this is self-intersecting, we have to split it because
       * self-intersecting swaps don't make sense */
      PhysReg lower = swap.def.physReg();
      PhysReg higher = swap.op.physReg();
      if (lower.reg_b > higher.reg_b)
         std::swap(lower, higher);
      if (higher.reg_b - lower.reg_b < (int)swap.bytes) {
         unsigned offset = higher.reg_b - lower.reg_b;
         RegType type = swap.def.regClass().type();

         copy_operation middle;
         lower.reg_b += offset;
         higher.reg_b += offset;
         middle.bytes = swap.bytes - offset * 2;
         memcpy(middle.uses, swap.uses + offset, middle.bytes);
         middle.op = Operand(lower, RegClass::get(type, middle.bytes));
         middle.def = Definition(higher, RegClass::get(type, middle.bytes));
         copy_map[higher] = middle;

         copy_operation end;
         lower.reg_b += middle.bytes;
         higher.reg_b += middle.bytes;
         end.bytes = swap.bytes - (offset + middle.bytes);
         memcpy(end.uses, swap.uses + offset + middle.bytes, end.bytes);
         end.op = Operand(lower, RegClass::get(type, end.bytes));
         end.def = Definition(higher, RegClass::get(type, end.bytes));
         copy_map[higher] = end;

         memset(swap.uses + offset, 0, swap.bytes - offset);
         swap.bytes = offset;
      }

      do_swap(ctx, bld, swap, preserve_scc, pi);

      /* remove from map */
      copy_map.erase(it);

      /* change the operand reg of the target's uses and split uses if needed */
      target = copy_map.begin();
      uint32_t bytes_left = u_bit_consecutive(0, swap.bytes);
      for (; target != copy_map.end(); ++target) {
         if (target->second.op.physReg() == swap.def.physReg() && swap.bytes == target->second.bytes) {
            target->second.op.setFixed(swap.op.physReg());
            break;
         }

         uint32_t imask = get_intersection_mask(swap.def.physReg().reg_b, swap.bytes,
                                                target->second.op.physReg().reg_b, target->second.bytes);

         if (!imask)
            continue;

         assert(target->second.bytes < swap.bytes);

         int offset = (int)target->second.op.physReg().reg_b - (int)swap.def.physReg().reg_b;

         /* split and update the middle (the portion that reads the swap's
          * definition) to read the swap's operand instead */
         int target_op_end = target->second.op.physReg().reg_b + target->second.bytes;
         int swap_def_end = swap.def.physReg().reg_b + swap.bytes;
         int before_bytes = MAX2(-offset, 0);
         int after_bytes = MAX2(target_op_end - swap_def_end, 0);
         int middle_bytes = target->second.bytes - before_bytes - after_bytes;

         if (after_bytes) {
            unsigned after_offset = before_bytes + middle_bytes;
            assert(after_offset > 0);
            copy_operation copy;
            copy.bytes = after_bytes;
            memcpy(copy.uses, target->second.uses + after_offset, copy.bytes);
            RegClass rc = RegClass::get(target->second.op.regClass().type(), after_bytes);
            copy.op = Operand(target->second.op.physReg().advance(after_offset), rc);
            copy.def = Definition(target->second.def.physReg().advance(after_offset), rc);
            copy_map[copy.def.physReg()] = copy;
         }

         if (middle_bytes) {
            copy_operation copy;
            copy.bytes = middle_bytes;
            memcpy(copy.uses, target->second.uses + before_bytes, copy.bytes);
            RegClass rc = RegClass::get(target->second.op.regClass().type(), middle_bytes);
            copy.op = Operand(swap.op.physReg().advance(MAX2(offset, 0)), rc);
            copy.def = Definition(target->second.def.physReg().advance(before_bytes), rc);
            copy_map[copy.def.physReg()] = copy;
         }

         /* shrink the original copy to the part before the swap */
         if (before_bytes) {
            copy_operation copy;
            target->second.bytes = before_bytes;
            RegClass rc = RegClass::get(target->second.op.regClass().type(), before_bytes);
            target->second.op = Operand(target->second.op.physReg(), rc);
            target->second.def = Definition(target->second.def.physReg(), rc);
            memset(target->second.uses + target->second.bytes, 0, 8 - target->second.bytes);
         }

         /* break early since we know each byte of the swap's definition is used
          * at most once */
         bytes_left &= ~imask;
         if (!bytes_left)
            break;
      }
   }
}
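
/* Pass entry point: replaces the remaining pseudo instructions of every
 * block (vector ops, parallel copies, spills, early exits, branches and
 * reductions) with machine instructions, and emits s_setreg when the float
 * mode changes between blocks. */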
void lower_to_hw_instr(Program* program)
{
   Block *discard_block = NULL;

   for (size_t i = 0; i < program->blocks.size(); i++)
   {
      Block *block = &program->blocks[i];
      lower_context ctx;
      ctx.program = program;
      Builder bld(program, &ctx.instructions);

      bool set_mode = i == 0 && block->fp_mode.val != program->config->float_mode;
      for (unsigned pred : block->linear_preds) {
         if (program->blocks[pred].fp_mode.val != block->fp_mode.val) {
            set_mode = true;
            break;
         }
      }
      if (set_mode) {
         /* only allow changing modes at top-level blocks so this doesn't break
          * the "jump over empty blocks" optimization */
         assert(block->kind & block_kind_top_level);
         uint32_t mode = block->fp_mode.val;
         /* "((size - 1) << 11) | register" (MODE is encoded as register 1) */
         bld.sopk(aco_opcode::s_setreg_imm32_b32, Operand(mode), (7 << 11) | 1);
      }
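
      /* Lower every instruction of the block; instructions that are already
       * machine instructions are moved over unchanged. */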
      for (size_t j = 0; j < block->instructions.size(); j++) {
         aco_ptr<Instruction>& instr = block->instructions[j];
         aco_ptr<Instruction> mov;
         if (instr->format == Format::PSEUDO) {
            Pseudo_instruction *pi = (Pseudo_instruction*)instr.get();

            switch (instr->opcode)
            {
            case aco_opcode::p_extract_vector:
            {
               PhysReg reg = instr->operands[0].physReg();
               Definition& def = instr->definitions[0];
               reg.reg_b += instr->operands[1].constantValue() * def.bytes();

               if (reg == def.physReg())
                  break;

               RegClass op_rc = def.regClass().is_subdword() ? def.regClass() :
                                RegClass(instr->operands[0].getTemp().type(), def.size());
               std::map<PhysReg, copy_operation> copy_operations;
               copy_operations[def.physReg()] = {Operand(reg, op_rc), def, def.bytes()};
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_create_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               PhysReg reg = instr->definitions[0].physReg();

               for (const Operand& op : instr->operands) {
                  if (op.isConstant()) {
                     const Definition def = Definition(reg, RegClass(instr->definitions[0].getTemp().type(), op.size()));
                     copy_operations[reg] = {op, def, op.bytes()};
                     reg.reg_b += op.bytes();
                     continue;
                  }
                  if (op.isUndefined()) {
                     // TODO: coalesce subdword copies if dst byte is 0
                     reg.reg_b += op.bytes();
                     continue;
                  }

                  RegClass rc_def = op.regClass().is_subdword() ? op.regClass() :
                                    RegClass(instr->definitions[0].getTemp().type(), op.size());
                  const Definition def = Definition(reg, rc_def);
                  copy_operations[def.physReg()] = {op, def, op.bytes()};
                  reg.reg_b += op.bytes();
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_split_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               PhysReg reg = instr->operands[0].physReg();

               for (const Definition& def : instr->definitions) {
                  RegClass rc_op = def.regClass().is_subdword() ? def.regClass() :
                                   RegClass(instr->operands[0].getTemp().type(), def.size());
                  const Operand op = Operand(reg, rc_op);
                  copy_operations[def.physReg()] = {op, def, def.bytes()};
                  reg.reg_b += def.bytes();
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_parallelcopy:
            case aco_opcode::p_wqm:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               for (unsigned i = 0; i < instr->operands.size(); i++) {
                  assert(instr->definitions[i].bytes() == instr->operands[i].bytes());
                  copy_operations[instr->definitions[i].physReg()] = {instr->operands[i], instr->definitions[i], instr->operands[i].bytes()};
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_exit_early_if:
            {
               /* don't bother with an early exit near the end of the program */
               if ((block->instructions.size() - 1 - j) <= 4 &&
                    block->instructions.back()->opcode == aco_opcode::s_endpgm) {
                  unsigned null_exp_dest = (ctx.program->stage & hw_fs) ? 9 /* NULL */ : V_008DFC_SQ_EXP_POS;
                  bool ignore_early_exit = true;

                  for (unsigned k = j + 1; k < block->instructions.size(); ++k) {
                     const aco_ptr<Instruction> &instr = block->instructions[k];
                     if (instr->opcode == aco_opcode::s_endpgm ||
                         instr->opcode == aco_opcode::p_logical_end)
                        continue;
                     else if (instr->opcode == aco_opcode::exp &&
                              static_cast<Export_instruction *>(instr.get())->dest == null_exp_dest)
                        continue;
                     else if (instr->opcode == aco_opcode::p_parallelcopy &&
                              instr->definitions[0].isFixed() &&
                              instr->definitions[0].physReg() == exec)
                        continue;

                     ignore_early_exit = false;
                  }

                  if (ignore_early_exit)
                     break;
               }

               if (!discard_block) {
                  discard_block = program->create_and_insert_block();
                  block = &program->blocks[i];

                  bld.reset(discard_block);
                  bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
                          0, V_008DFC_SQ_EXP_NULL, false, true, true);
                  if (program->wb_smem_l1_on_end)
                     bld.smem(aco_opcode::s_dcache_wb);
                  bld.sopp(aco_opcode::s_endpgm);

                  bld.reset(&ctx.instructions);
               }

               //TODO: exec can be zero here with block_kind_discard

               assert(instr->operands[0].physReg() == scc);
               bld.sopp(aco_opcode::s_cbranch_scc0, instr->operands[0], discard_block->index);

               discard_block->linear_preds.push_back(block->index);
               block->linear_succs.push_back(discard_block->index);
               break;
            }
            case aco_opcode::p_spill:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->operands[2].size(); i++)
                  bld.writelane(bld.def(v1, instr->operands[0].physReg()),
                                Operand(PhysReg{instr->operands[2].physReg() + i}, s1),
                                Operand(instr->operands[1].constantValue() + i),
                                instr->operands[0]);
               break;
            }
            case aco_opcode::p_reload:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->definitions[0].size(); i++)
                  bld.readlane(bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                               instr->operands[0],
                               Operand(instr->operands[1].constantValue() + i));
               break;
            }
            case aco_opcode::p_as_uniform:
            {
               if (instr->operands[0].isConstant() || instr->operands[0].regClass().type() == RegType::sgpr) {
                  std::map<PhysReg, copy_operation> copy_operations;
                  copy_operations[instr->definitions[0].physReg()] = {instr->operands[0], instr->definitions[0], instr->definitions[0].bytes()};
                  handle_operands(copy_operations, &ctx, program->chip_class, pi);
               } else {
                  assert(instr->operands[0].regClass().type() == RegType::vgpr);
                  assert(instr->definitions[0].regClass().type() == RegType::sgpr);
                  assert(instr->operands[0].size() == instr->definitions[0].size());
                  for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                     bld.vop1(aco_opcode::v_readfirstlane_b32,
                              bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                              Operand(PhysReg{instr->operands[0].physReg() + i}, v1));
                  }
               }
               break;
            }
            default:
               break;
            }
         } else if (instr->format == Format::PSEUDO_BRANCH) {
            Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(instr.get());
            /* check if all blocks from current to target are empty */
            bool can_remove = block->index < branch->target[0];
            for (unsigned i = block->index + 1; can_remove && i < branch->target[0]; i++) {
               if (program->blocks[i].instructions.size())
                  can_remove = false;
            }
            if (can_remove)
               continue;

            switch (instr->opcode) {
            case aco_opcode::p_branch:
               assert(block->linear_succs[0] == branch->target[0]);
               bld.sopp(aco_opcode::s_branch, branch->target[0]);
               break;
            case aco_opcode::p_cbranch_nz:
               assert(block->linear_succs[1] == branch->target[0]);
               if (branch->operands[0].physReg() == exec)
                  bld.sopp(aco_opcode::s_cbranch_execnz, branch->target[0]);
               else if (branch->operands[0].physReg() == vcc)
                  bld.sopp(aco_opcode::s_cbranch_vccnz, branch->target[0]);
               else {
                  assert(branch->operands[0].physReg() == scc);
                  bld.sopp(aco_opcode::s_cbranch_scc1, branch->target[0]);
               }
               break;
            case aco_opcode::p_cbranch_z:
               assert(block->linear_succs[1] == branch->target[0]);
               if (branch->operands[0].physReg() == exec)
                  bld.sopp(aco_opcode::s_cbranch_execz, branch->target[0]);
               else if (branch->operands[0].physReg() == vcc)
                  bld.sopp(aco_opcode::s_cbranch_vccz, branch->target[0]);
               else {
                  assert(branch->operands[0].physReg() == scc);
                  bld.sopp(aco_opcode::s_cbranch_scc0, branch->target[0]);
               }
               break;
            default:
               unreachable("Unknown Pseudo branch instruction!");
            }
         } else if (instr->format == Format::PSEUDO_REDUCTION) {
            Pseudo_reduction_instruction* reduce = static_cast<Pseudo_reduction_instruction*>(instr.get());
            if (reduce->reduce_op == gfx10_wave64_bpermute) {
               /* Only makes sense on GFX10 wave64 */
               assert(program->chip_class >= GFX10);
               assert(program->info->wave_size == 64);
               assert(instr->definitions[0].regClass() == v1); /* Destination */
               assert(instr->definitions[1].regClass() == s2); /* Temp EXEC */
               assert(instr->definitions[1].physReg() != vcc);
               assert(instr->definitions[2].physReg() == scc); /* SCC clobber */
               assert(instr->operands[0].physReg() == vcc); /* Compare */
               assert(instr->operands[1].regClass() == v2.as_linear()); /* Temp VGPR pair */
               assert(instr->operands[2].regClass() == v1); /* Indices x4 */
               assert(instr->operands[3].regClass() == v1); /* Input data */
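
               /* GFX10's ds_bpermute_b32 can only read lanes within the same
                * half-wave, so the two halves are exchanged through a shared
                * VGPR pair, both the original and the exchanged data are
                * permuted, and the compare mask set up earlier in VCC selects,
                * per lane, the permute that read from the correct half. */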
               PhysReg shared_vgpr_reg_lo = PhysReg(align(program->config->num_vgprs, 4) + 256);
               PhysReg shared_vgpr_reg_hi = PhysReg(shared_vgpr_reg_lo + 1);
               Operand compare = instr->operands[0];
               Operand tmp1(instr->operands[1].physReg(), v1);
               Operand tmp2(PhysReg(instr->operands[1].physReg() + 1), v1);
               Operand index_x4 = instr->operands[2];
               Operand input_data = instr->operands[3];
               Definition shared_vgpr_lo(shared_vgpr_reg_lo, v1);
               Definition shared_vgpr_hi(shared_vgpr_reg_hi, v1);
               Definition def_temp1(tmp1.physReg(), v1);
               Definition def_temp2(tmp2.physReg(), v1);

               /* Save EXEC and set it for all lanes */
               bld.sop1(aco_opcode::s_or_saveexec_b64, instr->definitions[1], instr->definitions[2],
                        Definition(exec, s2), Operand((uint64_t)-1), Operand(exec, s2));

               /* HI: Copy data from high lanes 32-63 to shared vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_hi, input_data, dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);

               /* LO: Copy data from low lanes 0-31 to shared vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_lo, input_data, dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
               /* LO: Copy shared vgpr (high lanes' data) to output vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_hi, v1), dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);

               /* HI: Copy shared vgpr (low lanes' data) to output vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_lo, v1), dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);

               /* Permute the original input */
               bld.ds(aco_opcode::ds_bpermute_b32, def_temp2, index_x4, input_data);
               /* Permute the swapped input */
               bld.ds(aco_opcode::ds_bpermute_b32, def_temp1, index_x4, tmp1);

               /* Restore saved EXEC */
               bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(instr->definitions[1].physReg(), s2));
               /* Choose whether to use the original or swapped */
               bld.vop2(aco_opcode::v_cndmask_b32, instr->definitions[0], tmp1, tmp2, compare);
            } else {
               emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size,
                              reduce->operands[1].physReg(), // tmp
                              reduce->definitions[1].physReg(), // stmp
                              reduce->operands[2].physReg(), // vtmp
                              reduce->definitions[2].physReg(), // sitmp
                              reduce->operands[0], reduce->definitions[0]);
            }
         } else {
            ctx.instructions.emplace_back(std::move(instr));
         }
      }
      block->instructions.swap(ctx.instructions);
   }
}

}